Compare commits
363 Commits
chart/v8.6
...
master
Author | SHA1 | Date |
---|---|---|
|
9081324e6b | |
|
cc19ca0746 | |
|
db5d38e826 | |
|
a8e8f4aacd | |
|
e6fa8e56e3 | |
|
4f12337556 | |
|
dd98ecd66a | |
|
c815e182b2 | |
|
96a49735a8 | |
|
d25610acbe | |
|
0f94681cfb | |
|
5d1dea8ba8 | |
|
7f3c6a8868 | |
|
214320b2e4 | |
|
433ac459a0 | |
|
79170dbc4a | |
|
2f95c68bf1 | |
|
da250b7cc7 | |
|
08d78948ac | |
|
d14c84c690 | |
|
2ae71716cc | |
|
6c161bd268 | |
|
9b6894249f | |
|
cffdd53f8e | |
|
ccdc52db1d | |
|
600dcaf4b8 | |
|
def2e22bc2 | |
|
1c5819bce5 | |
|
0e1a1d1d9d | |
|
c8f597d7ce | |
|
faf6f7a057 | |
|
672c554e16 | |
|
f0afe10599 | |
|
120314d95b | |
|
368ca59863 | |
|
79917553b1 | |
|
a4933625e8 | |
|
e053f3b716 | |
|
1f8b0cb718 | |
|
2e92aa8a21 | |
|
d784486390 | |
|
c2cd9ddfc6 | |
|
20e3f63e7c | |
|
f30725562c | |
|
f8829ee1d8 | |
|
ab7b539a47 | |
|
b1fb2d6bfb | |
|
46ab826f03 | |
|
88712774f4 | |
|
c5e28b8fbe | |
|
2b124a957d | |
|
d9f94770a3 | |
|
d0e902dceb | |
|
95495a54c2 | |
|
c9a542be33 | |
|
233307cb95 | |
|
a47f7482e1 | |
|
33a9ce8c80 | |
|
c55dad2cf3 | |
|
dfcf01298f | |
|
8e7ee3b7cf | |
|
9f6c9b536e | |
|
02e88319b7 | |
|
f853d23884 | |
|
ac2dc64c66 | |
|
010ac84078 | |
|
30c8adca06 | |
|
4734dff37d | |
|
0bca8b0352 | |
|
b7929aed97 | |
|
3ab7632e37 | |
|
778869f935 | |
|
680e3dc3d0 | |
|
eb817604ec | |
|
4103bc151d | |
|
70cfbfb935 | |
|
5d5eaa0677 | |
|
6521f85a7e | |
|
642c78428c | |
|
d04280e3a8 | |
|
e8ca65046a | |
|
5f7ac30080 | |
|
2947049e06 | |
|
16c971e3d3 | |
|
1d56ae0965 | |
|
1c96a9df06 | |
|
8b343d5989 | |
|
8d75fd48bc | |
|
9292c47470 | |
|
2d36cf21e0 | |
|
084034461e | |
|
99256f1e12 | |
|
3b18d9a636 | |
|
ab531b7a2b | |
|
f88f6e0e45 | |
|
7b7d39e261 | |
|
75b4a5e445 | |
|
31dd587529 | |
|
bd0dbefdb7 | |
|
0fee7f95bd | |
|
f772a0c332 | |
|
76985900e5 | |
|
d781ecfb9e | |
|
a6afd065b6 | |
|
7a2e1b6d67 | |
|
2bf8880e95 | |
|
2c21e72146 | |
|
799c9a5a39 | |
|
53a1631519 | |
|
086914f235 | |
|
42eab8ab69 | |
|
f621c33a4a | |
|
56a235c1a1 | |
|
3de3ff793e | |
|
3b61f68cdc | |
|
ed2aef94f3 | |
|
d8831a40de | |
|
8239684191 | |
|
1026db35ad | |
|
f9e27c94a8 | |
|
c8edb16264 | |
|
628aba93e5 | |
|
c57fc34b3b | |
|
c543058d66 | |
|
90d7afef29 | |
|
14acd8e8c5 | |
|
72bd2c7ae7 | |
|
2a12d65a44 | |
|
cade34b2d9 | |
|
b43c95bd6d | |
|
19a15543c0 | |
|
3ce2ba28c1 | |
|
c11896ba67 | |
|
2cac062f7f | |
|
5a7c9b90c5 | |
|
f76e5d4642 | |
|
43aab8470d | |
|
ee7c77ad01 | |
|
a289c618aa | |
|
819628d48d | |
|
982be874e1 | |
|
d04b904ab6 | |
|
bbfcd578ab | |
|
25e6949fae | |
|
e3944b055c | |
|
0ebded51d0 | |
|
be9403d528 | |
|
b2a901df2c | |
|
deee4153d1 | |
|
2c9cfaa933 | |
|
71b981eeb9 | |
|
85bba5d86f | |
|
7b7be436c5 | |
|
cc9502a511 | |
|
644eba62f2 | |
|
65f43d1fe5 | |
|
f89c3595ed | |
|
576ff7de02 | |
|
512c71f313 | |
|
63222e20cf | |
|
a56bfce8b7 | |
|
229abf0750 | |
|
ec880db9fe | |
|
cf7ad46a89 | |
|
a5a4c688b1 | |
|
66e86429bc | |
|
81de0a26fc | |
|
6b6b628ab9 | |
|
999f292b84 | |
|
0341aa7adb | |
|
ebea5f5a55 | |
|
f4c0535e10 | |
|
3d01d338a5 | |
|
6dc259f448 | |
|
3164b8bf61 | |
|
36b3d213ca | |
|
ccbf179736 | |
|
1eccac57d5 | |
|
ac662313a1 | |
|
7fc11933e4 | |
|
1cd8650cb9 | |
|
d94b4c21a2 | |
|
cac12065fb | |
|
90c31adb0e | |
|
27adba6eab | |
|
9e1ea1c9aa | |
|
8b7723c0a9 | |
|
47c72fa0f7 | |
|
7e23a9b924 | |
|
c5fc4a7ce4 | |
|
ac19ea6609 | |
|
28ff281739 | |
|
c316a95e2a | |
|
9209418d67 | |
|
fe02667366 | |
|
0a620e8abb | |
|
2b55a41868 | |
|
bc148a7620 | |
|
650418f364 | |
|
2820aafdf7 | |
|
89b318f94a | |
|
e7f36a6555 | |
|
57344cd774 | |
|
767c429dc3 | |
|
179ee90179 | |
|
59f9c9185c | |
|
10171ad4ca | |
|
9fe6be4d95 | |
|
12418ed2ff | |
|
ccaf980639 | |
|
ba9e399058 | |
|
772ce1e044 | |
|
5d07d29382 | |
|
4d936bfcdd | |
|
68e7e3ce88 | |
|
4c8cc88f38 | |
|
06a0d7b45c | |
|
2513ae1c13 | |
|
f937f08d63 | |
|
e4d0b43df7 | |
|
4a8a0132dd | |
|
78ee2df1fc | |
|
89228f169a | |
|
585922f950 | |
|
40ad84ce0a | |
|
da9b271c36 | |
|
3d5635fc99 | |
|
dc39f11a17 | |
|
f3ab7183df | |
|
7f56afa587 | |
|
d6a828895a | |
|
3155b60e8a | |
|
9593640f2c | |
|
2a88d45bb6 | |
|
f17e1898c4 | |
|
5dd181f03e | |
|
e9014c6493 | |
|
56ec74de75 | |
|
b987a5c2d3 | |
|
f6a6ac57df | |
|
e5848545c4 | |
|
1ac7b74f29 | |
|
a28ef97c45 | |
|
cb6eea73cb | |
|
172ad72ab4 | |
|
330582b9a4 | |
|
3099436e2d | |
|
30a6ddd292 | |
|
03a32471a3 | |
|
d740937f36 | |
|
a7fc5d9343 | |
|
faf29b6012 | |
|
a666525df7 | |
|
b965751835 | |
|
5c5325ffeb | |
|
60867c4329 | |
|
fb7455dd68 | |
|
adb17e3894 | |
|
74fe4ed608 | |
|
934711a92a | |
|
114e2a3bc7 | |
|
eef86a4afe | |
|
31734e52ef | |
|
40b9d4690d | |
|
cd26813101 | |
|
8489c4067f | |
|
3950d5962e | |
|
f2b7a68586 | |
|
87747d5ad7 | |
|
5107d72b37 | |
|
4008635f93 | |
|
6ab7c1a40d | |
|
a2e71ecc0e | |
|
a13b6cc131 | |
|
d0cb1f7f2d | |
|
658f22ea83 | |
|
04bd2b25bb | |
|
ead6531e22 | |
|
f2299728d9 | |
|
f07f87ad0d | |
|
91b41ffda8 | |
|
2552454023 | |
|
3a61acc77b | |
|
11534472d7 | |
|
aeb71dfced | |
|
f1c7c67d6c | |
|
cc7ca53fbc | |
|
910b0573fb | |
|
dc35488c10 | |
|
9b16792ec0 | |
|
397aadef61 | |
|
4ee26bca64 | |
|
5698251d8f | |
|
1429b4a166 | |
|
671cbe915c | |
|
239dc73f8f | |
|
d48da3019d | |
|
f1e9138549 | |
|
674cec267f | |
|
e135c08387 | |
|
6d065220c0 | |
|
5ddb009343 | |
|
88a5a286fa | |
|
7631b1a55f | |
|
c28d9fa4fc | |
|
998453d2b1 | |
|
55aaa7187a | |
|
d50e69b8e9 | |
|
f7ae900603 | |
|
cc2d8c05fd | |
|
d4ce1e5db7 | |
|
969d44e6ae | |
|
faf305224d | |
|
2deda8724f | |
|
778688a74e | |
|
bfefad2bee | |
|
9db6319897 | |
|
561d0f9667 | |
|
19f9e0069e | |
|
3151e99ec8 | |
|
5d7c00b916 | |
|
51fdee2e28 | |
|
0c5052fcf2 | |
|
34d6008f6f | |
|
e08ee1bfa5 | |
|
390332c114 | |
|
c03d0107e7 | |
|
3ecb1972de | |
|
b32fd681c4 | |
|
b25ba6428e | |
|
1eb324b3ef | |
|
e44c54eed3 | |
|
db9c2fcddc | |
|
d21b3e5cc5 | |
|
b27207eaa1 | |
|
43b612222d | |
|
77af561057 | |
|
350c65708e | |
|
1fc1d04b43 | |
|
b7265c260f | |
|
bc5904f1fe | |
|
04b3841e03 | |
|
7ec36acae5 | |
|
a08532d766 | |
|
c65e8b6109 | |
|
af6ed4bf64 | |
|
70008af3e9 | |
|
5e7b7a6115 | |
|
7afe60257b | |
|
0d8ad37f35 | |
|
35913cd9b9 | |
|
0768ff846f | |
|
eacbb1a300 | |
|
ba882cdf38 | |
|
1854ffdf85 | |
|
6595ae997c | |
|
c62be2cd83 | |
|
16b1173f1b | |
|
7574f1f890 | |
|
3b1036d5b7 | |
|
232a480530 | |
|
9bb6a876c4 | |
|
9b19425da2 |
|
@ -8,14 +8,15 @@ This document is intended for developers looking to contribute to the Emissary-i
|
|||
|
||||
> Looking for end user guides for Emissary-ingress? You can check out the end user guides at <https://www.getambassador.io/docs/emissary/>.
|
||||
|
||||
After reading this document if you have questions we encourage you to join us on our [Slack channel](https://d6e.co/slack) in the [#emissary-dev](https://datawire-oss.slack.com/archives/CB46TNG83) channel.
|
||||
After reading this document if you have questions we encourage you to join us on our [Slack channel](https://communityinviter.com/apps/cloud-native/cncf) in the #emissary-ingress channel.
|
||||
|
||||
- [Code of Conduct](../Community/CODE_OF_CONDUCT.md)
|
||||
- [Governance](../Community/GOVERNANCE.md)
|
||||
- [Maintainers](../Community/MAINTAINERS.md)
|
||||
|
||||
**Table of Contents**
|
||||
## Table of Contents
|
||||
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Development Setup](#development-setup)
|
||||
- [Step 1: Install Build Dependencies](#step-1-install-build-dependencies)
|
||||
- [Step 2: Clone Project](#step-2-clone-project)
|
||||
|
@ -47,16 +48,16 @@ After reading this document if you have questions we encourage you to join us on
|
|||
- [Shutting up the pod labels error](#shutting-up-the-pod-labels-error)
|
||||
- [Extra credit](#extra-credit)
|
||||
- [Debugging and Developing Envoy Configuration](#debugging-and-developing-envoy-configuration)
|
||||
- [Mockery](#mockery)
|
||||
- [Ambassador Dump](#ambassador-dump)
|
||||
- [Making changes to Envoy](#making-changes-to-envoy)
|
||||
- [1. Preparing your machine](#1-preparing-your-machine)
|
||||
- [2. Setting up your workspace to hack on Envoy](#2-setting-up-your-workspace-to-hack-on-envoy)
|
||||
- [3. Hacking on Envoy](#3-hacking-on-envoy)
|
||||
- [4. Building and testing your hacked-up Envoy](#4-building-and-testing-your-hacked-up-envoy)
|
||||
- [5. Finalizing your changes](#5-finalizing-your-changes)
|
||||
- [6. Checklist for landing the changes](#6-checklist-for-landing-the-changes)
|
||||
- [Developing Emissary-ingress (Ambassador Labs -only advice)](#developing-emissary-ingress-ambassador-labs--only-advice)
|
||||
- [5. Test Devloop](#5-test-devloop)
|
||||
- [6. Protobuf changes](#6-protobuf-changes)
|
||||
- [7. Finalizing your changes](#7-finalizing-your-changes)
|
||||
- [8. Final Checklist](#8-final-checklist)
|
||||
- [Developing Emissary-ingress (Maintainers-only advice)](#developing-emissary-ingress-maintainers-only-advice)
|
||||
- [Updating license documentation](#updating-license-documentation)
|
||||
- [Upgrading Python dependencies](#upgrading-python-dependencies)
|
||||
- [FAQ](#faq)
|
||||
|
@ -70,9 +71,7 @@ After reading this document if you have questions we encourage you to join us on
|
|||
- [My editor is changing `go.mod` or `go.sum`, should I commit that?](#my-editor-is-changing-gomod-or-gosum-should-i-commit-that)
|
||||
- [How do I debug "This should not happen in CI" errors?](#how-do-i-debug-this-should-not-happen-in-ci-errors)
|
||||
- [How do I run Emissary-ingress tests?](#how-do-i-run-emissary-ingress-tests)
|
||||
- [How do I update the python test cache?](#how-do-i-update-the-python-test-cache)
|
||||
- [How do I type check my python code?](#how-do-i-type-check-my-python-code)
|
||||
- [How do I get the source code for a release?](#how-do-i-get-the-source-code-for-a-release)
|
||||
|
||||
## Development Setup
|
||||
|
||||
|
@ -535,10 +534,7 @@ You should now be able to launch ambassador if you set the
|
|||
#### Getting envoy
|
||||
|
||||
If you do not have envoy in your path already, the entrypoint will use
|
||||
docker to run it. At the moment this is untested for macs which probably
|
||||
means it is broken since localhost communication does not work by
|
||||
default on macs. This can be made to work as soon an intrepid volunteer
|
||||
with a mac reaches out to me (rhs@datawire.io).
|
||||
docker to run it.
|
||||
|
||||
#### Shutting up the pod labels error
|
||||
|
||||
|
@ -569,108 +565,6 @@ we need to push both the code and any relevant kubernetes resources
|
|||
into the cluster. The following sections will provide tips for improving
|
||||
this development experience.
|
||||
|
||||
#### Mockery
|
||||
|
||||
Fortunately we have the `mockery` tool which lets us run the compiler
|
||||
code directly on kubernetes resources without having to push that code
|
||||
or the relevant kubernetes resources into the cluster. This is the
|
||||
fastest way to hack on and debug the compiler.
|
||||
|
||||
The `mockery` tool runs inside the Docker container used to build
|
||||
Ambassador, using `make shell`, so it's important to realize that it
|
||||
won't have access to your entire filesystem. There are two easy ways
|
||||
to arrange to get data in and out of the container:
|
||||
|
||||
1. If you `make sync`, everything in the Ambassador source tree gets rsync'd
|
||||
into the container's `/buildroot/ambassador`. The first time you start the
|
||||
shell, this can take a bit, but after that it's pretty fast. You'll
|
||||
probably need to use `docker cp` to get data out of the container, though.
|
||||
|
||||
2. You may be able to use Docker volume mounts by exporting `BUILDER_MOUNTS`
|
||||
with the appropriate `-v` switches before running `make shell` -- e.g.
|
||||
|
||||
```bash
|
||||
export BUILDER_MOUNTS=$(pwd)/xfer:/xfer
|
||||
make shell
|
||||
```
|
||||
|
||||
will cause the dev shell to mount `xfer` in your current directory as `/xfer`.
|
||||
This is known to work well on MacOS (though volume mounts are slow on Mac,
|
||||
so moving gigabytes of data around this way isn't ideal).
|
||||
|
||||
Once you've sorted out how to move data around:
|
||||
|
||||
1. Put together a set of Ambassador configuration CRDs in a file that's somewhere
|
||||
that you'll be able to get them into the builder container. The easy way to do
|
||||
this is to use the files you'd feed to `kubectl apply`; they should be actual
|
||||
Kubernetes objects with `metadata` and `spec` sections, etc. (If you want to
|
||||
use annotations, that's OK too, just put the whole `Service` object in there.)
|
||||
|
||||
2. Run `make compile shell` to build everything and start the dev shell.
|
||||
|
||||
3. From inside the build shell, run
|
||||
|
||||
```bash
|
||||
mockery $path_to_your_file
|
||||
```
|
||||
|
||||
If you're using a non-default `ambassador_id` you need to provide it in the
|
||||
environment:
|
||||
|
||||
```bash
|
||||
AMBASSADOR_ID=whatever mockery $path_to_your_file
|
||||
```
|
||||
|
||||
Finally, if you're trying to mimic `KAT`, copy the `/tmp/k8s-AmbassadorTest.yaml`
|
||||
file from a KAT run to use as input, then
|
||||
|
||||
```bash
|
||||
mockery --kat $kat_test_name $path_to_k8s_AmbassadorTest.yaml
|
||||
```
|
||||
|
||||
where `$kat_test_name` is the class name of a `KAT` test class, like `LuaTest` or
|
||||
`TLSContextTest`.
|
||||
|
||||
4. Once it's done, `/tmp/ambassador/snapshots` will have all the output from the
|
||||
compiler phase of Ambassador.
|
||||
|
||||
The point of `mockery` is that it mimics the configuration cycle of real Ambassador,
|
||||
without relying at all on a Kubernetes cluster. This means that you can easily and
|
||||
quickly take a Kubernetes input and look at the generated Envoy configuration without
|
||||
any other infrastructure.
|
||||
|
||||
#### Ambassador Dump
|
||||
|
||||
The `ambassador dump` tool is also useful for debugging and hacking on
|
||||
the compiler. After running `make shell`, you'll also be able to use
|
||||
the `ambassador` CLI, which can export the most import data structures
|
||||
that Ambassador works with as JSON. It works from an input which can
|
||||
be either a single file or a directory full of files in the following
|
||||
formats:
|
||||
|
||||
- raw Ambassador resources like you'll find in the `demo/config` directory; or
|
||||
- an annotated Kubernetes resources like you'll find in `/tmp/k8s-AmbassadorTest.yaml` after running `make test`; or
|
||||
- a `watt` snapshot like you'll find in the `$AMBASSADOR_CONFIG_BASE_DIR/snapshots/snapshot.yaml` (which is a JSON file, I know, it's misnamed).
|
||||
|
||||
Given an input source, running
|
||||
|
||||
```bash
|
||||
ambassador dump --ir --xds [$input_flags] $input > test.json
|
||||
```
|
||||
|
||||
will dump the Ambassador IR and v2 Envoy configuration into `test.json`. Here
|
||||
`$input_flags` will be
|
||||
|
||||
- nothing for raw Ambassador resources;
|
||||
- `--k8s` for Kubernetes resources; or
|
||||
- `--watt` for a `watt` snapshot.
|
||||
|
||||
You can get more information with
|
||||
|
||||
```bash
|
||||
ambassador dump --help
|
||||
```
|
||||
|
||||
### Making changes to Envoy
|
||||
|
||||
Emissary-ingress is built on top of Envoy and leverages a vendored version of Envoy (*we track upstream very closely*). This section will go into how to make changes to the Envoy that is packaged with Emissary-ingress.
|
||||
|
@ -712,30 +606,7 @@ and tests on a RAM disk (see the `/etc/fstab` line above).
|
|||
make $PWD/_cxx/envoy
|
||||
git -C _cxx/envoy checkout -b YOUR_BRANCHNAME
|
||||
```
|
||||
|
||||
2. Tell the build system that, yes, you really would like to be
|
||||
compiling envoy, as you'll be modifying Envoy:
|
||||
|
||||
```shell
|
||||
export YES_I_AM_OK_WITH_COMPILING_ENVOY=true
|
||||
export ENVOY_COMMIT='-'
|
||||
```
|
||||
|
||||
Building Envoy is slow, and most Emissary-ingress contributors do not
|
||||
want to rebuild Envoy, so we require the first two environment
|
||||
variables as a safety.
|
||||
|
||||
Setting `ENVOY_COMMIT=-` does 3 things:
|
||||
1. Tell it to use whatever is currently checked out in
|
||||
`./_cxx/envoy/` (instead of checking out a specific commit), so
|
||||
that you are free to modify those sources.
|
||||
2. Don't try to download a cached build of Envoy from a Docker
|
||||
cache (since it wouldn't know which `ENVOY_COMMIT` to download
|
||||
the cached build for).
|
||||
3. Don't push the build of Envoy to a Docker cache (since you're
|
||||
still actively working on it).
|
||||
|
||||
3. To build Envoy in FIPS mode, set the following variable:
|
||||
2. To build Envoy in FIPS mode, set the following variable:
|
||||
|
||||
```shell
|
||||
export FIPS_MODE=true
|
||||
|
@ -746,54 +617,64 @@ and tests on a RAM disk (see the `/etc/fstab` line above).
|
|||
Emissary does not claim to be FIPS compliant or certified.
|
||||
See [here](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ssl#fips-140-2) for more information on FIPS and Envoy.
|
||||
|
||||
> _NOTE:_ FIPS_MODE is NOT supported by the emissary-ingress maintainers, but we provide this for developers as a convenience
|
||||
|
||||
#### 3. Hacking on Envoy
|
||||
|
||||
Modify the sources in `./_cxx/envoy/`.
|
||||
Modify the sources in `./_cxx/envoy/`, or update the branch and/or `ENVOY_COMMIT` as necessary in `./_cxx/envoy.mk`
|
||||
|
||||
#### 4. Building and testing your hacked-up Envoy
|
||||
|
||||
- **Build Envoy** with `make update-base`. Again, this is *not* a
|
||||
quick process. The build happens in a Docker container; you can
|
||||
set `DOCKER_HOST` to point to a powerful machine if you like.
|
||||
> See `./_cxx/envoy.mk` for the full list of targets.
|
||||
|
||||
- **Test Envoy** and run with Envoy's test suite (which we don't run
|
||||
during normal Ambassador development) by running `make check-envoy`.
|
||||
Be warned that Envoy's full **test suite** requires several hundred
|
||||
gigabytes of disk space to run.
|
||||
Multiple Phony targets are provided so that developers can run the steps they are interested in when developing, here are few of the key ones:
|
||||
|
||||
Inner dev-loop steps:
|
||||
- `make update-base`: will perform all the steps necessary to verify, build envoy, build docker images, push images to the container repository and compile the updated protos.
|
||||
|
||||
- To run just specific tests, instead of the whole test suite, set
|
||||
the `ENVOY_TEST_LABEL` environment variable. For example, to run
|
||||
just the unit tests in
|
||||
`test/common/network/listener_impl_test.cc`, you should run
|
||||
- `make build-envoy`: will build the envoy binaries using the same build container as the upstream Envoy project. Build outputs are mounted to the `_cxx/envoy-docker-build` directory and Bazel will write the results there.
|
||||
|
||||
```shell
|
||||
ENVOY_TEST_LABEL='//test/common/network:listener_impl_test' make check-envoy
|
||||
```
|
||||
- `make build-base-envoy-image`: will use the release outputs from building envoy to generate a new `base-envoy` container which is then used in the main emissary-ingress container build.
|
||||
|
||||
- You can run `make envoy-shell` to get a Bash shell in the Docker
|
||||
container that does the Envoy builds.
|
||||
- `make push-base-envoy`: will push the built container to the remote container repository.
|
||||
|
||||
Interpreting the test results:
|
||||
- `make check-envoy`: will use the build docker container to run the Envoy test suite against the currently checked out envoy in the `_cxx/envoy` folder.
|
||||
|
||||
- If you see the following message, don't worry, it's harmless; the
|
||||
tests still ran:
|
||||
- `make envoy-shell`: will run the envoy build container and open a bash shell session. The `_cxx/envoy` folder is volume mounted into the container and the user is set to the `envoybuild` user in the container to ensure you are not running as root to ensure hermetic builds.
|
||||
|
||||
```text
|
||||
There were tests whose specified size is too big. Use the --test_verbose_timeout_warnings command line option to see which ones these are.
|
||||
```
|
||||
#### 5. Test Devloop
|
||||
|
||||
The message means that the test passed, but it passed too
|
||||
quickly, and Bazel is suggesting that you declare it as smaller.
|
||||
Something along the lines of "This test only took 2s, but you
|
||||
declared it as being in the 60s-300s ('moderate') bucket,
|
||||
consider declaring it as being in the 0s-60s ('short')
|
||||
bucket".
|
||||
Running the Envoy test suite will compile all the test targets. This is a slow process and can use lots of disk space.
|
||||
|
||||
Don't be confused (as I was) into thinking that it was saying
|
||||
that the test was too big and was skipped and that you need to
|
||||
throw more hardware at it.
|
||||
The Envoy Inner Devloop for build and testing:
|
||||
|
||||
- You can make a change to Envoy code and run the whole test by just calling `make check-envoy`
|
||||
- You can run a specific test instead of the whole test suite by setting the `ENVOY_TEST_LABEL` environment variable.
|
||||
- For example, to run just the unit tests in `test/common/network/listener_impl_test.cc`, you should run:
|
||||
|
||||
```shell
|
||||
ENVOY_TEST_LABEL='//test/common/network:listener_impl_test' make check-envoy
|
||||
```
|
||||
|
||||
- Alternatively, you can run `make envoy-shell` to get a bash shell into the Docker container that does the Envoy builds and you are free to interact with `Bazel` directly.
|
||||
|
||||
Interpreting the test results:
|
||||
|
||||
- If you see the following message, don't worry, it's harmless; the tests still ran:
|
||||
|
||||
```text
|
||||
There were tests whose specified size is too big. Use the --test_verbose_timeout_warnings command line option to see which ones these are.
|
||||
```
|
||||
|
||||
The message means that the test passed, but it passed too
|
||||
quickly, and Bazel is suggesting that you declare it as smaller.
|
||||
Something along the lines of "This test only took 2s, but you
|
||||
declared it as being in the 60s-300s ('moderate') bucket,
|
||||
consider declaring it as being in the 0s-60s ('short')
|
||||
bucket".
|
||||
|
||||
Don't be confused (as I was) into thinking that it was saying
|
||||
that the test was too big and was skipped and that you need to
|
||||
throw more hardware at it.
|
||||
|
||||
- **Build or test Emissary-ingress** with the usual `make` commands, with
|
||||
the exception that you MUST run `make update-base` first whenever
|
||||
|
@ -802,86 +683,68 @@ Modify the sources in `./_cxx/envoy/`.
|
|||
`make update-base && make test`, and `make images` to just build
|
||||
Emissary-ingress would become `make update-base && make images`.
|
||||
|
||||
#### 5. Finalizing your changes
|
||||
The Envoy changes with Emissary-ingress:
|
||||
|
||||
Once you're happy with your changes to Envoy:
|
||||
- Either run `make update-base` to build, and push a new base container and then you can run `make test` for the Emissary-ingress test suite.
|
||||
- If you do not want to push the container you can instead:
|
||||
- Build Envoy - `make build-envoy`
|
||||
- Build container - `make build-base-envoy-image`
|
||||
- Test Emissary - `make test`
|
||||
|
||||
1. Ensure they're committed to `_cxx/envoy/` and push/PR them into
|
||||
<https://github.com/datawire/envoy> branch `rebase/master`.
|
||||
#### 6. Protobuf changes
|
||||
|
||||
If you're outside of Ambassador Labs, you'll need to
|
||||
a. Create a fork of <https://github.com/datawire/envoy> on the
|
||||
GitHub web interface
|
||||
b. Add it as a remote to your `./_cxx/envoy/`:
|
||||
`git remote add my-fork git@github.com:YOUR_USERNAME/envoy.git`
|
||||
c. Push the branch to that fork:
|
||||
`git push my-fork YOUR_BRANCHNAME`
|
||||
If you made any changes to the Protocol Buffer files or if you bumped versions of Envoy then you
|
||||
should make sure that you are re-compiling the Protobufs so that they are available and checked-in
|
||||
to the emissary.git repository.
|
||||
|
||||
2. Update `ENVOY_COMMIT` in `_cxx/envoy.mk`
|
||||
```sh
|
||||
make compile-envoy-protos
|
||||
```
|
||||
|
||||
3. Unset `ENVOY_COMMIT=-` and run a final `make update-base` to
|
||||
push a cached build:
|
||||
This will copy over the raw proto files, then compile and copy the generated Go code over to the emissary-ingress repository.
|
||||
|
||||
```shell
|
||||
export YES_I_AM_OK_WITH_COMPILING_ENVOY=true
|
||||
unset ENVOY_COMMIT
|
||||
make update-base
|
||||
```
|
||||
#### 7. Finalizing your changes
|
||||
|
||||
The image will be pushed to `$ENVOY_DOCKER_REPO`, by default
|
||||
`ENVOY_DOCKER_REPO=docker.io/datawire/ambassador-base`; if you're
|
||||
outside of Ambassador Labs, you can skip this step if you don't want to
|
||||
share your Envoy binary anywhere. If you don't skip this step,
|
||||
you'll need to `export
|
||||
ENVOY_DOCKER_REPO=${your-envoy-docker-registry}` to tell it to push
|
||||
somewhere other than Datawire's registry.
|
||||
> NOTE: we are no longer accepting PR's in `datawire/envoy.git`.
|
||||
|
||||
If you're at Ambassador Labs, you'll then want to make sure that the image
|
||||
is also pushed to the backup container registries:
|
||||
If you have custom changes then land them in your custom envoy repository and update the `ENVOY_COMMIT` and `ENVOY_DOCKER_REPO` variable in `_cxx/envoy.mk` so that the image will be pushed to the correct repository.
|
||||
|
||||
```shell
|
||||
# upload image to the mirror in GCR
|
||||
SHA=GET_THIS_FROM_THE_make_update-base_OUTPUT
|
||||
TAG="envoy-0.$SHA.opt"
|
||||
FULL_TAG="envoy-full-0.$SHA.opt"
|
||||
docker pull "docker.io/emissaryingress/base-envoy:envoy-0.$TAG.opt"
|
||||
docker tag "docker.io/emissaryingress/base-envoy:$TAG" "gcr.io/datawire/ambassador-base:$TAG"
|
||||
docker push "gcr.io/datawire/ambassador-base:$TAG"
|
||||
Then run `make update-base`, which performs all of these steps; if it completes successfully, you are all set.
|
||||
|
||||
## repeat for the "FULL" version which has debug symbols enabled for envoy. It is large (GB's) big.
|
||||
TAG=envoy-full-0.386367b8c99f843fbc2a42a38fe625fce480de19.opt
|
||||
docker pull "docker.io/emissaryingress/base-envoy:$FULL_TAG"
|
||||
docker tag "docker.io/emissaryingress/base-envoy:$FULL_TAG" "gcr.io/datawire/ambassador-base:$FULL_TAG"
|
||||
docker push "gcr.io/datawire/ambassador-base:$FULL_TAG"
|
||||
```
|
||||
**For maintainers:**
|
||||
|
||||
If you're outside of Ambassador Labs, you can skip this step if you
|
||||
don't want to share your Envoy binary anywhere. If you don't
|
||||
skip this step, you'll need to `export
|
||||
ENVOY_DOCKER_REPO=${your-envoy-docker-registry}` to tell it to
|
||||
push somewhere other than Datawire's registry.
|
||||
You will want to make sure that the image is pushed to the backup container registries:
|
||||
|
||||
4. Push and PR the `envoy.mk` `ENVOY_COMMIT` change to
|
||||
<https://github.com/emissary-ingress/emissary>.
|
||||
```shell
|
||||
# upload image to the mirror in GCR
|
||||
SHA=GET_THIS_FROM_THE_make_update-base_OUTPUT
|
||||
TAG="envoy-0.$SHA.opt"
|
||||
docker pull "docker.io/emissaryingress/base-envoy:envoy-0.$TAG.opt"
|
||||
docker tag "docker.io/emissaryingress/base-envoy:$TAG" "gcr.io/datawire/ambassador-base:$TAG"
|
||||
docker push "gcr.io/datawire/ambassador-base:$TAG"
|
||||
```
|
||||
|
||||
#### 6. Checklist for landing the changes
|
||||
#### 8. Final Checklist
|
||||
|
||||
I'd put this in the pull request template, but so few PRs change Envoy...
|
||||
**For Maintainers Only**
|
||||
|
||||
Here is a checklist of things to do when bumping the `base-envoy` version:
|
||||
|
||||
- [ ] The image has been pushed to...
|
||||
- [ ] `docker.io/emissaryingress/base-envoy`
|
||||
- [ ] `gcr.io/datawire/ambassador-base`
|
||||
- [ ] The envoy.git commit has been tagged as `datawire-$(gitdescribe --tags --match='v*')`
|
||||
- [ ] The `datawire/envoy.git` commit has been tagged as `datawire-$(git describe --tags --match='v*')`
|
||||
(the `--match` is to prevent `datawire-*` tags from stacking on each other).
|
||||
- [ ] It's been tested with...
|
||||
- [ ] `make check-envoy`
|
||||
|
||||
The `check-envoy-version` CI job should check all of those things,
|
||||
except for `make check-envoy`.
|
||||
The `check-envoy-version` CI job will double check all these things, with the exception of running
|
||||
the Envoy tests. If the `check-envoy-version` is failing then double check the above, fix them and
|
||||
re-run the job.
|
||||
|
||||
### Developing Emissary-ingress (Ambassador Labs -only advice)
|
||||
### Developing Emissary-ingress (Maintainers-only advice)
|
||||
|
||||
At the moment, these techniques will only work internally to Ambassador Labs. Mostly
|
||||
At the moment, these techniques will only work internally to Maintainers. Mostly
|
||||
this is because they require credentials to access internal resources at the
|
||||
moment, though in several cases we're working to fix that.
|
||||
|
||||
|
@ -924,14 +787,9 @@ using the `BUILD_ARCH` environment variable (e.g. `BUILD_ARCH=linux/arm64 make i
|
|||
|
||||
### How do I develop on Windows using WSL?
|
||||
|
||||
As the Emissary-ingress build system requires docker communication via a UNIX socket, using WSL 1 is not possible.
|
||||
Not even with a `DOCKER_HOST` environment variable set. As a result, you have to use WSL 2, including using the
|
||||
WSL 2 version of docker-for-windows.
|
||||
|
||||
Additionally, if your hostname contains an upper-case character, the build script will break. This is based on the
|
||||
`NAME` environment variable, which should contain your hostname. You can solve this issue by doing `export NAME=my-lowercase-host-name`.
|
||||
If you do this *after* you've already run `make images` once, you will manually have to clean up the docker images
|
||||
that have been created using your upper-case host name.
|
||||
- [WSL 2](https://learn.microsoft.com/en-us/windows/wsl/)
|
||||
- [Docker Desktop for Windows](https://docs.docker.com/desktop/windows/wsl/)
|
||||
- [VS Code](https://code.visualstudio.com/)
|
||||
|
||||
### How do I test using a private Docker repository?
|
||||
|
||||
|
@ -953,7 +811,7 @@ curl localhost:8877/ambassador/v0/diag/?loglevel=debug
|
|||
```
|
||||
|
||||
Note: This affects diagd and Envoy, but NOT the AES `amb-sidecar`.
|
||||
See the AES `DEVELOPING.md` for how to do that.
|
||||
See the AES `CONTRIBUTING.md` for how to do that.
|
||||
|
||||
### Can I build from a docker container instead of on my local computer?
|
||||
|
||||
|
@ -1069,10 +927,3 @@ Ambassador code should produce *no* warnings and *no* errors.
|
|||
If you're concerned that the mypy cache is somehow wrong, delete the
|
||||
`.mypy_cache/` directory to clear the cache.
|
||||
|
||||
### How do I get the source code for a release?
|
||||
|
||||
The current shipping release of Ambassador lives on the `master`
|
||||
branch. It is tagged with its version (e.g. `v0.78.0`).
|
||||
|
||||
Changes on `master` after the last tag have not been released yet, but
|
||||
will be included in the next release of Ambassador.
|
|
@ -1,4 +1,4 @@
|
|||
name: 'Collect Logs'
|
||||
name: "Collect Logs"
|
||||
description: >-
|
||||
Store any log files as artifacts.
|
||||
inputs:
|
||||
|
@ -49,7 +49,7 @@ runs:
|
|||
cp /tmp/*.yaml /tmp/test-logs || true
|
||||
cp /tmp/kat-client-*.log /tmp/test-logs || true
|
||||
- name: "Upload Logs"
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: logs-${{ inputs.jobname }}
|
||||
path: /tmp/test-logs
|
||||
|
|
|
@ -5,6 +5,9 @@ updates:
|
|||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
ignore:
|
||||
- dependency-name: "sigs.k8s.io/gateway-api"
|
||||
- dependency-name: "go.opentelemetry.io/proto/otlp"
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/yq"
|
||||
|
@ -65,6 +68,11 @@ updates:
|
|||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
ignore:
|
||||
- dependency-name: pytest
|
||||
- dependency-name: urllib3
|
||||
versions:
|
||||
- "<2.0"
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/base-python"
|
||||
schedule:
|
||||
|
@ -94,3 +102,9 @@ updates:
|
|||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
open-pull-requests-limit: 10
|
|
@ -46,6 +46,6 @@ A few sentences describing what testing you've done, e.g., manual tests, automat
|
|||
- We should lean on the bulk of code being covered by unit tests, but...
|
||||
- ... an end-to-end test should cover the integration points
|
||||
|
||||
- [ ] **I updated `DEVELOPING.md` with any any special dev tricks I had to use to work on this code efficiently.**
|
||||
- [ ] **I updated `CONTRIBUTING.md` with any special dev tricks I had to use to work on this code efficiently.**
|
||||
|
||||
- [ ] **The changes in this PR have been reviewed for security concerns and adherence to security best practices.**
|
||||
|
|
|
@ -22,7 +22,7 @@ name: Check branch version
|
|||
|
||||
jobs:
|
||||
check-branch-version:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
|
|
|
@ -10,7 +10,7 @@ name: job-promote-to-passed
|
|||
|
||||
jobs:
|
||||
lint: ########################################################################
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
|
@ -27,10 +27,12 @@ jobs:
|
|||
run: |
|
||||
make lint
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: lint
|
||||
if: always()
|
||||
|
||||
generate: ####################################################################
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
|
@ -75,10 +77,46 @@ jobs:
|
|||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate' (again!)"
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: generate
|
||||
if: always()
|
||||
|
||||
check-envoy-protos: ####################################################################
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Git Login"
|
||||
run: |
|
||||
if [[ -n '${{ secrets.GHA_SSH_KEY }}' ]]; then
|
||||
install -m700 -d ~/.ssh
|
||||
install -m600 /dev/stdin ~/.ssh/id_rsa <<<'${{ secrets.GHA_SSH_KEY }}'
|
||||
fi
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.RELEASE_REGISTRY, 'docker.io/')) && secrets.RELEASE_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_RELEASE_TOKEN }}
|
||||
- name: "'make compile-envoy-protos'"
|
||||
shell: bash
|
||||
run: |
|
||||
make compile-envoy-protos
|
||||
- name: "Check Git not dirty from 'make compile-envoy-protos'"
|
||||
uses: ./.github/actions/git-dirty-check
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: check-envoy-protos
|
||||
if: always()
|
||||
|
||||
check-envoy-version: #########################################################
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
|
@ -103,11 +141,44 @@ jobs:
|
|||
password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
- run: make check-envoy-version
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: check-envoy-version
|
||||
if: always()
|
||||
|
||||
# Tests ######################################################################
|
||||
apiext-e2e:
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
APIEXT_E2E: ""
|
||||
APIEXT_BUILD_ARCH: linux/amd64
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver-opts: |
|
||||
network=host
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: Install k3d
|
||||
shell: bash
|
||||
run: |
|
||||
curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=v5.6.0 bash
|
||||
k3d --version
|
||||
- name: go mod vendor
|
||||
shell: bash
|
||||
run: |
|
||||
make vendor
|
||||
- name: run apiext-e2e tests
|
||||
shell: bash
|
||||
run: |
|
||||
go test -p 1 -parallel 1 -v -tags=apiext ./test/apiext/... -timeout 15m
|
||||
check-gotest:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
|
@ -126,13 +197,13 @@ jobs:
|
|||
- name: make gotest
|
||||
shell: bash
|
||||
run: |
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
make gotest
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: check-gotest
|
||||
if: always()
|
||||
check-pytest:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
|
@ -144,12 +215,12 @@ jobs:
|
|||
fail-fast: false
|
||||
matrix:
|
||||
test:
|
||||
- integration
|
||||
- kat-envoy3-1-of-5
|
||||
- kat-envoy3-2-of-5
|
||||
- kat-envoy3-3-of-5
|
||||
- kat-envoy3-4-of-5
|
||||
- kat-envoy3-5-of-5
|
||||
- integration-tests
|
||||
- kat-envoy3-tests-1-of-5
|
||||
- kat-envoy3-tests-2-of-5
|
||||
- kat-envoy3-tests-3-of-5
|
||||
- kat-envoy3-tests-4-of-5
|
||||
- kat-envoy3-tests-5-of-5
|
||||
name: pytest-${{ matrix.test }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
@ -163,14 +234,21 @@ jobs:
|
|||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
- name: make pytest-${{ matrix.test }}
|
||||
- name: Create integration test cluster
|
||||
run: |
|
||||
export USE_LOCAL_K3S_CLUSTER=1
|
||||
sudo sysctl -w fs.file-max=1600000
|
||||
sudo sysctl -w fs.inotify.max_user_instances=4096
|
||||
|
||||
make ci/setup-k3d
|
||||
|
||||
- name: Setup integration test environment
|
||||
run: |
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
export DEV_REGISTRY=${{ secrets.DEV_REGISTRY }}
|
||||
make python-integration-test-environment
|
||||
- name: Run ${{ matrix.test }}
|
||||
run: |
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
|
@ -181,17 +259,11 @@ jobs:
|
|||
with:
|
||||
jobname: check-pytest-${{ matrix.test }}
|
||||
check-pytest-unit:
|
||||
# pytest-unit is separate from pytests (above) because we know for certain that no cluster is needed.
|
||||
# XXX This is pretty much a crock.
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
strategy:
|
||||
matrix:
|
||||
test:
|
||||
- unit
|
||||
name: pytest-${{ matrix.test }}
|
||||
name: pytest-unit
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
|
@ -204,23 +276,20 @@ jobs:
|
|||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
- name: make pytest-${{ matrix.test }}
|
||||
- name: Create Python virtual environment
|
||||
run: |
|
||||
sudo sysctl -w fs.file-max=1600000
|
||||
sudo sysctl -w fs.inotify.max_user_instances=4096
|
||||
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
export DEV_REGISTRY=${{ secrets.DEV_REGISTRY }}
|
||||
make python-virtual-environment
|
||||
- name: Run Python unit tests
|
||||
run: |
|
||||
export PYTEST_ARGS=' --cov-branch --cov=ambassador --cov-report html:/tmp/cov_html '
|
||||
make pytest-${{ matrix.test }}
|
||||
make pytest-unit-tests
|
||||
- uses: ./.github/actions/after-job
|
||||
if: always()
|
||||
with:
|
||||
jobname: check-pytest-${{ matrix.test }}
|
||||
jobname: check-pytest-unit
|
||||
if: always()
|
||||
check-chart:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
DEV_REGISTRY: ${{ secrets.DEV_REGISTRY }}
|
||||
# See docker/base-python.docker.gen
|
||||
|
@ -230,28 +299,33 @@ jobs:
|
|||
DOCKER_BUILD_USERNAME: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
DOCKER_BUILD_PASSWORD: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
steps:
|
||||
- uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: make test-chart
|
||||
- name: Warn about skip
|
||||
run: |
|
||||
make ci/setup-k3d
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
echo "SKIPPING CHART TEST; check the charts manually"
|
||||
# - uses: docker/login-action@v2
|
||||
# with:
|
||||
# registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
# username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
# password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
# - uses: actions/checkout@v3
|
||||
# with:
|
||||
# fetch-depth: 0
|
||||
# ref: ${{ github.event.pull_request.head.sha }}
|
||||
# - name: Install Deps
|
||||
# uses: ./.github/actions/setup-deps
|
||||
# - name: make test-chart
|
||||
# run: |
|
||||
# make ci/setup-k3d
|
||||
# export DEV_KUBECONFIG=~/.kube/config
|
||||
|
||||
make test-chart
|
||||
- uses: ./.github/actions/after-job
|
||||
if: always()
|
||||
# make test-chart
|
||||
# - uses: ./.github/actions/after-job
|
||||
# with:
|
||||
# jobname: check-chart
|
||||
# if: always()
|
||||
|
||||
build: #######################################################################
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
DEV_REGISTRY: ${{ secrets.DEV_REGISTRY }}
|
||||
# See docker/base-python.docker.gen
|
||||
|
@ -285,12 +359,14 @@ jobs:
|
|||
run: |
|
||||
make push-dev
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: build
|
||||
if: always()
|
||||
|
||||
######################################################################
|
||||
######################### CVE Scanning ###############################
|
||||
trivy-container-scan:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
needs: [build]
|
||||
steps:
|
||||
# upload of results to github uses git so checkout of code is needed
|
||||
|
@ -320,16 +396,18 @@ jobs:
|
|||
pass:
|
||||
name: "job-promote-to-passed" # This is the job name that the branch protection looks for
|
||||
needs:
|
||||
- apiext-e2e
|
||||
- lint
|
||||
- build
|
||||
- generate
|
||||
- check-envoy-protos
|
||||
- check-envoy-version
|
||||
- check-gotest
|
||||
- check-pytest
|
||||
- check-pytest-unit
|
||||
- check-chart
|
||||
- trivy-container-scan
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: No-Op
|
||||
if: ${{ false }}
|
||||
|
|
|
@ -3,42 +3,44 @@ on:
|
|||
schedule:
|
||||
# run at noon on sundays to prepare for monday
|
||||
# used https://crontab.guru/ to generate
|
||||
- cron: '0 12 * * SUN'
|
||||
- cron: "0 12 * * SUN"
|
||||
jobs:
|
||||
generate: ####################################################################
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Git Login"
|
||||
run: |
|
||||
if [[ -n '${{ secrets.GHA_SSH_KEY }}' ]]; then
|
||||
install -m700 -d ~/.ssh
|
||||
install -m600 /dev/stdin ~/.ssh/id_rsa <<<'${{ secrets.GHA_SSH_KEY }}'
|
||||
fi
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.RELEASE_REGISTRY, 'docker.io/')) && secrets.RELEASE_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_RELEASE_TOKEN }}
|
||||
- name: "'make generate'"
|
||||
shell: bash
|
||||
run: |
|
||||
make generate
|
||||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate'"
|
||||
- name: "'make generate' (again!)"
|
||||
shell: bash
|
||||
run: |
|
||||
make generate
|
||||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate' (again!)"
|
||||
- uses: ./.github/actions/after-job
|
||||
if: always()
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Git Login"
|
||||
run: |
|
||||
if [[ -n '${{ secrets.GHA_SSH_KEY }}' ]]; then
|
||||
install -m700 -d ~/.ssh
|
||||
install -m600 /dev/stdin ~/.ssh/id_rsa <<<'${{ secrets.GHA_SSH_KEY }}'
|
||||
fi
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.RELEASE_REGISTRY, 'docker.io/')) && secrets.RELEASE_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_RELEASE_TOKEN }}
|
||||
- name: "'make generate'"
|
||||
shell: bash
|
||||
run: |
|
||||
make generate
|
||||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate'"
|
||||
- name: "'make generate' (again!)"
|
||||
shell: bash
|
||||
run: |
|
||||
make generate
|
||||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate' (again!)"
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: generate-base-python
|
||||
if: always()
|
||||
|
|
|
@ -0,0 +1,74 @@
|
|||
name: k8s-e2e
|
||||
|
||||
"on":
|
||||
pull_request: {}
|
||||
schedule:
|
||||
- cron: "0 7 * * *" # at 7am UTC everyday
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
acceptance_tests:
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
# See pkg/kubeapply/resource_kubeapply.go
|
||||
DEV_USE_IMAGEPULLSECRET: ${{ secrets.DEV_USE_IMAGEPULLSECRET }}
|
||||
DOCKER_BUILD_USERNAME: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
DOCKER_BUILD_PASSWORD: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
k8s:
|
||||
[
|
||||
{ k3s: 1.26.9+k3s1, kubectl: 1.26.9 },
|
||||
{ k3s: 1.27.6+k3s1, kubectl: 1.27.6 },
|
||||
{ k3s: 1.28.2+k3s1, kubectl: 1.28.2 },
|
||||
]
|
||||
test:
|
||||
- integration-tests
|
||||
- kat-envoy3-tests-1-of-5
|
||||
- kat-envoy3-tests-2-of-5
|
||||
- kat-envoy3-tests-3-of-5
|
||||
- kat-envoy3-tests-4-of-5
|
||||
- kat-envoy3-tests-5-of-5
|
||||
name: ${{matrix.k8s.kubectl}}-${{ matrix.test }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
- name: Create integration test cluster
|
||||
env:
|
||||
K3S_VERSION: ${{matrix.k8s.k3s}}
|
||||
KUBECTL_VERSION: ${{matrix.k8s.kubectl}}
|
||||
run: |
|
||||
sudo sysctl -w fs.file-max=1600000
|
||||
sudo sysctl -w fs.inotify.max_user_instances=4096
|
||||
|
||||
make ci/setup-k3d
|
||||
- name: Setup integration test environment
|
||||
run: |
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
export DEV_REGISTRY=${{ secrets.DEV_REGISTRY }}
|
||||
make python-integration-test-environment
|
||||
- name: Run ${{ matrix.test }}
|
||||
run: |
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
export DEV_REGISTRY=${{ secrets.DEV_REGISTRY }}
|
||||
make pytest-${{ matrix.test }}
|
||||
- uses: ./.github/actions/after-job
|
||||
if: always()
|
||||
with:
|
||||
jobname: check-pytest-${{matrix.k8s.kubectl}}-${{ matrix.test }}
|
|
@ -2,10 +2,10 @@ name: promote-to-ga
|
|||
"on":
|
||||
push:
|
||||
tags:
|
||||
- 'v[0-9]+.[0-9]+.[0-9]+'
|
||||
- "v[0-9]+.[0-9]+.[0-9]+"
|
||||
jobs:
|
||||
promote-to-ga:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
name: promote-to-ga
|
||||
env:
|
||||
AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
|
||||
|
@ -30,6 +30,8 @@ jobs:
|
|||
run: |
|
||||
make release/promote-oss/to-ga
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: promote-to-ga-1
|
||||
if: always()
|
||||
- id: check-slack-webhook
|
||||
name: Assign slack webhook variable
|
||||
|
@ -41,18 +43,20 @@ jobs:
|
|||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
with:
|
||||
status: ${{ job.status }}
|
||||
success_text: 'Emissary GA for ${env.GITHUB_REF} successfully built'
|
||||
failure_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed'
|
||||
cancelled_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled'
|
||||
success_text: "Emissary GA for ${env.GITHUB_REF} successfully built"
|
||||
failure_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed"
|
||||
cancelled_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled"
|
||||
fields: |
|
||||
[{ "title": "Repository", "value": "${env.GITHUB_REPOSITORY}", "short": true },
|
||||
{ "title": "Branch", "value": "${env.GITHUB_REF}", "short": true },
|
||||
{ "title": "Action URL", "value": "${env.GITHUB_SERVER_URL}/${env.GITHUB_REPOSITORY}/actions/runs/${env.GITHUB_RUN_ID}"}
|
||||
]
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: promote-to-ga-2
|
||||
if: always()
|
||||
create-gh-release:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
needs: [promote-to-ga]
|
||||
name: "Create GitHub release"
|
||||
env:
|
||||
|
@ -80,13 +84,15 @@ jobs:
|
|||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
with:
|
||||
status: ${{ job.status }}
|
||||
success_text: 'Emissary GitHub release was created: ${{ steps.step-create-gh-release.outputs.url }}'
|
||||
failure_text: 'Emissary GitHub release failed'
|
||||
cancelled_text: 'Emissary GitHub release was was cancelled'
|
||||
success_text: "Emissary GitHub release was created: ${{ steps.step-create-gh-release.outputs.url }}"
|
||||
failure_text: "Emissary GitHub release failed"
|
||||
cancelled_text: "Emissary GitHub release was was cancelled"
|
||||
fields: |
|
||||
[{ "title": "Repository", "value": "${env.GITHUB_REPOSITORY}", "short": true },
|
||||
{ "title": "Branch", "value": "${env.GITHUB_REF}", "short": true },
|
||||
{ "title": "Action URL", "value": "${env.GITHUB_SERVER_URL}/${env.GITHUB_REPOSITORY}/actions/runs/${env.GITHUB_RUN_ID}"}
|
||||
]
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: create-gh-release
|
||||
if: always()
|
||||
|
|
|
@ -2,11 +2,11 @@ name: promote-to-rc
|
|||
"on":
|
||||
push:
|
||||
tags:
|
||||
- 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
|
||||
- 'v[0-9]+.[0-9]+.[0-9]+-dev'
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-dev"
|
||||
jobs:
|
||||
promote-to-rc:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
name: promote-to-rc
|
||||
env:
|
||||
AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
|
||||
|
@ -49,12 +49,14 @@ jobs:
|
|||
export AMBASSADOR_MANIFEST_URL=https://app.getambassador.io/yaml/emissary/${{ steps.step-main.outputs.version }}
|
||||
export HELM_CHART_VERSION=${{ steps.step-main.outputs.chart_version }}
|
||||
\`\`\`
|
||||
failure_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed'
|
||||
cancelled_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled'
|
||||
failure_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed"
|
||||
cancelled_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled"
|
||||
fields: |
|
||||
[{ "title": "Repository", "value": "${env.GITHUB_REPOSITORY}", "short": true },
|
||||
{ "title": "Branch", "value": "${env.GITHUB_REF}", "short": true },
|
||||
{ "title": "Action URL", "value": "${env.GITHUB_SERVER_URL}/${env.GITHUB_REPOSITORY}/actions/runs/${env.GITHUB_RUN_ID}"}
|
||||
]
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: promote-to-rc
|
||||
if: always()
|
||||
|
|
|
@ -2,10 +2,10 @@ name: chart-publish
|
|||
"on":
|
||||
push:
|
||||
tags:
|
||||
- 'chart/v*'
|
||||
- "chart/v*"
|
||||
jobs:
|
||||
chart-publish:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
name: chart-publish
|
||||
env:
|
||||
AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
|
||||
|
@ -34,18 +34,20 @@ jobs:
|
|||
with:
|
||||
status: ${{ job.status }}
|
||||
success_text: "Chart successfully published for ${env.GITHUB_REF}"
|
||||
failure_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed'
|
||||
cancelled_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled'
|
||||
failure_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed"
|
||||
cancelled_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled"
|
||||
fields: |
|
||||
[{ "title": "Repository", "value": "${env.GITHUB_REPOSITORY}", "short": true },
|
||||
{ "title": "Branch", "value": "${env.GITHUB_REF}", "short": true },
|
||||
{ "title": "Action URL", "value": "${env.GITHUB_SERVER_URL}/${env.GITHUB_REPOSITORY}/actions/runs/${env.GITHUB_RUN_ID}"}
|
||||
]
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: chart-publish
|
||||
if: always()
|
||||
chart-create-gh-release:
|
||||
if: ${{ ! contains(github.ref, '-') }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
needs: [chart-publish]
|
||||
name: "Create GitHub release"
|
||||
steps:
|
||||
|
@ -71,13 +73,15 @@ jobs:
|
|||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
with:
|
||||
status: ${{ job.status }}
|
||||
success_text: 'Chart GitHub release was created: ${{ steps.step-create-gh-release.outputs.url }}'
|
||||
failure_text: 'Chart GitHub release failed'
|
||||
cancelled_text: 'Chart GitHub release was was cancelled'
|
||||
success_text: "Chart GitHub release was created: ${{ steps.step-create-gh-release.outputs.url }}"
|
||||
failure_text: "Chart GitHub release failed"
|
||||
cancelled_text: "Chart GitHub release was was cancelled"
|
||||
fields: |
|
||||
[{ "title": "Repository", "value": "${env.GITHUB_REPOSITORY}", "short": true },
|
||||
{ "title": "Branch", "value": "${env.GITHUB_REF}", "short": true },
|
||||
{ "title": "Action URL", "value": "${env.GITHUB_SERVER_URL}/${env.GITHUB_REPOSITORY}/actions/runs/${env.GITHUB_RUN_ID}"}
|
||||
]
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: chart-create-gh-release
|
||||
if: always()
|
||||
|
|
|
@ -12,16 +12,19 @@ linters-settings:
|
|||
gofmt:
|
||||
simplify: true
|
||||
depguard:
|
||||
list-type: blacklist
|
||||
include-go-root: true
|
||||
packages-with-error-message:
|
||||
- log: "Use `github.com/datawire/dlib/dlog` instead of `log`"
|
||||
- github.com/sirupsen/logrus: "Use `github.com/datawire/dlib/dlog` instead of `github.com/sirupsen/logrus`"
|
||||
- github.com/datawire/dlib/dutil: "Use either `github.com/datawire/dlib/derror` or `github.com/datawire/dlib/dhttp` instead of `github.com/datawire/dlib/dutil`"
|
||||
- github.com/gogo/protobuf: "Use `google.golang.org/protobuf` instead of `github.com/gogo/protobuf`"
|
||||
- github.com/golang/protobuf: "Use `google.golang.org/protobuf` instead of `github.com/golang/protobuf`"
|
||||
- github.com/google/shlex: "Use `github.com/kballard/go-shellquote` instead of `github.com/google/shlex`"
|
||||
- golang.org/x/net/http2/h2c: "Use `github.com/datawire/dlib/dhttp` instead of `golang.org/x/net/http2/h2c`"
|
||||
rules:
|
||||
main:
|
||||
deny:
|
||||
- pkg: "github.com/datawire/dlib/dutil"
|
||||
desc: "Use either `github.com/datawire/dlib/derror` or `github.com/datawire/dlib/dhttp` instead of `github.com/datawire/dlib/dutil`"
|
||||
- pkg: "github.com/gogo/protobuf"
|
||||
desc: "Use `google.golang.org/protobuf` instead of `github.com/gogo/protobuf`"
|
||||
- pkg: "github.com/golang/protobuf"
|
||||
desc: "Use `google.golang.org/protobuf` instead of `github.com/golang/protobuf`"
|
||||
- pkg: "github.com/google/shlex"
|
||||
desc: "Use `github.com/kballard/go-shellquote` instead of `github.com/google/shlex`"
|
||||
- pkg: "golang.org/x/net/http2/h2c"
|
||||
desc: "Use `github.com/datawire/dlib/dhttp` instead of `golang.org/x/net/http2/h2c`"
|
||||
unused:
|
||||
# treat code as a program (not a library) and report unused
|
||||
# exported identifiers
|
||||
|
|
170
CHANGELOG.md
170
CHANGELOG.md
|
@ -85,12 +85,142 @@ it will be removed; but as it won't be user-visible this isn't considered a brea
|
|||
|
||||
## RELEASE NOTES
|
||||
|
||||
## [3.6.0] TBD
|
||||
## [3.10.0] July 29, 2025
|
||||
[3.10.0]: https://github.com/emissary-ingress/emissary/compare/v3.9.0...v3.10.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.28.0 which provides security,
|
||||
performance and feature enhancements. You can read more about them here: <a
|
||||
href="https://www.envoyproxy.io/docs/envoy/v1.28.0/version_history/version_history">Envoy Proxy
|
||||
1.28.0 Release Notes</a>
|
||||
|
||||
- Change: Emissary-ingress will no longer publish YAML manifest with the Ambassador Agent being
|
||||
installed by default. This is an optional component that provides additional features on top of
|
||||
Emissary-ingress and we recommend installing it using the instructions found in the <a
|
||||
href="https://github.com/datawire/ambassador-agenty">Ambassador Agent Repo</a>.
|
||||
|
||||
- Change: Upgraded Emissary-ingress to the latest release of Golang as part of our general
|
||||
dependency upgrade process.
|
||||
|
||||
- Bugfix: Emissary-ingress was incorrectly caching Mappings with regex headers using the header name
|
||||
instead of the Mapping name, which could reduce the cache's effectiveness. This has been fixed so
|
||||
that the correct key is used. ([Incorrect Cache Key for Mapping])
|
||||
|
||||
- Feature: Emissary-ingress now supports resolving Endpoints from EndpointSlices in addition to the
|
||||
existing support for Endpoints, supporting Services with more than 1000 endpoints.
|
||||
|
||||
- Feature: Emissary-ingress now passes the client TLS certificate and SNI, if any, to the external
|
||||
auth service. These are available in the `source.certificate` and `tls_session.sni` fields, as
|
||||
described in the <a
|
||||
href="https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/auth/v3/attribute_context.proto">
|
||||
Envoy extauth documentation</a>.
|
||||
|
||||
- Change: The `ambex` component of Emissary-ingress now uses `xxhash64` instead of `md5`, since
|
||||
`md5` can cause problems in crypto-restricted environments (e.g. FIPS) ([Remove usage of md5])
|
||||
|
||||
[Incorrect Cache Key for Mapping]: https://github.com/emissary-ingress/emissary/issues/5714
|
||||
[Remove usage of md5]: https://github.com/emissary-ingress/emissary/pull/5794
|
||||
|
||||
## [3.9.0] November 13, 2023
|
||||
[3.9.0]: https://github.com/emissary-ingress/emissary/compare/v3.8.0...v3.9.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.27.2 which provides security,
|
||||
performance and feature enhancements. You can read more about them here: <a
|
||||
href="https://www.envoyproxy.io/docs/envoy/v1.27.2/version_history/version_history">Envoy Proxy
|
||||
1.27.2 Release Notes</a>
|
||||
|
||||
- Feature: By default, Emissary-ingress will return an `UNAVAILABLE` code when a request using gRPC
|
||||
is rate limited. The `RateLimitService` resource now exposes a new
|
||||
`grpc.use_resource_exhausted_code` field that when set to `true`, Emissary-ingress will return a
|
||||
`RESOURCE_EXHAUSTED` gRPC code instead. Thanks to <a href="https://github.com/jeromefroe">Jerome
|
||||
Froelich</a> for contributing this feature!
|
||||
|
||||
- Feature: Envoy runtime fields that were provided to mitigate the recent HTTP/2 rapid reset
|
||||
vulnerability can now be configured via the Module resource so the configuration will persist
|
||||
between restarts. This configuration is added to the Envoy bootstrap config, so restarting
|
||||
Emissary is necessary after changing these fields for the configuration to take effect.
|
||||
|
||||
- Change: APIExt would previously allow for TLS 1.0 connections. We have updated it to now only use
|
||||
a minimum TLS version of 1.3 to resolve security concerns.
|
||||
|
||||
- Change: - Update default image to Emissary-ingress v3.9.0. <br/>
|
||||
|
||||
- Bugfix: The APIExt server provides CRD conversion between the stored version v2 and the version
|
||||
watched for by Emissary-ingress v3alpha1. Since this component is required to operate
|
||||
Emissary-ingress, we have introduced an init container that will ensure it is available before
|
||||
starting. This will help address some of the intermittent issues seen during install and upgrades.
|
||||
|
||||
## [3.8.0] August 29, 2023
|
||||
[3.8.0]: https://github.com/emissary-ingress/emissary/compare/v3.7.2...v3.8.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Bugfix: As of v2.2.2, if two mappings were associated with different Hosts through host
|
||||
mappingSelector labels but share the same prefix, the labels were not taken into account which
|
||||
would cause one Mapping to be correctly routed but the other not.
|
||||
This change fixes this issue so
|
||||
that Mappings sharing the same prefix but associated with different Hosts will be correctly
|
||||
routed. ([Canary grouping must take labels into account])
|
||||
|
||||
- Bugfix: In previous versions, if multiple Headers/QueryParameters where used in a v3alpha1
|
||||
mapping, these values would duplicate and cause all the Headers/QueryParameters to have the same
|
||||
value. This is no longer the case and the expected values for unique Headers/QueryParameters will
|
||||
apply.
|
||||
This issue was only present in v3alpha1 Mappings. For users who may have this issue, please
|
||||
be sure to re-apply any v3alpha1 Mappings in order to update the stored v2 Mapping and resolve the
|
||||
issue.
|
||||
|
||||
- Change: When the Ambassador agent is being used, it will no longer attempt to collect and report
|
||||
Envoy metrics. In previous versions, Emissary-ingress would always create an Envoy stats sink for
|
||||
the agent as long as the AMBASSADOR_GRPC_METRICS_SINK environmet variable was provided. This
|
||||
environment variable was hardcoded on the release manifests and has now been removed and an Envoy
|
||||
stats sink for the agent is no longer created.
|
||||
|
||||
[Canary grouping must take labels into account]: https://github.com/emissary-ingress/emissary/issues/4170
|
||||
|
||||
## [3.7.2] July 25, 2023
|
||||
[3.7.2]: https://github.com/emissary-ingress/emissary/compare/v3.7.1...v3.7.2
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Security: This upgrades Emissary-ingress to be built on Envoy v1.26.4 which includes a security
|
||||
fixes for CVE-2023-35942, CVE-2023-35943, VE-2023-35944.
|
||||
|
||||
## [3.7.1] July 13, 2023
|
||||
[3.7.1]: https://github.com/emissary-ingress/emissary/compare/v3.7.0...v3.7.1
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Security: This upgrades Emissary-ingress to be built on Envoy v1.26.3 which includes a security
|
||||
fix for CVE-2023-35945.
|
||||
|
||||
## [3.7.0] June 20, 2023
|
||||
[3.7.0]: https://github.com/emissary-ingress/emissary/compare/v3.6.0...v3.7.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Security: Upgrading to the latest release of Golang as part of our general dependency upgrade
|
||||
process. This includes security fixes for CVE-2023-24539, CVE-2023-24540, CVE-2023-29400.
|
||||
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.26.1. This provides
|
||||
Emissary-ingress with the latest security patches, performances enhancments, and features offered
|
||||
by the envoy proxy.
|
||||
|
||||
- Feature: By default, Envoy will return an `UNAVAILABLE` gRPC code when a request is rate limited.
|
||||
The `RateLimitService` resource now exposes the <a
|
||||
href="https://www.envoyproxy.io/docs/envoy/v1.26.0/configuration/http/http_filters/rate_limit_filter">use_resource_exhausted_code</a>
|
||||
option. Set `grpc.use_resource_exhausted_code: true` so Envoy will return a `RESOURCE_EXHAUSTED`
|
||||
gRPC code instead.
|
||||
|
||||
## [3.6.0] April 17, 2023
|
||||
[3.6.0]: https://github.com/emissary-ingress/emissary/compare/v3.5.0...v3.6.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.25.3. This provides
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.25.4. This provides
|
||||
Emissary-ingress with the latest security patches, performances enhancments, and features offered
|
||||
by the envoy proxy.
|
||||
|
||||
|
@ -102,10 +232,10 @@ it will be removed; but as it won't be user-visible this isn't considered a brea
|
|||
- Security: Upgrading to the latest release of Golang as part of our general dependency upgrade
|
||||
process. This includes security fixes for CVE-2022-41725, CVE-2022-41723.
|
||||
|
||||
- Feature: In Envoy 1.24, experimental support for a native OpenTelemetry tracing driver was
|
||||
introduced that allows exporting spans in the otlp format. Many Observability platforms accept
|
||||
- Feature: In Envoy 1.24, experimental support for a native OpenTelemetry tracing driver was
|
||||
introduced that allows exporting spans in the otlp format. Many Observability platforms accept
|
||||
that format and is the recommend replacement for the LightStep driver. Emissary-ingress now
|
||||
supports setting the `TracingService.spec.driver=opentelemetry` to export spans in otlp
|
||||
supports setting the `TracingService.spec.driver=opentelemetry` to export spans in otlp
|
||||
format.<br/><br/>
|
||||
Thanks to <a href="https://github.com/psalaberria002">Paul</a> for helping us
|
||||
get this tested and implemented!
|
||||
|
@ -124,14 +254,14 @@ it will be removed; but as it won't be user-visible this isn't considered a brea
|
|||
- Change: Previously, specifying backend ports by name in Ingress was not supported and would result
|
||||
in defaulting to port 80. This allows emissary-ingress to now resolve port names for backend
|
||||
services. If the port number cannot be resolved by the name (e.g. a named port in the Service doesn't
|
||||
exist) then it defaults back to the original behavior. (Thanks to <a
|
||||
exist) then it defaults back to the original behavior. (Thanks to <a
|
||||
href="https://github.com/antonu17">Anton Ustyuzhanin</a>!). ([#4809])
|
||||
|
||||
- Change: The `emissary-apiext` server is a Kubernetes Conversion Webhook that converts between the
|
||||
- Change: The `emissary-apiext` server is a Kubernetes Conversion Webhook that converts between the
|
||||
Emissary-ingress CRD versions. On startup, it ensures that a self-signed cert is available so that
|
||||
K8s API Server can talk to the conversion webhook (*TLS is required by K8s*). We have introduced
|
||||
a startupProbe to ensure that emissary-apiext server has enough time to configure the webhooks
|
||||
before running liveness and readiness probes. This is to ensure slow startup doesn't cause K8s to
|
||||
K8s API Server can talk to the conversion webhook (*TLS is required by K8s*). We have introduced a
|
||||
startupProbe to ensure that emissary-apiext server has enough time to configure the webhooks
|
||||
before running liveness and readiness probes. This is to ensure slow startup doesn't cause K8s to
|
||||
needlessly restart the pod.
|
||||
|
||||
[fix: hostname port issue]: https://github.com/emissary-ingress/emissary/pull/4816
|
||||
|
@ -163,17 +293,17 @@ it will be removed; but as it won't be user-visible this isn't considered a brea
|
|||
server to.
|
||||
- `AMBASSADOR_HEALTHCHECK_IP_FAMILY`: The IP family to use for the healthcheck
|
||||
server.
|
||||
This allows the healthcheck server to be configured to use IPv6-only k8s environments.
|
||||
This allows the healthcheck server to be configured to use IPv6-only k8s environments.
|
||||
(Thanks to <a href="https://github.com/TimonOmsk">Dmitry Golushko</a>!).
|
||||
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.24.1. One notable change is that
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.24.1. One notable change is that
|
||||
the team at LightStep and Envoy Maintainers have decided to no longer support the native
|
||||
*LightStep* tracing driver in favor of using the Open Telemetry driver. The code for LightStep
|
||||
driver has been completely removed from Envoy code base so Emissary-ingress will no longer
|
||||
support it either.
|
||||
The recommended upgrade path is to leverage a supported Tracing driver such as
|
||||
`Zipkin` and use the [Open Telemetry Collector](https://opentelemetry.io/docs/collector/) to
|
||||
collect and forward Observability data to LightStep.
|
||||
*LightStep* tracing driver in favor of using the Open Telemetry driver. The code for LightStep
|
||||
driver has been completely removed from Envoy code base so Emissary-ingress will no longer support
|
||||
it either.
|
||||
The recommended upgrade path is to leverage a supported Tracing driver such as `Zipkin`
|
||||
and use the [Open Telemetry Collector](https://opentelemetry.io/docs/collector/) to collect and
|
||||
forward Observability data to LightStep.
|
||||
|
||||
- Feature: /ready endpoint used by emissary is using the admin port (8001 by default). This
|
||||
generates a problem during config reloads with large configs as the admin thread is blocking so
|
||||
|
@ -284,7 +414,7 @@ it will be removed; but as it won't be user-visible this isn't considered a brea
|
|||
releases, or a `Host` with or without a `TLSContext` as in prior 2.y releases.
|
||||
|
||||
- Bugfix: Prior releases of Emissary-ingress had the arbitrary limitation that a `TCPMapping` cannot
|
||||
be used on the same port that HTTP is served on, even if TLS+SNI would make this possible.
|
||||
be used on the same port that HTTP is served on, even if TLS+SNI would make this possible.
|
||||
Emissary-ingress now allows `TCPMappings` to be used on the same `Listener` port as HTTP `Hosts`,
|
||||
as long as that `Listener` terminates TLS.
|
||||
|
||||
|
@ -450,7 +580,7 @@ it will be removed; but as it won't be user-visible this isn't considered a brea
|
|||
releases, or a `Host` with or without a `TLSContext` as in prior 2.y releases.
|
||||
|
||||
- Bugfix: Prior releases of Emissary-ingress had the arbitrary limitation that a `TCPMapping` cannot
|
||||
be used on the same port that HTTP is served on, even if TLS+SNI would make this possible.
|
||||
be used on the same port that HTTP is served on, even if TLS+SNI would make this possible.
|
||||
Emissary-ingress now allows `TCPMappings` to be used on the same `Listener` port as HTTP `Hosts`,
|
||||
as long as that `Listener` terminates TLS.
|
||||
|
||||
|
|
|
@ -7,17 +7,15 @@ maintainer responsibilities.
|
|||
|
||||
Maintainers are listed in alphabetical order.
|
||||
|
||||
| Maintainer | GitHub ID | Affiliation |
|
||||
| ---------------- | --------------------------------------------- | --------------------------------------------------- |
|
||||
| Alex Gervais | [alexgervais](https://github.com/alexgervais) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Alice Wasko | [aliceproxy](https://github.com/aliceproxy) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| David Dymko | [ddymko](https://github.com/ddymko) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Flynn | [kflynn](https://github.com/kflynn) | [Buoyant](https://www.buoyant.io) |
|
||||
| Hamzah Qudsi | [haq204](https://github.com/haq204) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Lance Austin | [lanceea](https://github.com/lanceea) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Luke Shumaker | [lukeshu](https://github.com/lukeshu) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Rafael Schloming | [rhs](https://github.com/rhs) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
|
||||
| Maintainer | GitHub ID | Affiliation |
|
||||
| ---------------- | ------------------------------------------------------ | --------------------------------------------------- |
|
||||
| Alice Wasko | [aliceproxy](https://github.com/aliceproxy) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| David Dymko | [ddymko](https://github.com/ddymko) | [CoreWeave](https://www.coreweave.com) |
|
||||
| Flynn | [kflynn](https://github.com/kflynn) | [Buoyant](https://www.buoyant.io) |
|
||||
| Hamzah Qudsi | [haq204](https://github.com/haq204) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Mark Schlachter | [the-wondersmith](https://github.com/the-wondersmith) | [Shuttle](https://www.shuttle.rs) |
|
||||
| Phil Peble | [ppeble](https://github.com/ppeble) | [ActiveCampaign](https://www.activecampaign.com/) |
|
||||
| Rafael Schloming | [rhs](https://github.com/rhs) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
|
||||
|
||||
In addition to the maintainers, Emissary releases may be created by any
|
||||
|
@ -32,6 +30,9 @@ of the following (also listed in alphabetical order):
|
|||
* Ava Hahn ([aidanhahn](https://github.com/aidanhahn))
|
||||
* Alix Cook ([acookin](https://github.com/acookin))
|
||||
* John Esmet ([esmet](https://github.com/esmet))
|
||||
* Luke T. Shumaker ([lukeshu](https://github.com/lukeshu))
|
||||
* Alex Gervais ([alexgervais](https://github.com/alexgervais))
|
||||
* Lance Austin ([LanceEa](https://github.com/LanceEa))
|
||||
|
||||
## Releasers Emeriti
|
||||
|
||||
|
|
|
@ -1,18 +1,11 @@
|
|||
# Community Meeting Schedule
|
||||
|
||||
## Weekly Troubleshooting
|
||||
|
||||
We hold troubleshooting sessions once a week on Thursdays, at 2:30 pm Eastern. These sessions are a way to connect in person with project maintainers and get help with any problems you might be encountering while using Emissary-ingress.
|
||||
|
||||
**Zoom Meeting Link**: https://us02web.zoom.us/j/83032365622
|
||||
|
||||
|
||||
## Monthly Contributors Meeting
|
||||
|
||||
The Emissary-ingress Contributors Meeting is held on the first Wednesday of every month at 3:30pm Eastern. The focus of this meeting is discussion of technical issues related to development of Emissary-ingress.
|
||||
|
||||
New contributors are always welcome! Check out our [contributor's guide](../DevDocumentation/DEVELOPING.md) to learn how you can help make Emissary-ingress better.
|
||||
New contributors are always welcome! Check out our [contributor's guide](../DevDocumentation/CONTRIBUTING.md) to learn how you can help make Emissary-ingress better.
|
||||
|
||||
**Zoom Meeting Link**: [https://ambassadorlabs.zoom.us/j/86139262248?pwd=bzZlcU96WjAxN2E1RFZFZXJXZ1FwQT09](https://ambassadorlabs.zoom.us/j/86139262248?pwd=bzZlcU96WjAxN2E1RFZFZXJXZ1FwQT09)
|
||||
- Meeting ID: 861 3926 2248
|
||||
- Passcode: 113675
|
||||
**Zoom Meeting Link**: [https://ambassadorlabs.zoom.us/j/81589589470?pwd=U8qNvZSqjQx7abIzwRtGryFU35pi3T.1](https://ambassadorlabs.zoom.us/j/81589589470?pwd=U8qNvZSqjQx7abIzwRtGryFU35pi3T.1)
|
||||
- Meeting ID: 815 8958 9470
|
||||
- Passcode: 199217
|
||||
|
|
|
@ -1,16 +1,12 @@
|
|||
## Support for deploying and using Ambassador
|
||||
## Support for deploying and using Emissary
|
||||
|
||||
Welcome to Ambassador! We use GitHub for tracking bugs and feature requests. If you need support, the following resources are available. Thanks for understanding.
|
||||
Welcome to Emissary! The Emissary community is the best current resource for
|
||||
Emissary support, with the best options being:
|
||||
|
||||
### Documentation
|
||||
- Checking out the [documentation] at https://emissary-ingress.dev/
|
||||
- Joining the `#emissary-ingress` channel in the [CNCF Slack]
|
||||
- [Opening an issue][GitHub] in [GitHub]
|
||||
|
||||
* [User Documentation](https://www.getambassador.io/docs)
|
||||
* [Troubleshooting Guide](https://www.getambassador.io/reference/debugging)
|
||||
|
||||
### Real-time Chat
|
||||
|
||||
* [Slack](https://d6e.co/slack): The `#ambassador` channel is a good place to start.
|
||||
|
||||
### Commercial Support
|
||||
|
||||
* Commercial Support is available as part of [Ambassador Pro](https://www.getambassador.io/pro/).
|
||||
[CNCF Slack]: https://communityinviter.com/apps/cloud-native/cncf
|
||||
[documentation]: https://emissary-ingress.dev/
|
||||
[GitHub]: https://github.com/emissary-ingress/emissary/issues
|
||||
|
|
385
DEPENDENCIES.md
385
DEPENDENCIES.md
|
@ -1,180 +1,219 @@
|
|||
The Go module "github.com/emissary-ingress/emissary/v3" incorporates the
|
||||
following Free and Open Source software:
|
||||
|
||||
Name Version License(s)
|
||||
---- ------- ----------
|
||||
the Go language standard library ("std") v1.20.1 3-clause BSD license
|
||||
cloud.google.com/go/compute v1.2.0 Apache License 2.0
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 MIT license
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible Apache License 2.0
|
||||
github.com/Azure/go-autorest/autorest v0.11.24 Apache License 2.0
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.18 Apache License 2.0
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 Apache License 2.0
|
||||
github.com/Azure/go-autorest/logger v0.2.1 Apache License 2.0
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 Apache License 2.0
|
||||
github.com/MakeNowJust/heredoc v1.0.0 MIT license
|
||||
github.com/Masterminds/goutils v1.1.1 Apache License 2.0
|
||||
github.com/Masterminds/semver v1.5.0 MIT license
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible MIT license
|
||||
github.com/PuerkitoBio/purell v1.1.1 3-clause BSD license
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 3-clause BSD license
|
||||
github.com/armon/go-metrics v0.3.10 MIT license
|
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d MIT license
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0 Apache License 2.0
|
||||
github.com/cncf/xds/go v0.0.0-20220121163655-4a2b9fdd466b Apache License 2.0
|
||||
github.com/datawire/dlib v1.3.0 Apache License 2.0
|
||||
github.com/datawire/dtest v0.0.0-20210928162311-722b199c4c2f Apache License 2.0
|
||||
github.com/datawire/go-mkopensource v0.0.7 Apache License 2.0
|
||||
github.com/davecgh/go-spew v1.1.1 ISC license
|
||||
github.com/docker/distribution v2.8.1+incompatible Apache License 2.0
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.7 Apache License 2.0
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible 3-clause BSD license
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f MIT license
|
||||
github.com/fatih/camelcase v1.0.0 MIT license
|
||||
github.com/fatih/color v1.13.0 MIT license
|
||||
github.com/fsnotify/fsnotify v1.6.0 3-clause BSD license
|
||||
github.com/go-errors/errors v1.4.2 MIT license
|
||||
github.com/go-logr/logr v0.4.0 Apache License 2.0
|
||||
github.com/go-openapi/jsonpointer v0.19.5 Apache License 2.0
|
||||
github.com/go-openapi/jsonreference v0.19.6 Apache License 2.0
|
||||
github.com/go-openapi/spec v0.20.4 Apache License 2.0
|
||||
github.com/go-openapi/swag v0.21.1 Apache License 2.0
|
||||
github.com/gobuffalo/flect v0.2.3 MIT license
|
||||
github.com/gogo/protobuf v1.3.2 3-clause BSD license
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0 MIT license
|
||||
github.com/golang/protobuf v1.5.2 3-clause BSD license
|
||||
github.com/google/btree v1.0.1 Apache License 2.0
|
||||
github.com/google/go-cmp v0.5.8 3-clause BSD license
|
||||
github.com/google/gofuzz v1.2.0 Apache License 2.0
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 Apache License 2.0
|
||||
github.com/google/uuid v1.3.0 3-clause BSD license
|
||||
github.com/googleapis/gnostic v0.5.5 Apache License 2.0
|
||||
github.com/gorilla/websocket v1.5.0 2-clause BSD license
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 MIT license
|
||||
github.com/hashicorp/consul/api v1.12.0 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-hclog v1.1.0 MIT license
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 Mozilla Public License 2.0
|
||||
github.com/hashicorp/golang-lru v0.5.4 Mozilla Public License 2.0
|
||||
github.com/hashicorp/serf v0.9.7 Mozilla Public License 2.0
|
||||
github.com/huandu/xstrings v1.3.2 MIT license
|
||||
github.com/imdario/mergo v0.3.12 3-clause BSD license
|
||||
github.com/inconshreveable/mousetrap v1.0.0 Apache License 2.0
|
||||
github.com/josharian/intern v1.0.1-0.20211109044230-42b52b674af5 MIT license
|
||||
github.com/json-iterator/go v1.1.12 MIT license
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 MIT license
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de 3-clause BSD license
|
||||
github.com/mailru/easyjson v0.7.7 MIT license
|
||||
github.com/mattn/go-colorable v0.1.12 MIT license
|
||||
github.com/mattn/go-isatty v0.0.14 MIT license
|
||||
github.com/mitchellh/copystructure v1.2.0 MIT license
|
||||
github.com/mitchellh/go-homedir v1.1.0 MIT license
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 MIT license
|
||||
github.com/mitchellh/mapstructure v1.4.3 MIT license
|
||||
github.com/mitchellh/reflectwalk v1.0.2 MIT license
|
||||
github.com/moby/spdystream v0.2.0 Apache License 2.0
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 Apache License 2.0
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd Apache License 2.0
|
||||
github.com/modern-go/reflect2 v1.0.2 Apache License 2.0
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 MIT license
|
||||
github.com/opencontainers/go-digest v1.0.0 Apache License 2.0
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible MIT license
|
||||
github.com/pkg/errors v0.9.1 2-clause BSD license
|
||||
github.com/pmezard/go-difflib v1.0.0 3-clause BSD license
|
||||
github.com/prometheus/client_model v0.2.0 Apache License 2.0
|
||||
github.com/russross/blackfriday v1.6.0 2-clause BSD license
|
||||
github.com/sirupsen/logrus v1.9.0 MIT license
|
||||
github.com/spf13/cobra v1.5.0 Apache License 2.0
|
||||
github.com/spf13/pflag v1.0.5 3-clause BSD license
|
||||
github.com/stretchr/testify v1.8.1 MIT license
|
||||
github.com/xlab/treeprint v1.1.0 MIT license
|
||||
go.opentelemetry.io/proto/otlp v0.18.0 Apache License 2.0
|
||||
go.starlark.net v0.0.0-20220203230714-bb14e151c28f 3-clause BSD license
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa 3-clause BSD license
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 3-clause BSD license
|
||||
golang.org/x/net v0.7.0 3-clause BSD license
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 3-clause BSD license
|
||||
golang.org/x/sys v0.5.0 3-clause BSD license
|
||||
golang.org/x/term v0.5.0 3-clause BSD license
|
||||
golang.org/x/text v0.7.0 3-clause BSD license
|
||||
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 3-clause BSD license
|
||||
golang.org/x/tools v0.1.12 3-clause BSD license
|
||||
google.golang.org/appengine v1.6.7 Apache License 2.0
|
||||
google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e Apache License 2.0
|
||||
google.golang.org/grpc v1.44.0 Apache License 2.0
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 Apache License 2.0
|
||||
google.golang.org/protobuf v1.28.1 3-clause BSD license
|
||||
gopkg.in/inf.v0 v0.9.1 3-clause BSD license
|
||||
gopkg.in/yaml.v2 v2.4.0 Apache License 2.0, MIT license
|
||||
gopkg.in/yaml.v3 v3.0.1 Apache License 2.0, MIT license
|
||||
k8s.io/api v0.21.9 Apache License 2.0
|
||||
k8s.io/apiextensions-apiserver v0.21.9 Apache License 2.0
|
||||
k8s.io/apimachinery v0.21.9 3-clause BSD license, Apache License 2.0
|
||||
k8s.io/apiserver v0.21.9 Apache License 2.0
|
||||
k8s.io/cli-runtime v0.21.9 Apache License 2.0
|
||||
k8s.io/client-go v0.21.9 3-clause BSD license, Apache License 2.0
|
||||
github.com/emissary-ingress/code-generator (modified from k8s.io/code-generator) v0.21.10-rc.0.0.20220204004229-4708b255a33a Apache License 2.0
|
||||
k8s.io/component-base v0.21.9 Apache License 2.0
|
||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 Apache License 2.0
|
||||
k8s.io/klog/v2 v2.10.0 Apache License 2.0
|
||||
k8s.io/kube-openapi v0.0.0-20211110012726-3cc51fd1e909 Apache License 2.0
|
||||
k8s.io/kubectl v0.21.9 Apache License 2.0
|
||||
k8s.io/kubernetes v1.21.9 Apache License 2.0
|
||||
k8s.io/metrics v0.21.9 Apache License 2.0
|
||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176 3-clause BSD license, Apache License 2.0
|
||||
sigs.k8s.io/controller-runtime v0.9.7 Apache License 2.0
|
||||
github.com/emissary-ingress/controller-tools (modified from sigs.k8s.io/controller-tools) v0.6.3-0.20220204053320-db507acbb466 Apache License 2.0
|
||||
sigs.k8s.io/gateway-api v0.2.0 Apache License 2.0
|
||||
sigs.k8s.io/kustomize/api v0.8.8 Apache License 2.0
|
||||
sigs.k8s.io/kustomize/kyaml v0.10.17 Apache License 2.0, MIT license
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 Apache License 2.0
|
||||
sigs.k8s.io/yaml v1.3.0 3-clause BSD license, MIT license
|
||||
Name Version License(s)
|
||||
---- ------- ----------
|
||||
the Go language standard library ("std") v1.23.3 3-clause BSD license
|
||||
cel.dev/expr v0.19.2 Apache License 2.0
|
||||
dario.cat/mergo v1.0.1 3-clause BSD license
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c MIT license
|
||||
github.com/MakeNowJust/heredoc v1.0.0 MIT license
|
||||
github.com/Masterminds/goutils v1.1.1 Apache License 2.0
|
||||
github.com/Masterminds/semver v1.5.0 MIT license
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible MIT license
|
||||
github.com/Microsoft/go-winio v0.6.2 MIT license
|
||||
github.com/ProtonMail/go-crypto v1.1.5 3-clause BSD license
|
||||
github.com/antlr4-go/antlr/v4 v4.13.1 3-clause BSD license
|
||||
github.com/armon/go-metrics v0.4.1 MIT license
|
||||
github.com/beorn7/perks v1.0.1 MIT license
|
||||
github.com/blang/semver/v4 v4.0.0 MIT license
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 MIT license
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 Apache License 2.0
|
||||
github.com/cespare/xxhash/v2 v2.3.0 MIT license
|
||||
github.com/chai2010/gettext-go v1.0.3 3-clause BSD license
|
||||
github.com/cloudflare/circl v1.6.0 3-clause BSD license
|
||||
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 Apache License 2.0
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 3-clause BSD license
|
||||
github.com/datawire/dlib v1.3.1 Apache License 2.0
|
||||
github.com/datawire/dtest v0.0.0-20210928162311-722b199c4c2f Apache License 2.0
|
||||
github.com/LukeShu/go-mkopensource (modified from github.com/datawire/go-mkopensource) v0.0.0-20250206080114-4ff6b660d8d4 Apache License 2.0
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ISC license
|
||||
github.com/distribution/reference v0.6.0 Apache License 2.0
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 MIT license
|
||||
github.com/emirpasic/gods v1.18.1 2-clause BSD license, ISC license
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 Apache License 2.0
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 3-clause BSD license
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f MIT license
|
||||
github.com/fatih/camelcase v1.0.0 MIT license
|
||||
github.com/fatih/color v1.18.0 MIT license
|
||||
github.com/fsnotify/fsnotify v1.8.0 3-clause BSD license
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 MIT license
|
||||
github.com/go-errors/errors v1.5.1 MIT license
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 3-clause BSD license
|
||||
github.com/go-git/go-billy/v5 v5.6.2 Apache License 2.0
|
||||
github.com/go-git/go-git/v5 v5.13.2 Apache License 2.0
|
||||
github.com/go-logr/logr v1.4.2 Apache License 2.0
|
||||
github.com/go-logr/zapr v1.3.0 Apache License 2.0
|
||||
github.com/go-openapi/jsonpointer v0.21.0 Apache License 2.0
|
||||
github.com/go-openapi/jsonreference v0.21.0 Apache License 2.0
|
||||
github.com/go-openapi/swag v0.23.0 Apache License 2.0
|
||||
github.com/gobuffalo/flect v1.0.3 MIT license
|
||||
github.com/gogo/protobuf v1.3.2 3-clause BSD license
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 Apache License 2.0
|
||||
github.com/golang/protobuf v1.5.4 3-clause BSD license
|
||||
github.com/google/btree v1.1.3 Apache License 2.0
|
||||
github.com/google/cel-go v0.23.2 3-clause BSD license, Apache License 2.0
|
||||
github.com/google/gnostic-models v0.6.9 Apache License 2.0
|
||||
github.com/google/go-cmp v0.6.0 3-clause BSD license
|
||||
github.com/google/gofuzz v1.2.0 Apache License 2.0
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 Apache License 2.0
|
||||
github.com/google/uuid v1.6.0 3-clause BSD license
|
||||
github.com/gorilla/websocket v1.5.3 2-clause BSD license
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 MIT license
|
||||
github.com/hashicorp/consul/api v1.31.0 Mozilla Public License 2.0
|
||||
github.com/hashicorp/errwrap v1.1.0 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-hclog v1.6.3 MIT license
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-metrics v0.5.4 MIT license
|
||||
github.com/hashicorp/go-multierror v1.1.1 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 Mozilla Public License 2.0
|
||||
github.com/hashicorp/golang-lru v1.0.2 Mozilla Public License 2.0
|
||||
github.com/hashicorp/hcl v1.0.0 Mozilla Public License 2.0
|
||||
github.com/hashicorp/serf v0.10.2 Mozilla Public License 2.0
|
||||
github.com/huandu/xstrings v1.5.0 MIT license
|
||||
github.com/imdario/mergo v0.3.16 3-clause BSD license
|
||||
github.com/inconshreveable/mousetrap v1.1.0 Apache License 2.0
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 MIT license
|
||||
github.com/josharian/intern v1.0.1-0.20211109044230-42b52b674af5 MIT license
|
||||
github.com/json-iterator/go v1.1.12 MIT license
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 MIT license
|
||||
github.com/kevinburke/ssh_config v1.2.0 MIT license
|
||||
github.com/klauspost/compress v1.17.11 3-clause BSD license, Apache License 2.0, MIT license
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de 3-clause BSD license
|
||||
github.com/magiconair/properties v1.8.9 2-clause BSD license
|
||||
github.com/mailru/easyjson v0.9.0 MIT license
|
||||
github.com/mattn/go-colorable v0.1.14 MIT license
|
||||
github.com/mattn/go-isatty v0.0.20 MIT license
|
||||
github.com/mitchellh/copystructure v1.2.0 MIT license
|
||||
github.com/mitchellh/go-homedir v1.1.0 MIT license
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 MIT license
|
||||
github.com/mitchellh/mapstructure v1.5.0 MIT license
|
||||
github.com/mitchellh/reflectwalk v1.0.2 MIT license
|
||||
github.com/moby/spdystream v0.5.0 Apache License 2.0
|
||||
github.com/moby/term v0.5.2 Apache License 2.0
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd Apache License 2.0
|
||||
github.com/modern-go/reflect2 v1.0.2 Apache License 2.0
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 MIT license
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 3-clause BSD license
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f 3-clause BSD license
|
||||
github.com/opencontainers/go-digest v1.0.0 Apache License 2.0
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 MIT license
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible MIT license
|
||||
github.com/pjbgf/sha1cd v0.3.2 Apache License 2.0
|
||||
github.com/pkg/errors v0.9.1 2-clause BSD license
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 3-clause BSD license
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 3-clause BSD license
|
||||
github.com/prometheus/client_golang v1.20.5 3-clause BSD license, Apache License 2.0
|
||||
github.com/prometheus/client_model v0.6.1 Apache License 2.0
|
||||
github.com/prometheus/common v0.62.0 Apache License 2.0
|
||||
github.com/prometheus/procfs v0.15.1 Apache License 2.0
|
||||
github.com/russross/blackfriday/v2 v2.1.0 2-clause BSD license
|
||||
github.com/sagikazarmark/locafero v0.7.0 MIT license
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 3-clause BSD license
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 MIT license
|
||||
github.com/sirupsen/logrus v1.9.3 MIT license
|
||||
github.com/skeema/knownhosts v1.3.1 Apache License 2.0
|
||||
github.com/sourcegraph/conc v0.3.0 MIT license
|
||||
github.com/spf13/afero v1.12.0 Apache License 2.0
|
||||
github.com/spf13/cast v1.7.1 MIT license
|
||||
github.com/spf13/cobra v1.8.1 Apache License 2.0
|
||||
github.com/spf13/pflag v1.0.6 3-clause BSD license
|
||||
github.com/spf13/viper v1.19.0 MIT license
|
||||
github.com/stoewer/go-strcase v1.3.0 MIT license
|
||||
github.com/stretchr/testify v1.10.0 MIT license
|
||||
github.com/subosito/gotenv v1.6.0 MIT license
|
||||
github.com/vladimirvivien/gexe v0.4.1 MIT license
|
||||
github.com/x448/float16 v0.8.4 MIT license
|
||||
github.com/xanzy/ssh-agent v0.3.3 Apache License 2.0
|
||||
github.com/xlab/treeprint v1.2.0 MIT license
|
||||
go.opentelemetry.io/otel v1.34.0 Apache License 2.0
|
||||
go.opentelemetry.io/otel/trace v1.34.0 Apache License 2.0
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 Apache License 2.0
|
||||
go.uber.org/goleak v1.3.0 MIT license
|
||||
go.uber.org/multierr v1.11.0 MIT license
|
||||
go.uber.org/zap v1.27.0 MIT license
|
||||
golang.org/x/crypto v0.32.0 3-clause BSD license
|
||||
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c 3-clause BSD license
|
||||
golang.org/x/mod v0.23.0 3-clause BSD license
|
||||
golang.org/x/net v0.34.0 3-clause BSD license
|
||||
golang.org/x/oauth2 v0.26.0 3-clause BSD license
|
||||
golang.org/x/sync v0.11.0 3-clause BSD license
|
||||
golang.org/x/sys v0.30.0 3-clause BSD license
|
||||
golang.org/x/term v0.29.0 3-clause BSD license
|
||||
golang.org/x/text v0.22.0 3-clause BSD license
|
||||
golang.org/x/time v0.10.0 3-clause BSD license
|
||||
golang.org/x/tools v0.29.0 3-clause BSD license
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 Apache License 2.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250204164813-702378808489 Apache License 2.0
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489 Apache License 2.0
|
||||
google.golang.org/grpc v1.70.0 Apache License 2.0
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 Apache License 2.0
|
||||
google.golang.org/protobuf v1.36.4 3-clause BSD license
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 3-clause BSD license
|
||||
gopkg.in/inf.v0 v0.9.1 3-clause BSD license
|
||||
gopkg.in/ini.v1 v1.67.0 Apache License 2.0
|
||||
gopkg.in/warnings.v0 v0.1.2 2-clause BSD license
|
||||
gopkg.in/yaml.v2 v2.4.0 Apache License 2.0, MIT license
|
||||
gopkg.in/yaml.v3 v3.0.1 Apache License 2.0, MIT license
|
||||
k8s.io/api v0.32.1 Apache License 2.0
|
||||
k8s.io/apiextensions-apiserver v0.32.1 Apache License 2.0
|
||||
k8s.io/apimachinery v0.32.1 3-clause BSD license, Apache License 2.0
|
||||
k8s.io/apiserver v0.32.1 Apache License 2.0
|
||||
k8s.io/cli-runtime v0.32.1 Apache License 2.0
|
||||
k8s.io/client-go v0.32.1 3-clause BSD license, Apache License 2.0
|
||||
github.com/emissary-ingress/code-generator (modified from k8s.io/code-generator) v0.32.2-0.20250205235421-4d5bf4656f71 Apache License 2.0
|
||||
k8s.io/component-base v0.32.1 Apache License 2.0
|
||||
k8s.io/component-helpers v0.32.1 Apache License 2.0
|
||||
k8s.io/controller-manager v0.32.1 Apache License 2.0
|
||||
k8s.io/gengo/v2 v2.0.0-20250130153323-76c5745d3511 Apache License 2.0
|
||||
k8s.io/klog/v2 v2.130.1 Apache License 2.0
|
||||
k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 3-clause BSD license, Apache License 2.0, MIT license
|
||||
k8s.io/kubectl v0.32.1 Apache License 2.0
|
||||
k8s.io/kubernetes v1.32.1 Apache License 2.0
|
||||
k8s.io/metrics v0.32.1 Apache License 2.0
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 3-clause BSD license, Apache License 2.0
|
||||
sigs.k8s.io/controller-runtime v0.20.1 Apache License 2.0
|
||||
sigs.k8s.io/controller-tools v0.17.1 Apache License 2.0
|
||||
sigs.k8s.io/e2e-framework v0.6.0 Apache License 2.0
|
||||
sigs.k8s.io/gateway-api v0.2.0 Apache License 2.0
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 3-clause BSD license, Apache License 2.0
|
||||
sigs.k8s.io/kustomize/api v0.19.0 Apache License 2.0
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0 Apache License 2.0
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.5.0 Apache License 2.0
|
||||
sigs.k8s.io/yaml v1.4.0 3-clause BSD license, Apache License 2.0, MIT license
|
||||
|
||||
The Emissary-ingress Python code makes use of the following Free and Open Source
|
||||
libraries:
|
||||
|
||||
Name Version License(s)
|
||||
---- ------- ----------
|
||||
Cython 0.29.32 Apache License 2.0
|
||||
Flask 2.2.2 3-clause BSD license
|
||||
Jinja2 3.1.2 3-clause BSD license
|
||||
MarkupSafe 2.1.1 3-clause BSD license
|
||||
PyYAML 6.0 MIT license
|
||||
Werkzeug 2.2.3 3-clause BSD license
|
||||
build 0.9.0 MIT license
|
||||
cachetools 5.3.0 MIT license
|
||||
certifi 2022.9.24 Mozilla Public License 2.0
|
||||
charset-normalizer 2.1.1 MIT license
|
||||
click 8.1.3 3-clause BSD license
|
||||
dpath 2.1.4 MIT license
|
||||
durationpy 0.5 MIT license
|
||||
expiringdict 1.2.2 Apache License 2.0
|
||||
google-auth 2.16.1 Apache License 2.0
|
||||
gunicorn 20.1.0 MIT license
|
||||
idna 3.4 3-clause BSD license
|
||||
itsdangerous 2.1.2 3-clause BSD license
|
||||
jsonpatch 1.32 3-clause BSD license
|
||||
jsonpointer 2.3 3-clause BSD license
|
||||
kubernetes 21.7.0 Apache License 2.0
|
||||
oauthlib 3.2.2 3-clause BSD license
|
||||
orjson 3.6.6 Apache License 2.0, MIT license
|
||||
packaging 21.3 2-clause BSD license, Apache License 2.0
|
||||
pep517 0.13.0 MIT license
|
||||
pip-tools 6.12.1 3-clause BSD license
|
||||
prometheus-client 0.15.0 Apache License 2.0
|
||||
pyasn1 0.4.8 2-clause BSD license
|
||||
pyasn1-modules 0.2.8 2-clause BSD license
|
||||
pyparsing 3.0.9 MIT license
|
||||
python-dateutil 2.8.2 3-clause BSD license, Apache License 2.0
|
||||
python-json-logger 2.0.4 2-clause BSD license
|
||||
requests 2.28.1 Apache License 2.0
|
||||
requests-oauthlib 1.3.1 ISC license
|
||||
retrying 1.3.3 Apache License 2.0
|
||||
rsa 4.9 Apache License 2.0
|
||||
semantic-version 2.10.0 2-clause BSD license
|
||||
six 1.16.0 MIT license
|
||||
tomli 2.0.1 MIT license
|
||||
typing_extensions 4.4.0 Python Software Foundation license
|
||||
urllib3 1.26.13 MIT license
|
||||
websocket-client 1.4.2 Apache License 2.0
|
||||
Name Version License(s)
|
||||
---- ------- ----------
|
||||
Cython 0.29.37 Apache License 2.0
|
||||
Flask 3.1.0 3-clause BSD license
|
||||
Jinja2 3.1.6 3-clause BSD license
|
||||
MarkupSafe 3.0.2 2-clause BSD license
|
||||
PyYAML 6.0.1 MIT license
|
||||
Werkzeug 3.1.3 3-clause BSD license
|
||||
blinker 1.9.0 MIT license
|
||||
build 1.2.2.post1 MIT license
|
||||
certifi 2025.1.31 Mozilla Public License 2.0
|
||||
charset-normalizer 3.4.1 MIT license
|
||||
click 8.1.8 3-clause BSD license
|
||||
durationpy 0.9 MIT license
|
||||
expiringdict 1.2.2 Apache License 2.0
|
||||
gunicorn 23.0.0 MIT license
|
||||
idna 3.10 3-clause BSD license
|
||||
itsdangerous 2.2.0 3-clause BSD license
|
||||
jsonpatch 1.33 3-clause BSD license
|
||||
jsonpointer 3.0.0 3-clause BSD license
|
||||
orjson 3.10.15 Apache License 2.0, MIT license
|
||||
packaging 23.1 2-clause BSD license, Apache License 2.0
|
||||
pip-tools 7.3.0 3-clause BSD license
|
||||
prometheus_client 0.21.1 Apache License 2.0
|
||||
pyparsing 3.0.9 MIT license
|
||||
pyproject_hooks 1.2.0 MIT license
|
||||
python-json-logger 3.2.1 2-clause BSD license
|
||||
requests 2.32.3 Apache License 2.0
|
||||
semantic-version 2.10.0 2-clause BSD license
|
||||
typing_extensions 4.12.2 Python Software Foundation license
|
||||
urllib3 2.3.0 MIT license
|
||||
|
|
|
@ -1,3 +1,224 @@
|
|||
# Emissary-Ingress Architecture
|
||||
|
||||
WIP - we are working to get this updated. Check back soon!
|
||||
In this document you will find information about the internal design and architecture of the Emissary-ingress (formerly known as Ambassador API Gateway). Emissary-ingress provides a Kubernetes-native load balancer, API gateway and ingress controller that is built on top of [Envoy Proxy](https://www.envoyproxy.io).
|
||||
|
||||
> Looking for end user guides for Emissary-ingress? You can check out the end user guides at <https://www.getambassador.io/docs/emissary/>.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Overview](#overview)
|
||||
- [Custom Resource Definitions (CRD)](#custom-resource-definitions-crd)
|
||||
- [Apiext](#apiext)
|
||||
- [Additional Reading](#additional-reading)
|
||||
- [Emissary-ingress Container](#emissary-ingress-container)
|
||||
- [Startup and Busyambassador](#startup-and-busyambassador)
|
||||
- [Entrypoint](#entrypoint)
|
||||
- [Watch All The Things (Watt)](#watch-all-the-things-watt)
|
||||
- [Diagd](#diagd)
|
||||
- [Ambex](#ambex)
|
||||
- [Envoy](#envoy)
|
||||
- [Testing Components](#testing-components)
|
||||
- [kat-client](#kat-client)
|
||||
- [kat-server](#kat-server)
|
||||
|
||||
## Overview
|
||||
|
||||
Emissary-ingress is a Kubernetes native API Gateway built on top of Envoy Proxy. We utilize Kubernetes CRDs to provide an expressive API to configure Envoy Proxy to handle routing traffic into your cluster.
|
||||
|
||||
Check [this blog post](https://blog.getambassador.io/building-ambassador-an-open-source-api-gateway-on-kubernetes-and-envoy-ed01ed520844) for additional context around the motivations and architecture decisions made for Emissary-ingress.
|
||||
|
||||
At the core of Emissary-ingress is Envoy Proxy which has very extensive configuration and extensions points. Getting this right can be challenging so Emissary-ingress provides Kubernetes Administrators and Developers a cloud-native way to configure Envoy using declarative yaml files. Here are the core components of Emissary-Ingress:
|
||||
|
||||
- CRDs - extend K8s to enable Emissary-ingress's abstractions (*generated yaml*)
|
||||
- Apiext - A server that implements the Webhook Conversion interface for CRD's (**own container**)
|
||||
- Diagd - provides diagnostic ui, translates snapshots/ir into envoy configuration (*in-process*)
|
||||
- Ambex - gRPC server implementation of envoy xDS for dynamic envoy configration (*in-process*)
|
||||
- Envoy Proxy - Proxy that handles routing all user traffic (*in-process*)
|
||||
- Ambassador Agent - provides connectivity between cluster and Ambassador Cloud. (*own container*)
|
||||
|
||||
## Custom Resource Definitions (CRD)
|
||||
|
||||
Kubernetes allows extending its API through the use of [Custom Resource Definitions](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) (aka CRDs) which allow solutions like Emissary-ingress to add custom resources to K8s and allow developers to treat them like any other K8s resource. CRDs provide validation, strong typing, structured data, versioning and are persisted in `etcd` along with the core Kubernetes resources.
|
||||
|
||||
Emissary-ingress provides a set of CRD's that are applied to a cluster and then are watched by Emissary-ingress. Emissary-ingress then uses the data from these CRD's along with the standard K8s resources (services, endpoints, etc...) to dynamically generate Envoy Proxy configuration. Depending on the version of Emissary-ingress there might be multiple versions of the CRD's that are supported.
|
||||
|
||||
You can read the user documentation (see additional reading below) to find out more about all the various CRDs that are used and how to configure them. For understanding how they are defined you can take a look in the `pkg/getambassador.io/*` directory. In this directory, you will find a directory per version of the CRDs and for each version you will see the `Golang` structs that define the data structures that are used for each of the Emissary-ingress custom resources. It's recommended to read the `doc.go` file for information about API guidelines followed and how the comment markers are used by the build system.
|
||||
|
||||
The build system (`make`) uses [controller-gen](https://book.kubebuilder.io/reference/controller-gen.html) to generate the required YAML representation for the custom resources that can be found at `pkg/getambassador.io/crds.yaml`. This file is auto-generated and checked into the repository. This is the file that is applied to a cluster extending the Kubernetes API. If any changes are made to the custom resources then it needs to be re-generated and checked-in as part of your PR. Running `make generate` will trigger the generation of this file and other generated files (`protobufs`) that are checked into the repository as well. If you want to see more about the build process take a look at `build-aux/generate.mk`.
|
||||
|
||||
> **Annotations**: K8s allows developers to provide Annotations as well on the standard K8s Resources (Services, Ingress, etc...). Annotations were the preferred method of configuring early versions of Emissary-ingress, but annotations did not provide validation and can be error prone. With the introduction of CRD's, those are now the preferred method and annotations are only supported for backwards compatibility. We won't discuss the annotations much here due to this but rather are making you aware that they exist.
|
||||
|
||||
### Apiext
|
||||
|
||||
Kubernetes provides the ability to have multiple versions of Custom Resources similar to the core K8s resources but it is only capable of having a single `storage` version that is persisted in `etcd`. Custom Resource Definitions can define a `ConversionWebHook` that Kubernetes will call whenever it receives a version that is not the storage version.
|
||||
|
||||
You can check the current storage version by looking at `pkg/getambassador.io/crds.yaml` and searching for the `storage: true` field and seeing which version is the storage version of the custom resource (*at the time of writing this it is `v2`*).
|
||||
|
||||
The `apiext` container is the Emissary-ingress's server implementation for the conversion webhook that is registered with our custom resources. Each custom resource will have a section similar to the following in the `pkg/getambassador.io/crds.yaml`:
|
||||
|
||||
```yaml
|
||||
conversion:
|
||||
strategy: Webhook
|
||||
webhook:
|
||||
clientConfig:
|
||||
service:
|
||||
name: emissary-apiext
|
||||
namespace: emissary-system
|
||||
conversionReviewVersions:
|
||||
- v1
|
||||
```
|
||||
|
||||
This is telling the Kubernetes API Server to call a WebHook using a `Service` within the cluster that is called `emissary-apiext` that can be found in the `emissary-system` namespace. It also states that our server implementation supports the `v1` version of the WebHook protocol so the K8s API Server will send the request and expect the response in the format for `v1`.
|
||||
|
||||
The implementation of the `apiext` server can be found in `cmd/apiext` and it leverages the [controller-runtime](https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2) library which is vendored in `vendor/sigs.k8s.io/controller-runtime`. When this process starts up it will do the following:
|
||||
|
||||
1. Register the Emissary-Ingress CRD schemas using the Go structs described previously
|
||||
2. Ensure a self-signed certificate is generated that our server can register with for `https`.
|
||||
3. Kick off a Go routine that handles watching our CRD's and enriching the WebHook Conversion section (*outlined in yaml above*) so that it includes our self-signed certs, port and path that the apiext server is listening on.
|
||||
4. Starts up our two servers one for container liveness/readiness probes and one for the WebHook implementation that performs the conversion between CRD versions.
|
||||
|
||||
### Additional Reading
|
||||
|
||||
- [Ambassador Labs Docs - Custom Resources](https://www.getambassador.io/docs/emissary/latest/topics/running/host-crd/)
|
||||
- [Ambassador Labs Docs - Declarative Configuration](https://www.getambassador.io/docs/emissary/latest/topics/concepts/gitops-continuous-delivery/#policies-declarative-configuration-and-custom-resource-definitions)
|
||||
- [K8s Docs - Custom Resource Definition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)
|
||||
- [K8s Docs - Version CRD's](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/)
|
||||
- [K8s Docs - Webhook Conversion](<https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion>)
|
||||
|
||||
## Emissary-ingress Container
|
||||
|
||||
One of the major goals of the Emissary-ingress is to simplify the deployment of Envoy Proxy in a cloud-native friendly way using containers and declarative CRD's. To honor this goal Emissary-ingress is packaged up into a single image with all the necessary components.
|
||||
|
||||
This section will give a high-level overview of each of these components and will help provide you direction on where you can find more information on each of the components.
|
||||
|
||||
### Startup and Busyambassador
|
||||
|
||||
Emissary-ingress has evolved over many years, many contributors and many versions of Kubernetes which has led to the internal components being implemented in different programming languages. Some of the components are pre-built binaries like envoy, first-party python programs and first-party golang binaries. To provide a single entrypoint for the container startup the Golang binary called `busyambassador` was introduced.
|
||||
|
||||
/buildroot/ambassador/python/entrypoint.sh
|
||||
|
||||
The `busyambassador` binary provides a busybox like interface that dispatches the CMD's that are provided to a container for the various configured Golang binaries. This enables a single image to support multiple binaries on startup that are declaratively set within a `deployment` in the `command` field when setting the image for a deployment. An example of this can be seen in the `ambassador-agent` deployment.
|
||||
|
||||
The image takes advantage of the `ENTRYPOINT` and `CMD` fields within a docker image manifest. You can see this in `builder/Dockerfile` in the final optimized image on the last line there is `ENTRYPOINT [ "bash", "/buildroot/ambassador/python/entrypoint.sh" ]`. This entrypoint cannot be overridden by the user and will run that bash script. By default the bash script will run the `entrypoint` binary which will be discussed in the next section but if passed a known binary name such as the `agent` example then `busyambassador` will run the correct command.
|
||||
|
||||
To learn more about `busyambassador` the code can be found:
|
||||
|
||||
- `cmd/busyambassador`
|
||||
- `pkg/busy`
|
||||
|
||||
> Note: the bash script will just exec into the `busyambassador` Golang binary in most cases and is still around for historical reasons and advanced debugging scenarios.
|
||||
|
||||
> Additional Reading: If you want to know more about how containers work with entrypoint and commands then take a look at this blogpost. <https://www.bmc.com/blogs/docker-cmd-vs-entrypoint/>
|
||||
|
||||
### Entrypoint
|
||||
|
||||
The `entrypoint` Golang binary is the default binary that `busyambassador` will run on container startup. It is the parent process for all the other processes that are run within the single `ambassador` image for Emissary-Ingress. At a high-level it starts and manages multiple go-routines, starts other child processes such as `diagd` (python program) and `envoy` (c++ compiled binary).
|
||||
|
||||
Here is a list of everything managed by the `entrypoint` binary. Each one is indicated by whether its a child OS process that is started or a goroutine (*note: some of the OS processes are started/managed in goroutines but the core logic resides within the child process thus they are marked as such*).
|
||||
|
||||
| Description | Goroutine | OS.Exec |
|
||||
| ------------------------------------------------------------------------- | :----------------: | :----------------: |
|
||||
| `diagd` - admin ui & config processor | | :white_check_mark: |
|
||||
| `ambex` - the Envoy ADS Server | :white_check_mark: | |
|
||||
| `envoy` - proxy routing data | | :white_check_mark: |
|
||||
| SnapshotServer - expose in-memory snapshot over localhost | :white_check_mark: | |
|
||||
| ExternalSnapshotServer - Ambassador Cloud friendly exposed over localhost | :white_check_mark: | |
|
||||
| HealthCheck - endpoints for K8s liveness/readiness probes | :white_check_mark: | |
|
||||
| Watt - Watch k8s, consul & files for cluster changes | :white_check_mark: | |
|
||||
| Sidecar Processes - start various side car processes | | :white_check_mark: |
|
||||
|
||||
Some of these items will be discussed in more detail but the best places to get started looking at the `entrypoint` is by looking at `cmd/entrypoint/entrypoint.go`.
|
||||
|
||||
> To see how the container passes `entrypoint` as the default binary to run on container startup you can look at `python/entrypoint.sh` where it calls `exec busyambassador entrypoint "$@"` which will drop the shell process and will run the entrypoint process via busyambassador.
|
||||
|
||||
#### Watch All The Things (Watt)
|
||||
|
||||
Watch All The Things (aka Watt) is tasked with watching a lot of things, hence the name :smile:. Specifically, its job is to watch for changes in the K8s Cluster and potentially Consul and file system changes. Watt is the beginning point for the end-to-end data flow from developer applying the configuration to envoy being configured. You can find the code for this in the `cmd/entrypoint/watcher.go` file.
|
||||
|
||||
The watching of the K8s Cluster changes is where Emissary-ingress will get most of its configuration by looking for K8s Resources (e.g. services, ingresses, etc...) as well as the Emissary-ingress CRD Resources (e.g. Host, Mapping, Listeners, etc...). A `consulWatcher` will be started if a user has configured a Mapping to use the `ConsulResolver`. You can find this code in `cmd/entrypoint/consul.go`. The filesystem is also watched for changes to support `istio` and how it mounts certificates to the filesystem.
|
||||
|
||||
Here is the general flow:
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
subgraph watchers
|
||||
K8s & consulW(Consul) & file(FileSystem)
|
||||
K8s -.ConsulResolver.-> consulW
|
||||
K8s & consulW(Consul) & file(FileSystem) --> sgen
|
||||
end
|
||||
API[K8s API] --> K8s
|
||||
consul[Consul Manager] --> consulW
|
||||
istio[Pod Filesystem] --> file
|
||||
|
||||
sgen("Snapshot \n Generation") --> update("Update Snapshot \n(in-memory)") --> diagd(Notify diagD);
|
||||
update -.isEdgeStack?.-> AES(Notify AES)
|
||||
```
|
||||
|
||||
## Diagd
|
||||
|
||||
Provides two main functions:
|
||||
|
||||
1. A Diagnostic Admin UI for viewing the current state of Emissary-Ingress
|
||||
2. Processing Cluster changes into Envoy ready configuration
|
||||
1. This process has all the steps I'm outlining below
|
||||
|
||||
- receives "CONFIG" event and pushes on queue
|
||||
- event queue loop listens for commands and pops them off
|
||||
- on CONFIG event it calls back to emissary Snapshot Server to grab current snapshot stored in-memory
|
||||
- It is serialized and stored in `/ambassador/snapshots/snapshot-tmp.yaml`.
|
||||
- A SecretHandler and Config is initialized
|
||||
- A ResourceFetcher (aka, parse the snapshot into an in-memory representation)
|
||||
- Generate IR and envoy configs (load_ir function)
|
||||
- Take each Resource generated in ResourceFetcher and add it to the Config object as strongly typed objects
|
||||
- Store Config Object in `/ambassador/snapshots/aconf-tmp.json`
|
||||
- Check Deltas for Mappings cache and determine if it needs to be reset
|
||||
- Create IR with a Config, Cache, and invalidated items
|
||||
- IR is generated which basically just converts our stuff to strongly typed generic "envoy" items (handling filters, clusters, listeners, removing duplicates, etc...)
|
||||
- IR is updated in-memory for diagd process
|
||||
- IR is persisted to temp storage in `/ambassador/snapshots/ir-tmp.json`
|
||||
- generate envoy config from IR and cache
|
||||
- Split envoy config into bootstrap config, ads_config and clustermap config
|
||||
- Validate econfig
|
||||
- Rotate Snapshots for each of the files `aconf`, `econf`, `ir`, `snapshot` that get persisted in the snapshot path `/ambassador/snapshots`.
|
||||
- Rotating them allows for seeing the history of snapshots up to a limit and then they are dropped
|
||||
- this also renames the `-tmp` files written above into their final (non-`-tmp`) names
|
||||
- Persist bootstrap, envoy ads config and clustermap config to base directory:
|
||||
- `/ambassador/bootstrap-ads.json` # this is used by envoy during startup to initially configure itself and let it know about the static ADS Service
|
||||
- `/ambassador/envoy/envoy.json` # this is used in `ambex` to generate the ADS snapshots along with the fastPath items
|
||||
- `/ambassador/clustermap.json` # this might not be used either...
|
||||
- Notify `envoy` and `ambex` that a new snapshot has been persisted using signal SIGHUP
|
||||
- the Goroutine within `entrypoint` that starts up `envoy` is blocking waiting for this signal to start envoy
|
||||
- the `ambex` process continuously listens for this signal and it triggers a configuration update for ambex.
|
||||
- Update the appropriate status fields with metadata by making calls to the `kubestatus` binary found in `cmd/kubestatus` which handles the communication to the cluster
|
||||
|
||||
## Ambex
|
||||
|
||||
This is the gRPC server implementation of the envoy xDS v2 and v3 APIs.
|
||||
|
||||
- listens for SIGHUP from diagd
|
||||
- converts `envoy.json` into in-memory snapshots that are cached for v2/v3
|
||||
- implements ADS v2/v3 Apis that envoy is configured to listen to
|
||||
|
||||
## Envoy
|
||||
|
||||
We maintain our own [fork](https://github.com/datawire/envoy) of Envoy that includes some additional commits for implementing some features in Emissary-Ingress.
|
||||
|
||||
Envoy does all the heavy-lifting
|
||||
|
||||
- does all routing, filtering, TLS termination, metrics collection, tracing, etc...
|
||||
- It is bootstrapped from the output of diagd
|
||||
- It is dynamically updated using the xDS services and specifically the ADS service
|
||||
- Our implementation of this is `ambex`
|
||||
|
||||
## Testing Components
|
||||
|
||||
TODO: talk about testing performed by kat-client/kat-server.
|
||||
|
||||
### kat-client
|
||||
|
||||
TODO: discuss the purpose of kat-client
|
||||
|
||||
### kat-server
|
||||
|
||||
TODO: discuss the purpose of kat-server
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
Building Ambassador
|
||||
===================
|
||||
|
||||
The content in this document has been moved to [DEVELOPING.md].
|
||||
The content in this document has been moved to [CONTRIBUTING.md].
|
||||
|
|
|
@ -0,0 +1,929 @@
|
|||
# Developing Emissary-ingress
|
||||
|
||||
Welcome to the Emissary-ingress Community!
|
||||
|
||||
Thank you for contributing, we appreciate small and large contributions and look forward to working with you to make Emissary-ingress better.
|
||||
|
||||
This document is intended for developers looking to contribute to the Emissary-ingress project. In this document you will learn how to get your development environment setup and how to contribute to the project. Also, you will find more information about the internal components of Emissary-ingress and other questions about working on the project.
|
||||
|
||||
> Looking for end user guides for Emissary-ingress? You can check out the end user guides at <https://www.getambassador.io/docs/emissary/>.
|
||||
|
||||
After reading this document if you have questions we encourage you to join us on our [Slack channel](https://communityinviter.com/apps/cloud-native/cncf) in the #emissary-ingress channel.
|
||||
|
||||
- [Code of Conduct](../Community/CODE_OF_CONDUCT.md)
|
||||
- [Governance](../Community/GOVERNANCE.md)
|
||||
- [Maintainers](../Community/MAINTAINERS.md)
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Development Setup](#development-setup)
|
||||
- [Step 1: Install Build Dependencies](#step-1-install-build-dependencies)
|
||||
- [Step 2: Clone Project](#step-2-clone-project)
|
||||
- [Step 3: Configuration](#step-3-configuration)
|
||||
- [Step 4: Building](#step-4-building)
|
||||
- [Step 5: Push](#step-5-push)
|
||||
- [Step 6: Deploy](#step-6-deploy)
|
||||
- [Step 7: Dev-loop](#step-7-dev-loop)
|
||||
- [What should I do next?](#what-should-i-do-next)
|
||||
- [Contributing](#contributing)
|
||||
- [Submitting a Pull Request (PR)](#submitting-a-pull-request-pr)
|
||||
- [Pull Request Review Process](#pull-request-review-process)
|
||||
- [Rebasing a branch under review](#rebasing-a-branch-under-review)
|
||||
- [Fixup commits during PR review](#fixup-commits-during-pr-review)
|
||||
- [Development Workflow](#development-workflow)
|
||||
- [Branching Strategy](#branching-strategy)
|
||||
- [Backport Strategy](#backport-strategy)
|
||||
- [What if I need a patch to land in a previous supported version?](#what-if-i-need-a-patch-to-land-in-a-previous-supported-version)
|
||||
- [What if my patch is only for a previous supported version?](#what-if-my-patch-is-only-for-a-previous-supported-version)
|
||||
- [What if I'm still not sure?](#what-if-im-still-not-sure)
|
||||
- [Merge Strategy](#merge-strategy)
|
||||
- [What about merge commit strategy?](#what-about-merge-commit-strategy)
|
||||
- [Contributing to the Docs](#contributing-to-the-docs)
|
||||
- [Advanced Topics](#advanced-topics)
|
||||
- [Running Emissary-ingress internals locally](#running-emissary-ingress-internals-locally)
|
||||
- [Setting up diagd](#setting-up-diagd)
|
||||
- [Changing the ambassador root](#changing-the-ambassador-root)
|
||||
- [Getting envoy](#getting-envoy)
|
||||
- [Shutting up the pod labels error](#shutting-up-the-pod-labels-error)
|
||||
- [Extra credit](#extra-credit)
|
||||
- [Debugging and Developing Envoy Configuration](#debugging-and-developing-envoy-configuration)
|
||||
- [Making changes to Envoy](#making-changes-to-envoy)
|
||||
- [1. Preparing your machine](#1-preparing-your-machine)
|
||||
- [2. Setting up your workspace to hack on Envoy](#2-setting-up-your-workspace-to-hack-on-envoy)
|
||||
- [3. Hacking on Envoy](#3-hacking-on-envoy)
|
||||
- [4. Building and testing your hacked-up Envoy](#4-building-and-testing-your-hacked-up-envoy)
|
||||
- [5. Test Devloop](#5-test-devloop)
|
||||
- [6. Protobuf changes](#6-protobuf-changes)
|
||||
- [7. Finalizing your changes](#7-finalizing-your-changes)
|
||||
- [8. Final Checklist](#8-final-checklist)
|
||||
- [Developing Emissary-ingress (Maintainers-only advice)](#developing-emissary-ingress-maintainers-only-advice)
|
||||
- [Updating license documentation](#updating-license-documentation)
|
||||
- [Upgrading Python dependencies](#upgrading-python-dependencies)
|
||||
- [FAQ](#faq)
|
||||
- [How do I find out what build targets are available?](#how-do-i-find-out-what-build-targets-are-available)
|
||||
- [How do I develop on a Mac with Apple Silicon?](#how-do-i-develop-on-a-mac-with-apple-silicon)
|
||||
- [How do I develop on Windows using WSL?](#how-do-i-develop-on-windows-using-wsl)
|
||||
- [How do I test using a private Docker repository?](#how-do-i-test-using-a-private-docker-repository)
|
||||
- [How do I change the loglevel at runtime?](#how-do-i-change-the-loglevel-at-runtime)
|
||||
- [Can I build from a docker container instead of on my local computer?](#can-i-build-from-a-docker-container-instead-of-on-my-local-computer)
|
||||
- [How do I clear everything out to make sure my build runs like it will in CI?](#how-do-i-clear-everything-out-to-make-sure-my-build-runs-like-it-will-in-ci)
|
||||
- [My editor is changing `go.mod` or `go.sum`, should I commit that?](#my-editor-is-changing-gomod-or-gosum-should-i-commit-that)
|
||||
- [How do I debug "This should not happen in CI" errors?](#how-do-i-debug-this-should-not-happen-in-ci-errors)
|
||||
- [How do I run Emissary-ingress tests?](#how-do-i-run-emissary-ingress-tests)
|
||||
- [How do I type check my python code?](#how-do-i-type-check-my-python-code)
|
||||
|
||||
## Development Setup
|
||||
|
||||
This section provides the steps for getting started developing on Emissary-ingress. There are a number of prerequisites that need to be setup. In general, our tooling tries to detect any missing requirements and provide a friendly error message. If you ever find that this is not the case please file an issue.
|
||||
|
||||
> **Note:** To enable developers contributing on Macs with Apple Silicon, we ensure that the artifacts are built for `linux/amd64`
|
||||
> rather than the host `linux/arm64` architecture. This can be overridden using the `BUILD_ARCH` environment variable. Pull Requests are welcome :).
|
||||
|
||||
### Step 1: Install Build Dependencies
|
||||
|
||||
Here is a list of tools that are used by the build system to generate the build artifacts, packaging them up into containers, generating crds, helm charts and for running tests.
|
||||
|
||||
- git
|
||||
- make
|
||||
- docker (make sure you can run docker commands as your dev user without sudo)
|
||||
- bash
|
||||
- rsync
|
||||
- golang - `go.mod` for current version
|
||||
- python (>=3.10.9)
|
||||
- kubectl
|
||||
- a kubernetes cluster (you need permissions to create resources, i.e. crds, deployments, services, etc...)
|
||||
- a Docker registry
|
||||
- bsdtar (Provided by libarchive-tools on Ubuntu 19.10 and newer)
|
||||
- gawk
|
||||
- jq
|
||||
- helm
|
||||
|
||||
### Step 2: Clone Project
|
||||
|
||||
If you haven't already then this would be a good time to clone the project running the following commands:
|
||||
|
||||
```bash
|
||||
# clone to your preferred folder
|
||||
git clone https://github.com/emissary-ingress/emissary.git
|
||||
|
||||
# navigate to project
|
||||
cd emissary
|
||||
```
|
||||
|
||||
### Step 3: Configuration
|
||||
|
||||
You can configure the build system using environment variables, two required variables are used for setting the container registry and the kubeconfig used.
|
||||
|
||||
> **Important**: the test and build system perform destructive operations against your cluster. Therefore, we recommend that you
|
||||
> use a development cluster. Setting the DEV_KUBECONFIG variable described below ensures you don't accidentally perform actions on a production cluster.
|
||||
|
||||
Open a terminal in the location where you cloned the repository and run the following commands:
|
||||
|
||||
```bash
|
||||
# set container registry using `export DEV_REGISTRY=<your-registry>`
|
||||
# note: you need to be logged in and have permissions to push
|
||||
# Example:
|
||||
export DEV_REGISTRY=docker.io/parsec86
|
||||
|
||||
# set kube config file using `export DEV_KUBECONFIG=<dev-kubeconfig>`
|
||||
# your cluster needs the ability to read from the configured container registry
|
||||
export DEV_KUBECONFIG="$HOME/.kube/dev-config.yaml"
|
||||
|
||||
```
|
||||
|
||||
### Step 4: Building
|
||||
|
||||
The build system for this project leverages `make` and multi-stage `docker` builds to produce the following containers:
|
||||
|
||||
- `emissary.local/emissary` - single deployable container for Emissary-ingress
|
||||
- `emissary.local/kat-client` - test client container used for testing
|
||||
- `emissary.local/kat-server` - test server container used for testing
|
||||
|
||||
Using the terminal session you opened in step 2, run the following commands
|
||||
|
||||
>
|
||||
|
||||
```bash
|
||||
# This will pull and build the necessary docker containers and produce multiple containers.
|
||||
# If this is the first time running this command it will take a little bit while the base images are built up and cached.
|
||||
make images
|
||||
|
||||
# verify containers were successfully created, you should also see some of the intermediate builder containers as well
|
||||
docker images | grep emissary.local
|
||||
```
|
||||
|
||||
*What just happened?*
|
||||
|
||||
The build system generated a build container that pulled in envoy, the build dependencies, built various binaries from within this project and packaged them into a single deployable container. More information on this can be found in the [Architecture Document](ARCHITECTURE.md).
|
||||
|
||||
### Step 5: Push
|
||||
|
||||
Now that you have successfully built the containers it's time to push them to the container registry which you set up in step 3.
|
||||
|
||||
In the same terminal session you can run the following command:
|
||||
|
||||
```bash
|
||||
# re-tags the images and pushes them to your configured container registry
|
||||
# docker must be able to login to your registry and you have to have push permissions
|
||||
make push
|
||||
|
||||
# you can view the newly tagged images by running
|
||||
docker images | grep <your-registry>
|
||||
|
||||
# alternatively, we have two make targets that provide information as well
|
||||
make env
|
||||
|
||||
# or in a bash export friendly format
|
||||
make export
|
||||
```
|
||||
|
||||
### Step 6: Deploy
|
||||
|
||||
Now it's time to deploy the container out to your Kubernetes cluster that was configured in step 3. Hopefully, it is already becoming apparent that we love to leverage Make to handle the complexity for you :).
|
||||
|
||||
```bash
|
||||
# generate helm charts and K8's Configs with your container swapped in and apply them to your cluster
|
||||
make deploy
|
||||
|
||||
# check your cluster to see if emissary is running
|
||||
# note: kubectl doesn't know about DEV_KUBECONFIG so you may need to ensure KUBECONFIG is pointing to the correct cluster
|
||||
kubectl get pod -n ambassador
|
||||
```
|
||||
|
||||
🥳 If all has gone well then you should have your development environment setup for building and testing Emissary-ingress.
|
||||
|
||||
### Step 7: Dev-loop
|
||||
|
||||
Now that you are all setup and able to deploy a development container of Emissary-ingress to a cluster, it is time to start making some changes.
|
||||
|
||||
Lookup an issue that you want to work on, assign it to yourself and if you have any questions feel free to ping us on slack in the #emissary-dev channel.
|
||||
|
||||
Make a change to Emissary-ingress and when you want to test it in a live cluster just re-run
|
||||
|
||||
`make deploy`
|
||||
|
||||
This will:
|
||||
|
||||
- recompile the go binary
|
||||
- rebuild containers
|
||||
- push them to the docker registry
|
||||
- rebuild helm charts and manifest
|
||||
- reapply manifest to cluster and re-deploy Emissary-ingress to the cluster
|
||||
|
||||
> *Do I have to run the other make targets `make images` or `make push` ?*
|
||||
> No you don't have to because `make deploy` will actually run those commands for you. The steps above were meant to introduce you to the various make targets so that you are aware of them and have options when developing.
|
||||
|
||||
### What should I do next?
|
||||
|
||||
Now that you have your dev system up and running, here is some additional content that we recommend you check out:
|
||||
|
||||
- [Emissary-ingress Architecture](ARCHITECTURE.md)
|
||||
- [Contributing Code](#contributing)
|
||||
- [Contributing to Docs](#contributing-to-the-docs)
|
||||
- [Advanced Topics](#advanced-topics)
|
||||
- [Faq](#faq)
|
||||
|
||||
## Contributing
|
||||
|
||||
This section goes over how to contribute code to the project and how to get started contributing. More information on how we manage our branches can be found below in [Development Workflow](#development-workflow).
|
||||
|
||||
Before contributing be sure to read our [Code of Conduct](../Community/CODE_OF_CONDUCT.md) and [Governance](../Community/GOVERNANCE.md) to get an understanding of how our project is structured.
|
||||
|
||||
### Submitting a Pull Request (PR)
|
||||
|
||||
> If you haven't set up your development environment then please see the [Development Setup](#development-setup) section.
|
||||
|
||||
When submitting a Pull Request (PR) here are a set of guidelines to follow:
|
||||
|
||||
1. Search for an [existing issue](https://github.com/emissary-ingress/emissary/issues) or create a [new issue](https://github.com/emissary-ingress/emissary/issues/new/choose).
|
||||
|
||||
2. Be sure to describe your proposed change and any open questions you might have in the issue. This allows us to collect historical context around an issue, provide feedback on the proposed solution and discuss what versions a fix should target.
|
||||
|
||||
3. If you haven't done so already create a fork of the repository and clone it locally
|
||||
|
||||
```shell
|
||||
git clone <your-fork>
|
||||
```
|
||||
|
||||
4. Cut a new patch branch from `master`:
|
||||
|
||||
```shell
|
||||
git checkout master
|
||||
git checkout -b my-patch-branch master
|
||||
```
|
||||
|
||||
5. Make necessary code changes.
|
||||
|
||||
- Make sure you include test coverage for the change, see [How do I run Tests](#how-do-i-run-emissary-ingress-tests)
|
||||
- Ensure code linting is passing by running `make lint`
|
||||
- Code changes must have associated documentation updates.
|
||||
   - Make changes in <https://github.com/datawire/ambassador-docs> as necessary, and include a reference to those changes in the pull request for your code changes.
|
||||
- See [Contributing to Docs](#contributing-to-the-docs) for more details.
|
||||
|
||||
> Smaller pull requests are easier to review and can get merged faster, thus reducing potential for merge conflicts, so it is recommended to keep them small and focused.
|
||||
|
||||
6. Commit your changes using descriptive commit messages.
|
||||
- we **require** that all commits are signed off so please be sure to commit using the `--signoff` flag, e.g. `git commit --signoff`
|
||||
- commit message should summarize the fix and motivation for the proposed fix. Include issue # that the fix looks to address.
|
||||
- we are "ok" with multiple commits but we may ask you to squash some commits during the PR review process
|
||||
|
||||
7. Push your branch to your forked repository:
|
||||
|
||||
> It is good practice to make sure your change is rebased on the latest master to ensure it will merge cleanly so if it has been awhile since you rebased on upstream you should do it now to ensure there are no merge conflicts
|
||||
|
||||
```shell
|
||||
git push origin my-patch-branch
|
||||
```
|
||||
|
||||
8. Submit a Pull Request from your fork targeting upstream `emissary/master`.
|
||||
|
||||
Thanks for your contribution! One of the [Maintainers](../Community/MAINTAINERS.md) will review your PR and discuss any changes that need to be made.
|
||||
|
||||
### Pull Request Review Process
|
||||
|
||||
This is an opportunity for the Maintainers to review the code for accuracy and ensure that it solves the problem outlined in the issue. This is an iterative process and meant to ensure the quality of the code base. During this process we may ask you to break up Pull Request into smaller changes, squash commits, rebase on master, etc...
|
||||
|
||||
Once you have been provided feedback:
|
||||
|
||||
1. Make the required updates to the code per the review discussion
|
||||
2. Retest the code and ensure linting is still passing
|
||||
3. Commit the changes and push to Github
|
||||
- see [Fixup Commits](#fixup-commits-during-pr-review) below
|
||||
4. Repeat these steps as necessary
|
||||
|
||||
Once you have **two approvals** then one of the Maintainers will merge the PR.
|
||||
|
||||
:tada: Thank you for contributing and being a part of the Emissary-ingress Community!
|
||||
|
||||
### Rebasing a branch under review
|
||||
|
||||
Many times the base branch will have new commits added to it which may cause merge conflicts with your open pull request. First, a good rule of thumb is to make pull requests small so that these conflicts are less likely to occur, but this is not always possible when you have multiple people working on similar features. Second, if it is just addressing commit feedback a `fixup` commit is also a good option so that the reviewers can see what changed since their last review.
|
||||
|
||||
If you need to address merge conflicts then it is preferred that you use **Rebase** on the base branch rather than merging base branch into the feature branch. This ensures that when the PR is merged that it will cleanly replay on top of the base branch ensuring we maintain a clean linear history.
|
||||
|
||||
To do a rebase you can do the following:
|
||||
|
||||
```shell
|
||||
# add emissary.git as a remote repository, only needs to be done once
|
||||
git remote add upstream https://github.com/emissary-ingress/emissary.git
|
||||
|
||||
# fetch upstream master
|
||||
git fetch upstream master
|
||||
|
||||
# checkout local master and update it from upstream master
|
||||
git checkout master
|
||||
git pull --ff upstream master
|
||||
|
||||
# rebase patch branch on local master
|
||||
git checkout my-patch-branch
|
||||
git rebase -i master
|
||||
```
|
||||
|
||||
Once the merge conflicts are addressed and you are ready to push the code up you will need to force push your changes because during the rebase process the commit sha's are re-written and it has diverged from what is in your remote fork (Github).
|
||||
|
||||
To force push a branch you can:
|
||||
|
||||
```shell
|
||||
git push origin HEAD --force-with-lease
|
||||
```
|
||||
|
||||
> Note: the `--force-with-lease` is recommended over `--force` because it is safer because it will check if the remote branch had new commits added during your rebase. You can read more detail here: <https://itnext.io/git-force-vs-force-with-lease-9d0e753e8c41>
|
||||
|
||||
### Fixup commits during PR review
|
||||
|
||||
One of the major downsides to rebasing a branch is that it requires force pushing over the remote (Github) which then marks all the existing review history outdated. This makes it hard for a reviewer to figure out whether or not the new changes addressed the feedback.
|
||||
|
||||
One way you can help the reviewer out is by using **fixup** commits. Fixup commits are special git commits that append `fixup!` to the subject of a commit. `Git` provides tools for easily creating these and also squashing them after the PR review process is done.
|
||||
|
||||
Since this is a new commit on top of the other commits, you will not lose your previous review and the new commit can be reviewed independently to determine if the new changes addressed the feedback correctly. Then once the reviewers are happy we will ask you to squash them so that when it is merged we will maintain a clean linear history.
|
||||
|
||||
Here is a quick read on it: <https://jordanelver.co.uk/blog/2020/06/04/fixing-commits-with-git-commit-fixup-and-git-rebase-autosquash/>
|
||||
|
||||
TL;DR;
|
||||
|
||||
```shell
|
||||
# make code change and create new commit
|
||||
git commit --fixup <sha>
|
||||
|
||||
# push to Github for review
|
||||
git push
|
||||
|
||||
# reviewers are happy and ask you to do a final rebase before merging
|
||||
git rebase -i --autosquash master
|
||||
|
||||
# final push before merging
|
||||
git push --force-with-lease
|
||||
```
|
||||
|
||||
## Development Workflow
|
||||
|
||||
This section introduces the development workflow used for this repository. It is recommended that Contributors, Release Engineers, and Maintainers familiarize themselves with this content.
|
||||
|
||||
### Branching Strategy
|
||||
|
||||
This repository follows a trunk based development workflow. Depending on what article you read there are slight nuances to this so this section will outline how this repository interprets that workflow.
|
||||
|
||||
The most important branch is `master`; this is our **Next Release** version and it should always be in a shippable state. This means that CI should be green and at any point we can decide to ship a new release from it. In a traditional trunk based development workflow, developers are encouraged to land partially finished work daily and to keep that work hidden behind feature flags. This repository does **NOT** follow that and instead if code lands on master it is something we are comfortable with shipping.
|
||||
|
||||
We ship release candidate (RC) builds from the `master` branch (current major) and also from `release/v{major.minor}` branches (last major version) during our development cycles. Therefore, it is important that it remains shippable at all times!
|
||||
|
||||
When we do a final release then we will cut a new `release/v{major.minor}` branch. These are long lived release branches which capture a snapshot in time for that release. For example here are some of the current release branches (as of writing this):
|
||||
|
||||
- release/v3.2
|
||||
- release/v3.1
|
||||
- release/v3.0
|
||||
- release/v2.4
|
||||
- release/v2.3
|
||||
- release/v1.14
|
||||
|
||||
These branches contain the codebase as it was at that time when the release was done. These branches have branch protection enabled to ensure that they are not removed or accidentally overwritten. If we needed to do a security fix or bug patch then we may cut a new `.Z` patch release from an existing release branch. For example, the `release/v2.4` branch is currently on `2.4.1`.
|
||||
|
||||
As you can see we currently support multiple major versions of Emissary-ingress and you can read more about our [End-of-Life Policy](https://www.getambassador.io/docs/emissary/latest/about/aes-emissary-eol/).
|
||||
|
||||
For more information on our current RC and Release process you can find that in our [Release Wiki](https://github.com/emissary-ingress/emissary/wiki).
|
||||
|
||||
### Backport Strategy
|
||||
|
||||
Since we follow a trunk based development workflow this means that the majority of the time your patch branch will be based off from `master` and that most Pull Request will target `master`.
|
||||
|
||||
This ensures that we do not miss bug fixes or features for the "Next" shippable release and simplifies the mental-model for deciding how to get started contributing code.
|
||||
|
||||
#### What if I need a patch to land in a previous supported version?
|
||||
|
||||
Let's say I have a bug fix for CRD round trip conversion for AuthService, which is affecting both `v2.y` and `v3.y`.
|
||||
|
||||
First within the issue we should discuss what versions we want to target. This can depend on current cycle work and any upcoming releases we may have.
|
||||
|
||||
The general rules we follow are:
|
||||
|
||||
1. land patch in "next" version which is `master`
|
||||
2. backport patch to any `release/v{major}.{minor}` branches
|
||||
|
||||
So, let's say we discuss it and say that the "next" major version is a long ways away so we want to do a z patch release on our current minor version(`v3.2`) and we also want to do a z patch release on our last supported major version (`v2.4`).
|
||||
|
||||
This means that these patches need to land in three separate branches:
|
||||
|
||||
1. `master` - next release
|
||||
2. `release/v3.2` - patch release
|
||||
3. `release/v2.4` - patch release
|
||||
|
||||
In this scenario, we first ask you to land the patch in the `master` branch and then provide separate PR's with the commits backported onto the `release/v*` branches.
|
||||
|
||||
> Recommendation: using `git cherry-pick -x` will add the source commit sha to the commit message. This helps with tracing work back to the original commit.
|
||||
|
||||
#### What if my patch is only for a previous supported version?
|
||||
|
||||
Although, this should be an edge case, it does happen where the code has diverged enough that a fix may only be relevant to an existing supported version. In these cases we may need to do a patch release for that older supported version.
|
||||
|
||||
A good example, if we were to find a bug in the Envoy v2 protocol configuration we would only want to target the v2 release.
|
||||
|
||||
In this scenario, the base branch that we would create our feature branch off from would be the latest `minor` version for that release. As of writing this, that would be the `release/v2.4` branch. We would **not** need to target master.
|
||||
|
||||
But, let's say during our fix we notice other things that need to be addressed that would also need to be fixed in `master`. Then you need to submit a **separate Pull Request** that should first land on master and then follow the normal backporting process for the other patches.
|
||||
|
||||
#### What if I'm still not sure?
|
||||
|
||||
This is what the issue discussions and discussions in Slack are for, so that we can help guide you. Feel free to ping us in the `#emissary-dev` channel on Slack to discuss directly with us.
|
||||
|
||||
### Merge Strategy
|
||||
|
||||
> The audience for this section is the Maintainers but also beneficial for Contributors so that they are familiar with how the project operates.
|
||||
|
||||
Having a clean linear commit history for a repository makes it easier to understand what is being changed and reduces the mental load for newcomers to the project.
|
||||
|
||||
To maintain a clean linear commit history the following rules should be followed:
|
||||
|
||||
First, always rebase patch branch on to base branch. This means **NO** merge commits from merging base branch into the patch branch. This can be accomplished using git rebase.
|
||||
|
||||
```shell
|
||||
# first, make sure you pull latest upstream changes
|
||||
git fetch upstream
|
||||
git checkout master
|
||||
git pull --ff upstream master
|
||||
|
||||
# checkout patch branch and rebase interactive
|
||||
# you may have merge conflicts you need to resolve
|
||||
git checkout my-patch-branch
|
||||
git rebase -i master
|
||||
```
|
||||
|
||||
> Note: this does rewrite your commit shas so be aware when sharing branches with co-workers.
|
||||
|
||||
Once the Pull Request is reviewed and has **two approvals** then a Maintainer can merge. Maintainers should prefer the following merge strategies:
|
||||
|
||||
1. rebase and merge
|
||||
2. squash merge
|
||||
|
||||
When `rebase and merge` is used your commits are played on top of the base branch so that it creates a clean linear history. This will maintain all the commits from the Pull Request. In most cases this should be the **preferred** merge strategy.
|
||||
|
||||
When a Pull Request has lots of fixup commits, or pr feedback fixes then you should ask the Contributor to squash them as part of the PR process.
|
||||
|
||||
If the contributor is unable to squash them then using a `squash merge` in some cases makes sense. **IMPORTANT**, when this does happen it is important that the commit messages are cleaned up and not just blindly accepted the way proposed by Github. Since it is easy to miss that cleanup step, this should be used less frequently compared to `rebase and merge`.
|
||||
|
||||
#### What about merge commit strategy?
|
||||
|
||||
> The audience for this section is the Maintainers but also beneficial for Contributors so that they are familiar with how the project operates.
|
||||
|
||||
When maintaining a linear commit history, each commit tells the story of what was changed in the repository. When using `merge commits` it
|
||||
adds an additional commit to the history that is not necessary because the commit history and PR history already tell the story.
|
||||
|
||||
Now `merge commits` can be useful when you are concerned with not rewriting the commit sha. Based on the current release process which includes using `rel/v` branches that are tagged and merged into `release/v` branches we must use a `merge commit` when merging these branches. This ensures that the commit sha a Git Tag is pointing at still exists once merged into the `release/v` branch.
|
||||
|
||||
## Contributing to the Docs
|
||||
|
||||
The Emissary-ingress community will all benefit from having documentation that is useful and correct. If you have found an issue with the end user documentation, then please help us out by submitting an issue and/or pull request with a fix!
|
||||
|
||||
The end user documentation for Emissary-ingress lives in a different repository and can be found at <https://github.com/datawire/ambassador-docs>.
|
||||
|
||||
See this repository for details on how to contribute to either a `pre-release` or already-released version of Emissary-ingress.
|
||||
|
||||
## Advanced Topics
|
||||
|
||||
This section is for more advanced topics that provide more detailed instructions. Make sure you go through the Development Setup and read the Architecture document before exploring these topics.
|
||||
|
||||
### Running Emissary-ingress internals locally
|
||||
|
||||
The main entrypoint is written in go. It strives to be as compatible as possible
|
||||
with the normal go toolchain. You can run it with:
|
||||
|
||||
```bash
|
||||
go run ./cmd/busyambassador entrypoint
|
||||
```
|
||||
|
||||
Of course just because you can run it this way does not mean it will succeed.
|
||||
The entrypoint needs to launch `diagd` and `envoy` in order to function, and it
|
||||
also expects to be able to write to the `/ambassador` directory.
|
||||
|
||||
#### Setting up diagd
|
||||
|
||||
If you want to hack on diagd, it's easiest to set up a virtualenv with an editable
|
||||
copy and launch your `go run` from within that virtualenv. Note that these
|
||||
instructions depend on the virtualenvwrapper
|
||||
(<https://virtualenvwrapper.readthedocs.io/en/latest/>) package:
|
||||
|
||||
```bash
|
||||
# Create a virtualenv named venv with all the python requirements
|
||||
# installed.
|
||||
python3 -m venv venv
|
||||
. venv/bin/activate
|
||||
# If you're doing this in Datawire's apro.git, then:
|
||||
cd ambassador
|
||||
# Update pip and install dependencies
|
||||
pip install --upgrade pip
|
||||
pip install orjson # see below
|
||||
pip install -r builder/requirements.txt
|
||||
# Create an editable installation of ambassador:
|
||||
pip install -e python/
|
||||
# Check that we do indeed have diagd in our path.
|
||||
which diagd
|
||||
# If you're doing this in Datawire's apro.git, then:
|
||||
cd ..
|
||||
```
|
||||
|
||||
(Note: it shouldn't be necessary to install `orjson` by hand. The fact that it is
|
||||
at the moment is an artifact of the way Ambassador builds currently happen.)
|
||||
|
||||
#### Changing the ambassador root
|
||||
|
||||
You should now be able to launch ambassador if you set the
|
||||
`ambassador_root` environment variable to a writable location:
|
||||
|
||||
ambassador_root=/tmp go run ./cmd/busyambassador entrypoint
|
||||
|
||||
#### Getting envoy
|
||||
|
||||
If you do not have envoy in your path already, the entrypoint will use
|
||||
docker to run it.
|
||||
|
||||
#### Shutting up the pod labels error
|
||||
|
||||
An astute observer of the logs will notice that ambassador complains
|
||||
vociferously that pod labels are not mounted in the ambassador
|
||||
container. To reduce this noise, you can:
|
||||
|
||||
```bash
|
||||
mkdir /tmp/ambassador-pod-info && touch /tmp/ambassador-pod-info/labels
|
||||
```
|
||||
|
||||
#### Extra credit
|
||||
|
||||
When you run ambassador locally it will configure itself exactly as it
|
||||
would in the cluster. That means with two caveats you can actually
|
||||
interact with it and it will function normally:
|
||||
|
||||
1. You need to run `telepresence connect` or equivalent so it can
|
||||
connect to the backend services in its configuration.
|
||||
|
||||
2. You need to supply the host header when you talk to it.
|
||||
|
||||
### Debugging and Developing Envoy Configuration
|
||||
|
||||
Envoy configuration is generated by the ambassador compiler. Debugging
|
||||
the ambassador compiler by running it in kubernetes is very slow since
|
||||
we need to push both the code and any relevant kubernetes resources
|
||||
into the cluster. The following sections will provide tips for improving
|
||||
this development experience.
|
||||
|
||||
### Making changes to Envoy
|
||||
|
||||
Emissary-ingress is built on top of Envoy and leverages a vendored version of Envoy (*we track upstream very closely*). This section will go into how to make changes to the Envoy that is packaged with Emissary-ingress.
|
||||
|
||||
This is a bit more complex than anyone likes, but here goes:
|
||||
|
||||
#### 1. Preparing your machine
|
||||
|
||||
Building and testing Envoy can be very resource intensive. A laptop
|
||||
often can build Envoy... if you plug in an external hard drive, point
|
||||
a fan at it, and leave it running overnight and most of the next day.
|
||||
At Ambassador Labs, we'll often spin up a temporary build machine in GCE, so
|
||||
that we can build it very quickly.
|
||||
|
||||
As of Envoy 1.15.0, we've measured the resource use to build and test
|
||||
it as:
|
||||
|
||||
> | Command | Disk Size | Disk Used | Duration[1] |
|
||||
> |--------------------|-----------|-----------|-------------|
|
||||
> | `make update-base` | 450G | 12GB | ~11m |
|
||||
> | `make check-envoy` | 450G | 424GB | ~45m |
|
||||
>
|
||||
> [1] On a "Machine type: custom (32 vCPUs, 512 GB memory)" VM on GCE,
|
||||
> with the following entry in its `/etc/fstab`:
|
||||
>
|
||||
> ```bash
|
||||
> tmpfs:docker /var/lib/docker tmpfs size=450G 0 0
|
||||
> ```
|
||||
|
||||
If you have the RAM, we've seen huge speed gains from doing the builds
|
||||
and tests on a RAM disk (see the `/etc/fstab` line above).
|
||||
|
||||
#### 2. Setting up your workspace to hack on Envoy
|
||||
|
||||
1. From your `emissary.git` checkout, get Emissary-ingress's current
|
||||
version of the Envoy sources, and create a branch from that:
|
||||
|
||||
```shell
|
||||
make $PWD/_cxx/envoy
|
||||
git -C _cxx/envoy checkout -b YOUR_BRANCHNAME
|
||||
```
|
||||
2. To build Envoy in FIPS mode, set the following variable:
|
||||
|
||||
```shell
|
||||
export FIPS_MODE=true
|
||||
```
|
||||
|
||||
It is important to note that while building Envoy in FIPS mode is
|
||||
required for FIPS compliance, additional steps may be necessary.
|
||||
Emissary does not claim to be FIPS compliant or certified.
|
||||
See [here](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ssl#fips-140-2) for more information on FIPS and Envoy.
|
||||
|
||||
> _NOTE:_ FIPS_MODE is NOT supported by the emissary-ingress maintainers, but we provide it as a convenience for developers
|
||||
|
||||
#### 3. Hacking on Envoy
|
||||
|
||||
Modify the sources in `./_cxx/envoy/`, or update the branch and/or `ENVOY_COMMIT` as necessary in `./_cxx/envoy.mk`
|
||||
|
||||
#### 4. Building and testing your hacked-up Envoy
|
||||
|
||||
> See `./_cxx/envoy.mk` for the full list of targets.
|
||||
|
||||
Multiple phony targets are provided so that developers can run the steps they are interested in when developing; here are a few of the key ones:
|
||||
|
||||
- `make update-base`: will perform all the steps necessary to verify, build envoy, build docker images, push images to the container repository and compile the updated protos.
|
||||
|
||||
- `make build-envoy`: will build the envoy binaries using the same build container as the upstream Envoy project. Build outputs are mounted to the `_cxx/envoy-docker-build` directory and Bazel will write the results there.
|
||||
|
||||
- `make build-base-envoy-image`: will use the release outputs from building envoy to generate a new `base-envoy` container which is then used in the main emissary-ingress container build.
|
||||
|
||||
- `make push-base-envoy`: will push the built container to the remote container repository.
|
||||
|
||||
- `make check-envoy`: will use the build docker container to run the Envoy test suite against the currently checked out envoy in the `_cxx/envoy` folder.
|
||||
|
||||
- `make envoy-shell`: will run the envoy build container and open a bash shell session. The `_cxx/envoy` folder is volume mounted into the container, and the user is set to the `envoybuild` user in the container so that you are not running as root, ensuring hermetic builds.
|
||||
|
||||
#### 5. Test Devloop
|
||||
|
||||
Running the Envoy test suite will compile all the test targets. This is a slow process and can use lots of disk space.
|
||||
|
||||
The Envoy Inner Devloop for build and testing:
|
||||
|
||||
- You can make a change to Envoy code and run the whole test by just calling `make check-envoy`
|
||||
- You can run a specific test instead of the whole test suite by setting the `ENVOY_TEST_LABEL` environment variable.
|
||||
- For example, to run just the unit tests in `test/common/network/listener_impl_test.cc`, you should run:
|
||||
|
||||
```shell
|
||||
ENVOY_TEST_LABEL='//test/common/network:listener_impl_test' make check-envoy
|
||||
```
|
||||
|
||||
- Alternatively, you can run `make envoy-shell` to get a bash shell into the Docker container that does the Envoy builds and you are free to interact with `Bazel` directly.
|
||||
|
||||
Interpreting the test results:
|
||||
|
||||
- If you see the following message, don't worry, it's harmless; the tests still ran:
|
||||
|
||||
```text
|
||||
There were tests whose specified size is too big. Use the --test_verbose_timeout_warnings command line option to see which ones these are.
|
||||
```
|
||||
|
||||
The message means that the test passed, but it passed too
|
||||
quickly, and Bazel is suggesting that you declare it as smaller.
|
||||
Something along the lines of "This test only took 2s, but you
|
||||
declared it as being in the 60s-300s ('moderate') bucket,
|
||||
consider declaring it as being in the 0s-60s ('short')
|
||||
bucket".
|
||||
|
||||
Don't be confused (as I was) in to thinking that it was saying
|
||||
that the test was too big and was skipped and that you need to
|
||||
throw more hardware at it.
|
||||
|
||||
- **Build or test Emissary-ingress** with the usual `make` commands, with
|
||||
the exception that you MUST run `make update-base` first whenever
|
||||
Envoy needs to be recompiled; it won't happen automatically. So
|
||||
`make test` to build-and-test Emissary-ingress would become
|
||||
`make update-base && make test`, and `make images` to just build
|
||||
Emissary-ingress would become `make update-base && make images`.
|
||||
|
||||
The Envoy changes with Emissary-ingress:
|
||||
|
||||
- Either run `make update-base` to build, and push a new base container and then you can run `make test` for the Emissary-ingress test suite.
|
||||
- If you do not want to push the container you can instead:
|
||||
- Build Envoy - `make build-envoy`
|
||||
- Build container - `make build-base-envoy-image`
|
||||
- Test Emissary - `make test`
|
||||
|
||||
#### 6. Protobuf changes
|
||||
|
||||
If you made any changes to the Protocol Buffer files or if you bumped versions of Envoy then you
|
||||
should make sure that you are re-compiling the Protobufs so that they are available and checked-in
|
||||
to the emissary.git repository.
|
||||
|
||||
```sh
|
||||
make compile-envoy-protos
|
||||
```
|
||||
|
||||
This will copy over the raw proto files, compile them, and copy the generated Go code over to the emissary-ingress repository.
|
||||
|
||||
#### 7. Finalizing your changes
|
||||
|
||||
> NOTE: we are no longer accepting PR's in `datawire/envoy.git`.
|
||||
|
||||
If you have custom changes then land them in your custom envoy repository and update the `ENVOY_COMMIT` and `ENVOY_DOCKER_REPO` variable in `_cxx/envoy.mk` so that the image will be pushed to the correct repository.
|
||||
|
||||
Then run `make update-base`, which performs all the necessary steps; assuming that was successful, you are all set.
|
||||
|
||||
**For maintainers:**
|
||||
|
||||
You will want to make sure that the image is pushed to the backup container registries:
|
||||
|
||||
```shell
|
||||
# upload image to the mirror in GCR
|
||||
SHA=GET_THIS_FROM_THE_make_update-base_OUTPUT
|
||||
TAG="envoy-0.$SHA.opt"
|
||||
docker pull "docker.io/emissaryingress/base-envoy:$TAG"
|
||||
docker tag "docker.io/emissaryingress/base-envoy:$TAG" "gcr.io/datawire/ambassador-base:$TAG"
|
||||
docker push "gcr.io/datawire/ambassador-base:$TAG"
|
||||
```
|
||||
|
||||
#### 8. Final Checklist
|
||||
|
||||
**For Maintainers Only**
|
||||
|
||||
Here is a checklist of things to do when bumping the `base-envoy` version:
|
||||
|
||||
- [ ] The image has been pushed to...
|
||||
- [ ] `docker.io/emissaryingress/base-envoy`
|
||||
- [ ] `gcr.io/datawire/ambassador-base`
|
||||
- [ ] The `datawire/envoy.git` commit has been tagged as `datawire-$(git describe --tags --match='v*')`
|
||||
(the `--match` is to prevent `datawire-*` tags from stacking on each other).
|
||||
- [ ] It's been tested with...
|
||||
- [ ] `make check-envoy`
|
||||
|
||||
The `check-envoy-version` CI job will double check all these things, with the exception of running
|
||||
the Envoy tests. If the `check-envoy-version` is failing then double check the above, fix them and
|
||||
re-run the job.
|
||||
|
||||
### Developing Emissary-ingress (Maintainers-only advice)
|
||||
|
||||
At the moment, these techniques will only work internally to Maintainers. Mostly
|
||||
this is because they require credentials to access internal resources at the
|
||||
moment, though in several cases we're working to fix that.
|
||||
|
||||
#### Updating license documentation
|
||||
|
||||
When new dependencies are added or existing ones are updated, run
|
||||
`make generate` and commit changes to `DEPENDENCIES.md` and
|
||||
`DEPENDENCY_LICENSES.md`
|
||||
|
||||
#### Upgrading Python dependencies
|
||||
|
||||
Delete `python/requirements.txt`, then run `make generate`.
|
||||
|
||||
If there are some dependencies you don't want to upgrade, but want to
|
||||
upgrade everything else, then
|
||||
|
||||
1. Remove from `python/requirements.txt` all of the entries except
|
||||
for those you want to pin.
|
||||
2. Delete `python/requirements.in` (if it exists).
|
||||
3. Run `make generate`.
|
||||
|
||||
> **Note**: If you are updating orjson you will need to also update `docker/base-python/Dockerfile` before running `make generate` for the new version. orjson uses rust bindings and the default wheels on PyPI rely on glibc. Because our base python image is Alpine based, it is built from scratch using rustc to build a musl-compatible version.
|
||||
|
||||
> :warning: You may run into an error when running `make generate` where it can't detect the licenses for new or upgraded dependencies, which is needed so that we can properly generate DEPENDENCIES.md and DEPENDENCY_LICENSES.md. If that is the case, you may also have to update `build-aux/tools/src/py-mkopensource/main.go:parseLicenses` for any license changes then run `make generate` again.
|
||||
|
||||
## FAQ
|
||||
|
||||
This section contains a set of Frequently Asked Questions that may answer a question you have. Also, feel free to ping us in Slack.
|
||||
|
||||
### How do I find out what build targets are available?
|
||||
|
||||
Use `make help` and `make targets` to see what build targets are
|
||||
available along with documentation for what each target does.
|
||||
|
||||
### How do I develop on a Mac with Apple Silicon?
|
||||
|
||||
To ensure that developers using a Mac with Apple Silicon can contribute, the build system ensures
|
||||
the build artifacts are `linux/amd64` rather than the host architecture. This behavior can be overridden
|
||||
using the `BUILD_ARCH` environment variable (e.g. `BUILD_ARCH=linux/arm64 make images`).
|
||||
|
||||
### How do I develop on Windows using WSL?
|
||||
|
||||
- [WSL 2](https://learn.microsoft.com/en-us/windows/wsl/)
|
||||
- [Docker Desktop for Windows](https://docs.docker.com/desktop/windows/wsl/)
|
||||
- [VS Code](https://code.visualstudio.com/)
|
||||
|
||||
### How do I test using a private Docker repository?
|
||||
|
||||
If you are pushing your development images to a private Docker repo,
|
||||
then:
|
||||
|
||||
```sh
|
||||
export DEV_USE_IMAGEPULLSECRET=true
|
||||
export DOCKER_BUILD_USERNAME=...
|
||||
export DOCKER_BUILD_PASSWORD=...
|
||||
```
|
||||
|
||||
and the test machinery should create an `imagePullSecret` from those Docker credentials such that it can pull the images.
|
||||
|
||||
### How do I change the loglevel at runtime?
|
||||
|
||||
```console
|
||||
curl localhost:8877/ambassador/v0/diag/?loglevel=debug
|
||||
```
|
||||
|
||||
Note: This affects diagd and Envoy, but NOT the AES `amb-sidecar`.
|
||||
See the AES `CONTRIBUTING.md` for how to do that.
|
||||
|
||||
### Can I build from a docker container instead of on my local computer?
|
||||
|
||||
If you want to build within a container instead of setting up dependencies on your local machine then you can run the build within a docker container and leverage "Docker in Docker" to build it.
|
||||
|
||||
1. `docker pull docker:latest`
|
||||
2. `docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -it docker:latest sh`
|
||||
3. `apk add --update --no-cache bash build-base go curl rsync python3 python2 git libarchive-tools gawk jq`
|
||||
4. `git clone https://github.com/emissary-ingress/emissary.git && cd emissary`
|
||||
5. `make images`
|
||||
|
||||
Steps 1 and 2 are run on your machine, and 3 - 5 are run from within the docker container. The base image is a "Docker in Docker" image, run with `-v /var/run/docker.sock:/var/run/docker.sock` in order to connect to your local daemon from the docker inside the container. More info on Docker in Docker [here](https://hub.docker.com/_/docker).
|
||||
|
||||
The images will be created and tagged as defined above, and will be available in docker on your local machine.
|
||||
|
||||
### How do I clear everything out to make sure my build runs like it will in CI?
|
||||
|
||||
Use `make clobber` to completely remove all derived objects, all cached artifacts, everything, and get back to a clean slate. This is recommended if you change branches within a clone, or if you need to `make generate` when you're not *certain* that your last `make generate` was using the same Envoy version.
|
||||
|
||||
Use `make clean` to remove derived objects, but *not* clear the caches.
|
||||
|
||||
### My editor is changing `go.mod` or `go.sum`, should I commit that?
|
||||
|
||||
If you notice this happening, run `make go-mod-tidy`, and commit that.
|
||||
|
||||
(If you're in Ambassador Labs, you should do this from `apro/`, not
|
||||
`apro/ambassador/`, so that apro.git's files are included too.)
|
||||
|
||||
### How do I debug "This should not happen in CI" errors?
|
||||
|
||||
These checks indicate that some output file changed in the middle of a
|
||||
run, when it should only change if a source file has changed. Since
|
||||
CI isn't editing the source files, this shouldn't happen in CI!
|
||||
|
||||
This is problematic because it means that running the build multiple
|
||||
times can give different results, and that the tests are probably not
|
||||
testing the same image that would be released.
|
||||
|
||||
These checks will show you a patch showing how the output file
|
||||
changed; it is up to you to figure out what is happening in the
|
||||
build/test system that would cause that change in the middle of a run.
|
||||
For the most part, this is pretty simple... except when the output
|
||||
file is a Docker image; you just see that one image hash is different
|
||||
than another image hash.
|
||||
|
||||
Fortunately, the failure showing the changed image hash is usually
|
||||
immediately preceded by a `docker build`. Earlier in the CI output,
|
||||
you should find an identical `docker build` command from the first time it
|
||||
ran. In the second `docker build`'s output, each step should say
|
||||
`---> Using cache`; the first few steps will say this, but at some
|
||||
point later steps will stop saying this; find the first step that is
|
||||
missing the `---> Using cache` line, and try to figure out what could
|
||||
have changed between the two runs that would cause it to not use the
|
||||
cache.
|
||||
|
||||
If that step is an `ADD` command that is adding a directory, the
|
||||
problem is probably that you need to add something to `.dockerignore`.
|
||||
To help figure out what you need to add, try adding a `RUN find
|
||||
DIRECTORY -exec ls -ld -- {} +` step after the `ADD` step, so that you
|
||||
can see what it added, and see what is different on that between the
|
||||
first and second `docker build` commands.
|
||||
|
||||
### How do I run Emissary-ingress tests?
|
||||
|
||||
- `export DEV_REGISTRY=<your-dev-docker-registry>` (you need to be logged in and have permission to push)
|
||||
- `export DEV_KUBECONFIG=<your-dev-kubeconfig>`
|
||||
|
||||
If you want to run the Go tests for `cmd/entrypoint`, you'll need `diagd`
|
||||
in your `PATH`. See the instructions below about `Setting up diagd` to do
|
||||
that.
|
||||
|
||||
| Group | Command |
|
||||
| --------------- | ---------------------------------------------------------------------- |
|
||||
| All Tests | `make test` |
|
||||
| All Golang | `make gotest` |
|
||||
| All Python | `make pytest` |
|
||||
| Some/One Golang | `make gotest GOTEST_PKGS=./cmd/entrypoint GOTEST_ARGS="-run TestName"` |
|
||||
| Some/One Python | `make pytest PYTEST_ARGS="-k TestName"` |
|
||||
|
||||
Please note the python tests use a local cache to speed up test
|
||||
results. If you make a code update that changes the generated envoy
|
||||
configuration, those tests will fail and you will need to update the
|
||||
python test cache.
|
||||
|
||||
Note that it is invalid to run one of the `main[Plain.*]` Python tests
|
||||
without running all of the other `main[Plain*]` tests; the test will
|
||||
fail to run (not even showing up as a failure or xfail--it will fail
|
||||
to run at all). For example, `PYTEST_ARGS="-k WebSocket"` would match
|
||||
the `main[Plain.WebSocketMapping-GRPC]` test, and that test would fail
|
||||
to run; one should instead say `PYTEST_ARGS="-k Plain or WebSocket"`
|
||||
to avoid breaking the sub-tests of "Plain".
|
||||
|
||||
### How do I type check my python code?
|
||||
|
||||
Ambassador uses Python 3 type hinting and the `mypy` static type checker to
|
||||
help find bugs before runtime. If you haven't worked with hinting before, a
|
||||
good place to start is
|
||||
[the `mypy` cheat sheet](https://mypy.readthedocs.io/en/latest/cheat_sheet_py3.html).
|
||||
|
||||
New code must be hinted, and the build process will verify that the type
|
||||
check passes when you `make test`. Fair warning: this means that
|
||||
PRs will not pass CI if the type checker fails.
|
||||
|
||||
We strongly recommend using an editor that can do realtime type checking
|
||||
(at Datawire we tend to use PyCharm and VSCode a lot, but many many editors
|
||||
can do this now) and also running the type checker by hand before submitting
|
||||
anything:
|
||||
|
||||
- `make lint/mypy` will check all the Ambassador code
|
||||
|
||||
Ambassador code should produce *no* warnings and *no* errors.
|
||||
|
||||
If you're concerned that the mypy cache is somehow wrong, delete the
|
||||
`.mypy_cache/` directory to clear the cache.
|
||||
|
|
@ -111,7 +111,7 @@ These steps should be completed within the 1-7 days of Disclosure.
|
|||
[CVSS](https://www.first.org/cvss/specification-document) using the [CVSS
|
||||
Calculator](https://www.first.org/cvss/calculator/3.0). The Fix Lead makes the final call on the
|
||||
calculated CVSS; it is better to move quickly than to spend time making the CVSS perfect.
|
||||
- The Fix Team will work per the usual [Emissary Development Process](DEVELOPING.md), including
|
||||
- The Fix Team will work per the usual [Emissary Development Process](CONTRIBUTING.md), including
|
||||
fix branches, PRs, reviews, etc.
|
||||
- The Fix Team will notify the Fix Lead that work on the fix branch is complete once the fix is
|
||||
present in the relevant release branch(es) in the private security repo.
|
||||
|
|
5
Makefile
5
Makefile
|
@ -56,15 +56,16 @@ endif
|
|||
|
||||
# Everything else...
|
||||
|
||||
NAME ?= emissary
|
||||
EMISSARY_NAME ?= emissary
|
||||
|
||||
_git_remote_urls := $(shell git remote | xargs -n1 git remote get-url --all)
|
||||
IS_PRIVATE ?= $(findstring private,$(_git_remote_urls))
|
||||
|
||||
include $(OSS_HOME)/build-aux/ci.mk
|
||||
include $(OSS_HOME)/build-aux/deps.mk
|
||||
include $(OSS_HOME)/build-aux/main.mk
|
||||
include $(OSS_HOME)/build-aux/check.mk
|
||||
include $(OSS_HOME)/build-aux/builder.mk
|
||||
include $(OSS_HOME)/build-aux/check.mk
|
||||
include $(OSS_HOME)/_cxx/envoy.mk
|
||||
include $(OSS_HOME)/releng/release.mk
|
||||
|
||||
|
|
|
@ -0,0 +1,176 @@
|
|||
# Emissary-ingress 3.10 Quickstart
|
||||
|
||||
**We recommend using Helm** to install Emissary.
|
||||
|
||||
### Installing if you're starting fresh
|
||||
|
||||
**If you are already running Emissary and just want to upgrade, DO NOT FOLLOW
|
||||
THESE DIRECTIONS.** Instead, check out "Upgrading from an earlier Emissary"
|
||||
below.
|
||||
|
||||
If you're starting from scratch and you don't need to worry about older CRD
|
||||
versions, install using `--set enableLegacyVersions=false` to avoid installing
|
||||
the old versions of the CRDs and the conversion webhook:
|
||||
|
||||
```bash
|
||||
helm install emissary-crds \
|
||||
--namespace emissary --create-namespace \
|
||||
oci://ghcr.io/emissary-ingress/emissary-crds-chart --version=3.10.0 \
|
||||
--set enableLegacyVersions=false \
|
||||
--wait
|
||||
```
|
||||
|
||||
This will install only v3alpha1 CRDs and skip the conversion webhook entirely.
|
||||
It will create the `emissary` namespace for you, but there won't be anything
|
||||
in it at this point.
|
||||
|
||||
Next up, install Emissary itself, with `--set waitForApiext.enabled=false` to
|
||||
tell Emissary not to wait for the conversion webhook to be ready:
|
||||
|
||||
```bash
|
||||
helm install emissary \
|
||||
--namespace emissary \
|
||||
oci://ghcr.io/emissary-ingress/emissary-ingress --version=3.10.0 \
|
||||
--set waitForApiext.enabled=false \
|
||||
--wait
|
||||
```
|
||||
|
||||
### Upgrading from an earlier Emissary
|
||||
|
||||
First, install the CRDs and the conversion webhook:
|
||||
|
||||
```bash
|
||||
helm install emissary-crds \
|
||||
--namespace emissary-system --create-namespace \
|
||||
oci://ghcr.io/emissary-ingress/emissary-crds-chart --version=3.10.0 \
|
||||
--wait
|
||||
```
|
||||
|
||||
This will install all the versions of the CRDs (v1, v2, and v3alpha1) and the
|
||||
conversion webhook into the `emissary-system` namespace. Once that's done, you'll install Emissary itself:
|
||||
|
||||
```bash
|
||||
helm install emissary \
|
||||
--namespace emissary --create-namespace \
|
||||
oci://ghcr.io/emissary-ingress/emissary-ingress --version=3.10.0 \
|
||||
--wait
|
||||
```
|
||||
|
||||
### Using Emissary
|
||||
|
||||
In either case above, you should have a running Emissary behind the Service
|
||||
named `emissary-emissary-ingress` in the `emissary` namespace. How exactly you
|
||||
connect to that Service will vary with your cluster provider, but you can
|
||||
start with
|
||||
|
||||
```bash
|
||||
kubectl get svc -n emissary emissary-emissary-ingress
|
||||
```
|
||||
|
||||
and that should get you started. Or, of course, you can use something like
|
||||
|
||||
```bash
|
||||
kubectl port-forward -n emissary svc/emissary-emissary-ingress 8080:80
|
||||
```
|
||||
|
||||
(after you configure a Listener!) and then talk to localhost:8080 with any
|
||||
kind of cluster.
|
||||
|
||||
## Using Faces for a sanity check
|
||||
|
||||
[Faces Demo]: https://github.com/buoyantio/faces-demo
|
||||
|
||||
If you like, you can continue by using the [Faces Demo] as a quick sanity
|
||||
check. First, install Faces itself using Helm:
|
||||
|
||||
```bash
|
||||
helm install faces \
|
||||
--namespace faces --create-namespace \
|
||||
oci://ghcr.io/buoyantio/faces-chart --version 2.0.0-rc.4 \
|
||||
--wait
|
||||
```
|
||||
|
||||
Next, you'll need to configure Emissary to route to Faces. First, we'll do the
|
||||
basic configuration to tell Emissary to listen for HTTP traffic:
|
||||
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
---
|
||||
apiVersion: getambassador.io/v3alpha1
|
||||
kind: Listener
|
||||
metadata:
|
||||
name: ambassador-https-listener
|
||||
spec:
|
||||
port: 8443
|
||||
protocol: HTTPS
|
||||
securityModel: XFP
|
||||
hostBinding:
|
||||
namespace:
|
||||
from: ALL
|
||||
---
|
||||
apiVersion: getambassador.io/v3alpha1
|
||||
kind: Listener
|
||||
metadata:
|
||||
name: ambassador-http-listener
|
||||
spec:
|
||||
port: 8080
|
||||
protocol: HTTP
|
||||
securityModel: XFP
|
||||
hostBinding:
|
||||
namespace:
|
||||
from: ALL
|
||||
---
|
||||
apiVersion: getambassador.io/v3alpha1
|
||||
kind: Host
|
||||
metadata:
|
||||
name: wildcard-host
|
||||
spec:
|
||||
hostname: "*"
|
||||
requestPolicy:
|
||||
insecure:
|
||||
action: Route
|
||||
EOF
|
||||
```
|
||||
|
||||
(This actually supports both HTTPS and HTTP, but since we haven't set up TLS
|
||||
certificates, we'll just stick with HTTP.)
|
||||
|
||||
Next, we need two Mappings:
|
||||
|
||||
| Prefix | Routes to Service | in Namespace |
|
||||
| --------- | ----------------- | ------------ |
|
||||
| `/faces/` | `faces-gui` | `faces` |
|
||||
| `/face/` | `face` | `faces` |
|
||||
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
---
|
||||
apiVersion: getambassador.io/v3alpha1
|
||||
kind: Mapping
|
||||
metadata:
|
||||
name: gui-mapping
|
||||
namespace: faces
|
||||
spec:
|
||||
hostname: "*"
|
||||
prefix: /faces/
|
||||
service: faces-gui.faces
|
||||
rewrite: /
|
||||
timeout_ms: 0
|
||||
---
|
||||
apiVersion: getambassador.io/v3alpha1
|
||||
kind: Mapping
|
||||
metadata:
|
||||
name: face-mapping
|
||||
namespace: faces
|
||||
spec:
|
||||
hostname: "*"
|
||||
prefix: /face/
|
||||
service: face.faces
|
||||
timeout_ms: 0
|
||||
EOF
|
||||
```
|
||||
|
||||
Once that's done, then you'll be able to access the Faces Demo at `/faces/`,
|
||||
on whatever IP address or hostname your cluster provides for the
|
||||
`emissary-emissary-ingress` Service. Or you can port-forward as above and
|
||||
access it at `http://localhost:8080/faces/`.
|
134
README.md
134
README.md
|
@ -6,69 +6,105 @@ Emissary-ingress
|
|||
[![Docker Repository][badge-docker-img]][badge-docker-link]
|
||||
[![Join Slack][badge-slack-img]][badge-slack-link]
|
||||
[![Core Infrastructure Initiative: Best Practices][badge-cii-img]][badge-cii-link]
|
||||
[![Artifact HUB][badge-artifacthub-img]][badge-artifacthub-link]
|
||||
|
||||
[badge-version-img]: https://img.shields.io/docker/v/emissaryingress/emissary?sort=semver
|
||||
[badge-version-link]: https://github.com/emissary-ingress/emissary/releases
|
||||
[badge-docker-img]: https://img.shields.io/docker/pulls/emissaryingress/emissary
|
||||
[badge-docker-link]: https://hub.docker.com/r/emissaryingress/emissary
|
||||
[badge-slack-img]: https://img.shields.io/badge/slack-join-orange.svg
|
||||
[badge-slack-link]: https://a8r.io/slack
|
||||
[badge-slack-link]: https://communityinviter.com/apps/cloud-native/cncf
|
||||
[badge-cii-img]: https://bestpractices.coreinfrastructure.org/projects/1852/badge
|
||||
[badge-cii-link]: https://bestpractices.coreinfrastructure.org/projects/1852
|
||||
[badge-artifacthub-img]: https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/emissary-ingress
|
||||
[badge-artifacthub-link]: https://artifacthub.io/packages/helm/datawire/emissary-ingress
|
||||
|
||||
<!-- Links are (mostly) at the end of this document, for legibility. -->
|
||||
|
||||
[Emissary-Ingress](https://www.getambassador.io) is an open-source Kubernetes-native API Gateway +
|
||||
Layer 7 load balancer + Kubernetes Ingress built on [Envoy Proxy](https://www.envoyproxy.io).
|
||||
Emissary-ingress is a CNCF incubation project (and was formerly known as Ambassador API Gateway).
|
||||
---
|
||||
|
||||
Emissary-ingress enables its users to:
|
||||
* Manage ingress traffic with [load balancing], support for multiple protocols ([gRPC and HTTP/2], [TCP], and [web sockets]), and Kubernetes integration
|
||||
* Manage changes to routing with an easy to use declarative policy engine and [self-service configuration], via Kubernetes [CRDs] or annotations
|
||||
* Secure microservices with [authentication], [rate limiting], and [TLS]
|
||||
* Ensure high availability with [sticky sessions], [rate limiting], and [circuit breaking]
|
||||
* Leverage observability with integrations with [Grafana], [Prometheus], and [Datadog], and comprehensive [metrics] support
|
||||
* Enable progressive delivery with [canary releases]
|
||||
* Connect service meshes including [Consul], [Linkerd], and [Istio]
|
||||
## QUICKSTART
|
||||
|
||||
Looking to get started as quickly as possible? Check out [the
|
||||
QUICKSTART](https://emissary-ingress.dev/docs/3.10/quick-start/)!
|
||||
|
||||
### Latest Release
|
||||
|
||||
The latest production version of Emissary is **3.10.0**.
|
||||
|
||||
**Note well** that there is also an Ambassador Edge Stack 3.10.0, but
|
||||
**Emissary 3.10 and Edge Stack 3.10 are not equivalent**. Their codebases have
|
||||
diverged and will continue to do so.
|
||||
|
||||
---
|
||||
|
||||
Emissary-ingress
|
||||
================
|
||||
|
||||
[Emissary-ingress](https://www.getambassador.io/docs/open-source) is an
|
||||
open-source, developer-centric, Kubernetes-native API gateway built on [Envoy
|
||||
Proxy]. Emissary-ingress is a CNCF incubating project (and was formerly known
|
||||
as Ambassador API Gateway).
|
||||
|
||||
### Design Goals
|
||||
|
||||
The first problem faced by any organization trying to develop cloud-native
|
||||
applications is the _ingress problem_: allowing users outside the cluster to
|
||||
access the application running inside the cluster. Emissary is built around
|
||||
the idea that the application developers should be able to solve the ingress
|
||||
problem themselves, without needing to become Kubernetes experts and without
|
||||
needing dedicated operations staff: a self-service, developer-centric workflow
|
||||
is necessary to develop at scale.
|
||||
|
||||
Emissary is open-source, developer-centric, role-oriented, opinionated, and
|
||||
Kubernetes-native.
|
||||
|
||||
- open-source: Emissary is licensed under the Apache 2 license, permitting use
|
||||
or modification by anyone.
|
||||
- developer-centric: Emissary is designed taking the application developer
|
||||
into account first.
|
||||
- role-oriented: Emissary's configuration deliberately tries to separate
|
||||
elements to allow separation of concerns between developers and operations.
|
||||
- opinionated: Emissary deliberately tries to make easy things easy, even if
|
||||
  that comes at the cost of not allowing some uncommon features.
|
||||
|
||||
### Features
|
||||
|
||||
Emissary supports all the table-stakes features needed for a modern API
|
||||
gateway:
|
||||
|
||||
* Per-request [load balancing]
|
||||
* Support for routing [gRPC], [HTTP/2], [TCP], and [web sockets]
|
||||
* Declarative configuration via Kubernetes [custom resources]
|
||||
* Fine-grained [authentication] and [authorization]
|
||||
* Advanced routing features like [canary releases], [A/B testing], [dynamic routing], and [sticky sessions]
|
||||
* Resilience features like [retries], [rate limiting], and [circuit breaking]
|
||||
* Observability features including comprehensive [metrics] support using the [Prometheus] stack
|
||||
* Easy service mesh integration with [Linkerd], [Istio], [Consul], etc.
|
||||
* [Knative serverless integration]
|
||||
|
||||
See the full list of [features](https://www.getambassador.io/features/) here.
|
||||
See the full list of [features](https://www.getambassador.io/docs/emissary) here.
|
||||
|
||||
Branches
|
||||
========
|
||||
### Branches
|
||||
|
||||
(If you are looking at this list on a branch other than `master`, it
|
||||
may be out of date.)
|
||||
|
||||
- [`master`](https://github.com/emissary-ingress/emissary/tree/master) - branch for Emissary-ingress 3.6.z work (:heavy_check_mark: upcoming release)
|
||||
- [`release/v3.5`](https://github.com/emissary-ingress/emissary/tree/release/v3.5) - branch for Emissary-ingress 3.5.z work
|
||||
- [`release/v2.5`](https://github.com/emissary-ingress/emissary/tree/release/v2.5) - branch for Emissary-ingress 2.5.z work (:heavy_check_mark: upcoming release)
|
||||
- [`release/v1.14`](https://github.com/emissary-ingress/emissary/tree/release/v1.14) - branch for Emissary-ingress 1.14.z work (:heavy_check_mark: maintenance, supported through September 2022)
|
||||
- [`main`](https://github.com/emissary-ingress/emissary/tree/main): Emissary 4 development work
|
||||
|
||||
Architecture
|
||||
============
|
||||
**No further development is planned on any branches listed below.**
|
||||
|
||||
Emissary is configured via Kubernetes CRDs, or via annotations on Kubernetes `Service`s. Internally,
|
||||
it uses the [Envoy Proxy] to actually handle routing data; externally, it relies on Kubernetes for
|
||||
scaling and resiliency. For more on Emissary's architecture and motivation, read [this blog post](https://blog.getambassador.io/building-ambassador-an-open-source-api-gateway-on-kubernetes-and-envoy-ed01ed520844).
|
||||
- [`master`](https://github.com/emissary-ingress/emissary/tree/master) - **Frozen** at Emissary 3.10.0
|
||||
- [`release/v3.10`](https://github.com/emissary-ingress/emissary/tree/release/v3.10) - Emissary-ingress 3.10.0 release branch
|
||||
- [`release/v3.9`](https://github.com/emissary-ingress/emissary/tree/release/v3.9)
|
||||
- Emissary-ingress 3.9.1 release branch
|
||||
- [`release/v2.5`](https://github.com/emissary-ingress/emissary/tree/release/v2.5) - Emissary-ingress 2.5.1 release branch
|
||||
|
||||
Getting Started
|
||||
===============
|
||||
**Note well** that there is also an Ambassador Edge Stack 3.10.0, but
|
||||
**Emissary 3.10 and Edge Stack 3.10 are not equivalent**. Their codebases have
|
||||
diverged and will continue to do so.
|
||||
|
||||
You can get Emissary up and running in just three steps. Follow the instructions here: https://www.getambassador.io/docs/emissary/latest/tutorials/getting-started/
|
||||
|
||||
If you are looking for a Kubernetes ingress controller, Emissary provides a superset of the functionality of a typical ingress controller. (It does the traditional routing, and layers on a raft of configuration options.) This blog post covers [Kubernetes ingress](https://blog.getambassador.io/kubernetes-ingress-nodeport-load-balancers-and-ingress-controllers-6e29f1c44f2d).
|
||||
|
||||
For other common questions, view this [FAQ page](https://www.getambassador.io/docs/emissary/latest/about/faq/).
|
||||
|
||||
You can also use Helm to install Emissary. For more information, see the instructions in the [Helm installation documentation](https://www.getambassador.io/docs/emissary/latest/topics/install/helm/)
|
||||
|
||||
Check out full the [Emissary
|
||||
documentation](https://www.getambassador.io/docs/emissary/) at
|
||||
www.getambassador.io.
|
||||
|
||||
Community
|
||||
=========
|
||||
#### Community
|
||||
|
||||
Emissary-ingress is a CNCF Incubating project and welcomes any and all
|
||||
contributors.
|
||||
|
@ -83,21 +119,21 @@ the way the community is run, including:
|
|||
regular trouble-shooting meetings and contributor meetings
|
||||
- how to get support: see [`SUPPORT.md`](Community/SUPPORT.md).
|
||||
|
||||
The best way to join the community is to join our [Slack
|
||||
channel](https://a8r.io/slack).
|
||||
|
||||
Check out the [`DevDocumentation/`](DevDocumentation/) directory for
|
||||
information on the technicals of Emissary, most notably the
|
||||
[`DEVELOPING.md`](DevDocumentation/DEVELOPING.md) contributor's guide.
|
||||
The best way to join the community is to join the `#emissary-ingress` channel
|
||||
in the [CNCF Slack]. This is also the best place for technical information
|
||||
about Emissary's architecture or development.
|
||||
|
||||
If you're interested in contributing, here are some ways:
|
||||
* Write a blog post for [our blog](https://blog.getambassador.io)
|
||||
* Investigate an [open issue](https://github.com/emissary-ingress/emissary/issues)
|
||||
* Add [more tests](https://github.com/emissary-ingress/emissary/tree/master/ambassador/tests)
|
||||
|
||||
The Ambassador Edge Stack is a superset of Emissary-ingress that provides additional functionality including OAuth/OpenID Connect, advanced rate limiting, Swagger/OpenAPI support, integrated ACME support for automatic TLS certificate management, and a cloud-based UI. For more information, visit https://www.getambassador.io/editions/.
|
||||
* Add [more tests](https://github.com/emissary-ingress/emissary/tree/main/ambassador/tests)
|
||||
|
||||
<!-- Please keep this list sorted. -->
|
||||
[CNCF Slack]: https://communityinviter.com/apps/cloud-native/cncf
|
||||
[Envoy Proxy]: https://www.envoyproxy.io
|
||||
|
||||
<!-- Legacy: clean up these links! -->
|
||||
|
||||
[authentication]: https://www.getambassador.io/docs/emissary/latest/topics/running/services/auth-service/
|
||||
[canary releases]: https://www.getambassador.io/docs/emissary/latest/topics/using/canary/
|
||||
[circuit breaking]: https://www.getambassador.io/docs/emissary/latest/topics/using/circuit-breakers/
|
||||
|
|
|
@ -4,3 +4,6 @@
|
|||
/envoy-build-container.txt
|
||||
|
||||
/go-control-plane/
|
||||
|
||||
# folder is mounted to envoy build container and build outputs are copied here
|
||||
/envoy-docker-build
|
546
_cxx/envoy.mk
546
_cxx/envoy.mk
|
@ -1,32 +1,33 @@
|
|||
#
|
||||
# Variables that the dev might set in the env or CLI
|
||||
|
||||
# Set to non-empty to enable compiling Envoy as-needed.
|
||||
YES_I_AM_OK_WITH_COMPILING_ENVOY ?=
|
||||
|
||||
# Adjust to run just a subset of the tests.
|
||||
ENVOY_TEST_LABEL ?= //test/...
|
||||
# Set RSYNC_EXTRAS=Pv or something to increase verbosity.
|
||||
RSYNC_EXTRAS ?=
|
||||
ENVOY_TEST_LABEL ?= //contrib/golang/... //test/...
|
||||
export ENVOY_TEST_LABEL
|
||||
|
||||
#
|
||||
# Variables that are meant to be set by editing this file
|
||||
|
||||
# IF YOU MESS WITH ANY OF THESE VALUES, YOU MUST RUN `make update-base`.
|
||||
ENVOY_REPO ?= $(if $(IS_PRIVATE),git@github.com:datawire/envoy-private.git,https://github.com/datawire/envoy.git)
|
||||
# rebase/release/v1.25.3
|
||||
ENVOY_COMMIT ?= b8eb98c4a04bd1e0d21230e7a7c99f37a04f255b
|
||||
ENVOY_COMPILATION_MODE ?= opt
|
||||
# Increment BASE_ENVOY_RELVER on changes to `docker/base-envoy/Dockerfile`, or Envoy recipes.
|
||||
# You may reset BASE_ENVOY_RELVER when adjusting ENVOY_COMMIT.
|
||||
BASE_ENVOY_RELVER ?= 0
|
||||
ENVOY_REPO ?= https://github.com/datawire/envoy.git
|
||||
|
||||
# Set to non-empty to enable compiling Envoy in FIPS mode.
|
||||
FIPS_MODE ?=
|
||||
# https://github.com/datawire/envoy/tree/rebase/release/v1.31.3
|
||||
ENVOY_COMMIT ?= 628f5afc75a894a08504fa0f416269ec50c07bf9
|
||||
|
||||
ENVOY_DOCKER_REPO ?= $(if $(IS_PRIVATE),quay.io/datawire-private/base-envoy,docker.io/emissaryingress/base-envoy)
|
||||
ENVOY_DOCKER_VERSION ?= $(BASE_ENVOY_RELVER).$(ENVOY_COMMIT).$(ENVOY_COMPILATION_MODE)$(if $(FIPS_MODE),.FIPS)
|
||||
ENVOY_DOCKER_TAG ?= $(ENVOY_DOCKER_REPO):envoy-$(ENVOY_DOCKER_VERSION)
|
||||
ENVOY_FULL_DOCKER_TAG ?= $(ENVOY_DOCKER_REPO):envoy-full-$(ENVOY_DOCKER_VERSION)
|
||||
ENVOY_COMPILATION_MODE ?= opt
|
||||
# Increment BASE_ENVOY_RELVER on changes to `docker/base-envoy/Dockerfile`, or Envoy recipes.
|
||||
# You may reset BASE_ENVOY_RELVER when adjusting ENVOY_COMMIT.
|
||||
BASE_ENVOY_RELVER ?= 0
|
||||
|
||||
# Set to non-empty to enable compiling Envoy in FIPS mode.
|
||||
FIPS_MODE ?=
|
||||
export FIPS_MODE
|
||||
|
||||
# ENVOY_DOCKER_REPO ?= docker.io/emissaryingress/base-envoy
|
||||
ENVOY_DOCKER_REPO ?= gcr.io/datawire/ambassador-base
|
||||
ENVOY_DOCKER_VERSION ?= $(BASE_ENVOY_RELVER).$(ENVOY_COMMIT).$(ENVOY_COMPILATION_MODE)$(if $(FIPS_MODE),.FIPS)
|
||||
ENVOY_DOCKER_TAG ?= $(ENVOY_DOCKER_REPO):envoy-$(ENVOY_DOCKER_VERSION)
|
||||
# END LIST OF VARIABLES REQUIRING `make update-base`.
|
||||
|
||||
# How to set ENVOY_GO_CONTROL_PLANE_COMMIT: In github.com/envoyproxy/go-control-plane.git, the majority
|
||||
|
@ -37,27 +38,209 @@ RSYNC_EXTRAS ?=
|
|||
# which commits are ancestors, I added `make guess-envoy-go-control-plane-commit` to do that in an
|
||||
# automated way! Still look at the commit yourself to make sure it seems sane; blindly trusting
|
||||
# machines is bad, mmkay?
|
||||
ENVOY_GO_CONTROL_PLANE_COMMIT = 335df8c6b7f10ee07fa8322126911b9da27ff94b
|
||||
ENVOY_GO_CONTROL_PLANE_COMMIT = f888b4f71207d0d268dee7cb824de92848da9ede
|
||||
|
||||
# Set ENVOY_DOCKER_REPO to the list of mirrors that we should
|
||||
# sanity-check that things get pushed to.
|
||||
ifneq ($(IS_PRIVATE),)
|
||||
# If $(IS_PRIVATE), then just the private repo...
|
||||
ENVOY_DOCKER_REPOS = $(ENVOY_DOCKER_REPO)
|
||||
else
|
||||
# ...otherwise, this list of repos:
|
||||
ENVOY_DOCKER_REPOS = docker.io/emissaryingress/base-envoy
|
||||
ENVOY_DOCKER_REPOS += gcr.io/datawire/ambassador-base
|
||||
endif
|
||||
# Set ENVOY_DOCKER_REPO to the list of mirrors to check
|
||||
# ENVOY_DOCKER_REPOS = docker.io/emissaryingress/base-envoy
|
||||
# ENVOY_DOCKER_REPOS += gcr.io/datawire/ambassador-base
|
||||
|
||||
#
|
||||
# Intro
|
||||
|
||||
include $(OSS_HOME)/build-aux/prelude.mk
|
||||
|
||||
# for builder.mk...
|
||||
export ENVOY_DOCKER_TAG
|
||||
|
||||
|
||||
#
|
||||
#################### Envoy cxx and build image targets #####################
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy: FORCE
|
||||
@echo "Getting Envoy sources..."
|
||||
# Ensure that GIT_DIR and GIT_WORK_TREE are unset so that `git bisect`
|
||||
# and friends work properly.
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
git init $@; \
|
||||
cd $@; \
|
||||
if git remote get-url origin &>/dev/null; then \
|
||||
git remote set-url origin $(ENVOY_REPO); \
|
||||
else \
|
||||
git remote add origin $(ENVOY_REPO); \
|
||||
fi; \
|
||||
if [[ $(ENVOY_REPO) == http://github.com/* || $(ENVOY_REPO) == https://github.com/* || $(ENVOY_REPO) == git://github.com/* ]]; then \
|
||||
git remote set-url --push origin git@github.com:$(word 3,$(subst /, ,$(ENVOY_REPO)))/$(patsubst %.git,%,$(word 4,$(subst /, ,$(ENVOY_REPO)))).git; \
|
||||
fi; \
|
||||
git fetch --tags origin; \
|
||||
if [ $(ENVOY_COMMIT) != '-' ]; then \
|
||||
git checkout $(ENVOY_COMMIT); \
|
||||
elif ! git rev-parse HEAD >/dev/null 2>&1; then \
|
||||
git checkout origin/master; \
|
||||
fi; \
|
||||
}
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy.clean: %.clean:
|
||||
$(if $(filter-out -,$(ENVOY_COMMIT)),rm -rf $*)
|
||||
clobber: $(OSS_HOME)/_cxx/envoy.clean
|
||||
|
||||
# cleanup existing build outputs
|
||||
$(OSS_HOME)/_cxx/envoy-docker-build.clean: %.clean:
|
||||
$(if $(filter-out -,$(ENVOY_COMMIT)),sudo rm -rf $*)
|
||||
clobber: $(OSS_HOME)/_cxx/envoy-docker-build.clean
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy-build-image.txt: $(OSS_HOME)/_cxx/envoy $(tools/write-ifchanged) FORCE
|
||||
@PS4=; set -ex -o pipefail; { \
|
||||
pushd $</ci; \
|
||||
echo "$$(pwd)"; \
|
||||
. envoy_build_sha.sh; \
|
||||
popd; \
|
||||
echo docker.io/envoyproxy/envoy-build-ubuntu:$$ENVOY_BUILD_SHA | $(tools/write-ifchanged) $@; \
|
||||
}
|
||||
clean: $(OSS_HOME)/_cxx/envoy-build-image.txt.rm
|
||||
|
||||
# cleanup build artifacts
|
||||
clean: $(OSS_HOME)/docker/base-envoy/envoy-static.rm
|
||||
clean: $(OSS_HOME)/docker/base-envoy/envoy-static-stripped.rm
|
||||
clean: $(OSS_HOME)/docker/base-envoy/envoy-static.dwp.rm
|
||||
|
||||
################################# Compile Custom Envoy Protos ######################################
|
||||
|
||||
# copy raw protos and compiled go protos into emissary-ingress
|
||||
.PHONY: compile-envoy-protos
|
||||
compile-envoy-protos: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
$(OSS_HOME)/_cxx/tools/compile-protos.sh
|
||||
|
||||
################################# Envoy Build PhonyTargets #########################################
|
||||
|
||||
# helper to trigger the clone of the datawire/envoy repository
|
||||
.PHONY: clone-envoy
|
||||
clone-envoy: $(OSS_HOME)/_cxx/envoy
|
||||
|
||||
# clean up envoy resources
|
||||
.PHONY: clean-envoy
|
||||
clean-envoy:
|
||||
cd $(OSS_HOME)/_cxx/envoy && ./ci/run_envoy_docker.sh "./ci/do_ci.sh 'clean'"
|
||||
|
||||
# Check to see if we have already built and pushed an image for this Envoy commit
|
||||
.PHONY: verify-base-envoy
|
||||
verify-base-envoy:
|
||||
@PS4=; set -ex; { \
|
||||
if docker pull $(ENVOY_DOCKER_TAG); then \
|
||||
echo 'Already up-to-date: $(ENVOY_DOCKER_TAG)'; \
|
||||
ENVOY_VERSION_OUTPUT=$$(docker run --platform="$(BUILD_ARCH)" --rm -it --entrypoint envoy-static-stripped $(ENVOY_DOCKER_TAG) --version | grep "version:"); \
|
||||
ENVOY_VERSION_EXPECTED="envoy-static-stripped .*version:.* $(ENVOY_COMMIT)/.*"; \
|
||||
if ! echo "$$ENVOY_VERSION_OUTPUT" | grep "$$ENVOY_VERSION_EXPECTED"; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo "error: Envoy base image $(ENVOY_DOCKER_TAG) contains envoy-static-stripped binary that reported an unexpected version string!" \
|
||||
"See ENVOY_VERSION_OUTPUT and ENVOY_VERSION_EXPECTED in the output above. This error is usually not recoverable." \
|
||||
"You may need to rebuild the Envoy base image after either updating ENVOY_COMMIT or bumping BASE_ENVOY_RELVER" \
|
||||
"(or both, depending on what you are doing)."; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
echo "Nothing to build at this time"; \
|
||||
exit 0; \
|
||||
fi; \
|
||||
}
|
||||
|
||||
# builds envoy using release settings, see https://github.com/envoyproxy/envoy/blob/main/ci/README.md for additional
|
||||
# details on configuring builds
|
||||
.PHONY: build-envoy
|
||||
build-envoy: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
$(OSS_HOME)/_cxx/tools/build-envoy.sh
|
||||
|
||||
# build the base-envoy containers and tags them locally, this requires running `build-envoy` first.
|
||||
.PHONY: build-base-envoy-image
|
||||
build-base-envoy-image: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
docker build --platform="$(BUILD_ARCH)" -f $(OSS_HOME)/docker/base-envoy/Dockerfile.stripped -t $(ENVOY_DOCKER_TAG) $(OSS_HOME)/docker/base-envoy
|
||||
|
||||
# Allows pushing the docker image independent of building envoy and docker containers
|
||||
# Note, bump the BASE_ENVOY_RELVER and re-build before pushing when making non-commit changes to have a unique image tag.
|
||||
.PHONY: push-base-envoy-image
|
||||
push-base-envoy-image:
|
||||
docker push $(ENVOY_DOCKER_TAG)
|
||||
|
||||
|
||||
# `make update-base`: Recompile Envoy and do all of the related things.
|
||||
.PHONY: update-base
|
||||
update-base: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
$(MAKE) verify-base-envoy
|
||||
$(MAKE) build-envoy
|
||||
$(MAKE) build-base-envoy-image
|
||||
$(MAKE) push-base-envoy-image
|
||||
$(MAKE) compile-envoy-protos
|
||||
|
||||
.PHONY: check-envoy
|
||||
check-envoy: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
$(OSS_HOME)/_cxx/tools/test-envoy.sh;
|
||||
|
||||
.PHONY: envoy-shell
|
||||
envoy-shell: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
cd $(OSS_HOME)/_cxx/envoy && ./ci/run_envoy_docker.sh bash || true;
|
||||
|
||||
################################# Go-control-plane Targets ####################################
|
||||
#
|
||||
# Recipes used by `make generate`; files that get checked into Git (i.e. protobufs and Go code)
|
||||
#
|
||||
# These targets are depended on by `make generate` in `build-aux/generate.mk`.
|
||||
|
||||
|
||||
# See the comment on ENVOY_GO_CONTROL_PLANE_COMMIT at the top of the file for more explanation on how this target works.
|
||||
guess-envoy-go-control-plane-commit: # Have the computer suggest a value for ENVOY_GO_CONTROL_PLANE_COMMIT
|
||||
guess-envoy-go-control-plane-commit: $(OSS_HOME)/_cxx/envoy $(OSS_HOME)/_cxx/go-control-plane
|
||||
@echo
|
||||
@echo '######################################################################'
|
||||
@echo
|
||||
@set -e; { \
|
||||
(cd $(OSS_HOME)/_cxx/go-control-plane && git log --format='%H %s' origin/main) | sed -n 's, Mirrored from envoyproxy/envoy @ , ,p' | \
|
||||
while read -r go_commit cxx_commit; do \
|
||||
if (cd $(OSS_HOME)/_cxx/envoy && git merge-base --is-ancestor "$$cxx_commit" $(ENVOY_COMMIT) 2>/dev/null); then \
|
||||
echo "ENVOY_GO_CONTROL_PLANE_COMMIT = $$go_commit"; \
|
||||
break; \
|
||||
fi; \
|
||||
done; \
|
||||
}
|
||||
.PHONY: guess-envoy-go-control-plane-commit
|
||||
|
||||
# The unmodified go-control-plane
|
||||
$(OSS_HOME)/_cxx/go-control-plane: FORCE
|
||||
@echo "Getting Envoy go-control-plane sources..."
|
||||
# Ensure that GIT_DIR and GIT_WORK_TREE are unset so that `git bisect`
|
||||
# and friends work properly.
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
git init $@; \
|
||||
cd $@; \
|
||||
if git remote get-url origin &>/dev/null; then \
|
||||
git remote set-url origin https://github.com/envoyproxy/go-control-plane; \
|
||||
else \
|
||||
git remote add origin https://github.com/envoyproxy/go-control-plane; \
|
||||
fi; \
|
||||
git fetch --tags origin; \
|
||||
git checkout $(ENVOY_GO_CONTROL_PLANE_COMMIT); \
|
||||
}
|
||||
|
||||
# The go-control-plane patched for our version of the protobufs
|
||||
$(OSS_HOME)/pkg/envoy-control-plane: $(OSS_HOME)/_cxx/go-control-plane FORCE
|
||||
rm -rf $@
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
tmpdir=$$(mktemp -d); \
|
||||
trap 'rm -rf "$$tmpdir"' EXIT; \
|
||||
cd "$$tmpdir"; \
|
||||
cd $(OSS_HOME)/_cxx/go-control-plane; \
|
||||
cp -r $$(git ls-files ':[A-Z]*' ':!Dockerfile*' ':!Makefile') pkg/* ratelimit "$$tmpdir"; \
|
||||
find "$$tmpdir" -name '*.go' -exec sed -E -i.bak \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/pkg,github.com/emissary-ingress/emissary/v3/pkg/envoy-control-plane,g' \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/envoy,github.com/emissary-ingress/emissary/v3/pkg/api/envoy,g' \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/ratelimit,github.com/emissary-ingress/emissary/v3/pkg/envoy-control-plane/ratelimit,g' \
|
||||
-- {} +; \
|
||||
sed -i.bak -e 's/^package/\n&/' "$$tmpdir/log/log_test.go"; \
|
||||
find "$$tmpdir" -name '*.bak' -delete; \
|
||||
mv "$$tmpdir" $(abspath $@); \
|
||||
}
|
||||
cd $(OSS_HOME) && gofmt -w -s ./pkg/envoy-control-plane/
|
||||
|
||||
######################### Envoy Version and Mirror Check #######################
|
||||
|
||||
old_envoy_commits = $(shell { \
|
||||
{ \
|
||||
git log --patch --format='' -G'^ *ENVOY_COMMIT' -- _cxx/envoy.mk; \
|
||||
|
@ -67,8 +250,10 @@ old_envoy_commits = $(shell { \
|
|||
git log --patch --format='' -G'^ *ENVOY_BASE_IMAGE' 511ca54c3004019758980ba82f708269c373ba28 -- Makefile | sed -n 's/^. *ENVOY_BASE_IMAGE.*-g//p'; \
|
||||
git log --patch --format='' -G'FROM.*envoy.*:' 7593e7dca9aea2f146ddfd5a3676bcc30ee25aff -- Dockerfile | sed -n '/FROM.*envoy.*:/s/.*://p' | sed -e 's/ .*//' -e 's/.*-g//' -e 's/.*-//' -e '/^latest$$/d'; \
|
||||
} | uniq)
|
||||
|
||||
lost_history += 251b7d345 # mentioned in a605b62ee (wip - patched and fixed authentication, Gabriel, 2019-04-04)
|
||||
lost_history += 27770bf3d # mentioned in 026dc4cd4 (updated envoy image, Gabriel, 2019-04-04)
|
||||
|
||||
check-envoy-version: ## Check that Envoy version has been pushed to the right places
|
||||
check-envoy-version: $(OSS_HOME)/_cxx/envoy
|
||||
# First, we're going to check whether the Envoy commit is tagged, which
|
||||
|
@ -101,303 +286,4 @@ check-envoy-version: $(OSS_HOME)/_cxx/envoy
|
|||
# them... except that gcr.io doesn't allow `manifest inspect`.
|
||||
# So just go ahead and do the `pull` :(
|
||||
$(foreach ENVOY_DOCKER_REPO,$(ENVOY_DOCKER_REPOS), docker pull $(ENVOY_DOCKER_TAG) >/dev/null$(NL))
|
||||
$(foreach ENVOY_DOCKER_REPO,$(ENVOY_DOCKER_REPOS), docker pull $(ENVOY_FULL_DOCKER_TAG) >/dev/null$(NL))
|
||||
.PHONY: check-envoy-version
|
||||
|
||||
# See the comment on ENVOY_GO_CONTROL_PLANE_COMMIT at the top of the file for more explanation on how this target works.
|
||||
guess-envoy-go-control-plane-commit: # Have the computer suggest a value for ENVOY_GO_CONTROL_PLANE_COMMIT
|
||||
guess-envoy-go-control-plane-commit: $(OSS_HOME)/_cxx/envoy $(OSS_HOME)/_cxx/go-control-plane
|
||||
@echo
|
||||
@echo '######################################################################'
|
||||
@echo
|
||||
@set -e; { \
|
||||
(cd $(OSS_HOME)/_cxx/go-control-plane && git log --format='%H %s' origin/main) | sed -n 's, Mirrored from envoyproxy/envoy @ , ,p' | \
|
||||
while read -r go_commit cxx_commit; do \
|
||||
if (cd $(OSS_HOME)/_cxx/envoy && git merge-base --is-ancestor "$$cxx_commit" $(ENVOY_COMMIT) 2>/dev/null); then \
|
||||
echo "ENVOY_GO_CONTROL_PLANE_COMMIT = $$go_commit"; \
|
||||
break; \
|
||||
fi; \
|
||||
done; \
|
||||
}
|
||||
.PHONY: guess-envoy-go-control-plane-commit
|
||||
|
||||
#
|
||||
# Envoy sources and build container
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy: FORCE
|
||||
@echo "Getting Envoy sources..."
|
||||
# Ensure that GIT_DIR and GIT_WORK_TREE are unset so that `git bisect`
|
||||
# and friends work properly.
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
git init $@; \
|
||||
cd $@; \
|
||||
if git remote get-url origin &>/dev/null; then \
|
||||
git remote set-url origin $(ENVOY_REPO); \
|
||||
else \
|
||||
git remote add origin $(ENVOY_REPO); \
|
||||
fi; \
|
||||
if [[ $(ENVOY_REPO) == http://github.com/* || $(ENVOY_REPO) == https://github.com/* || $(ENVOY_REPO) == git://github.com/* ]]; then \
|
||||
git remote set-url --push origin git@github.com:$(word 3,$(subst /, ,$(ENVOY_REPO)))/$(patsubst %.git,%,$(word 4,$(subst /, ,$(ENVOY_REPO)))).git; \
|
||||
fi; \
|
||||
git fetch --tags origin; \
|
||||
if [ $(ENVOY_COMMIT) != '-' ]; then \
|
||||
git checkout $(ENVOY_COMMIT); \
|
||||
elif ! git rev-parse HEAD >/dev/null 2>&1; then \
|
||||
git checkout origin/master; \
|
||||
fi; \
|
||||
}
|
||||
$(OSS_HOME)/_cxx/envoy.clean: %.clean:
|
||||
$(if $(filter-out -,$(ENVOY_COMMIT)),rm -rf $*)
|
||||
clobber: $(OSS_HOME)/_cxx/envoy.clean
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy-build-image.txt: $(OSS_HOME)/_cxx/envoy $(tools/write-ifchanged) FORCE
|
||||
@PS4=; set -ex -o pipefail; { \
|
||||
pushd $</ci; \
|
||||
echo "$$(pwd)"; \
|
||||
. envoy_build_sha.sh; \
|
||||
popd; \
|
||||
echo docker.io/envoyproxy/envoy-build-ubuntu:$$ENVOY_BUILD_SHA | $(tools/write-ifchanged) $@; \
|
||||
}
|
||||
clean: $(OSS_HOME)/_cxx/envoy-build-image.txt.rm
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy-build-container.txt: $(OSS_HOME)/_cxx/envoy-build-image.txt FORCE
|
||||
@PS4=; set -ex; { \
|
||||
if [ $@ -nt $< ] && docker exec $$(cat $@) true; then \
|
||||
exit 0; \
|
||||
fi; \
|
||||
if [ -e $@ ]; then \
|
||||
docker kill $$(cat $@) || true; \
|
||||
fi; \
|
||||
docker run --network=host --detach --rm --privileged --volume=envoy-build:/root:rw $$(cat $<) tail -f /dev/null > $@; \
|
||||
}
|
||||
$(OSS_HOME)/_cxx/envoy-build-container.txt.clean: %.clean:
|
||||
if [ -e $* ]; then docker kill $$(cat $*) || true; fi
|
||||
rm -f $*
|
||||
if docker volume inspect envoy-build &>/dev/null; then docker volume rm envoy-build >/dev/null; fi
|
||||
clean: $(OSS_HOME)/_cxx/envoy-build-container.txt.clean
|
||||
|
||||
#
|
||||
# Things that run in the Envoy build container
|
||||
#
|
||||
# We do everything with rsync and a persistent build-container
|
||||
# (instead of using a volume), because
|
||||
# 1. Docker for Mac's osxfs is very slow, so volumes are bad for
|
||||
# macOS users.
|
||||
# 2. Volumes mounts just straight-up don't work for people who use
|
||||
# Minikube's dockerd.
|
||||
ENVOY_SYNC_HOST_TO_DOCKER = rsync -a$(RSYNC_EXTRAS) --partial --delete --blocking-io -e "docker exec -i" $(OSS_HOME)/_cxx/envoy/ $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt):/root/envoy
|
||||
ENVOY_SYNC_DOCKER_TO_HOST = rsync -a$(RSYNC_EXTRAS) --partial --delete --blocking-io -e "docker exec -i" $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt):/root/envoy/ $(OSS_HOME)/_cxx/envoy/
|
||||
|
||||
ENVOY_BASH.cmd = bash -c 'PS4=; set -ex; $(ENVOY_SYNC_HOST_TO_DOCKER); trap '\''$(ENVOY_SYNC_DOCKER_TO_HOST)'\'' EXIT; '$(call quote.shell,$1)
|
||||
ENVOY_BASH.deps = $(OSS_HOME)/_cxx/envoy-build-container.txt
|
||||
|
||||
ENVOY_DOCKER.env += PATH=/opt/llvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
ENVOY_DOCKER.env += CC=clang
|
||||
ENVOY_DOCKER.env += CXX=clang++
|
||||
ENVOY_DOCKER.env += CLANG_FORMAT=/opt/llvm/bin/clang-format
|
||||
ENVOY_DOCKER_EXEC = docker exec --workdir=/root/envoy $(foreach e,$(ENVOY_DOCKER.env), --env=$e ) $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt)
|
||||
|
||||
$(OSS_HOME)/docker/base-envoy/envoy-static: $(ENVOY_BASH.deps) FORCE
|
||||
mkdir -p $(@D)
|
||||
@PS4=; set -ex; { \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ] && docker run --rm --entrypoint=true $(ENVOY_FULL_DOCKER_TAG); then \
|
||||
rsync -a$(RSYNC_EXTRAS) --partial --blocking-io -e 'docker run --rm -i' $$(docker image inspect $(ENVOY_FULL_DOCKER_TAG) --format='{{.Id}}' | sed 's/^sha256://'):/usr/local/bin/envoy-static $@; \
|
||||
else \
|
||||
if [ -z '$(YES_I_AM_OK_WITH_COMPILING_ENVOY)' ]; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo 'error: Envoy compilation triggered, but $$YES_I_AM_OK_WITH_COMPILING_ENVOY is not set'; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
$(call ENVOY_BASH.cmd, \
|
||||
$(ENVOY_DOCKER_EXEC) git config --global --add safe.directory /root/envoy; \
|
||||
$(ENVOY_DOCKER_EXEC) bazel build $(if $(FIPS_MODE), --define boringssl=fips) --verbose_failures -c $(ENVOY_COMPILATION_MODE) --config=clang //source/exe:envoy-static; \
|
||||
rsync -a$(RSYNC_EXTRAS) --partial --blocking-io -e 'docker exec -i' $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt):/root/envoy/bazel-bin/source/exe/envoy-static $@; \
|
||||
); \
|
||||
fi; \
|
||||
}
|
||||
$(OSS_HOME)/docker/base-envoy/envoy-static-stripped: %-stripped: % FORCE
|
||||
@PS4=; set -ex; { \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ] && docker run --rm --entrypoint=true $(ENVOY_FULL_DOCKER_TAG); then \
|
||||
rsync -a$(RSYNC_EXTRAS) --partial --blocking-io -e 'docker run --rm -i' $$(docker image inspect $(ENVOY_FULL_DOCKER_TAG) --format='{{.Id}}' | sed 's/^sha256://'):/usr/local/bin/$(@F) $@; \
|
||||
else \
|
||||
if [ -z '$(YES_I_AM_OK_WITH_COMPILING_ENVOY)' ]; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo 'error: Envoy compilation triggered, but $$YES_I_AM_OK_WITH_COMPILING_ENVOY is not set'; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
rsync -a$(RSYNC_EXTRAS) --partial --blocking-io -e 'docker exec -i' $< $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt):/tmp/$(<F); \
|
||||
docker exec $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt) strip /tmp/$(<F) -o /tmp/$(@F); \
|
||||
rsync -a$(RSYNC_EXTRAS) --partial --blocking-io -e 'docker exec -i' $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt):/tmp/$(@F) $@; \
|
||||
fi; \
|
||||
}
|
||||
clobber: $(OSS_HOME)/docker/base-envoy/envoy-static.rm $(OSS_HOME)/docker/base-envoy/envoy-static-stripped.rm
|
||||
|
||||
check-envoy: ## Run the Envoy test suite
|
||||
check-envoy: $(ENVOY_BASH.deps)
|
||||
@echo 'Testing envoy with Bazel label: "$(ENVOY_TEST_LABEL)"'; \
|
||||
$(call ENVOY_BASH.cmd, \
|
||||
$(ENVOY_DOCKER_EXEC) git config --global --add safe.directory /root/envoy; \
|
||||
$(ENVOY_DOCKER_EXEC) bazel test --config=clang --test_output=errors --verbose_failures -c dbg --test_env=ENVOY_IP_TEST_VERSIONS=v4only $(ENVOY_TEST_LABEL); \
|
||||
)
|
||||
.PHONY: check-envoy
|
||||
|
||||
envoy-shell: ## Run a shell in the Envoy build container
|
||||
envoy-shell: $(ENVOY_BASH.deps)
|
||||
$(call ENVOY_BASH.cmd, \
|
||||
docker exec -it --workdir=/root/envoy $(foreach e,$(ENVOY_DOCKER.env), --env=$e ) $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt) /bin/bash || true; \
|
||||
)
|
||||
.PHONY: envoy-shell
|
||||
|
||||
#
|
||||
# Recipes used by `make generate`; files that get checked in to Git (i.e. protobufs and Go code)
|
||||
#
|
||||
# These targets are depended on by `make generate` in `build-aux/generate.mk`.
|
||||
|
||||
# Raw protobufs
|
||||
$(OSS_HOME)/api/envoy: $(OSS_HOME)/api/%: $(OSS_HOME)/_cxx/envoy
|
||||
rsync --recursive --delete --delete-excluded --prune-empty-dirs --include='*/' --include='*.proto' --exclude='*' $</api/$*/ $@
|
||||
|
||||
# Go generated from the protobufs
|
||||
$(OSS_HOME)/_cxx/envoy/build_go: $(ENVOY_BASH.deps) FORCE
|
||||
$(call ENVOY_BASH.cmd, \
|
||||
$(ENVOY_DOCKER_EXEC) git config --global --add safe.directory /root/envoy; \
|
||||
$(ENVOY_DOCKER_EXEC) python3 -c 'from tools.api.generate_go_protobuf import generate_protobufs; generate_protobufs("@envoy_api//...", "/root/envoy/build_go", "envoy_api")'; \
|
||||
)
|
||||
test -d $@ && touch $@
|
||||
$(OSS_HOME)/pkg/api/envoy: $(OSS_HOME)/pkg/api/%: $(OSS_HOME)/_cxx/envoy/build_go
|
||||
rm -rf $@
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
tmpdir=$$(mktemp -d); \
|
||||
trap 'rm -rf "$$tmpdir"' EXIT; \
|
||||
cp -r $</$* "$$tmpdir"; \
|
||||
find "$$tmpdir" -type f \
|
||||
-exec chmod 644 {} + \
|
||||
-exec sed -E -i.bak \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/envoy,github.com/emissary-ingress/emissary/v3/pkg/api/envoy,g' \
|
||||
-- {} +; \
|
||||
find "$$tmpdir" -name '*.bak' -delete; \
|
||||
mv "$$tmpdir/$*" $@; \
|
||||
}
|
||||
# Envoy's build system still uses an old `protoc-gen-go` that emits
|
||||
# code that Go 1.19's `gofmt` isn't happy with. Even generated code
|
||||
# should be gofmt-clean, so gofmt it as a post-processing step.
|
||||
gofmt -w -s ./pkg/api/envoy
|
||||
|
||||
# The unmodified go-control-plane
|
||||
$(OSS_HOME)/_cxx/go-control-plane: FORCE
|
||||
@echo "Getting Envoy go-control-plane sources..."
|
||||
# Ensure that GIT_DIR and GIT_WORK_TREE are unset so that `git bisect`
|
||||
# and friends work properly.
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
git init $@; \
|
||||
cd $@; \
|
||||
if git remote get-url origin &>/dev/null; then \
|
||||
git remote set-url origin https://github.com/envoyproxy/go-control-plane; \
|
||||
else \
|
||||
git remote add origin https://github.com/envoyproxy/go-control-plane; \
|
||||
fi; \
|
||||
git fetch --tags origin; \
|
||||
git checkout $(ENVOY_GO_CONTROL_PLANE_COMMIT); \
|
||||
}
|
||||
|
||||
# The go-control-plane patched for our version of the protobufs
|
||||
$(OSS_HOME)/pkg/envoy-control-plane: $(OSS_HOME)/_cxx/go-control-plane FORCE
|
||||
rm -rf $@
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
tmpdir=$$(mktemp -d); \
|
||||
trap 'rm -rf "$$tmpdir"' EXIT; \
|
||||
cd "$$tmpdir"; \
|
||||
cd $(OSS_HOME)/_cxx/go-control-plane; \
|
||||
cp -r $$(git ls-files ':[A-Z]*' ':!Dockerfile*' ':!Makefile') pkg/* "$$tmpdir"; \
|
||||
find "$$tmpdir" -name '*.go' -exec sed -E -i.bak \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/pkg,github.com/emissary-ingress/emissary/v3/pkg/envoy-control-plane,g' \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/envoy,github.com/emissary-ingress/emissary/v3/pkg/api/envoy,g' \
|
||||
-- {} +; \
|
||||
sed -i.bak -e 's/^package/\n&/' "$$tmpdir/log/log_test.go"; \
|
||||
find "$$tmpdir" -name '*.bak' -delete; \
|
||||
mv "$$tmpdir" $(abspath $@); \
|
||||
}
|
||||
cd $(OSS_HOME) && gofmt -w -s ./pkg/envoy-control-plane/
|
||||
|
||||
#
|
||||
# `make update-base`: Recompile Envoy and do all of the related things.
|
||||
|
||||
update-base: $(OSS_HOME)/docker/base-envoy/envoy-static $(OSS_HOME)/docker/base-envoy/envoy-static-stripped $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
@PS4=; set -ex; { \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ] && docker pull $(ENVOY_FULL_DOCKER_TAG); then \
|
||||
echo 'Already up-to-date: $(ENVOY_FULL_DOCKER_TAG)'; \
|
||||
ENVOY_VERSION_OUTPUT=$$(docker run --rm -it --entrypoint envoy-static $(ENVOY_FULL_DOCKER_TAG) --version | grep "version:"); \
|
||||
ENVOY_VERSION_EXPECTED="envoy-static .*version:.* $(ENVOY_COMMIT)/.*"; \
|
||||
if ! echo "$$ENVOY_VERSION_OUTPUT" | grep "$$ENVOY_VERSION_EXPECTED"; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo "error: Envoy base image $(ENVOY_FULL_DOCKER_TAG) contains envoy-static binary that reported an unexpected version string!" \
|
||||
"See ENVOY_VERSION_OUTPUT and ENVOY_VERSION_EXPECTED in the output above. This error is usually not recoverable." \
|
||||
"You may need to rebuild the Envoy base image after either updating ENVOY_COMMIT or bumping BASE_ENVOY_RELVER" \
|
||||
"(or both, depending on what you are doing)."; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
if [ -z '$(YES_I_AM_OK_WITH_COMPILING_ENVOY)' ]; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo 'error: Envoy compilation triggered, but $$YES_I_AM_OK_WITH_COMPILING_ENVOY is not set'; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
docker build --build-arg=base=$$(cat $(OSS_HOME)/_cxx/envoy-build-image.txt) -f $(OSS_HOME)/docker/base-envoy/Dockerfile -t $(ENVOY_FULL_DOCKER_TAG) $(OSS_HOME)/docker/base-envoy; \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ]; then \
|
||||
ENVOY_VERSION_OUTPUT=$$(docker run --rm -it --entrypoint envoy-static $(ENVOY_FULL_DOCKER_TAG) --version | grep "version:"); \
|
||||
ENVOY_VERSION_EXPECTED="envoy-static .*version:.* $(ENVOY_COMMIT)/.*"; \
|
||||
if ! echo "$$ENVOY_VERSION_OUTPUT" | grep "$$ENVOY_VERSION_EXPECTED"; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo "error: Envoy base image $(ENVOY_FULL_DOCKER_TAG) contains envoy-static binary that reported an unexpected version string!" \
|
||||
"See ENVOY_VERSION_OUTPUT and ENVOY_VERSION_EXPECTED in the output above. This error is usually not recoverable." \
|
||||
"You may need to rebuild the Envoy base image after either updating ENVOY_COMMIT or bumping BASE_ENVOY_RELVER" \
|
||||
"(or both, depending on what you are doing)."; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
docker push $(ENVOY_FULL_DOCKER_TAG); \
|
||||
fi; \
|
||||
fi; \
|
||||
}
|
||||
@PS4=; set -ex; { \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ] && docker pull $(ENVOY_DOCKER_TAG); then \
|
||||
echo 'Already up-to-date: $(ENVOY_DOCKER_TAG)'; \
|
||||
ENVOY_VERSION_OUTPUT=$$(docker run --rm -it --entrypoint envoy-static-stripped $(ENVOY_DOCKER_TAG) --version | grep "version:"); \
|
||||
ENVOY_VERSION_EXPECTED="envoy-static-stripped .*version:.* $(ENVOY_COMMIT)/.*"; \
|
||||
if ! echo "$$ENVOY_VERSION_OUTPUT" | grep "$$ENVOY_VERSION_EXPECTED"; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo "error: Envoy base image $(ENVOY_DOCKER_TAG) contains envoy-static-stripped binary that reported an unexpected version string!" \
|
||||
"See ENVOY_VERSION_OUTPUT and ENVOY_VERSION_EXPECTED in the output above. This error is usually not recoverable." \
|
||||
"You may need to rebuild the Envoy base image after either updating ENVOY_COMMIT or bumping BASE_ENVOY_RELVER" \
|
||||
"(or both, depending on what you are doing)."; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
if [ -z '$(YES_I_AM_OK_WITH_COMPILING_ENVOY)' ]; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo 'error: Envoy compilation triggered, but $$YES_I_AM_OK_WITH_COMPILING_ENVOY is not set'; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
docker build -f $(OSS_HOME)/docker/base-envoy/Dockerfile.stripped -t $(ENVOY_DOCKER_TAG) $(OSS_HOME)/docker/base-envoy; \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ]; then \
|
||||
ENVOY_VERSION_OUTPUT=$$(docker run --rm -it --entrypoint envoy-static-stripped $(ENVOY_DOCKER_TAG) --version | grep "version:"); \
|
||||
ENVOY_VERSION_EXPECTED="envoy-static-stripped .*version:.* $(ENVOY_COMMIT)/.*"; \
|
||||
if ! echo "$$ENVOY_VERSION_OUTPUT" | grep "$$ENVOY_VERSION_EXPECTED"; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo "error: Envoy base image $(ENVOY_DOCKER_TAG) contains envoy-static-stripped binary that reported an unexpected version string!" \
|
||||
"See ENVOY_VERSION_OUTPUT and ENVOY_VERSION_EXPECTED in the output above. This error is usually not recoverable." \
|
||||
"You may need to rebuild the Envoy base image after either updating ENVOY_COMMIT or bumping BASE_ENVOY_RELVER" \
|
||||
"(or both, depending on what you are doing)."; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
docker push $(ENVOY_DOCKER_TAG); \
|
||||
fi; \
|
||||
fi; \
|
||||
}
|
||||
# `make generate` has to come *after* the above, because builder.sh will
|
||||
# try to use the images that the above create.
|
||||
$(MAKE) generate
|
||||
.PHONY: update-base
|
||||
|
|
|
@ -0,0 +1,74 @@
|
|||
#!/bin/bash
|
||||
|
||||
# The phony make targets have been exported when calling from Make.
|
||||
FIPS_MODE=${FIPS_MODE:-}
|
||||
BUILD_ARCH=${BUILD_ARCH:-linux/amd64}
|
||||
|
||||
# base directory vars
|
||||
OSS_SOURCE="$PWD"
|
||||
BASE_ENVOY_DIR="$PWD/_cxx/envoy"
|
||||
ENVOY_DOCKER_BUILD_DIR="$PWD/_cxx/envoy-docker-build"
|
||||
export ENVOY_DOCKER_BUILD_DIR
|
||||
|
||||
# container vars
|
||||
DOCKER_OPTIONS=(
|
||||
"--platform=${BUILD_ARCH}"
|
||||
"--env=ENVOY_DELIVERY_DIR=/build/envoy/x64/contrib/exe/envoy"
|
||||
"--env=ENVOY_BUILD_TARGET=//contrib/exe:envoy-static"
|
||||
"--env=ENVOY_BUILD_DEBUG_INFORMATION=//contrib/exe:envoy-static.dwp"
|
||||
# "--env=BAZEL_BUILD_OPTIONS=\-\-define tcmalloc=gperftools"
|
||||
)
|
||||
|
||||
# unset ssh auth sock because we don't need it in the container and
|
||||
# the `run_envoy_docker.sh` adds it by default. This causes issues
|
||||
# if trying to run builds on docker for mac.
|
||||
SSH_AUTH_SOCK=""
|
||||
export SSH_AUTH_SOCK
|
||||
|
||||
BAZEL_BUILD_EXTRA_OPTIONS=()
|
||||
if [ -n "$FIPS_MODE" ]; then
|
||||
BAZEL_BUILD_EXTRA_OPTIONS+=(--define boringssl=fips)
|
||||
fi;
|
||||
|
||||
if [ ! -d "$BASE_ENVOY_DIR" ]; then
|
||||
echo "Looks like Envoy hasn't been cloned locally yet, run clone-envoy target to ensure it is cloned";
|
||||
exit 1;
|
||||
fi;
|
||||
|
||||
ENVOY_DOCKER_OPTIONS="${DOCKER_OPTIONS[*]}"
|
||||
export ENVOY_DOCKER_OPTIONS
|
||||
|
||||
echo "Building custom build of Envoy using the following parameters:"
|
||||
echo " FIPS_MODE: ${FIPS_MODE}"
|
||||
echo " BUILD_ARCH: ${BUILD_ARCH}"
|
||||
echo " ENVOY_DOCKER_BUILD_DIR: ${ENVOY_DOCKER_BUILD_DIR}"
|
||||
echo " ENVOY_DOCKER_OPTIONS: ${ENVOY_DOCKER_OPTIONS}"
|
||||
echo " SSH_AUTH_SOCK: ${SSH_AUTH_SOCK}"
|
||||
echo " "
|
||||
|
||||
ci_cmd="./ci/do_ci.sh 'release.server_only'"
|
||||
|
||||
if [ ${#BAZEL_BUILD_EXTRA_OPTIONS[@]} -gt 0 ]; then
|
||||
ci_cmd="BAZEL_BUILD_EXTRA_OPTIONS='${BAZEL_BUILD_EXTRA_OPTIONS[*]}' $ci_cmd"
|
||||
fi;
|
||||
|
||||
echo "cleaning up any old build binaries"
|
||||
rm -rf "$ENVOY_DOCKER_BUILD_DIR/envoy";
|
||||
|
||||
# build envoy
|
||||
cd "${BASE_ENVOY_DIR}" || exit
|
||||
./ci/run_envoy_docker.sh "${ci_cmd}"
|
||||
cd "${OSS_SOURCE}" || exit
|
||||
|
||||
echo "Untar release distribution which includes static builds"
|
||||
tar -xvf "${ENVOY_DOCKER_BUILD_DIR}/envoy/x64/bin/release.tar.zst" -C "${ENVOY_DOCKER_BUILD_DIR}/envoy/x64/bin";
|
||||
|
||||
echo "Copying envoy-static and envoy-static-stripped to 'docker/envoy-build'";
|
||||
cp "${ENVOY_DOCKER_BUILD_DIR}/envoy/x64/bin/dbg/envoy-contrib" "${PWD}/docker/base-envoy/envoy-static"
|
||||
chmod +x "${PWD}/docker/base-envoy/envoy-static"
|
||||
|
||||
cp "${ENVOY_DOCKER_BUILD_DIR}/envoy/x64/bin/dbg/envoy-contrib.dwp" "${PWD}/docker/base-envoy/envoy-static.dwp"
|
||||
chmod +x "${PWD}/docker/base-envoy/envoy-static.dwp"
|
||||
|
||||
cp "${ENVOY_DOCKER_BUILD_DIR}/envoy/x64/bin/envoy-contrib" "${PWD}/docker/base-envoy/envoy-static-stripped"
|
||||
chmod +x "${PWD}/docker/base-envoy/envoy-static-stripped"
|
|
@ -0,0 +1,103 @@
|
|||
#!/bin/bash
|
||||
|
||||
BLUE='\033[0;34m'
|
||||
GREEN='\033[0;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
OSS_SOURCE="${PWD}"
|
||||
|
||||
# envoy directories
|
||||
BASE_ENVOY_DIR="${OSS_SOURCE}/_cxx/envoy"
|
||||
ENVOY_PROTO_API_BASE="${BASE_ENVOY_DIR}/api"
|
||||
ENVOY_COMPILED_GO_BASE="${BASE_ENVOY_DIR}/build_go"
|
||||
|
||||
# Emissary directories
|
||||
EMISSARY_PROTO_API_BASE="${OSS_SOURCE}/api"
|
||||
EMISSARY_COMPILED_PROTO_GO_BASE="${OSS_SOURCE}/pkg/api"
|
||||
|
||||
|
||||
|
||||
# envoy build container settings
|
||||
ENVOY_DOCKER_OPTIONS="--platform=${BUILD_ARCH}"
|
||||
export ENVOY_DOCKER_OPTIONS
|
||||
|
||||
# unset ssh auth sock because we don't need it in the container and
|
||||
# the `run_envoy_docker.sh` adds it by default.
|
||||
SSH_AUTH_SOCK=""
|
||||
export SSH_AUTH_SOCK
|
||||
|
||||
############### copy raw protos into emissary repo ######################
|
||||
|
||||
echo -e "${BLUE}removing existing Envoy Protobuf API from:${GREEN} $EMISSARY_PROTO_API_BASE/envoy";
|
||||
rm -rf "${EMISSARY_PROTO_API_BASE}/envoy"
|
||||
|
||||
echo -e "${BLUE}copying Envoy Protobuf API from ${GREEN} ${ENVOY_PROTO_API_BASE}/envoy ${NC}into ${GREEN}${EMISSARY_PROTO_API_BASE}/envoy";
|
||||
rsync --recursive --delete --delete-excluded --prune-empty-dirs --include='*/' \
|
||||
--include='*.proto' --exclude='*' \
|
||||
"${ENVOY_PROTO_API_BASE}/envoy" "${EMISSARY_PROTO_API_BASE}"
|
||||
|
||||
echo -e "${BLUE}removing existing Envoy Contrib Protobuf API from:${GREEN} ${EMISSARY_PROTO_API_BASE}/contrib";
|
||||
rm -rf "${EMISSARY_PROTO_API_BASE}/contrib"
|
||||
mkdir -p "${EMISSARY_PROTO_API_BASE}/contrib/envoy/extensions/filters/http"
|
||||
|
||||
echo -e "${BLUE}copying Envoy Contrib Protobuf API from ${GREEN} ${ENVOY_PROTO_API_BASE}/contrib ${NC}into ${GREEN}${EMISSARY_PROTO_API_BASE}/contrib";
|
||||
rsync --recursive --delete --delete-excluded --prune-empty-dirs \
|
||||
--include='*/' \
|
||||
--include='*.proto' \
|
||||
--exclude='*' \
|
||||
"${ENVOY_PROTO_API_BASE}/contrib/envoy/extensions/filters/http/golang" "${EMISSARY_PROTO_API_BASE}/contrib/envoy/extensions/filters/http"
|
||||
|
||||
############### compile go protos ######################
|
||||
|
||||
echo -e "${BLUE}compiling go-protobufs in envoy build container${NC}";
|
||||
rm -rf "${ENVOY_COMPILED_GO_BASE}"
|
||||
|
||||
cd "${BASE_ENVOY_DIR}" || exit;
|
||||
./ci/run_envoy_docker.sh "./ci/do_ci.sh 'api.go'";
|
||||
cd "${OSS_SOURCE}" || exit;
|
||||
|
||||
|
||||
############## moving envoy compiled protos to emissary #################
|
||||
echo -e "${BLUE}removing existing compiled protos from: ${GREEN} $EMISSARY_COMPILED_PROTO_GO_BASE/envoy${NC}";
|
||||
rm -rf "${EMISSARY_COMPILED_PROTO_GO_BASE}/envoy"
|
||||
|
||||
echo -e "${BLUE}copying compiled protos from: ${GREEN} ${ENVOY_COMPILED_GO_BASE}/envoy${NC} into ${GREEN}${EMISSARY_COMPILED_PROTO_GO_BASE}/envoy${NC}";
|
||||
rsync --recursive --delete --delete-excluded --prune-empty-dirs \
|
||||
--include='*/' \
|
||||
--include='*.go' \
|
||||
--exclude='*' \
|
||||
"${ENVOY_COMPILED_GO_BASE}/envoy" "${EMISSARY_COMPILED_PROTO_GO_BASE}"
|
||||
|
||||
echo -e "${BLUE}Updating import pkg references from: ${GREEN}github.com/envoyproxy/go-control-plane/envoy ${NC}--> ${GREEN}github.com/emissary-ingress/emissary/v3/pkg/api/envoy${NC}"
|
||||
find "${EMISSARY_COMPILED_PROTO_GO_BASE}/envoy" -type f \
|
||||
-exec chmod 644 {} + \
|
||||
-exec sed -E -i.bak \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/envoy,github.com/emissary-ingress/emissary/v3/pkg/api/envoy,g' \
|
||||
-- {} +;
|
||||
|
||||
find "${EMISSARY_COMPILED_PROTO_GO_BASE}/envoy" -name '*.bak' -delete;
|
||||
|
||||
gofmt -w -s "${EMISSARY_COMPILED_PROTO_GO_BASE}/envoy"
|
||||
|
||||
############## moving contrib compiled protos to emissary #################
|
||||
echo -e "${BLUE}removing existing compiled protos from: ${GREEN} $EMISSARY_COMPILED_PROTO_GO_BASE/contrib${NC}";
|
||||
rm -rf "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib"
|
||||
mkdir -p "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib/envoy/extensions/filters/http"
|
||||
|
||||
echo -e "${BLUE}copying compiled protos from: ${GREEN} ${ENVOY_COMPILED_GO_BASE}/contrib${NC} into ${GREEN}${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib${NC}";
|
||||
rsync --recursive --delete --delete-excluded --prune-empty-dirs \
|
||||
--include='*/' \
|
||||
--include='*.go' \
|
||||
--exclude='*' \
|
||||
"${ENVOY_COMPILED_GO_BASE}/contrib/envoy/extensions/filters/http/golang" "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib/envoy/extensions/filters/http"
|
||||
|
||||
echo -e "${BLUE}Updating import pkg references from: ${GREEN}github.com/envoyproxy/go-control-plane/envoy ${NC}--> ${GREEN}github.com/emissary-ingress/emissary/v3/pkg/api/envoy${NC}"
|
||||
find "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib" -type f \
|
||||
-exec chmod 644 {} + \
|
||||
-exec sed -E -i.bak \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/envoy,github.com/emissary-ingress/emissary/v3/pkg/api/envoy,g' \
|
||||
-- {} +;
|
||||
|
||||
find "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib" -name '*.bak' -delete;
|
||||
|
||||
gofmt -w -s "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib"
|
|
@ -0,0 +1,62 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Input Args capture from Environement Variables
|
||||
# The phone make targets have been configured to pass these along when using Make.
|
||||
default_test_targets="//contrib/golang/... //test/..."
|
||||
FIPS_MODE=${FIPS_MODE:-}
|
||||
BUILD_ARCH=${BUILD_ARCH:-linux/amd64}
|
||||
ENVOY_TEST_LABEL=${ENVOY_TEST_LABEL:-$default_test_targets}
|
||||
|
||||
# static vars
|
||||
OSS_SOURCE="$PWD"
|
||||
BASE_ENVOY_DIR="$PWD/_cxx/envoy"
|
||||
ENVOY_DOCKER_BUILD_DIR="$PWD/_cxx/envoy-docker-build"
|
||||
export ENVOY_DOCKER_BUILD_DIR
|
||||
|
||||
# Dynamic variables
|
||||
DOCKER_OPTIONS=(
|
||||
"--platform=${BUILD_ARCH}"
|
||||
"--network=host"
|
||||
)
|
||||
|
||||
ENVOY_DOCKER_OPTIONS="${DOCKER_OPTIONS[*]}"
|
||||
export ENVOY_DOCKER_OPTIONS
|
||||
|
||||
# unset ssh auth sock because we don't need it in the container and
|
||||
# the `run_envoy_docker.sh` adds it by default.
|
||||
SSH_AUTH_SOCK=""
|
||||
export SSH_AUTH_SOCK
|
||||
|
||||
BAZEL_BUILD_EXTRA_OPTIONS=()
|
||||
if [ -n "$FIPS_MODE" ]; then
|
||||
BAZEL_BUILD_EXTRA_OPTIONS+=(--define boringssl=fips)
|
||||
fi;
|
||||
|
||||
if [ ! -d "$BASE_ENVOY_DIR" ]; then
|
||||
echo "Looks like Envoy hasn't been cloned locally yet, run clone-envoy target to ensure it is cloned";
|
||||
exit 1;
|
||||
fi;
|
||||
|
||||
|
||||
echo "Running Envoy Tests with the following parameters set:"
|
||||
echo " ENVOY_TEST_LABEL: ${ENVOY_TEST_LABEL}"
|
||||
echo " FIPS_MODE: ${FIPS_MODE}"
|
||||
echo " BUILD_ARCH: ${BUILD_ARCH}"
|
||||
echo " ENVOY_DOCKER_BUILD_DIR: ${ENVOY_DOCKER_BUILD_DIR}"
|
||||
echo " ENVOY_DOCKER_OPTIONS: ${ENVOY_DOCKER_OPTIONS}"
|
||||
echo " SSH_AUTH_SOCK: ${SSH_AUTH_SOCK}"
|
||||
echo " BAZEL_BUILD_EXTRA_OPTIONS: ${BAZEL_BUILD_EXTRA_OPTIONS[*]}"
|
||||
echo " "
|
||||
echo " "
|
||||
|
||||
ci_cmd="bazel test --test_output=errors \
|
||||
--verbose_failures -c dbg --test_env=ENVOY_IP_TEST_VERSIONS=v4only \
|
||||
${ENVOY_TEST_LABEL}";
|
||||
|
||||
if [ ${#BAZEL_BUILD_EXTRA_OPTIONS[@]} -gt 0 ]; then
|
||||
ci_cmd="BAZEL_BUILD_EXTRA_OPTIONS='${BAZEL_BUILD_EXTRA_OPTIONS[*]}' $ci_cmd"
|
||||
fi;
|
||||
|
||||
cd "${BASE_ENVOY_DIR}" || exit;
|
||||
./ci/run_envoy_docker.sh "${ci_cmd}";
|
||||
cd "${OSS_SOURCE}" || exit;
|
|
@ -0,0 +1,99 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package envoy.extensions.filters.http.golang.v3alpha;
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
import "xds/annotations/v3/status.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.extensions.filters.http.golang.v3alpha";
|
||||
option java_outer_classname = "GolangProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/http/golang/v3alpha";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
option (xds.annotations.v3.file_status).work_in_progress = true;
|
||||
|
||||
// [#protodoc-title: Golang HTTP filter]
|
||||
//
|
||||
// For an overview of the Golang HTTP filter please see the :ref:`configuration reference documentation <config_http_filters_golang>`.
|
||||
// [#extension: envoy.filters.http.golang]
|
||||
|
||||
// [#next-free-field: 6]
|
||||
message Config {
|
||||
// The meanings are as follows:
|
||||
//
|
||||
// :``MERGE_VIRTUALHOST_ROUTER_FILTER``: Pass all configuration into Go plugin.
|
||||
// :``MERGE_VIRTUALHOST_ROUTER``: Pass merged Virtual host and Router configuration into Go plugin.
|
||||
// :``OVERRIDE``: Pass merged Virtual host, Router, and plugin configuration into Go plugin.
|
||||
//
|
||||
// [#not-implemented-hide:]
|
||||
enum MergePolicy {
|
||||
MERGE_VIRTUALHOST_ROUTER_FILTER = 0;
|
||||
MERGE_VIRTUALHOST_ROUTER = 1;
|
||||
OVERRIDE = 3;
|
||||
}
|
||||
|
||||
// Globally unique ID for a dynamic library file.
|
||||
string library_id = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// Path to a dynamic library implementing the
|
||||
// :repo:`StreamFilter API <contrib/golang/common/go/api.StreamFilter>`
|
||||
// interface.
|
||||
// [#comment:TODO(wangfakang): Support for downloading libraries from remote repositories.]
|
||||
string library_path = 2 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// Globally unique name of the Go plugin.
|
||||
//
|
||||
// This name **must** be consistent with the name registered in ``http::RegisterHttpFilterConfigFactory``,
|
||||
// and can be used to associate :ref:`route and virtualHost plugin configuration
|
||||
// <envoy_v3_api_field_extensions.filters.http.golang.v3alpha.ConfigsPerRoute.plugins_config>`.
|
||||
//
|
||||
string plugin_name = 3 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// Configuration for the Go plugin.
|
||||
//
|
||||
// .. note::
|
||||
// This configuration is only parsed in the go plugin, and is therefore not validated
|
||||
// by Envoy.
|
||||
//
|
||||
// See the :repo:`StreamFilter API <contrib/golang/common/go/api/filter.go>`
|
||||
// for more information about how the plugin's configuration data can be accessed.
|
||||
//
|
||||
google.protobuf.Any plugin_config = 4;
|
||||
|
||||
// Merge policy for plugin configuration.
|
||||
//
|
||||
// The Go plugin configuration supports three dimensions:
|
||||
//
|
||||
// * Virtual host’s :ref:`typed_per_filter_config <envoy_v3_api_field_config.route.v3.VirtualHost.typed_per_filter_config>`
|
||||
// * Route’s :ref:`typed_per_filter_config <envoy_v3_api_field_config.route.v3.Route.typed_per_filter_config>`
|
||||
// * The filter's :ref:`plugin_config <envoy_v3_api_field_extensions.filters.http.golang.v3alpha.Config.plugin_config>`
|
||||
//
|
||||
// [#not-implemented-hide:]
|
||||
MergePolicy merge_policy = 5 [(validate.rules).enum = {defined_only: true}];
|
||||
}
|
||||
|
||||
message RouterPlugin {
|
||||
oneof override {
|
||||
option (validate.required) = true;
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
// Disable the filter for this particular vhost or route.
|
||||
// If disabled is specified in multiple per-filter-configs, the most specific one will be used.
|
||||
bool disabled = 1 [(validate.rules).bool = {const: true}];
|
||||
|
||||
// The config field is used for setting per-route and per-virtualhost plugin config.
|
||||
google.protobuf.Any config = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message ConfigsPerRoute {
|
||||
// Configuration of the Go plugin at the per-router or per-virtualhost level,
|
||||
// keyed on the :ref:`plugin_name <envoy_v3_api_field_extensions.filters.http.golang.v3alpha.Config.plugin_name>`
|
||||
// of the Go plugin.
|
||||
//
|
||||
map<string, RouterPlugin> plugins_config = 1;
|
||||
}
|
|
@ -33,6 +33,7 @@ message ConfigDump {
|
|||
// * ``bootstrap``: :ref:`BootstrapConfigDump <envoy_v3_api_msg_admin.v3.BootstrapConfigDump>`
|
||||
// * ``clusters``: :ref:`ClustersConfigDump <envoy_v3_api_msg_admin.v3.ClustersConfigDump>`
|
||||
// * ``ecds_filter_http``: :ref:`EcdsConfigDump <envoy_v3_api_msg_admin.v3.EcdsConfigDump>`
|
||||
// * ``ecds_filter_quic_listener``: :ref:`EcdsConfigDump <envoy_v3_api_msg_admin.v3.EcdsConfigDump>`
|
||||
// * ``ecds_filter_tcp_listener``: :ref:`EcdsConfigDump <envoy_v3_api_msg_admin.v3.EcdsConfigDump>`
|
||||
// * ``endpoints``: :ref:`EndpointsConfigDump <envoy_v3_api_msg_admin.v3.EndpointsConfigDump>`
|
||||
// * ``listeners``: :ref:`ListenersConfigDump <envoy_v3_api_msg_admin.v3.ListenersConfigDump>`
|
||||
|
|
|
@ -59,7 +59,7 @@ message ServerInfo {
|
|||
config.core.v3.Node node = 7;
|
||||
}
|
||||
|
||||
// [#next-free-field: 39]
|
||||
// [#next-free-field: 41]
|
||||
message CommandLineOptions {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.admin.v2alpha.CommandLineOptions";
|
||||
|
@ -98,6 +98,12 @@ message CommandLineOptions {
|
|||
// See :option:`--use-dynamic-base-id` for details.
|
||||
bool use_dynamic_base_id = 31;
|
||||
|
||||
// See :option:`--skip-hot-restart-on-no-parent` for details.
|
||||
bool skip_hot_restart_on_no_parent = 39;
|
||||
|
||||
// See :option:`--skip-hot-restart-parent-stats` for details.
|
||||
bool skip_hot_restart_parent_stats = 40;
|
||||
|
||||
// See :option:`--base-id-path` for details.
|
||||
string base_id_path = 32;
|
||||
|
||||
|
|
|
@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.api.v2";
|
|||
option java_outer_classname = "CdsProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/api/v2;apiv2";
|
||||
option java_generic_services = true;
|
||||
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.cluster.v3";
|
||||
option (udpa.annotations.file_status).package_version_status = FROZEN;
|
||||
|
||||
|
|
|
@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.api.v2";
|
|||
option java_outer_classname = "EdsProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/api/v2;apiv2";
|
||||
option java_generic_services = true;
|
||||
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.endpoint.v3";
|
||||
option (udpa.annotations.file_status).package_version_status = FROZEN;
|
||||
|
||||
|
|
|
@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.api.v2";
|
|||
option java_outer_classname = "LdsProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/api/v2;apiv2";
|
||||
option java_generic_services = true;
|
||||
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.listener.v3";
|
||||
option (udpa.annotations.file_status).package_version_status = FROZEN;
|
||||
|
||||
|
|
|
@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.api.v2";
|
|||
option java_outer_classname = "RdsProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/api/v2;apiv2";
|
||||
option java_generic_services = true;
|
||||
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3";
|
||||
option (udpa.annotations.file_status).package_version_status = FROZEN;
|
||||
|
||||
|
|
|
@ -1265,7 +1265,7 @@ message Tracing {
|
|||
// Target percentage of requests managed by this HTTP connection manager that will be force
|
||||
// traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`
|
||||
// header is set. This field is a direct analog for the runtime variable
|
||||
// 'tracing.client_sampling' in the :ref:`HTTP Connection Manager
|
||||
// 'tracing.client_enabled' in the :ref:`HTTP Connection Manager
|
||||
// <config_http_conn_man_runtime>`.
|
||||
// Default: 100%
|
||||
type.FractionalPercent client_sampling = 1;
|
||||
|
|
|
@ -39,7 +39,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN;
|
|||
// fragments:
|
||||
// - header_value_extractor:
|
||||
// name: X-Route-Selector
|
||||
// element_separator: ,
|
||||
// element_separator: ","
|
||||
// element:
|
||||
// separator: =
|
||||
// key: vip
|
||||
|
|
|
@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.api.v2";
|
|||
option java_outer_classname = "SrdsProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/api/v2;apiv2";
|
||||
option java_generic_services = true;
|
||||
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3";
|
||||
option (udpa.annotations.file_status).package_version_status = FROZEN;
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@ package envoy.config.accesslog.v3;
|
|||
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
import "envoy/config/route/v3/route_components.proto";
|
||||
import "envoy/data/accesslog/v3/accesslog.proto";
|
||||
import "envoy/type/matcher/v3/metadata.proto";
|
||||
import "envoy/type/v3/percent.proto";
|
||||
|
||||
|
@ -43,7 +44,7 @@ message AccessLog {
|
|||
}
|
||||
}
|
||||
|
||||
// [#next-free-field: 13]
|
||||
// [#next-free-field: 14]
|
||||
message AccessLogFilter {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.filter.accesslog.v2.AccessLogFilter";
|
||||
|
@ -87,6 +88,9 @@ message AccessLogFilter {
|
|||
|
||||
// Metadata Filter
|
||||
MetadataFilter metadata_filter = 12;
|
||||
|
||||
// Log Type Filter
|
||||
LogTypeFilter log_type_filter = 13;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -122,7 +126,10 @@ message StatusCodeFilter {
|
|||
ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];
|
||||
}
|
||||
|
||||
// Filters on total request duration in milliseconds.
|
||||
// Filters based on the duration of the request or stream, in milliseconds.
|
||||
// For end of stream access logs, the total duration of the stream will be used.
|
||||
// For :ref:`periodic access logs<arch_overview_access_log_periodic>`,
|
||||
// the duration of the stream at the time of log recording will be used.
|
||||
message DurationFilter {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.filter.accesslog.v2.DurationFilter";
|
||||
|
@ -247,6 +254,9 @@ message ResponseFlagFilter {
|
|||
in: "UPE"
|
||||
in: "NC"
|
||||
in: "OM"
|
||||
in: "DF"
|
||||
in: "DO"
|
||||
in: "DR"
|
||||
}
|
||||
}
|
||||
}];
|
||||
|
@ -307,6 +317,17 @@ message MetadataFilter {
|
|||
google.protobuf.BoolValue match_if_key_not_found = 2;
|
||||
}
|
||||
|
||||
// Filters based on access log type.
|
||||
message LogTypeFilter {
|
||||
// Logs only records which their type is one of the types defined in this field.
|
||||
repeated data.accesslog.v3.AccessLogType types = 1
|
||||
[(validate.rules).repeated = {items {enum {defined_only: true}}}];
|
||||
|
||||
// If this field is set to true, the filter will instead block all records
|
||||
// with a access log type in types field, and allow all other records.
|
||||
bool exclude = 2;
|
||||
}
|
||||
|
||||
// Extension filter is statically registered at runtime.
|
||||
message ExtensionFilter {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
|
|
|
@ -41,7 +41,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// <config_overview_bootstrap>` for more detail.
|
||||
|
||||
// Bootstrap :ref:`configuration overview <config_overview_bootstrap>`.
|
||||
// [#next-free-field: 37]
|
||||
// [#next-free-field: 42]
|
||||
message Bootstrap {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.bootstrap.v2.Bootstrap";
|
||||
|
@ -101,6 +101,48 @@ message Bootstrap {
|
|||
core.v3.ApiConfigSource ads_config = 3;
|
||||
}
|
||||
|
||||
message ApplicationLogConfig {
|
||||
message LogFormat {
|
||||
oneof log_format {
|
||||
option (validate.required) = true;
|
||||
|
||||
// Flush application logs in JSON format. The configured JSON struct can
|
||||
// support all the format flags specified in the :option:`--log-format`
|
||||
// command line options section, except for the ``%v`` and ``%_`` flags.
|
||||
google.protobuf.Struct json_format = 1;
|
||||
|
||||
// Flush application log in a format defined by a string. The text format
|
||||
// can support all the format flags specified in the :option:`--log-format`
|
||||
// command line option section.
|
||||
string text_format = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Optional field to set the application logs format. If this field is set, it will override
|
||||
// the default log format. Setting both this field and :option:`--log-format` command line
|
||||
// option is not allowed, and will cause a bootstrap error.
|
||||
LogFormat log_format = 1;
|
||||
}
|
||||
|
||||
message DeferredStatOptions {
|
||||
// When the flag is enabled, Envoy will lazily initialize a subset of the stats (see below).
|
||||
// This will save memory and CPU cycles when creating the objects that own these stats, if those
|
||||
// stats are never referenced throughout the lifetime of the process. However, it will incur additional
|
||||
// memory overhead for these objects, and a small increase of CPU usage when a at least one of the stats
|
||||
// is updated for the first time.
|
||||
// Groups of stats that will be lazily initialized:
|
||||
// - Cluster traffic stats: a subgroup of the :ref:`cluster statistics <config_cluster_manager_cluster_stats>`
|
||||
// that are used when requests are routed to the cluster.
|
||||
bool enable_deferred_creation_stats = 1;
|
||||
}
|
||||
|
||||
message GrpcAsyncClientManagerConfig {
|
||||
// Optional field to set the expiration time for the cached gRPC client object.
|
||||
// The minimal value is 5s and the default is 50s.
|
||||
google.protobuf.Duration max_cached_entry_idle_duration = 1
|
||||
[(validate.rules).duration = {gte {seconds: 5}}];
|
||||
}
|
||||
|
||||
reserved 10, 11;
|
||||
|
||||
reserved "runtime";
|
||||
|
@ -163,6 +205,9 @@ message Bootstrap {
|
|||
// Optional set of stats sinks.
|
||||
repeated metrics.v3.StatsSink stats_sinks = 6;
|
||||
|
||||
// Options to control behaviors of deferred creation compatible stats.
|
||||
DeferredStatOptions deferred_stat_options = 39;
|
||||
|
||||
// Configuration for internal processing of stats.
|
||||
metrics.v3.StatsConfig stats_config = 13;
|
||||
|
||||
|
@ -354,6 +399,22 @@ message Bootstrap {
|
|||
// See :repo:`xds_config_tracker_integration_test <test/integration/xds_config_tracker_integration_test.cc>`
|
||||
// for an example usage of the interface.
|
||||
core.v3.TypedExtensionConfig xds_config_tracker_extension = 36;
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
// This controls the type of listener manager configured for Envoy. Currently
|
||||
// Envoy only supports ListenerManager for this field and Envoy Mobile
|
||||
// supports ApiListenerManager.
|
||||
core.v3.TypedExtensionConfig listener_manager = 37;
|
||||
|
||||
// Optional application log configuration.
|
||||
ApplicationLogConfig application_log_config = 38;
|
||||
|
||||
// Optional gRPC async manager config.
|
||||
GrpcAsyncClientManagerConfig grpc_async_client_manager_config = 40;
|
||||
|
||||
// Optional configuration for memory allocation manager.
|
||||
// Memory releasing is only supported for `tcmalloc allocator <https://github.com/google/tcmalloc>`_.
|
||||
MemoryAllocatorManager memory_allocator_manager = 41;
|
||||
}
|
||||
|
||||
// Administration interface :ref:`operations documentation
|
||||
|
@ -391,6 +452,7 @@ message Admin {
|
|||
}
|
||||
|
||||
// Cluster manager :ref:`architecture overview <arch_overview_cluster_manager>`.
|
||||
// [#next-free-field: 6]
|
||||
message ClusterManager {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.bootstrap.v2.ClusterManager";
|
||||
|
@ -431,6 +493,11 @@ message ClusterManager {
|
|||
// <envoy_v3_api_field_config.core.v3.ApiConfigSource.api_type>` :ref:`GRPC
|
||||
// <envoy_v3_api_enum_value_config.core.v3.ApiConfigSource.ApiType.GRPC>`.
|
||||
core.v3.ApiConfigSource load_stats_config = 4;
|
||||
|
||||
// Whether the ClusterManager will create clusters on the worker threads
|
||||
// inline during requests. This will save memory and CPU cycles in cases where
|
||||
// there are lots of inactive clusters and > 1 worker thread.
|
||||
bool enable_deferred_cluster_creation = 5;
|
||||
}
|
||||
|
||||
// Allows you to specify different watchdog configs for different subsystems.
|
||||
|
@ -671,3 +738,14 @@ message CustomInlineHeader {
|
|||
// The type of the header that is expected to be set as the inline header.
|
||||
InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}];
|
||||
}
|
||||
|
||||
message MemoryAllocatorManager {
|
||||
// Configures tcmalloc to perform background release of free memory in amount of bytes per ``memory_release_interval`` interval.
|
||||
// If equals to ``0``, no memory release will occur. Defaults to ``0``.
|
||||
uint64 bytes_to_release = 1;
|
||||
|
||||
// Interval in milliseconds for memory releasing. If specified, during every
|
||||
// interval Envoy will try to release ``bytes_to_release`` of free memory back to operating system for reuse.
|
||||
// Defaults to 1000 milliseconds.
|
||||
google.protobuf.Duration memory_release_interval = 2;
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@ import "envoy/config/core/v3/health_check.proto";
|
|||
import "envoy/config/core/v3/protocol.proto";
|
||||
import "envoy/config/core/v3/resolver.proto";
|
||||
import "envoy/config/endpoint/v3/endpoint.proto";
|
||||
import "envoy/type/metadata/v3/metadata.proto";
|
||||
import "envoy/type/v3/percent.proto";
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
@ -44,7 +45,7 @@ message ClusterCollection {
|
|||
}
|
||||
|
||||
// Configuration for a single upstream cluster.
|
||||
// [#next-free-field: 57]
|
||||
// [#next-free-field: 58]
|
||||
message Cluster {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster";
|
||||
|
||||
|
@ -167,7 +168,7 @@ message Cluster {
|
|||
// The name of the match, used in stats generation.
|
||||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// Optional endpoint metadata match criteria.
|
||||
// Optional metadata match criteria.
|
||||
// The connection to the endpoint with metadata matching what is set in this field
|
||||
// will use the transport socket configuration specified here.
|
||||
// The endpoint's metadata entry in ``envoy.transport_socket_match`` is used to match
|
||||
|
@ -551,6 +552,10 @@ message Cluster {
|
|||
// The port to override for the original dst address. This port
|
||||
// will take precedence over filter state and header override ports
|
||||
google.protobuf.UInt32Value upstream_port_override = 3 [(validate.rules).uint32 = {lte: 65535}];
|
||||
|
||||
// The dynamic metadata key to override destination address.
|
||||
// First the request metadata is considered, then the connection one.
|
||||
type.metadata.v3.MetadataKey metadata_key = 4;
|
||||
}
|
||||
|
||||
// Common configuration for all load balancer implementations.
|
||||
|
@ -719,7 +724,7 @@ message Cluster {
|
|||
google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1
|
||||
[(validate.rules).double = {lte: 3.0 gte: 1.0}];
|
||||
|
||||
// Indicates how many many streams (rounded up) can be anticipated across a cluster for each
|
||||
// Indicates how many streams (rounded up) can be anticipated across a cluster for each
|
||||
// stream, useful for low QPS services. This is currently supported for a subset of
|
||||
// deterministic non-hash-based load-balancing algorithms (weighted round robin, random).
|
||||
// Unlike ``per_upstream_preconnect_ratio`` this preconnects across the upstream instances in a
|
||||
|
@ -749,12 +754,14 @@ message Cluster {
|
|||
|
||||
reserved "hosts", "tls_context", "extension_protocol_options";
|
||||
|
||||
// Configuration to use different transport sockets for different endpoints.
|
||||
// The entry of ``envoy.transport_socket_match`` in the
|
||||
// :ref:`LbEndpoint.Metadata <envoy_v3_api_field_config.endpoint.v3.LbEndpoint.metadata>`
|
||||
// is used to match against the transport sockets as they appear in the list. The first
|
||||
// :ref:`match <envoy_v3_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>` is used.
|
||||
// For example, with the following match
|
||||
// Configuration to use different transport sockets for different endpoints. The entry of
|
||||
// ``envoy.transport_socket_match`` in the :ref:`LbEndpoint.Metadata
|
||||
// <envoy_v3_api_field_config.endpoint.v3.LbEndpoint.metadata>` is used to match against the
|
||||
// transport sockets as they appear in the list. If a match is not found, the search continues in
|
||||
// :ref:`LocalityLbEndpoints.Metadata
|
||||
// <envoy_v3_api_field_config.endpoint.v3.LocalityLbEndpoints.metadata>`. The first :ref:`match
|
||||
// <envoy_v3_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>` is used. For example, with
|
||||
// the following match
|
||||
//
|
||||
// .. code-block:: yaml
|
||||
//
|
||||
|
@ -778,8 +785,9 @@ message Cluster {
|
|||
// socket match in case above.
|
||||
//
|
||||
// If an endpoint metadata's value under ``envoy.transport_socket_match`` does not match any
|
||||
// ``TransportSocketMatch``, socket configuration fallbacks to use the ``tls_context`` or
|
||||
// ``transport_socket`` specified in this cluster.
|
||||
// ``TransportSocketMatch``, the locality metadata is then checked for a match. Barring any
|
||||
// matches in the endpoint or locality metadata, the socket configuration fallbacks to use the
|
||||
// ``tls_context`` or ``transport_socket`` specified in this cluster.
|
||||
//
|
||||
// This field allows gradual and flexible transport socket configuration changes.
|
||||
//
|
||||
|
@ -1010,7 +1018,8 @@ message Cluster {
|
|||
|
||||
// Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for
|
||||
// :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,
|
||||
// or :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`.
|
||||
// or :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,
|
||||
// or :ref:`Redis Cluster<arch_overview_redis>`.
|
||||
// If true, cluster readiness blocks on warm-up. If false, the cluster will complete
|
||||
// initialization whether or not warm-up has completed. Defaults to true.
|
||||
google.protobuf.BoolValue wait_for_warm_on_init = 54;
|
||||
|
@ -1142,6 +1151,22 @@ message Cluster {
|
|||
// from the LRS stream here.]
|
||||
core.v3.ConfigSource lrs_server = 42;
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
// A list of metric names from ORCA load reports to propagate to LRS.
|
||||
//
|
||||
// For map fields in the ORCA proto, the string will be of the form ``<map_field_name>.<map_key>``.
|
||||
// For example, the string ``named_metrics.foo`` will mean to look for the key ``foo`` in the ORCA
|
||||
// ``named_metrics`` field.
|
||||
//
|
||||
// The special map key ``*`` means to report all entries in the map (e.g., ``named_metrics.*`` means to
|
||||
// report all entries in the ORCA named_metrics field). Note that this should be used only with trusted
|
||||
// backends.
|
||||
//
|
||||
// The metric names in LRS will follow the same semantics as this field. In other words, if this field
|
||||
// contains ``named_metrics.foo``, then the LRS load report will include the data with that same string
|
||||
// as the key.
|
||||
repeated string lrs_report_endpoint_metrics = 57;
|
||||
|
||||
// If track_timeout_budgets is true, the :ref:`timeout budget histograms
|
||||
// <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each
|
||||
// request. These show what percentage of a request's per try and global timeout was used. A value
|
||||
|
@ -1230,6 +1255,26 @@ message UpstreamConnectionOptions {
|
|||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.UpstreamConnectionOptions";
|
||||
|
||||
enum FirstAddressFamilyVersion {
|
||||
// respect the native ranking of destination ip addresses returned from dns
|
||||
// resolution
|
||||
DEFAULT = 0;
|
||||
|
||||
V4 = 1;
|
||||
|
||||
V6 = 2;
|
||||
}
|
||||
|
||||
message HappyEyeballsConfig {
|
||||
// Specify the IP address family to attempt connection first in happy
|
||||
// eyeballs algorithm according to RFC8305#section-4.
|
||||
FirstAddressFamilyVersion first_address_family_version = 1;
|
||||
|
||||
// Specify the number of addresses of the first_address_family_version being
|
||||
// attempted for connection before the other address family.
|
||||
google.protobuf.UInt32Value first_address_family_count = 2 [(validate.rules).uint32 = {gte: 1}];
|
||||
}
|
||||
|
||||
// If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.
|
||||
core.v3.TcpKeepalive tcp_keepalive = 1;
|
||||
|
||||
|
@ -1237,6 +1282,11 @@ message UpstreamConnectionOptions {
|
|||
// This can be used by extensions during processing of requests. The association mechanism is
|
||||
// implementation specific. Defaults to false due to performance concerns.
|
||||
bool set_local_interface_name_on_upstream_connections = 2;
|
||||
|
||||
// Configurations for happy eyeballs algorithm.
|
||||
// Add configs for first_address_family_version and first_address_family_count
|
||||
// when sorting destination ip addresses.
|
||||
HappyEyeballsConfig happy_eyeballs_config = 3;
|
||||
}
|
||||
|
||||
message TrackClusterStats {
|
||||
|
@ -1251,4 +1301,19 @@ message TrackClusterStats {
|
|||
// <config_cluster_manager_cluster_stats_request_response_sizes>` tracking header and body sizes
|
||||
// of requests and responses will be published.
|
||||
bool request_response_sizes = 2;
|
||||
|
||||
// If true, some stats will be emitted per-endpoint, similar to the stats in admin ``/clusters``
|
||||
// output.
|
||||
//
|
||||
// This does not currently output correct stats during a hot-restart.
|
||||
//
|
||||
// This is not currently implemented by all stat sinks.
|
||||
//
|
||||
// These stats do not honor filtering or tag extraction rules in :ref:`StatsConfig
|
||||
// <envoy_v3_api_msg_config.metrics.v3.StatsConfig>` (but fixed-value tags are supported). Admin
|
||||
// endpoint filtering is supported.
|
||||
//
|
||||
// This may not be used at the same time as
|
||||
// :ref:`load_stats_config <envoy_v3_api_field_config.bootstrap.v3.ClusterManager.load_stats_config>`.
|
||||
bool per_endpoint_stats = 3;
|
||||
}
|
||||
|
|
|
@ -2,6 +2,8 @@ syntax = "proto3";
|
|||
|
||||
package envoy.config.cluster.v3;
|
||||
|
||||
import "envoy/config/core/v3/config_source.proto";
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
|
@ -14,8 +16,8 @@ option java_multiple_files = true;
|
|||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3;clusterv3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Upstream filters]
|
||||
// Upstream filters apply to the connections to the upstream cluster hosts.
|
||||
// [#protodoc-title: Upstream network filters]
|
||||
// Upstream network filters apply to the connections to the upstream cluster hosts.
|
||||
|
||||
message Filter {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.Filter";
|
||||
|
@ -26,6 +28,13 @@ message Filter {
|
|||
// Filter specific configuration which depends on the filter being
|
||||
// instantiated. See the supported filters for further documentation.
|
||||
// Note that Envoy's :ref:`downstream network
|
||||
// filters <config_network_filters>` are not valid upstream filters.
|
||||
// filters <config_network_filters>` are not valid upstream network filters.
|
||||
// Only one of typed_config or config_discovery can be used.
|
||||
google.protobuf.Any typed_config = 2;
|
||||
|
||||
// Configuration source specifier for an extension configuration discovery
|
||||
// service. In case of a failure and without the default configuration, the
|
||||
// listener closes the connections.
|
||||
// Only one of typed_config or config_discovery can be used.
|
||||
core.v3.ExtensionConfigSource config_discovery = 3;
|
||||
}
|
||||
|
|
|
@ -2,6 +2,8 @@ syntax = "proto3";
|
|||
|
||||
package envoy.config.cluster.v3;
|
||||
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
|
||||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
|
@ -19,7 +21,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// See the :ref:`architecture overview <arch_overview_outlier_detection>` for
|
||||
// more information on outlier detection.
|
||||
// [#next-free-field: 23]
|
||||
// [#next-free-field: 26]
|
||||
message OutlierDetection {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.cluster.OutlierDetection";
|
||||
|
@ -40,8 +42,8 @@ message OutlierDetection {
|
|||
// Defaults to 30000ms or 30s.
|
||||
google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];
|
||||
|
||||
// The maximum % of an upstream cluster that can be ejected due to outlier
|
||||
// detection. Defaults to 10% but will eject at least one host regardless of the value.
|
||||
// The maximum % of an upstream cluster that can be ejected due to outlier detection. Defaults to 10% .
|
||||
// Will eject at least one host regardless of the value if :ref:`always_eject_one_host<envoy_v3_api_field_config.cluster.v3.OutlierDetection.always_eject_one_host>` is enabled.
|
||||
google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];
|
||||
|
||||
// The % chance that a host will be actually ejected when an outlier status
|
||||
|
@ -161,4 +163,18 @@ message OutlierDetection {
|
|||
// See :ref:`max_ejection_time_jitter<envoy_v3_api_field_config.cluster.v3.OutlierDetection.base_ejection_time>`
|
||||
// Defaults to 0s.
|
||||
google.protobuf.Duration max_ejection_time_jitter = 22;
|
||||
|
||||
// If active health checking is enabled and a host is ejected by outlier detection, a successful active health check
|
||||
// unejects the host by default and considers it as healthy. Unejection also clears all the outlier detection counters.
|
||||
// To change this default behavior set this config to ``false`` where active health checking will not uneject the host.
|
||||
// Defaults to true.
|
||||
google.protobuf.BoolValue successful_active_health_check_uneject_host = 23;
|
||||
|
||||
// Set of host's passive monitors.
|
||||
// [#not-implemented-hide:]
|
||||
repeated core.v3.TypedExtensionConfig monitors = 24;
|
||||
|
||||
// If enabled, at least one host is ejected regardless of the value of :ref:`max_ejection_percent<envoy_v3_api_field_config.cluster.v3.OutlierDetection.max_ejection_percent>`.
|
||||
// Defaults to false.
|
||||
google.protobuf.BoolValue always_eject_one_host = 25;
|
||||
}
|
||||
|
|
|
@ -6,8 +6,6 @@ import "envoy/config/core/v3/extension.proto";
|
|||
import "envoy/config/route/v3/route_components.proto";
|
||||
import "envoy/type/matcher/v3/string.proto";
|
||||
|
||||
import "xds/annotations/v3/status.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
|
@ -24,9 +22,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// is found the action specified by the most specific on_no_match will be evaluated.
|
||||
// As an on_no_match might result in another matching tree being evaluated, this process
|
||||
// might repeat several times until the final OnMatch (or no match) is decided.
|
||||
//
|
||||
// .. note::
|
||||
// Please use the syntactically equivalent :ref:`matching API <envoy_v3_api_msg_.xds.type.matcher.v3.Matcher>`
|
||||
message Matcher {
|
||||
option (xds.annotations.v3.message_status).work_in_progress = true;
|
||||
|
||||
// What to do if a match is successful.
|
||||
message OnMatch {
|
||||
oneof on_match {
|
||||
|
|
|
@ -2,6 +2,7 @@ syntax = "proto3";
|
|||
|
||||
package envoy.config.core.v3;
|
||||
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
import "envoy/config/core/v3/socket_option.proto";
|
||||
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
@ -130,7 +131,7 @@ message ExtraSourceAddress {
|
|||
SocketOptionsOverride socket_options = 2;
|
||||
}
|
||||
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 7]
|
||||
message BindConfig {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BindConfig";
|
||||
|
||||
|
@ -150,20 +151,22 @@ message BindConfig {
|
|||
// precompiled binaries.
|
||||
repeated SocketOption socket_options = 3;
|
||||
|
||||
// Extra source addresses appended to the address specified in the `source_address`
|
||||
// field. This enables to specify multiple source addresses. Currently, only one extra
|
||||
// address can be supported, and the extra address should have a different IP version
|
||||
// with the address in the `source_address` field. The address which has the same IP
|
||||
// version with the target host's address IP version will be used as bind address. If more
|
||||
// than one extra address specified, only the first address matched IP version will be
|
||||
// returned. If there is no same IP version address found, the address in the `source_address`
|
||||
// will be returned.
|
||||
// Extra source addresses appended to the address specified in the ``source_address``
|
||||
// field. This enables to specify multiple source addresses.
|
||||
// The source address selection is determined by :ref:`local_address_selector
|
||||
// <envoy_v3_api_field_config.core.v3.BindConfig.local_address_selector>`.
|
||||
repeated ExtraSourceAddress extra_source_addresses = 5;
|
||||
|
||||
// Deprecated by
|
||||
// :ref:`extra_source_addresses <envoy_v3_api_field_config.core.v3.BindConfig.extra_source_addresses>`
|
||||
repeated SocketAddress additional_source_addresses = 4
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
// Custom local address selector to override the default (i.e.
|
||||
// :ref:`DefaultLocalAddressSelector
|
||||
// <envoy_v3_api_msg_config.upstream.local_address_selector.v3.DefaultLocalAddressSelector>`).
|
||||
// [#extension-category: envoy.upstream.local_address_selector]
|
||||
TypedExtensionConfig local_address_selector = 6;
|
||||
}
|
||||
|
||||
// Addresses specify either a logical or physical address and port, which are
|
||||
|
|
|
@ -245,7 +245,8 @@ message Metadata {
|
|||
// :ref:`typed_filter_metadata <envoy_v3_api_field_config.core.v3.Metadata.typed_filter_metadata>`
|
||||
// fields are present in the metadata with same keys,
|
||||
// only ``typed_filter_metadata`` field will be parsed.
|
||||
map<string, google.protobuf.Struct> filter_metadata = 1;
|
||||
map<string, google.protobuf.Struct> filter_metadata = 1
|
||||
[(validate.rules).map = {keys {string {min_len: 1}}}];
|
||||
|
||||
// Key is the reverse DNS filter name, e.g. com.acme.widget. The ``envoy.*``
|
||||
// namespace is reserved for Envoy's built-in filters.
|
||||
|
@ -253,7 +254,8 @@ message Metadata {
|
|||
// If both :ref:`filter_metadata <envoy_v3_api_field_config.core.v3.Metadata.filter_metadata>`
|
||||
// and ``typed_filter_metadata`` fields are present in the metadata with same keys,
|
||||
// only ``typed_filter_metadata`` field will be parsed.
|
||||
map<string, google.protobuf.Any> typed_filter_metadata = 2;
|
||||
map<string, google.protobuf.Any> typed_filter_metadata = 2
|
||||
[(validate.rules).map = {keys {string {min_len: 1}}}];
|
||||
}
|
||||
|
||||
// Runtime derived uint32 with a default when not specified.
|
||||
|
@ -301,6 +303,59 @@ message RuntimeFeatureFlag {
|
|||
string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
|
||||
}
|
||||
|
||||
message KeyValue {
|
||||
// The key of the key/value pair.
|
||||
string key = 1 [(validate.rules).string = {min_len: 1 max_bytes: 16384}];
|
||||
|
||||
// The value of the key/value pair.
|
||||
bytes value = 2;
|
||||
}
|
||||
|
||||
// Key/value pair plus option to control append behavior. This is used to specify
|
||||
// key/value pairs that should be appended to a set of existing key/value pairs.
|
||||
message KeyValueAppend {
|
||||
// Describes the supported actions types for key/value pair append action.
|
||||
enum KeyValueAppendAction {
|
||||
// If the key already exists, this action will result in the following behavior:
|
||||
//
|
||||
// - Comma-concatenated value if multiple values are not allowed.
|
||||
// - New value added to the list of values if multiple values are allowed.
|
||||
//
|
||||
// If the key doesn't exist then this will add pair with specified key and value.
|
||||
APPEND_IF_EXISTS_OR_ADD = 0;
|
||||
|
||||
// This action will add the key/value pair if it doesn't already exist. If the
|
||||
// key already exists then this will be a no-op.
|
||||
ADD_IF_ABSENT = 1;
|
||||
|
||||
// This action will overwrite the specified value by discarding any existing
|
||||
// values if the key already exists. If the key doesn't exist then this will add
|
||||
// the pair with specified key and value.
|
||||
OVERWRITE_IF_EXISTS_OR_ADD = 2;
|
||||
|
||||
// This action will overwrite the specified value by discarding any existing
|
||||
// values if the key already exists. If the key doesn't exist then this will
|
||||
// be no-op.
|
||||
OVERWRITE_IF_EXISTS = 3;
|
||||
}
|
||||
|
||||
// Key/value pair entry that this option to append or overwrite.
|
||||
KeyValue entry = 1 [(validate.rules).message = {required: true}];
|
||||
|
||||
// Describes the action taken to append/overwrite the given value for an existing
|
||||
// key or to only add this key if it's absent.
|
||||
KeyValueAppendAction action = 2 [(validate.rules).enum = {defined_only: true}];
|
||||
}
|
||||
|
||||
// Key/value pair to append or remove.
|
||||
message KeyValueMutation {
|
||||
// Key/value pair to append or overwrite. Only one of ``append`` or ``remove`` can be set.
|
||||
KeyValueAppend append = 1;
|
||||
|
||||
// Key to remove. Only one of ``append`` or ``remove`` can be set.
|
||||
string remove = 2 [(validate.rules).string = {max_bytes: 16384}];
|
||||
}
|
||||
|
||||
// Query parameter name/value pair.
|
||||
message QueryParameter {
|
||||
// The key of the query parameter. Case sensitive.
|
||||
|
@ -324,8 +379,18 @@ message HeaderValue {
|
|||
// The same :ref:`format specifier <config_access_log_format>` as used for
|
||||
// :ref:`HTTP access logging <config_access_log>` applies here, however
|
||||
// unknown header values are replaced with the empty string instead of ``-``.
|
||||
// Header value is encoded as string. This does not work for non-utf8 characters.
|
||||
// Only one of ``value`` or ``raw_value`` can be set.
|
||||
string value = 2 [
|
||||
(validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}
|
||||
(validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false},
|
||||
(udpa.annotations.field_migrate).oneof_promotion = "value_type"
|
||||
];
|
||||
|
||||
// Header value is encoded as bytes which can support non-utf8 characters.
|
||||
// Only one of ``value`` or ``raw_value`` can be set.
|
||||
bytes raw_value = 3 [
|
||||
(validate.rules).bytes = {min_len: 0 max_len: 16384},
|
||||
(udpa.annotations.field_migrate).oneof_promotion = "value_type"
|
||||
];
|
||||
}
|
||||
|
||||
|
@ -336,9 +401,12 @@ message HeaderValueOption {
|
|||
|
||||
// Describes the supported actions types for header append action.
|
||||
enum HeaderAppendAction {
|
||||
// This action will append the specified value to the existing values if the header
|
||||
// already exists. If the header doesn't exist then this will add the header with
|
||||
// specified key and value.
|
||||
// If the header already exists, this action will result in:
|
||||
//
|
||||
// - Comma-concatenated for predefined inline headers.
|
||||
// - Duplicate header added in the ``HeaderMap`` for other headers.
|
||||
//
|
||||
// If the header doesn't exist then this will add new header with specified key and value.
|
||||
APPEND_IF_EXISTS_OR_ADD = 0;
|
||||
|
||||
// This action will add the header if it doesn't already exist. If the header
|
||||
|
@ -349,6 +417,10 @@ message HeaderValueOption {
|
|||
// the header already exists. If the header doesn't exist then this will add the header
|
||||
// with specified key and value.
|
||||
OVERWRITE_IF_EXISTS_OR_ADD = 2;
|
||||
|
||||
// This action will overwrite the specified value by discarding any existing values if
|
||||
// the header already exists. If the header doesn't exist then this will be no-op.
|
||||
OVERWRITE_IF_EXISTS = 3;
|
||||
}
|
||||
|
||||
// Header name/value pair that this option applies to.
|
||||
|
@ -392,6 +464,7 @@ message WatchedDirectory {
|
|||
}
|
||||
|
||||
// Data source consisting of a file, an inline value, or an environment variable.
|
||||
// [#next-free-field: 6]
|
||||
message DataSource {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.DataSource";
|
||||
|
||||
|
@ -410,12 +483,47 @@ message DataSource {
|
|||
// Environment variable data source.
|
||||
string environment_variable = 4 [(validate.rules).string = {min_len: 1}];
|
||||
}
|
||||
|
||||
// Watched directory that is watched for file changes. If this is set explicitly, the file
|
||||
// specified in the ``filename`` field will be reloaded when relevant file move events occur.
|
||||
//
|
||||
// .. note::
|
||||
// This field only makes sense when the ``filename`` field is set.
|
||||
//
|
||||
// .. note::
|
||||
// Envoy only updates when the file is replaced by a file move, and not when the file is
|
||||
// edited in place.
|
||||
//
|
||||
// .. note::
|
||||
// Not all use cases of ``DataSource`` support watching directories. It depends on the
|
||||
// specific usage of the ``DataSource``. See the documentation of the parent message for
|
||||
// details.
|
||||
WatchedDirectory watched_directory = 5;
|
||||
}
|
||||
|
||||
// The message specifies the retry policy of remote data source when fetching fails.
|
||||
// [#next-free-field: 7]
|
||||
message RetryPolicy {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RetryPolicy";
|
||||
|
||||
// See :ref:`RetryPriority <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_priority>`.
|
||||
message RetryPriority {
|
||||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
oneof config_type {
|
||||
google.protobuf.Any typed_config = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// See :ref:`RetryHostPredicate <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_host_predicate>`.
|
||||
message RetryHostPredicate {
|
||||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
oneof config_type {
|
||||
google.protobuf.Any typed_config = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Specifies parameters that control :ref:`retry backoff strategy <envoy_v3_api_msg_config.core.v3.BackoffStrategy>`.
|
||||
// This parameter is optional, in which case the default base interval is 1000 milliseconds. The
|
||||
// default maximum interval is 10 times the base interval.
|
||||
|
@ -425,6 +533,18 @@ message RetryPolicy {
|
|||
// defaults to 1.
|
||||
google.protobuf.UInt32Value num_retries = 2
|
||||
[(udpa.annotations.field_migrate).rename = "max_retries"];
|
||||
|
||||
// For details, see :ref:`retry_on <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_on>`.
|
||||
string retry_on = 3;
|
||||
|
||||
// For details, see :ref:`retry_priority <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_priority>`.
|
||||
RetryPriority retry_priority = 4;
|
||||
|
||||
// For details, see :ref:`RetryHostPredicate <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_host_predicate>`.
|
||||
repeated RetryHostPredicate retry_host_predicate = 5;
|
||||
|
||||
// For details, see :ref:`host_selection_retry_max_attempts <envoy_v3_api_field_config.route.v3.RetryPolicy.host_selection_retry_max_attempts>`.
|
||||
int64 host_selection_retry_max_attempts = 6;
|
||||
}
|
||||
|
||||
// The message specifies how to fetch data from remote and how to verify it.
|
||||
|
|
|
@ -28,12 +28,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// xDS API and non-xDS services version. This is used to describe both resource and transport
|
||||
// protocol versions (in distinct configuration fields).
|
||||
enum ApiVersion {
|
||||
// When not specified, we assume v2, to ease migration to Envoy's stable API
|
||||
// versioning. If a client does not support v2 (e.g. due to deprecation), this
|
||||
// is an invalid value.
|
||||
AUTO = 0 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"];
|
||||
// When not specified, we assume v3; it is the only supported version.
|
||||
AUTO = 0;
|
||||
|
||||
// Use xDS v2 API.
|
||||
// Use xDS v2 API. This is no longer supported.
|
||||
V2 = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"];
|
||||
|
||||
// Use xDS v3 API.
|
||||
|
@ -152,7 +150,8 @@ message RateLimitSettings {
|
|||
google.protobuf.UInt32Value max_tokens = 1;
|
||||
|
||||
// Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens
|
||||
// per second will be used.
|
||||
// per second will be used. The minimal fill rate is once per year. Lower
|
||||
// fill rates will be set to once per year.
|
||||
google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}];
|
||||
}
|
||||
|
||||
|
|
|
@ -25,10 +25,11 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// gRPC service configuration. This is used by :ref:`ApiConfigSource
|
||||
// <envoy_v3_api_msg_config.core.v3.ApiConfigSource>` and filter configurations.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 7]
|
||||
message GrpcService {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService";
|
||||
|
||||
// [#next-free-field: 6]
|
||||
message EnvoyGrpc {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.core.GrpcService.EnvoyGrpc";
|
||||
|
@ -43,6 +44,24 @@ message GrpcService {
|
|||
string authority = 2
|
||||
[(validate.rules).string =
|
||||
{min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];
|
||||
|
||||
// Indicates the retry policy for re-establishing the gRPC stream
|
||||
// This field is optional. If max interval is not provided, it will be set to ten times the provided base interval.
|
||||
// Currently only supported for xDS gRPC streams.
|
||||
// If not set, xDS gRPC streams default base interval:500ms, maximum interval:30s will be applied.
|
||||
RetryPolicy retry_policy = 3;
|
||||
|
||||
// Maximum gRPC message size that is allowed to be received.
|
||||
// If a message over this limit is received, the gRPC stream is terminated with the RESOURCE_EXHAUSTED error.
|
||||
// This limit is applied to individual messages in the streaming response and not the total size of streaming response.
|
||||
// Defaults to 0, which means unlimited.
|
||||
google.protobuf.UInt32Value max_receive_message_length = 4;
|
||||
|
||||
// This provides gRPC client level control over envoy generated headers.
|
||||
// If false, the header will be sent but it can be overridden by per stream option.
|
||||
// If true, the header will be removed and can not be overridden by per stream option.
|
||||
// Default to false.
|
||||
bool skip_envoy_headers = 5;
|
||||
}
|
||||
|
||||
// [#next-free-field: 9]
|
||||
|
@ -294,4 +313,8 @@ message GrpcService {
|
|||
// documentation on :ref:`custom request headers
|
||||
// <config_http_conn_man_headers_custom_request_headers>`.
|
||||
repeated HeaderValue initial_metadata = 5;
|
||||
|
||||
// Optional default retry policy for streams toward the service.
|
||||
// If an async stream doesn't have retry policy configured in its stream options, this retry policy is used.
|
||||
RetryPolicy retry_policy = 6;
|
||||
}
|
||||
|
|
|
@ -4,6 +4,8 @@ package envoy.config.core.v3;
|
|||
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
import "envoy/config/core/v3/event_service_config.proto";
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
import "envoy/config/core/v3/proxy_protocol.proto";
|
||||
import "envoy/type/matcher/v3/string.proto";
|
||||
import "envoy/type/v3/http.proto";
|
||||
import "envoy/type/v3/range.proto";
|
||||
|
@ -13,6 +15,7 @@ import "google/protobuf/duration.proto";
|
|||
import "google/protobuf/struct.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "envoy/annotations/deprecation.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
@ -60,7 +63,7 @@ message HealthStatusSet {
|
|||
[(validate.rules).repeated = {items {enum {defined_only: true}}}];
|
||||
}
|
||||
|
||||
// [#next-free-field: 25]
|
||||
// [#next-free-field: 27]
|
||||
message HealthCheck {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck";
|
||||
|
||||
|
@ -93,12 +96,11 @@ message HealthCheck {
|
|||
// left empty (default value), the name of the cluster this health check is associated
|
||||
// with will be used. The host header can be customized for a specific endpoint by setting the
|
||||
// :ref:`hostname <envoy_v3_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field.
|
||||
string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
|
||||
string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE}];
|
||||
|
||||
// Specifies the HTTP path that will be requested during health checking. For example
|
||||
// ``/healthcheck``.
|
||||
string path = 2
|
||||
[(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];
|
||||
string path = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE}];
|
||||
|
||||
// [#not-implemented-hide:] HTTP specific payload.
|
||||
Payload send = 3;
|
||||
|
@ -176,6 +178,13 @@ message HealthCheck {
|
|||
// payload block must be found, and in the order specified, but not
|
||||
// necessarily contiguous.
|
||||
repeated Payload receive = 2;
|
||||
|
||||
// When setting this value, it tries to attempt health check request with ProxyProtocol.
|
||||
// When ``send`` is presented, they are sent after preceding ProxyProtocol header.
|
||||
// Only ProxyProtocol header is sent when ``send`` is not presented.
|
||||
// It allows to use both ProxyProtocol V1 and V2. In V1, it presents L3/L4. In V2, it includes
|
||||
// LOCAL command and doesn't include L3/L4.
|
||||
ProxyProtocolConfig proxy_protocol_config = 3;
|
||||
}
|
||||
|
||||
message RedisHealthCheck {
|
||||
|
@ -366,9 +375,19 @@ message HealthCheck {
|
|||
// The default value for "healthy edge interval" is the same as the default interval.
|
||||
google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}];
|
||||
|
||||
// .. attention::
|
||||
// This field is deprecated in favor of the extension
|
||||
// :ref:`event_logger <envoy_v3_api_field_config.core.v3.HealthCheck.event_logger>` and
|
||||
// :ref:`event_log_path <envoy_v3_api_field_extensions.health_check.event_sinks.file.v3.HealthCheckEventFileSink.event_log_path>`
|
||||
// in the file sink extension.
|
||||
//
|
||||
// Specifies the path to the :ref:`health check event log <arch_overview_health_check_logging>`.
|
||||
// If empty, no event log will be written.
|
||||
string event_log_path = 17;
|
||||
string event_log_path = 17
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
// A list of event log sinks to process the health check event.
|
||||
// [#extension-category: envoy.health_check.event_sinks]
|
||||
repeated TypedExtensionConfig event_logger = 25;
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
// The gRPC service for the health check event service.
|
||||
|
@ -380,6 +399,11 @@ message HealthCheck {
|
|||
// The default value is false.
|
||||
bool always_log_health_check_failures = 19;
|
||||
|
||||
// If set to true, health check success events will always be logged. If set to false, only host addition event will be logged
|
||||
// if it is the first successful health check, or if the healthy threshold is reached.
|
||||
// The default value is false.
|
||||
bool always_log_health_check_success = 26;
|
||||
|
||||
// This allows overriding the cluster TLS settings, just for health check connections.
|
||||
TlsOptions tls_options = 21;
|
||||
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package envoy.config.core.v3;
|
||||
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
import "envoy/config/core/v3/http_uri.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.config.core.v3";
|
||||
option java_outer_classname = "HttpServiceProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: HTTP services]
|
||||
|
||||
// HTTP service configuration.
|
||||
message HttpService {
|
||||
// The service's HTTP URI. For example:
|
||||
//
|
||||
// .. code-block:: yaml
|
||||
//
|
||||
// http_uri:
|
||||
// uri: https://www.myserviceapi.com/v1/data
|
||||
// cluster: www.myserviceapi.com|443
|
||||
//
|
||||
HttpUri http_uri = 1;
|
||||
|
||||
// Specifies a list of HTTP headers that should be added to each request
|
||||
// handled by this virtual host.
|
||||
repeated HeaderValueOption request_headers_to_add = 2
|
||||
[(validate.rules).repeated = {max_items: 1000}];
|
||||
}
|
|
@ -52,6 +52,7 @@ message HttpUri {
|
|||
// Sets the maximum duration in milliseconds that a response can take to arrive upon request.
|
||||
google.protobuf.Duration timeout = 3 [(validate.rules).duration = {
|
||||
required: true
|
||||
lt {seconds: 4294967296}
|
||||
gte {}
|
||||
}];
|
||||
}
|
||||
|
|
|
@ -56,7 +56,7 @@ message QuicKeepAliveSettings {
|
|||
}
|
||||
|
||||
// QUIC protocol options which apply to both downstream and upstream connections.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 9]
|
||||
message QuicProtocolOptions {
|
||||
// Maximum number of streams that the client can negotiate per connection. 100
|
||||
// if not specified.
|
||||
|
@ -64,7 +64,7 @@ message QuicProtocolOptions {
|
|||
|
||||
// `Initial stream-level flow-control receive window
|
||||
// <https://tools.ietf.org/html/draft-ietf-quic-transport-34#section-4.1>`_ size. Valid values range from
|
||||
// 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16).
|
||||
// 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 16777216 (16 * 1024 * 1024).
|
||||
//
|
||||
// NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead.
|
||||
// QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum.
|
||||
|
@ -76,8 +76,8 @@ message QuicProtocolOptions {
|
|||
[(validate.rules).uint32 = {lte: 16777216 gte: 1}];
|
||||
|
||||
// Similar to ``initial_stream_window_size``, but for connection-level
|
||||
// flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16).
|
||||
// window. Currently, this has the same minimum/default as ``initial_stream_window_size``.
|
||||
// flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults
|
||||
// to 25165824 (24 * 1024 * 1024).
|
||||
//
|
||||
// NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default
|
||||
// window size now, so it's also the minimum.
|
||||
|
@ -85,7 +85,7 @@ message QuicProtocolOptions {
|
|||
[(validate.rules).uint32 = {lte: 25165824 gte: 1}];
|
||||
|
||||
// The number of timeouts that can occur before port migration is triggered for QUIC clients.
|
||||
// This defaults to 1. If set to 0, port migration will not occur on path degrading.
|
||||
// This defaults to 4. If set to 0, port migration will not occur on path degrading.
|
||||
// Timeout here refers to QUIC internal path degrading timeout mechanism, such as PTO.
|
||||
// This has no effect on server sessions.
|
||||
google.protobuf.UInt32Value num_timeouts_to_trigger_port_migration = 4
|
||||
|
@ -94,6 +94,23 @@ message QuicProtocolOptions {
|
|||
// Probes the peer at the configured interval to solicit traffic, i.e. ACK or PATH_RESPONSE, from the peer to push back connection idle timeout.
|
||||
// If absent, use the default keepalive behavior of which a client connection sends PINGs every 15s, and a server connection doesn't do anything.
|
||||
QuicKeepAliveSettings connection_keepalive = 5;
|
||||
|
||||
// A comma-separated list of strings representing QUIC connection options defined in
|
||||
// `QUICHE <https://github.com/google/quiche/blob/main/quiche/quic/core/crypto/crypto_protocol.h>`_ and to be sent by upstream connections.
|
||||
string connection_options = 6;
|
||||
|
||||
// A comma-separated list of strings representing QUIC client connection options defined in
|
||||
// `QUICHE <https://github.com/google/quiche/blob/main/quiche/quic/core/crypto/crypto_protocol.h>`_ and to be sent by upstream connections.
|
||||
string client_connection_options = 7;
|
||||
|
||||
// The duration that a QUIC connection stays idle before it closes itself. If this field is not present, QUICHE
|
||||
// default 600s will be applied.
|
||||
// For internal corporate network, a long timeout is often fine.
|
||||
// But for client facing network, 30s is usually a good choice.
|
||||
google.protobuf.Duration idle_network_timeout = 8 [(validate.rules).duration = {
|
||||
lte {seconds: 600}
|
||||
gte {seconds: 1}
|
||||
}];
|
||||
}
|
||||
|
||||
message UpstreamHttpProtocolOptions {
|
||||
|
@ -104,12 +121,14 @@ message UpstreamHttpProtocolOptions {
|
|||
// upstream connections based on the downstream HTTP host/authority header or any other arbitrary
|
||||
// header when :ref:`override_auto_sni_header <envoy_v3_api_field_config.core.v3.UpstreamHttpProtocolOptions.override_auto_sni_header>`
|
||||
// is set, as seen by the :ref:`router filter <config_http_filters_router>`.
|
||||
// Does nothing if a filter before the http router filter sets the corresponding metadata.
|
||||
bool auto_sni = 1;
|
||||
|
||||
// Automatic validate upstream presented certificate for new upstream connections based on the
|
||||
// downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header <envoy_v3_api_field_config.core.v3.UpstreamHttpProtocolOptions.override_auto_sni_header>`
|
||||
// is set, as seen by the :ref:`router filter <config_http_filters_router>`.
|
||||
// This field is intended to be set with ``auto_sni`` field.
|
||||
// Does nothing if a filter before the http router filter sets the corresponding metadata.
|
||||
bool auto_san_validation = 2;
|
||||
|
||||
// An optional alternative to the host/authority header to be used for setting the SNI value.
|
||||
|
@ -119,6 +138,7 @@ message UpstreamHttpProtocolOptions {
|
|||
// is not found or the value is empty, host/authority header will be used instead.
|
||||
// This field is intended to be set with ``auto_sni`` and/or ``auto_san_validation`` fields.
|
||||
// If none of these fields are set then setting this would be a no-op.
|
||||
// Does nothing if a filter before the http router filter sets the corresponding metadata.
|
||||
string override_auto_sni_header = 3
|
||||
[(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}];
|
||||
}
|
||||
|
@ -177,7 +197,7 @@ message AlternateProtocolsCacheOptions {
|
|||
// this list contained the value ``.c.example.com``, then an Alt-Svc entry for ``foo.c.example.com``
|
||||
// could be shared with ``bar.c.example.com`` but would not be shared with ``baz.example.com``. On
|
||||
// the other hand, if the list contained the value ``.example.com`` then all three hosts could share
|
||||
// Alt-Svc entries. Each entry must start with ``.``. If a hostname matches multiple suffixes, the
|
||||
// Alt-Svc entries. Each entry must start with ``.``. If a hostname matches multiple suffixes, the
|
||||
// first listed suffix will be used.
|
||||
//
|
||||
// Since lookup in this list is O(n), it is recommended that the number of suffixes be limited.
|
||||
|
@ -229,10 +249,9 @@ message HttpProtocolOptions {
|
|||
google.protobuf.Duration idle_timeout = 1;
|
||||
|
||||
// The maximum duration of a connection. The duration is defined as a period since a connection
|
||||
// was established. If not set, there is no max duration. When max_connection_duration is reached
|
||||
// and if there are no active streams, the connection will be closed. If the connection is a
|
||||
// downstream connection and there are any active streams, the drain sequence will kick-in,
|
||||
// and the connection will be force-closed after the drain period. See :ref:`drain_timeout
|
||||
// was established. If not set, there is no max duration. When max_connection_duration is reached,
|
||||
// the drain sequence will kick-in. The connection will be closed after the drain timeout period
|
||||
// if there are no active streams. See :ref:`drain_timeout
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.drain_timeout>`.
|
||||
google.protobuf.Duration max_connection_duration = 3;
|
||||
|
||||
|
@ -259,7 +278,7 @@ message HttpProtocolOptions {
|
|||
google.protobuf.UInt32Value max_requests_per_connection = 6;
|
||||
}
|
||||
|
||||
// [#next-free-field: 9]
|
||||
// [#next-free-field: 11]
|
||||
message Http1ProtocolOptions {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.core.Http1ProtocolOptions";
|
||||
|
@ -363,6 +382,27 @@ message Http1ProtocolOptions {
|
|||
// (inferred if not present), host (from the host/:authority header) and path
|
||||
// (from first line or :path header).
|
||||
bool send_fully_qualified_url = 8;
|
||||
|
||||
// [#not-implemented-hide:] Hiding so that field can be removed after BalsaParser is rolled out.
|
||||
// If set, force HTTP/1 parser: BalsaParser if true, http-parser if false.
|
||||
// If unset, HTTP/1 parser is selected based on
|
||||
// envoy.reloadable_features.http1_use_balsa_parser.
|
||||
// See issue #21245.
|
||||
google.protobuf.BoolValue use_balsa_parser = 9
|
||||
[(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// [#not-implemented-hide:] Hiding so that field can be removed.
|
||||
// If true, and BalsaParser is used (either `use_balsa_parser` above is true,
|
||||
// or `envoy.reloadable_features.http1_use_balsa_parser` is true and
|
||||
// `use_balsa_parser` is unset), then every non-empty method with only valid
|
||||
// characters is accepted. Otherwise, methods not on the hard-coded list are
|
||||
// rejected.
|
||||
// Once UHV is enabled, this field should be removed, and BalsaParser should
|
||||
// allow any method. UHV validates the method, rejecting empty string or
|
||||
// invalid characters, and provides :ref:`restrict_http_methods
|
||||
// <envoy_v3_api_field_extensions.http.header_validators.envoy_default.v3.HeaderValidatorConfig.restrict_http_methods>`
|
||||
// to reject custom methods.
|
||||
bool allow_custom_methods = 10 [(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
}
|
||||
|
||||
message KeepaliveSettings {
|
||||
|
@ -395,7 +435,7 @@ message KeepaliveSettings {
|
|||
[(validate.rules).duration = {gte {nanos: 1000000}}];
|
||||
}
|
||||
|
||||
// [#next-free-field: 16]
|
||||
// [#next-free-field: 17]
|
||||
message Http2ProtocolOptions {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.core.Http2ProtocolOptions";
|
||||
|
@ -458,10 +498,10 @@ message Http2ProtocolOptions {
|
|||
// Allows proxying Websocket and other upgrades over H2 connect.
|
||||
bool allow_connect = 5;
|
||||
|
||||
// [#not-implemented-hide:] Hiding until envoy has full metadata support.
|
||||
// [#not-implemented-hide:] Hiding until Envoy has full metadata support.
|
||||
// Still under implementation. DO NOT USE.
|
||||
//
|
||||
// Allows metadata. See [metadata
|
||||
// Allows sending and receiving HTTP/2 METADATA frames. See [metadata
|
||||
// docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more
|
||||
// information.
|
||||
bool allow_metadata = 6;
|
||||
|
@ -573,6 +613,12 @@ message Http2ProtocolOptions {
|
|||
// Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer
|
||||
// does not respond within the configured timeout, the connection will be aborted.
|
||||
KeepaliveSettings connection_keepalive = 15;
|
||||
|
||||
// [#not-implemented-hide:] Hiding so that the field can be removed after oghttp2 is rolled out.
|
||||
// If set, force use of a particular HTTP/2 codec: oghttp2 if true, nghttp2 if false.
|
||||
// If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2.
|
||||
google.protobuf.BoolValue use_oghttp2_codec = 16
|
||||
[(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
}
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
|
@ -584,7 +630,7 @@ message GrpcProtocolOptions {
|
|||
}
|
||||
|
||||
// A message which allows using HTTP/3.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 7]
|
||||
message Http3ProtocolOptions {
|
||||
QuicProtocolOptions quic_protocol_options = 1;
|
||||
|
||||
|
@ -603,12 +649,27 @@ message Http3ProtocolOptions {
|
|||
// <https://datatracker.ietf.org/doc/draft-ietf-httpbis-h3-websockets/>`_
|
||||
// Note that HTTP/3 CONNECT is not yet an RFC.
|
||||
bool allow_extended_connect = 5 [(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// [#not-implemented-hide:] Hiding until Envoy has full metadata support.
|
||||
// Still under implementation. DO NOT USE.
|
||||
//
|
||||
// Allows sending and receiving HTTP/3 METADATA frames. See [metadata
|
||||
// docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more
|
||||
// information.
|
||||
bool allow_metadata = 6;
|
||||
}
|
||||
|
||||
// A message to control transformations to the :scheme header
|
||||
message SchemeHeaderTransformation {
|
||||
oneof transformation {
|
||||
// Overwrite any Scheme header with the contents of this string.
|
||||
// If set, takes precedence over match_upstream.
|
||||
string scheme_to_overwrite = 1 [(validate.rules).string = {in: "http" in: "https"}];
|
||||
}
|
||||
|
||||
// Set the Scheme header to match the upstream transport protocol. For example, should a
|
||||
// request be sent to the upstream over TLS, the scheme header will be set to "https". Should the
|
||||
// request be sent over plaintext, the scheme header will be set to "http".
|
||||
// If scheme_to_overwrite is set, this field is not used.
|
||||
bool match_upstream = 2;
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ syntax = "proto3";
|
|||
package envoy.config.core.v3;
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.config.core.v3";
|
||||
option java_outer_classname = "ProxyProtocolProto";
|
||||
|
@ -12,6 +13,25 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// [#protodoc-title: Proxy protocol]
|
||||
|
||||
message ProxyProtocolPassThroughTLVs {
|
||||
enum PassTLVsMatchType {
|
||||
// Pass all TLVs.
|
||||
INCLUDE_ALL = 0;
|
||||
|
||||
// Pass specific TLVs defined in tlv_type.
|
||||
INCLUDE = 1;
|
||||
}
|
||||
|
||||
// The strategy to pass through TLVs. Default is INCLUDE_ALL.
|
||||
// If INCLUDE_ALL is set, all TLVs will be passed through no matter the tlv_type field.
|
||||
PassTLVsMatchType match_type = 1;
|
||||
|
||||
// The TLV types that are applied based on match_type.
|
||||
// TLV type is defined as uint8_t in proxy protocol. See `the spec
|
||||
// <https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt>`_ for details.
|
||||
repeated uint32 tlv_type = 2 [(validate.rules).repeated = {items {uint32 {lt: 256}}}];
|
||||
}
|
||||
|
||||
message ProxyProtocolConfig {
|
||||
enum Version {
|
||||
// PROXY protocol version 1. Human readable format.
|
||||
|
@ -23,4 +43,8 @@ message ProxyProtocolConfig {
|
|||
|
||||
// The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details
|
||||
Version version = 1;
|
||||
|
||||
// This config controls which TLVs can be passed to upstream if it is Proxy Protocol
|
||||
// V2 header. If there is no setting for this field, no TLVs will be passed through.
|
||||
ProxyProtocolPassThroughTLVs pass_through_tlvs = 2;
|
||||
}
|
||||
|
|
|
@ -19,9 +19,15 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// [#protodoc-title: Substitution format string]
|
||||
|
||||
// Optional configuration options to be used with json_format.
|
||||
message JsonFormatOptions {
|
||||
// The output JSON string properties will be sorted.
|
||||
bool sort_properties = 1;
|
||||
}
|
||||
|
||||
// Configuration to use multiple :ref:`command operators <config_access_log_command_operators>`
|
||||
// to generate a new string in either plain text or JSON format.
|
||||
// [#next-free-field: 7]
|
||||
// [#next-free-field: 8]
|
||||
message SubstitutionFormatString {
|
||||
oneof format {
|
||||
option (validate.required) = true;
|
||||
|
@ -113,4 +119,7 @@ message SubstitutionFormatString {
|
|||
// See the formatters extensions documentation for details.
|
||||
// [#extension-category: envoy.formatter]
|
||||
repeated TypedExtensionConfig formatters = 6;
|
||||
|
||||
// If json_format is used, the options will be applied to the output JSON string.
|
||||
JsonFormatOptions json_format_options = 7;
|
||||
}
|
||||
|
|
|
@ -35,12 +35,11 @@ message ClusterLoadAssignment {
|
|||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment";
|
||||
|
||||
// Load balancing policy settings.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 7]
|
||||
message Policy {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.ClusterLoadAssignment.Policy";
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
message DropOverload {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload";
|
||||
|
@ -75,7 +74,15 @@ message ClusterLoadAssignment {
|
|||
// "throttle"_drop = 60%
|
||||
// "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%.
|
||||
// actual_outgoing_load = 20% // remaining after applying all categories.
|
||||
// [#not-implemented-hide:]
|
||||
//
|
||||
// Envoy supports only one element and will NACK if more than one element is present.
|
||||
// Other xDS-capable data planes will not necessarily have this limitation.
|
||||
//
|
||||
// In Envoy, this ``drop_overloads`` config can be overridden by a runtime key
|
||||
// "load_balancing_policy.drop_overload_limit" setting. This runtime key can be set to
|
||||
// any integer number between 0 and 100. 0 means drop 0%. 100 means drop 100%.
|
||||
// When both ``drop_overloads`` config and "load_balancing_policy.drop_overload_limit"
|
||||
// setting are in place, the min of these two wins.
|
||||
repeated DropOverload drop_overloads = 2;
|
||||
|
||||
// Priority levels and localities are considered overprovisioned with this
|
||||
|
@ -99,6 +106,16 @@ message ClusterLoadAssignment {
|
|||
// are considered stale and should be marked unhealthy.
|
||||
// Defaults to 0 which means endpoints never go stale.
|
||||
google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}];
|
||||
|
||||
// If true, use the :ref:`load balancing weight
|
||||
// <envoy_v3_api_field_config.endpoint.v3.LbEndpoint.load_balancing_weight>` of healthy and unhealthy
|
||||
// hosts to determine the health of the priority level. If false, use the number of healthy and unhealthy hosts
|
||||
// to determine the health of the priority level, or in other words assume each host has a weight of 1 for
|
||||
// this calculation.
|
||||
//
|
||||
// Note: this is not currently implemented for
|
||||
// :ref:`locality weighted load balancing <arch_overview_load_balancing_locality_weighted_lb>`.
|
||||
bool weighted_priority_health = 6;
|
||||
}
|
||||
|
||||
// Name of the cluster. This will be the :ref:`service_name
|
||||
|
|
|
@ -57,6 +57,11 @@ message Endpoint {
|
|||
bool disable_active_health_check = 4;
|
||||
}
|
||||
|
||||
message AdditionalAddress {
|
||||
// Additional address that is associated with the endpoint.
|
||||
core.v3.Address address = 1;
|
||||
}
|
||||
|
||||
// The upstream host address.
|
||||
//
|
||||
// .. attention::
|
||||
|
@ -82,6 +87,13 @@ message Endpoint {
|
|||
// that require a hostname, like
|
||||
// :ref:`auto_host_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.auto_host_rewrite>`.
|
||||
string hostname = 3;
|
||||
|
||||
// An ordered list of addresses that together with ``address`` comprise the
|
||||
// list of addresses for an endpoint. The address given in the ``address`` is
|
||||
// prepended to this list. It is assumed that the list must already be
|
||||
// sorted by preference order of the addresses. This will only be supported
|
||||
// for STATIC and EDS clusters.
|
||||
repeated AdditionalAddress additional_addresses = 4;
|
||||
}
|
||||
|
||||
// An Endpoint that Envoy can route traffic to.
|
||||
|
@ -135,7 +147,7 @@ message LedsClusterLocalityConfig {
|
|||
// A group of endpoints belonging to a Locality.
|
||||
// One can have multiple LocalityLbEndpoints for a locality, but only if
|
||||
// they have different priorities.
|
||||
// [#next-free-field: 9]
|
||||
// [#next-free-field: 10]
|
||||
message LocalityLbEndpoints {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.endpoint.LocalityLbEndpoints";
|
||||
|
@ -149,6 +161,9 @@ message LocalityLbEndpoints {
|
|||
// Identifies location of where the upstream hosts run.
|
||||
core.v3.Locality locality = 1;
|
||||
|
||||
// Metadata to provide additional information about the locality endpoints in aggregate.
|
||||
core.v3.Metadata metadata = 9;
|
||||
|
||||
// The group of endpoints belonging to the locality specified.
|
||||
// [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be
|
||||
// deprecated and replaced by ``load_balancer_endpoints``.]
|
||||
|
@ -182,9 +197,9 @@ message LocalityLbEndpoints {
|
|||
// default to the highest priority (0).
|
||||
//
|
||||
// Under usual circumstances, Envoy will only select endpoints for the highest
|
||||
// priority (0). In the event all endpoints for a particular priority are
|
||||
// priority (0). In the event that enough endpoints for a particular priority are
|
||||
// unavailable/unhealthy, Envoy will fail over to selecting endpoints for the
|
||||
// next highest priority group.
|
||||
// next highest priority group. Read more at :ref:`priority levels <arch_overview_load_balancing_priority_levels>`.
|
||||
//
|
||||
// Priorities should range from 0 (highest) to N (lowest) without skipping.
|
||||
uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}];
|
||||
|
|
|
@ -8,6 +8,8 @@ import "envoy/config/core/v3/base.proto";
|
|||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/struct.proto";
|
||||
|
||||
import "xds/annotations/v3/status.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
@ -23,7 +25,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// These are stats Envoy reports to the management server at a frequency defined by
|
||||
// :ref:`LoadStatsResponse.load_reporting_interval<envoy_v3_api_field_service.load_stats.v3.LoadStatsResponse.load_reporting_interval>`.
|
||||
// Stats per upstream region/zone and optionally per subzone.
|
||||
// [#next-free-field: 9]
|
||||
// [#next-free-field: 15]
|
||||
message UpstreamLocalityStats {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.endpoint.UpstreamLocalityStats";
|
||||
|
@ -48,7 +50,45 @@ message UpstreamLocalityStats {
|
|||
// upstream endpoints in the locality.
|
||||
uint64 total_issued_requests = 8;
|
||||
|
||||
// Stats for multi-dimensional load balancing.
|
||||
// The total number of connections in an established state at the time of the
|
||||
// report. This field is aggregated over all the upstream endpoints in the
|
||||
// locality.
|
||||
// In Envoy, this information may be based on ``upstream_cx_active metric``.
|
||||
// [#not-implemented-hide:]
|
||||
uint64 total_active_connections = 9 [(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// The total number of connections opened since the last report.
|
||||
// This field is aggregated over all the upstream endpoints in the locality.
|
||||
// In Envoy, this information may be based on ``upstream_cx_total`` metric
|
||||
// compared to itself between start and end of an interval, i.e.
|
||||
// ``upstream_cx_total``(now) - ``upstream_cx_total``(now -
|
||||
// load_report_interval).
|
||||
// [#not-implemented-hide:]
|
||||
uint64 total_new_connections = 10 [(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// The total number of connection failures since the last report.
|
||||
// This field is aggregated over all the upstream endpoints in the locality.
|
||||
// In Envoy, this information may be based on ``upstream_cx_connect_fail``
|
||||
// metric compared to itself between start and end of an interval, i.e.
|
||||
// ``upstream_cx_connect_fail``(now) - ``upstream_cx_connect_fail``(now -
|
||||
// load_report_interval).
|
||||
// [#not-implemented-hide:]
|
||||
uint64 total_fail_connections = 11 [(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// CPU utilization stats for multi-dimensional load balancing.
|
||||
// This typically comes from endpoint metrics reported via ORCA.
|
||||
UnnamedEndpointLoadMetricStats cpu_utilization = 12;
|
||||
|
||||
// Memory utilization for multi-dimensional load balancing.
|
||||
// This typically comes from endpoint metrics reported via ORCA.
|
||||
UnnamedEndpointLoadMetricStats mem_utilization = 13;
|
||||
|
||||
// Blended application-defined utilization for multi-dimensional load balancing.
|
||||
// This typically comes from endpoint metrics reported via ORCA.
|
||||
UnnamedEndpointLoadMetricStats application_utilization = 14;
|
||||
|
||||
// Named stats for multi-dimensional load balancing.
|
||||
// These typically come from endpoint metrics reported via ORCA.
|
||||
repeated EndpointLoadMetricStats load_metric_stats = 5;
|
||||
|
||||
// Endpoint granularity stats information for this locality. This information
|
||||
|
@ -118,6 +158,16 @@ message EndpointLoadMetricStats {
|
|||
double total_metric_value = 3;
|
||||
}
|
||||
|
||||
// Same as EndpointLoadMetricStats, except without the metric_name field.
|
||||
message UnnamedEndpointLoadMetricStats {
|
||||
// Number of calls that finished and included this metric.
|
||||
uint64 num_requests_finished_with_metric = 1;
|
||||
|
||||
// Sum of metric values across all calls that finished with this metric for
|
||||
// load_reporting_interval.
|
||||
double total_metric_value = 2;
|
||||
}
|
||||
|
||||
// Per cluster load stats. Envoy reports these stats a management server in a
|
||||
// :ref:`LoadStatsRequest<envoy_v3_api_msg_service.load_stats.v3.LoadStatsRequest>`
|
||||
// Next ID: 7
|
||||
|
|
|
@ -46,9 +46,7 @@ message FilterConfig {
|
|||
//
|
||||
// .. attention::
|
||||
// If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the
|
||||
// behavior will default to `stats_for_all_methods=false`. This default value is changed due
|
||||
// to the previous value being deprecated. This behavior can be changed with runtime override
|
||||
// `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`.
|
||||
// behavior will default to `stats_for_all_methods=false`.
|
||||
google.protobuf.BoolValue stats_for_all_methods = 3;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -127,7 +127,7 @@ message HttpConnectionManager {
|
|||
// Target percentage of requests managed by this HTTP connection manager that will be force
|
||||
// traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`
|
||||
// header is set. This field is a direct analog for the runtime variable
|
||||
// 'tracing.client_sampling' in the :ref:`HTTP Connection Manager
|
||||
// 'tracing.client_enabled' in the :ref:`HTTP Connection Manager
|
||||
// <config_http_conn_man_runtime>`.
|
||||
// Default: 100%
|
||||
type.Percent client_sampling = 3;
|
||||
|
|
|
@ -21,4 +21,42 @@ option (udpa.annotations.file_status).package_version_status = FROZEN;
|
|||
message KafkaBroker {
|
||||
// The prefix to use when emitting :ref:`statistics <config_network_filters_kafka_broker_stats>`.
|
||||
string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
|
||||
|
||||
// Set to true if broker filter should attempt to serialize the received responses from the
|
||||
// upstream broker instead of passing received bytes as is.
|
||||
// Disabled by default.
|
||||
bool force_response_rewrite = 2;
|
||||
|
||||
// Optional broker address rewrite specification.
|
||||
// Allows the broker filter to rewrite Kafka responses so that all connections established by
|
||||
// the Kafka clients point to Envoy.
|
||||
// This allows Kafka cluster not to configure its 'advertised.listeners' property
|
||||
// (as the necessary re-pointing will be done by this filter).
|
||||
// This collection of rules should cover all brokers in the cluster that is being proxied,
|
||||
// otherwise some nodes' addresses might leak to the downstream clients.
|
||||
oneof broker_address_rewrite_spec {
|
||||
// Broker address rewrite rules that match by broker ID.
|
||||
IdBasedBrokerRewriteSpec id_based_broker_address_rewrite_spec = 3;
|
||||
}
|
||||
}
|
||||
|
||||
// Collection of rules matching by broker ID.
|
||||
message IdBasedBrokerRewriteSpec {
|
||||
repeated IdBasedBrokerRewriteRule rules = 1;
|
||||
}
|
||||
|
||||
// Defines a rule to rewrite broker address data.
|
||||
message IdBasedBrokerRewriteRule {
|
||||
// Broker ID to match.
|
||||
uint32 id = 1 [(validate.rules).uint32 = {gte: 0}];
|
||||
|
||||
// The host value to use (resembling the host part of Kafka's advertised.listeners).
|
||||
// The value should point to the Envoy (not Kafka) listener, so that all client traffic goes
|
||||
// through Envoy.
|
||||
string host = 2 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// The port value to use (resembling the port part of Kafka's advertised.listeners).
|
||||
// The value should point to the Envoy (not Kafka) listener, so that all client traffic goes
|
||||
// through Envoy.
|
||||
uint32 port = 3 [(validate.rules).uint32 = {lte: 65535}];
|
||||
}
|
||||
|
|
|
@ -53,7 +53,7 @@ message ListenerCollection {
|
|||
repeated xds.core.v3.CollectionEntry entries = 1;
|
||||
}
|
||||
|
||||
// [#next-free-field: 34]
|
||||
// [#next-free-field: 36]
|
||||
message Listener {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener";
|
||||
|
||||
|
@ -199,7 +199,12 @@ message Listener {
|
|||
// before a connection is created.
|
||||
// UDP Listener filters can be specified when the protocol in the listener socket address in
|
||||
// :ref:`protocol <envoy_v3_api_field_config.core.v3.SocketAddress.protocol>` is :ref:`UDP
|
||||
// <envoy_v3_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>`.
|
||||
// <envoy_v3_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>` and no
|
||||
// :ref:`quic_options <envoy_v3_api_field_config.listener.v3.UdpListenerConfig.quic_options>` is specified in :ref:`udp_listener_config <envoy_v3_api_field_config.listener.v3.Listener.udp_listener_config>`.
|
||||
// QUIC listener filters can be specified when :ref:`quic_options
|
||||
// <envoy_v3_api_field_config.listener.v3.UdpListenerConfig.quic_options>` is
|
||||
// specified in :ref:`udp_listener_config <envoy_v3_api_field_config.listener.v3.Listener.udp_listener_config>`.
|
||||
// They are processed sequentially right before connection creation. And like TCP Listener filters, they can be used to manipulate the connection metadata and socket. But the difference is that they can't be used to pause connection creation.
|
||||
repeated ListenerFilter listener_filters = 9;
|
||||
|
||||
// The timeout to wait for all listener filters to complete operation. If the timeout is reached,
|
||||
|
@ -244,7 +249,7 @@ message Listener {
|
|||
// Additional socket options that may not be present in Envoy source code or
|
||||
// precompiled binaries. The socket options can be updated for a listener when
|
||||
// :ref:`enable_reuse_port <envoy_v3_api_field_config.listener.v3.Listener.enable_reuse_port>`
|
||||
// is `true`. Otherwise, if socket options change during a listener update the update will be rejected
|
||||
// is ``true``. Otherwise, if socket options change during a listener update the update will be rejected
|
||||
// to make it clear that the options were not updated.
|
||||
repeated core.v3.SocketOption socket_options = 13;
|
||||
|
||||
|
@ -339,6 +344,17 @@ message Listener {
|
|||
// provided net.core.somaxconn will be used on Linux and 128 otherwise.
|
||||
google.protobuf.UInt32Value tcp_backlog_size = 24;
|
||||
|
||||
// The maximum number of connections to accept from the kernel per socket
|
||||
// event. Envoy may decide to close these connections after accepting them
|
||||
// from the kernel e.g. due to load shedding, or other policies.
|
||||
// If there are more than max_connections_to_accept_per_socket_event
|
||||
// connections pending accept, connections over this threshold will be
|
||||
// accepted in later event loop iterations.
|
||||
// If no value is provided Envoy will accept all connections pending accept
|
||||
// from the kernel.
|
||||
google.protobuf.UInt32Value max_connections_to_accept_per_socket_event = 34
|
||||
[(validate.rules).uint32 = {gt: 0}];
|
||||
|
||||
// Whether the listener should bind to the port. A listener that doesn't
|
||||
// bind can only receive connections redirected from other listeners that set
|
||||
// :ref:`use_original_dst <envoy_v3_api_field_config.listener.v3.Listener.use_original_dst>`
|
||||
|
@ -371,4 +387,25 @@ message Listener {
|
|||
// Whether the listener should limit connections based upon the value of
|
||||
// :ref:`global_downstream_max_connections <config_overload_manager_limiting_connections>`.
|
||||
bool ignore_global_conn_limit = 31;
|
||||
|
||||
// Whether the listener bypasses configured overload manager actions.
|
||||
bool bypass_overload_manager = 35;
|
||||
}
|
||||
|
||||
// A placeholder proto so that users can explicitly configure the standard
|
||||
// Listener Manager via the bootstrap's :ref:`listener_manager <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.listener_manager>`.
|
||||
// [#not-implemented-hide:]
|
||||
message ListenerManager {
|
||||
}
|
||||
|
||||
// A placeholder proto so that users can explicitly configure the standard
|
||||
// Validation Listener Manager via the bootstrap's :ref:`listener_manager <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.listener_manager>`.
|
||||
// [#not-implemented-hide:]
|
||||
message ValidationListenerManager {
|
||||
}
|
||||
|
||||
// A placeholder proto so that users can explicitly configure the API
|
||||
// Listener Manager via the bootstrap's :ref:`listener_manager <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.listener_manager>`.
|
||||
// [#not-implemented-hide:]
|
||||
message ApiListenerManager {
|
||||
}
|
||||
|
|
|
@ -45,7 +45,6 @@ message Filter {
|
|||
// Configuration source specifier for an extension configuration discovery
|
||||
// service. In case of a failure and without the default configuration, the
|
||||
// listener closes the connections.
|
||||
// [#not-implemented-hide:]
|
||||
core.v3.ExtensionConfigSource config_discovery = 5;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -9,6 +9,8 @@ import "envoy/config/core/v3/protocol.proto";
|
|||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "xds/annotations/v3/status.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
@ -22,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// [#protodoc-title: QUIC listener config]
|
||||
|
||||
// Configuration specific to the UDP QUIC listener.
|
||||
// [#next-free-field: 9]
|
||||
// [#next-free-field: 12]
|
||||
message QuicProtocolOptions {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.listener.QuicProtocolOptions";
|
||||
|
@ -68,4 +70,20 @@ message QuicProtocolOptions {
|
|||
// If not specified the :ref:`default one configured by <envoy_v3_api_msg_extensions.quic.connection_id_generator.v3.DeterministicConnectionIdGeneratorConfig>` will be used.
|
||||
// [#extension-category: envoy.quic.connection_id_generator]
|
||||
core.v3.TypedExtensionConfig connection_id_generator_config = 8;
|
||||
|
||||
// Configure the server's preferred address to advertise so that client can migrate to it. See :ref:`example <envoy_v3_api_msg_extensions.quic.server_preferred_address.v3.FixedServerPreferredAddressConfig>` which configures a pair of v4 and v6 preferred addresses.
|
||||
// The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with.
|
||||
// If not specified, Envoy will not advertise any server's preferred address.
|
||||
// [#extension-category: envoy.quic.server_preferred_address]
|
||||
core.v3.TypedExtensionConfig server_preferred_address_config = 9
|
||||
[(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// Configure the server to send transport parameter `disable_active_migration <https://www.rfc-editor.org/rfc/rfc9000#section-18.2-4.30.1>`_.
|
||||
// Defaults to false (do not send this transport parameter).
|
||||
google.protobuf.BoolValue send_disable_active_migration = 10;
|
||||
|
||||
// Configure which implementation of ``quic::QuicConnectionDebugVisitor`` to be used for this listener.
|
||||
// If not specified, no debug visitor will be attached to connections.
|
||||
// [#extension-category: envoy.quic.connection_debug_visitor]
|
||||
core.v3.TypedExtensionConfig connection_debug_visitor_config = 11;
|
||||
}
|
||||
|
|
|
@ -19,6 +19,18 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// [#protodoc-title: Metrics service]
|
||||
|
||||
// HistogramEmitMode is used to configure which metric types should be emitted for histograms.
|
||||
enum HistogramEmitMode {
|
||||
// Emit Histogram and Summary metric types.
|
||||
SUMMARY_AND_HISTOGRAM = 0;
|
||||
|
||||
// Emit only Summary metric types.
|
||||
SUMMARY = 1;
|
||||
|
||||
// Emit only Histogram metric types.
|
||||
HISTOGRAM = 2;
|
||||
}
|
||||
|
||||
// Metrics Service is configured as a built-in ``envoy.stat_sinks.metrics_service`` :ref:`StatsSink
|
||||
// <envoy_v3_api_msg_config.metrics.v3.StatsSink>`. This opaque configuration will be used to create
|
||||
// Metrics Service.
|
||||
|
@ -31,9 +43,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// - name: envoy.stat_sinks.metrics_service
|
||||
// typed_config:
|
||||
// "@type": type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig
|
||||
// transport_api_version: V3
|
||||
//
|
||||
// [#extension: envoy.stat_sinks.metrics_service]
|
||||
// [#next-free-field: 6]
|
||||
message MetricsServiceConfig {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.metrics.v2.MetricsServiceConfig";
|
||||
|
@ -55,4 +67,7 @@ message MetricsServiceConfig {
|
|||
// and the tag extracted name will be used instead of the full name, which may contain values used by the tag
|
||||
// extractor or additional tags added during stats creation.
|
||||
bool emit_tags_as_labels = 4;
|
||||
|
||||
// Specify which metrics types to emit for histograms. Defaults to SUMMARY_AND_HISTOGRAM.
|
||||
HistogramEmitMode histogram_emit_mode = 5 [(validate.rules).enum = {defined_only: true}];
|
||||
}
|
||||
|
|
|
@ -121,8 +121,8 @@ message StatsMatcher {
|
|||
// limited by either an exclusion or an inclusion list of :ref:`StringMatcher
|
||||
// <envoy_v3_api_msg_type.matcher.v3.StringMatcher>` protos:
|
||||
//
|
||||
// * If ``reject_all`` is set to `true`, no stats will be instantiated. If ``reject_all`` is set to
|
||||
// `false`, all stats will be instantiated.
|
||||
// * If ``reject_all`` is set to ``true``, no stats will be instantiated. If ``reject_all`` is set to
|
||||
// ``false``, all stats will be instantiated.
|
||||
//
|
||||
// * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the
|
||||
// list will not instantiate.
|
||||
|
|
|
@ -134,14 +134,37 @@ message OverloadAction {
|
|||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// A set of triggers for this action. The state of the action is the maximum
|
||||
// state of all triggers, which can be scaling between 0 and 1 or saturated. Listeners
|
||||
// are notified when the overload action changes state.
|
||||
// state of all triggers, which can be scalar values between 0 and 1 or
|
||||
// saturated. Listeners are notified when the overload action changes state.
|
||||
// An overload manager action can only have one trigger for a given resource
|
||||
// e.g. :ref:`Trigger.name
|
||||
// <envoy_v3_api_field_config.overload.v3.Trigger.name>` must be unique
|
||||
// in this list.
|
||||
repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}];
|
||||
|
||||
// Configuration for the action being instantiated.
|
||||
google.protobuf.Any typed_config = 3;
|
||||
}
|
||||
|
||||
// A point within the connection or request lifecycle that provides context on
|
||||
// whether to shed load at that given stage for the current entity at the
|
||||
// point.
|
||||
message LoadShedPoint {
|
||||
// This is just a well-known string for the LoadShedPoint.
|
||||
// Deployment specific LoadShedPoints e.g. within a custom extension should
|
||||
// be prefixed by the company / deployment name to avoid colliding with any
|
||||
// open source LoadShedPoints.
|
||||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// A set of triggers for this LoadShedPoint. The LoadShedPoint will use the
|
||||
// the maximum state of all triggers, which can be scalar values between 0 and
|
||||
// 1 or saturated. A LoadShedPoint can only have one trigger for a given
|
||||
// resource e.g. :ref:`Trigger.name
|
||||
// <envoy_v3_api_field_config.overload.v3.Trigger.name>` must be unique in
|
||||
// this list.
|
||||
repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}];
|
||||
}
|
||||
|
||||
// Configuration for which accounts the WatermarkBuffer Factories should
|
||||
// track.
|
||||
message BufferFactoryConfig {
|
||||
|
@ -162,6 +185,7 @@ message BufferFactoryConfig {
|
|||
uint32 minimum_account_to_track_power_of_two = 1 [(validate.rules).uint32 = {lte: 56 gte: 10}];
|
||||
}
|
||||
|
||||
// [#next-free-field: 6]
|
||||
message OverloadManager {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.overload.v2alpha.OverloadManager";
|
||||
|
@ -175,6 +199,9 @@ message OverloadManager {
|
|||
// The set of overload actions.
|
||||
repeated OverloadAction actions = 3;
|
||||
|
||||
// The set of load shed points.
|
||||
repeated LoadShedPoint loadshed_points = 5;
|
||||
|
||||
// Configuration for buffer factory.
|
||||
BufferFactoryConfig buffer_factory_config = 4;
|
||||
}
|
||||
|
|
|
@ -95,6 +95,45 @@ message RBAC {
|
|||
LOG = 2;
|
||||
}
|
||||
|
||||
message AuditLoggingOptions {
|
||||
// Deny and allow here refer to RBAC decisions, not actions.
|
||||
enum AuditCondition {
|
||||
// Never audit.
|
||||
NONE = 0;
|
||||
|
||||
// Audit when RBAC denies the request.
|
||||
ON_DENY = 1;
|
||||
|
||||
// Audit when RBAC allows the request.
|
||||
ON_ALLOW = 2;
|
||||
|
||||
// Audit whether RBAC allows or denies the request.
|
||||
ON_DENY_AND_ALLOW = 3;
|
||||
}
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
message AuditLoggerConfig {
|
||||
// Typed logger configuration.
|
||||
//
|
||||
// [#extension-category: envoy.rbac.audit_loggers]
|
||||
core.v3.TypedExtensionConfig audit_logger = 1;
|
||||
|
||||
// If true, when the logger is not supported, the data plane will not NACK but simply ignore it.
|
||||
bool is_optional = 2;
|
||||
}
|
||||
|
||||
// Condition for the audit logging to happen.
|
||||
// If this condition is met, all the audit loggers configured here will be invoked.
|
||||
//
|
||||
// [#not-implemented-hide:]
|
||||
AuditCondition audit_condition = 1 [(validate.rules).enum = {defined_only: true}];
|
||||
|
||||
// Configurations for RBAC-based authorization audit loggers.
|
||||
//
|
||||
// [#not-implemented-hide:]
|
||||
repeated AuditLoggerConfig logger_configs = 2;
|
||||
}
|
||||
|
||||
// The action to take if a policy matches. Every action either allows or denies a request,
|
||||
// and can also carry out action-specific operations.
|
||||
//
|
||||
|
@ -114,6 +153,12 @@ message RBAC {
|
|||
// Maps from policy name to policy. A match occurs when at least one policy matches the request.
|
||||
// The policies are evaluated in lexicographic order of the policy name.
|
||||
map<string, Policy> policies = 2;
|
||||
|
||||
// Audit logging options that include the condition for audit logging to happen
|
||||
// and audit logger configurations.
|
||||
//
|
||||
// [#not-implemented-hide:]
|
||||
AuditLoggingOptions audit_logging_options = 3;
|
||||
}
|
||||
|
||||
// Policy specifies a role and the principals that are assigned/denied the role.
|
||||
|
@ -149,7 +194,7 @@ message Policy {
|
|||
}
|
||||
|
||||
// Permission defines an action (or actions) that a principal can take.
|
||||
// [#next-free-field: 13]
|
||||
// [#next-free-field: 14]
|
||||
message Permission {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission";
|
||||
|
||||
|
@ -225,6 +270,10 @@ message Permission {
|
|||
// Extension for configuring custom matchers for RBAC.
|
||||
// [#extension-category: envoy.rbac.matchers]
|
||||
core.v3.TypedExtensionConfig matcher = 12;
|
||||
|
||||
// URI template path matching.
|
||||
// [#extension-category: envoy.path.match]
|
||||
core.v3.TypedExtensionConfig uri_template = 13;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -275,6 +324,11 @@ message Principal {
|
|||
|
||||
// A CIDR block that describes the downstream IP.
|
||||
// This address will honor proxy protocol, but will not honor XFF.
|
||||
//
|
||||
// This field is deprecated; either use :ref:`remote_ip
|
||||
// <envoy_v3_api_field_config.rbac.v3.Principal.remote_ip>` for the same
|
||||
// behavior, or use
|
||||
// :ref:`direct_remote_ip <envoy_v3_api_field_config.rbac.v3.Principal.direct_remote_ip>`.
|
||||
core.v3.CidrRange source_ip = 5
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// * Routing :ref:`architecture overview <arch_overview_http_routing>`
|
||||
// * HTTP :ref:`router filter <config_http_filters_router>`
|
||||
|
||||
// [#next-free-field: 17]
|
||||
// [#next-free-field: 18]
|
||||
message RouteConfiguration {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RouteConfiguration";
|
||||
|
||||
|
@ -82,14 +82,11 @@ message RouteConfiguration {
|
|||
(validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}
|
||||
];
|
||||
|
||||
// By default, headers that should be added/removed are evaluated from most to least specific:
|
||||
//
|
||||
// * route level
|
||||
// * virtual host level
|
||||
// * connection manager level
|
||||
//
|
||||
// To allow setting overrides at the route or virtual host level, this order can be reversed
|
||||
// by setting this option to true. Defaults to false.
|
||||
// Headers mutations at all levels are evaluated, if specified. By default, the order is from most
|
||||
// specific (i.e. route entry level) to least specific (i.e. route configuration level). Later header
|
||||
// mutations may override earlier mutations.
|
||||
// This order can be reversed by setting this field to true. In other words, most specific level mutation
|
||||
// is evaluated last.
|
||||
//
|
||||
bool most_specific_header_mutations_wins = 10;
|
||||
|
||||
|
@ -142,19 +139,22 @@ message RouteConfiguration {
|
|||
// For users who want to only match path on the "<path>" portion, this option should be true.
|
||||
bool ignore_path_parameters_in_path_matching = 15;
|
||||
|
||||
// The typed_per_filter_config field can be used to provide RouteConfiguration level per filter config.
|
||||
// The key should match the :ref:`filter config name
|
||||
// This field can be used to provide RouteConfiguration level per filter config. The key should match the
|
||||
// :ref:`filter config name
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`.
|
||||
// The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also
|
||||
// be used for the backwards compatibility. If there is no entry referred by the filter config name, the
|
||||
// entry referred by the canonical filter name will be provided to the filters as fallback.
|
||||
//
|
||||
// Use of this field is filter specific;
|
||||
// see the :ref:`HTTP filter documentation <config_http_filters>` for if and how it is utilized.
|
||||
// See :ref:`Http filter route specific config <arch_overview_http_filters_per_filter_config>`
|
||||
// for details.
|
||||
// [#comment: An entry's value may be wrapped in a
|
||||
// :ref:`FilterConfig<envoy_v3_api_msg_config.route.v3.FilterConfig>`
|
||||
// message to specify additional options.]
|
||||
map<string, google.protobuf.Any> typed_per_filter_config = 16;
|
||||
|
||||
// The metadata field can be used to provide additional information
|
||||
// about the route configuration. It can be used for configuration, stats, and logging.
|
||||
// The metadata should go under the filter namespace that will need it.
|
||||
// For instance, if the metadata is intended for the Router filter,
|
||||
// the filter name should be specified as ``envoy.filters.http.router``.
|
||||
core.v3.Metadata metadata = 17;
|
||||
}
|
||||
|
||||
message Vhds {
|
||||
|
|
|
@ -41,7 +41,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// host header. This allows a single listener to service multiple top level domain path trees. Once
|
||||
// a virtual host is selected based on the domain, the routes are processed in order to see which
|
||||
// upstream cluster to route to or whether to perform a redirect.
|
||||
// [#next-free-field: 24]
|
||||
// [#next-free-field: 25]
|
||||
message VirtualHost {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualHost";
|
||||
|
||||
|
@ -153,15 +153,11 @@ message VirtualHost {
|
|||
// to configure the CORS HTTP filter.
|
||||
CorsPolicy cors = 8 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
// The per_filter_config field can be used to provide virtual host-specific configurations for filters.
|
||||
// The key should match the :ref:`filter config name
|
||||
// This field can be used to provide virtual host level per filter config. The key should match the
|
||||
// :ref:`filter config name
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`.
|
||||
// The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also
|
||||
// be used for the backwards compatibility. If there is no entry referred by the filter config name, the
|
||||
// entry referred by the canonical filter name will be provided to the filters as fallback.
|
||||
//
|
||||
// Use of this field is filter specific;
|
||||
// see the :ref:`HTTP filter documentation <config_http_filters>` for if and how it is utilized.
|
||||
// See :ref:`Http filter route specific config <arch_overview_http_filters_per_filter_config>`
|
||||
// for details.
|
||||
// [#comment: An entry's value may be wrapped in a
|
||||
// :ref:`FilterConfig<envoy_v3_api_msg_config.route.v3.FilterConfig>`
|
||||
// message to specify additional options.]
|
||||
|
@ -219,6 +215,13 @@ message VirtualHost {
|
|||
// It takes precedence over the route config mirror policy entirely.
|
||||
// That is, policies are not merged, the most specific non-empty one becomes the mirror policies.
|
||||
repeated RouteAction.RequestMirrorPolicy request_mirror_policies = 22;
|
||||
|
||||
// The metadata field can be used to provide additional information
|
||||
// about the virtual host. It can be used for configuration, stats, and logging.
|
||||
// The metadata should go under the filter namespace that will need it.
|
||||
// For instance, if the metadata is intended for the Router filter,
|
||||
// the filter name should be specified as ``envoy.filters.http.router``.
|
||||
core.v3.Metadata metadata = 24;
|
||||
}
|
||||
|
||||
// A filter-defined action type.
|
||||
|
@ -292,15 +295,11 @@ message Route {
|
|||
// Decorator for the matched route.
|
||||
Decorator decorator = 5;
|
||||
|
||||
// The per_filter_config field can be used to provide route-specific configurations for filters.
|
||||
// The key should match the :ref:`filter config name
|
||||
// This field can be used to provide route specific per filter config. The key should match the
|
||||
// :ref:`filter config name
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`.
|
||||
// The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also
|
||||
// be used for the backwards compatibility. If there is no entry referred by the filter config name, the
|
||||
// entry referred by the canonical filter name will be provided to the filters as fallback.
|
||||
//
|
||||
// Use of this field is filter specific;
|
||||
// see the :ref:`HTTP filter documentation <config_http_filters>` for if and how it is utilized.
|
||||
// See :ref:`Http filter route specific config <arch_overview_http_filters_per_filter_config>`
|
||||
// for details.
|
||||
// [#comment: An entry's value may be wrapped in a
|
||||
// :ref:`FilterConfig<envoy_v3_api_msg_config.route.v3.FilterConfig>`
|
||||
// message to specify additional options.]
|
||||
|
@ -408,7 +407,8 @@ message WeightedCluster {
|
|||
// The weight of the cluster. This value is relative to the other clusters'
|
||||
// weights. When a request matches the route, the choice of an upstream cluster
|
||||
// is determined by its weight. The sum of weights across all
|
||||
// entries in the clusters array must be greater than 0.
|
||||
// entries in the clusters array must be greater than 0, and must not exceed
|
||||
// uint32_t maximal value (4294967295).
|
||||
google.protobuf.UInt32Value weight = 2;
|
||||
|
||||
// Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in
|
||||
|
@ -450,16 +450,11 @@ message WeightedCluster {
|
|||
items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
|
||||
}];
|
||||
|
||||
// The per_filter_config field can be used to provide weighted cluster-specific configurations
|
||||
// for filters.
|
||||
// The key should match the :ref:`filter config name
|
||||
// This field can be used to provide weighted cluster specific per filter config. The key should match the
|
||||
// :ref:`filter config name
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`.
|
||||
// The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also
|
||||
// be used for the backwards compatibility. If there is no entry referred by the filter config name, the
|
||||
// entry referred by the canonical filter name will be provided to the filters as fallback.
|
||||
//
|
||||
// Use of this field is filter specific;
|
||||
// see the :ref:`HTTP filter documentation <config_http_filters>` for if and how it is utilized.
|
||||
// See :ref:`Http filter route specific config <arch_overview_http_filters_per_filter_config>`
|
||||
// for details.
|
||||
// [#comment: An entry's value may be wrapped in a
|
||||
// :ref:`FilterConfig<envoy_v3_api_msg_config.route.v3.FilterConfig>`
|
||||
// message to specify additional options.]
|
||||
|
@ -536,10 +531,20 @@ message RouteMatch {
|
|||
|
||||
// If specified, the route will match against whether or not a certificate is validated.
|
||||
// If not specified, certificate validation status (true or false) will not be considered when route matching.
|
||||
//
|
||||
// .. warning::
|
||||
//
|
||||
// Client certificate validation is not currently performed upon TLS session resumption. For
|
||||
// a resumed TLS session the route will match only when ``validated`` is false, regardless of
|
||||
// whether the client TLS certificate is valid.
|
||||
//
|
||||
// The only known workaround for this issue is to disable TLS session resumption entirely, by
|
||||
// setting both :ref:`disable_stateless_session_resumption <envoy_v3_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.disable_stateless_session_resumption>`
|
||||
// and :ref:`disable_stateful_session_resumption <envoy_v3_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.disable_stateful_session_resumption>` on the DownstreamTlsContext.
|
||||
google.protobuf.BoolValue validated = 2;
|
||||
}
|
||||
|
||||
// An extensible message for matching CONNECT requests.
|
||||
// An extensible message for matching CONNECT or CONNECT-UDP requests.
|
||||
message ConnectMatcher {
|
||||
}
|
||||
|
||||
|
@ -572,11 +577,10 @@ message RouteMatch {
|
|||
// stripping. This needs more thought.]
|
||||
type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}];
|
||||
|
||||
// If this is used as the matcher, the matcher will only match CONNECT requests.
|
||||
// Note that this will not match HTTP/2 upgrade-style CONNECT requests
|
||||
// (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style
|
||||
// upgrades.
|
||||
// This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2,
|
||||
// If this is used as the matcher, the matcher will only match CONNECT or CONNECT-UDP requests.
|
||||
// Note that this will not match other Extended CONNECT requests (WebSocket and the like) as
|
||||
// they are normalized in Envoy as HTTP/1.1 style upgrades.
|
||||
// This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2 and HTTP/3,
|
||||
// where Extended CONNECT requests may have a path, the path matchers will work if
|
||||
// there is a path present.
|
||||
// Note that CONNECT support is currently considered alpha in Envoy.
|
||||
|
@ -631,7 +635,8 @@ message RouteMatch {
|
|||
// match. The router will check the query string from the ``path`` header
|
||||
// against all the specified query parameters. If the number of specified
|
||||
// query parameters is nonzero, they all must match the ``path`` header's
|
||||
// query string for a match to occur.
|
||||
// query string for a match to occur. In the event query parameters are
|
||||
// repeated, only the first value for each key will be considered.
|
||||
//
|
||||
// .. note::
|
||||
//
|
||||
|
@ -668,7 +673,7 @@ message RouteMatch {
|
|||
// :ref:`CorsPolicy in filter extension <envoy_v3_api_msg_extensions.filters.http.cors.v3.CorsPolicy>`
|
||||
// as as alternative.
|
||||
//
|
||||
// [#next-free-field: 13]
|
||||
// [#next-free-field: 14]
|
||||
message CorsPolicy {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.CorsPolicy";
|
||||
|
||||
|
@ -722,6 +727,10 @@ message CorsPolicy {
|
|||
//
|
||||
// For more details, refer to https://developer.chrome.com/blog/private-network-access-preflight.
|
||||
google.protobuf.BoolValue allow_private_network_access = 12;
|
||||
|
||||
// Specifies if preflight requests not matching the configured allowed origin should be forwarded
|
||||
// to the upstream. Default is true.
|
||||
google.protobuf.BoolValue forward_not_matching_preflights = 13;
|
||||
}
|
||||
|
||||
// [#next-free-field: 42]
|
||||
|
@ -754,7 +763,8 @@ message RouteAction {
|
|||
// collected for the shadow cluster making this feature useful for testing.
|
||||
//
|
||||
// During shadowing, the host/authority header is altered such that ``-shadow`` is appended. This is
|
||||
// useful for logging. For example, ``cluster1`` becomes ``cluster1-shadow``.
|
||||
// useful for logging. For example, ``cluster1`` becomes ``cluster1-shadow``. This behavior can be
|
||||
// disabled by setting ``disable_shadow_host_suffix_append`` to ``true``.
|
||||
//
|
||||
// .. note::
|
||||
//
|
||||
|
@ -763,7 +773,7 @@ message RouteAction {
|
|||
// .. note::
|
||||
//
|
||||
// Shadowing doesn't support Http CONNECT and upgrades.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 7]
|
||||
message RequestMirrorPolicy {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.route.RouteAction.RequestMirrorPolicy";
|
||||
|
@ -809,6 +819,9 @@ message RouteAction {
|
|||
|
||||
// Determines if the trace span should be sampled. Defaults to true.
|
||||
google.protobuf.BoolValue trace_sampled = 4;
|
||||
|
||||
// Disables appending the ``-shadow`` suffix to the shadowed ``Host`` header. Defaults to ``false``.
|
||||
bool disable_shadow_host_suffix_append = 6;
|
||||
}
|
||||
|
||||
// Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer
|
||||
|
@ -832,6 +845,18 @@ message RouteAction {
|
|||
type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2;
|
||||
}
|
||||
|
||||
// CookieAttribute defines an API for adding additional attributes for a HTTP cookie.
|
||||
message CookieAttribute {
|
||||
// The name of the cookie attribute.
|
||||
string name = 1
|
||||
[(validate.rules).string =
|
||||
{min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];
|
||||
|
||||
// The optional value of the cookie attribute.
|
||||
string value = 2 [(validate.rules).string =
|
||||
{max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];
|
||||
}
|
||||
|
||||
// Envoy supports two types of cookie affinity:
|
||||
//
|
||||
// 1. Passive. Envoy takes a cookie that's present in the cookies header and
|
||||
|
@ -863,6 +888,9 @@ message RouteAction {
|
|||
// The name of the path for the cookie. If no path is specified here, no path
|
||||
// will be set for the cookie.
|
||||
string path = 3;
|
||||
|
||||
// Additional attributes for the cookie. They will be used when generating a new cookie.
|
||||
repeated CookieAttribute attributes = 4;
|
||||
}
|
||||
|
||||
message ConnectionProperties {
|
||||
|
@ -879,7 +907,8 @@ message RouteAction {
|
|||
|
||||
// The name of the URL query parameter that will be used to obtain the hash
|
||||
// key. If the parameter is not present, no hash will be produced. Query
|
||||
// parameter names are case-sensitive.
|
||||
// parameter names are case-sensitive. If query parameters are repeated, only
|
||||
// the first value will be considered.
|
||||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
}
|
||||
|
||||
|
@ -1134,7 +1163,9 @@ message RouteAction {
|
|||
// Indicates that during forwarding, the host header will be swapped with
|
||||
// the hostname of the upstream host chosen by the cluster manager. This
|
||||
// option is applicable only when the destination cluster for a route is of
|
||||
// type ``strict_dns`` or ``logical_dns``. Setting this to true with other cluster types
|
||||
// type ``strict_dns`` or ``logical_dns``,
|
||||
// or when :ref:`hostname <envoy_v3_api_field_config.endpoint.v3.Endpoint.hostname>`
|
||||
// field is not empty. Setting this to true with other cluster types
|
||||
// has no effect. Using this option will append the
|
||||
// :ref:`config_http_conn_man_headers_x-forwarded-host` header if
|
||||
// :ref:`append_x_forwarded_host <envoy_v3_api_field_config.route.v3.RouteAction.append_x_forwarded_host>`
|
||||
|
@ -1187,7 +1218,7 @@ message RouteAction {
|
|||
// :ref:`host_rewrite_header <envoy_v3_api_field_config.route.v3.RouteAction.host_rewrite_header>`, or
|
||||
// :ref:`host_rewrite_path_regex <envoy_v3_api_field_config.route.v3.RouteAction.host_rewrite_path_regex>`)
|
||||
// causes the original value of the host header, if any, to be appended to the
|
||||
// :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header.
|
||||
// :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header if it is different to the last value appended.
|
||||
bool append_x_forwarded_host = 38;
|
||||
|
||||
// Specifies the upstream timeout for the route. If not specified, the default is 15s. This
|
||||
|
@ -1771,7 +1802,7 @@ message Tracing {
|
|||
// Target percentage of requests managed by this HTTP connection manager that will be force
|
||||
// traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`
|
||||
// header is set. This field is a direct analog for the runtime variable
|
||||
// 'tracing.client_sampling' in the :ref:`HTTP Connection Manager
|
||||
// 'tracing.client_enabled' in the :ref:`HTTP Connection Manager
|
||||
// <config_http_conn_man_runtime>`.
|
||||
// Default: 100%
|
||||
type.v3.FractionalPercent client_sampling = 1;
|
||||
|
@ -2011,6 +2042,7 @@ message RateLimit {
|
|||
// .. code-block:: cpp
|
||||
//
|
||||
// ("<descriptor_key>", "<value_queried_from_metadata>")
|
||||
// [#next-free-field: 6]
|
||||
message MetaData {
|
||||
enum Source {
|
||||
// Query :ref:`dynamic metadata <well_known_dynamic_metadata>`
|
||||
|
@ -2028,11 +2060,17 @@ message RateLimit {
|
|||
type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];
|
||||
|
||||
// An optional value to use if ``metadata_key`` is empty. If not set and
|
||||
// no value is present under the metadata_key then no descriptor is generated.
|
||||
// no value is present under the metadata_key then ``skip_if_absent`` is followed to
|
||||
// skip calling the rate limiting service or skip the descriptor.
|
||||
string default_value = 3;
|
||||
|
||||
// Source of metadata
|
||||
Source source = 4 [(validate.rules).enum = {defined_only: true}];
|
||||
|
||||
// If set to true, Envoy skips the descriptor while calling rate limiting service
|
||||
// when ``metadata_key`` is empty and ``default_value`` is not set. By default it skips calling the
|
||||
// rate limiting service in that case.
|
||||
bool skip_if_absent = 5;
|
||||
}
|
||||
|
||||
// The following descriptor entry is appended to the descriptor:
|
||||
|
@ -2334,6 +2372,7 @@ message QueryParameterMatcher {
|
|||
}
|
||||
|
||||
// HTTP Internal Redirect :ref:`architecture overview <arch_overview_internal_redirects>`.
|
||||
// [#next-free-field: 6]
|
||||
message InternalRedirectPolicy {
|
||||
// An internal redirect is not handled, unless the number of previous internal redirects that a
|
||||
// downstream request has encountered is lower than this value.
|
||||
|
@ -2359,6 +2398,14 @@ message InternalRedirectPolicy {
|
|||
// Allow internal redirect to follow a target URI with a different scheme than the value of
|
||||
// x-forwarded-proto. The default is false.
|
||||
bool allow_cross_scheme_redirect = 4;
|
||||
|
||||
// Specifies a list of headers, by name, to copy from the internal redirect into the subsequent
|
||||
// request. If a header is specified here but not present in the redirect, it will be cleared in
|
||||
// the subsequent request.
|
||||
repeated string response_headers_to_copy = 5 [(validate.rules).repeated = {
|
||||
unique: true
|
||||
items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
|
||||
}];
|
||||
}
|
||||
|
||||
// A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the
|
||||
|
@ -2367,7 +2414,6 @@ message InternalRedirectPolicy {
|
|||
// :ref:`Route.typed_per_filter_config<envoy_v3_api_field_config.route.v3.Route.typed_per_filter_config>`,
|
||||
// or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config<envoy_v3_api_field_config.route.v3.WeightedCluster.ClusterWeight.typed_per_filter_config>`
|
||||
// to add additional flags to the filter.
|
||||
// [#not-implemented-hide:]
|
||||
message FilterConfig {
|
||||
// The filter config.
|
||||
google.protobuf.Any config = 1;
|
||||
|
@ -2376,4 +2422,20 @@ message FilterConfig {
|
|||
// not support the specified filter, it may ignore the map entry rather
|
||||
// than rejecting the config.
|
||||
bool is_optional = 2;
|
||||
|
||||
// If true, the filter is disabled in the route or virtual host and the ``config`` field is ignored.
|
||||
// See :ref:`route based filter chain <arch_overview_http_filters_route_based_filter_chain>`
|
||||
// for more details.
|
||||
//
|
||||
// .. note::
|
||||
//
|
||||
// This field will take effect when the request arrives and the filter chain is created for the request.
|
||||
// If initial route is selected for the request and a filter is disabled in the initial route, then
|
||||
// the filter will not be added to the filter chain.
|
||||
// And if the request is mutated later and re-matched to another route, the filter disabled by the
|
||||
// initial route will not be added back to the filter chain because the filter chain is already
|
||||
// created and it is too late to change the chain.
|
||||
//
|
||||
// This field only makes sense for the downstream HTTP filters for now.
|
||||
bool disabled = 3;
|
||||
}
|
||||
|
|
|
@ -44,7 +44,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// fragments:
|
||||
// - header_value_extractor:
|
||||
// name: X-Route-Selector
|
||||
// element_separator: ,
|
||||
// element_separator: ","
|
||||
// element:
|
||||
// separator: =
|
||||
// key: vip
|
||||
|
|
|
@ -4,6 +4,7 @@ package envoy.config.tap.v3;
|
|||
|
||||
import "envoy/config/common/matcher/v3/matcher.proto";
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
import "envoy/config/core/v3/grpc_service.proto";
|
||||
import "envoy/config/route/v3/route_components.proto";
|
||||
|
||||
|
@ -183,7 +184,7 @@ message OutputConfig {
|
|||
}
|
||||
|
||||
// Tap output sink configuration.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 7]
|
||||
message OutputSink {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.service.tap.v2alpha.OutputSink";
|
||||
|
@ -259,6 +260,9 @@ message OutputSink {
|
|||
// been configured to receive tap configuration from some other source (e.g., static
|
||||
// file, XDS, etc.) configuring the buffered admin output type will fail.
|
||||
BufferedAdminSink buffered_admin = 5;
|
||||
|
||||
// Tap output filter will be defined by an extension type
|
||||
core.v3.TypedExtensionConfig custom_sink = 6;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -2,6 +2,8 @@ syntax = "proto3";
|
|||
|
||||
package envoy.config.trace.v3;
|
||||
|
||||
import "google/protobuf/duration.proto";
|
||||
|
||||
import "udpa/annotations/migrate.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
|
@ -16,6 +18,13 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// [#protodoc-title: Datadog tracer]
|
||||
|
||||
// Configuration for the Remote Configuration feature.
|
||||
message DatadogRemoteConfig {
|
||||
// Frequency at which new configuration updates are queried.
|
||||
// If no value is provided, the default value is delegated to the Datadog tracing library.
|
||||
google.protobuf.Duration polling_interval = 1;
|
||||
}
|
||||
|
||||
// Configuration for the Datadog tracer.
|
||||
// [#extension: envoy.tracers.datadog]
|
||||
message DatadogConfig {
|
||||
|
@ -31,4 +40,11 @@ message DatadogConfig {
|
|||
// Optional hostname to use when sending spans to the collector_cluster. Useful for collectors
|
||||
// that require a specific hostname. Defaults to :ref:`collector_cluster <envoy_v3_api_field_config.trace.v3.DatadogConfig.collector_cluster>` above.
|
||||
string collector_hostname = 3;
|
||||
|
||||
// Enables and configures remote configuration.
|
||||
// Remote Configuration allows configuring the tracer from Datadog's user interface.
|
||||
// This feature can drastically increase the number of connections to the Datadog Agent.
|
||||
// Each tracer regularly polls for configuration updates, and the number of tracers is the product
|
||||
// of the number of listeners and worker threads.
|
||||
DatadogRemoteConfig remote_config = 4;
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ package envoy.config.trace.v3;
|
|||
|
||||
import "google/protobuf/struct.proto";
|
||||
|
||||
import "envoy/annotations/deprecation.proto";
|
||||
import "udpa/annotations/migrate.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
|
@ -29,9 +30,18 @@ message DynamicOtConfig {
|
|||
|
||||
// Dynamic library implementing the `OpenTracing API
|
||||
// <https://github.com/opentracing/opentracing-cpp>`_.
|
||||
string library = 1 [(validate.rules).string = {min_len: 1}];
|
||||
string library = 1 [
|
||||
deprecated = true,
|
||||
(validate.rules).string = {min_len: 1},
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// The configuration to use when creating a tracer from the given dynamic
|
||||
// library.
|
||||
google.protobuf.Struct config = 2;
|
||||
google.protobuf.Struct config = 2 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
}
|
||||
|
|
|
@ -48,59 +48,109 @@ message OpenCensusConfig {
|
|||
reserved 7;
|
||||
|
||||
// Configures tracing, e.g. the sampler, max number of annotations, etc.
|
||||
opencensus.proto.trace.v1.TraceConfig trace_config = 1;
|
||||
opencensus.proto.trace.v1.TraceConfig trace_config = 1 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// Enables the stdout exporter if set to true. This is intended for debugging
|
||||
// purposes.
|
||||
bool stdout_exporter_enabled = 2;
|
||||
bool stdout_exporter_enabled = 2 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// Enables the Stackdriver exporter if set to true. The project_id must also
|
||||
// be set.
|
||||
bool stackdriver_exporter_enabled = 3;
|
||||
bool stackdriver_exporter_enabled = 3 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// The Cloud project_id to use for Stackdriver tracing.
|
||||
string stackdriver_project_id = 4;
|
||||
string stackdriver_project_id = 4 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// (optional) By default, the Stackdriver exporter will connect to production
|
||||
// Stackdriver. If stackdriver_address is non-empty, it will instead connect
|
||||
// to this address, which is in the gRPC format:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/naming.md
|
||||
string stackdriver_address = 10;
|
||||
string stackdriver_address = 10 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// (optional) The gRPC server that hosts Stackdriver tracing service. Only
|
||||
// Google gRPC is supported. If :ref:`target_uri <envoy_v3_api_field_config.core.v3.GrpcService.GoogleGrpc.target_uri>`
|
||||
// is not provided, the default production Stackdriver address will be used.
|
||||
core.v3.GrpcService stackdriver_grpc_service = 13;
|
||||
core.v3.GrpcService stackdriver_grpc_service = 13 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// Enables the Zipkin exporter if set to true. The url and service name must
|
||||
// also be set. This is deprecated, prefer to use Envoy's :ref:`native Zipkin
|
||||
// tracer <envoy_v3_api_msg_config.trace.v3.ZipkinConfig>`.
|
||||
bool zipkin_exporter_enabled = 5
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
bool zipkin_exporter_enabled = 5 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans". This is
|
||||
// deprecated, prefer to use Envoy's :ref:`native Zipkin tracer
|
||||
// <envoy_v3_api_msg_config.trace.v3.ZipkinConfig>`.
|
||||
string zipkin_url = 6
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
string zipkin_url = 6 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// Enables the OpenCensus Agent exporter if set to true. The ocagent_address or
|
||||
// ocagent_grpc_service must also be set.
|
||||
bool ocagent_exporter_enabled = 11;
|
||||
bool ocagent_exporter_enabled = 11 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// The address of the OpenCensus Agent, if its exporter is enabled, in gRPC
|
||||
// format: https://github.com/grpc/grpc/blob/master/doc/naming.md
|
||||
// [#comment:TODO: deprecate this field]
|
||||
string ocagent_address = 12;
|
||||
string ocagent_address = 12 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported.
|
||||
// This is only used if the ocagent_address is left empty.
|
||||
core.v3.GrpcService ocagent_grpc_service = 14;
|
||||
core.v3.GrpcService ocagent_grpc_service = 14 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// List of incoming trace context headers we will accept. First one found
|
||||
// wins.
|
||||
repeated TraceContext incoming_trace_context = 8;
|
||||
repeated TraceContext incoming_trace_context = 8 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// List of outgoing trace context headers we will produce.
|
||||
repeated TraceContext outgoing_trace_context = 9;
|
||||
repeated TraceContext outgoing_trace_context = 9 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
}
|
||||
|
|
|
@ -2,10 +2,12 @@ syntax = "proto3";
|
|||
|
||||
package envoy.config.trace.v3;
|
||||
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
import "envoy/config/core/v3/grpc_service.proto";
|
||||
import "envoy/config/core/v3/http_service.proto";
|
||||
|
||||
import "udpa/annotations/migrate.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.config.trace.v3";
|
||||
option java_outer_classname = "OpentelemetryProto";
|
||||
|
@ -17,12 +19,42 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// Configuration for the OpenTelemetry tracer.
|
||||
// [#extension: envoy.tracers.opentelemetry]
|
||||
// [#next-free-field: 6]
|
||||
message OpenTelemetryConfig {
|
||||
// The upstream gRPC cluster that will receive OTLP traces.
|
||||
// Note that the tracer drops traces if the server does not read data fast enough.
|
||||
core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];
|
||||
// This field can be left empty to disable reporting traces to the gRPC service.
|
||||
// Only one of ``grpc_service``, ``http_service`` may be used.
|
||||
core.v3.GrpcService grpc_service = 1
|
||||
[(udpa.annotations.field_migrate).oneof_promotion = "otlp_exporter"];
|
||||
|
||||
// The upstream HTTP cluster that will receive OTLP traces.
|
||||
// This field can be left empty to disable reporting traces to the HTTP service.
|
||||
// Only one of ``grpc_service``, ``http_service`` may be used.
|
||||
//
|
||||
// .. note::
|
||||
//
|
||||
// Note: The ``request_headers_to_add`` property in the OTLP HTTP exporter service
|
||||
// does not support the :ref:`format specifier <config_access_log_format>` as used for
|
||||
// :ref:`HTTP access logging <config_access_log>`.
|
||||
// The values configured are added as HTTP headers on the OTLP export request
|
||||
// without any formatting applied.
|
||||
core.v3.HttpService http_service = 3
|
||||
[(udpa.annotations.field_migrate).oneof_promotion = "otlp_exporter"];
|
||||
|
||||
// The name for the service. This will be populated in the ResourceSpan Resource attributes.
|
||||
// If it is not provided, it will default to "unknown_service:envoy".
|
||||
string service_name = 2;
|
||||
|
||||
// An ordered list of resource detectors
|
||||
// [#extension-category: envoy.tracers.opentelemetry.resource_detectors]
|
||||
repeated core.v3.TypedExtensionConfig resource_detectors = 4;
|
||||
|
||||
// Specifies the sampler to be used by the OpenTelemetry tracer.
|
||||
// The configured sampler implements the Sampler interface defined by the OpenTelemetry specification.
|
||||
// This field can be left empty. In this case, the default Envoy sampling decision is used.
|
||||
//
|
||||
// See: `OpenTelemetry sampler specification <https://opentelemetry.io/docs/specs/otel/trace/sdk/#sampler>`_
|
||||
// [#extension-category: envoy.tracers.opentelemetry.samplers]
|
||||
core.v3.TypedExtensionConfig sampler = 5;
|
||||
}
|
||||
|
|
|
@ -22,9 +22,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// [#protodoc-title: SkyWalking tracer]
|
||||
|
||||
// Configuration for the SkyWalking tracer. Please note that if SkyWalking tracer is used as the
|
||||
// provider of http tracer, then
|
||||
// :ref:`start_child_span <envoy_v3_api_field_extensions.filters.http.router.v3.Router.start_child_span>`
|
||||
// in the router must be set to true to get the correct topology and tracing data. Moreover, SkyWalking
|
||||
// provider of tracing, then
|
||||
// :ref:`spawn_upstream_span <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.spawn_upstream_span>`
|
||||
// in the tracing config must be set to true to get the correct topology and tracing data. Moreover, SkyWalking
|
||||
// Tracer does not support SkyWalking extension header (``sw8-x``) temporarily.
|
||||
// [#extension: envoy.tracers.skywalking]
|
||||
message SkyWalkingConfig {
|
||||
|
|
|
@ -75,12 +75,17 @@ message ZipkinConfig {
|
|||
//
|
||||
// * The Envoy Proxy is used as gateway or ingress.
|
||||
// * The Envoy Proxy is used as sidecar but inbound traffic capturing or outbound traffic capturing is disabled.
|
||||
// * Any case that the `start_child_span of router <envoy_v3_api_field_extensions.filters.http.router.v3.Router.start_child_span>` is set to true.
|
||||
// * Any case that the :ref:`start_child_span of router <envoy_v3_api_field_extensions.filters.http.router.v3.Router.start_child_span>` is set to true.
|
||||
//
|
||||
// .. attention::
|
||||
//
|
||||
// If this is set to true, then the
|
||||
// :ref:`start_child_span of router <envoy_v3_api_field_extensions.filters.http.router.v3.Router.start_child_span>`
|
||||
// SHOULD be set to true also to ensure the correctness of trace chain.
|
||||
bool split_spans_for_request = 7;
|
||||
//
|
||||
// Both this field and ``start_child_span`` are deprecated by the
|
||||
// :ref:`spawn_upstream_span <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing.spawn_upstream_span>`.
|
||||
// Please use that ``spawn_upstream_span`` field to control the span creation.
|
||||
bool split_spans_for_request = 7
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
}
|
||||
|
|
|
@ -0,0 +1,31 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package envoy.config.upstream.local_address_selector.v3;
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.config.upstream.local_address_selector.v3";
|
||||
option java_outer_classname = "DefaultLocalAddressSelectorProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/upstream/local_address_selector/v3;local_address_selectorv3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Default Local Address Selector]
|
||||
// [#extension: envoy.upstream.local_address_selector.default_local_address_selector]
|
||||
|
||||
// Default implementation of a local address selector. This implementation is
|
||||
// used if :ref:`local_address_selector
|
||||
// <envoy_v3_api_field_config.core.v3.BindConfig.local_address_selector>` is not
|
||||
// specified.
|
||||
// This implementation supports the specification of only one address in
|
||||
// :ref:`extra_source_addresses
|
||||
// <envoy_v3_api_field_config.core.v3.BindConfig.extra_source_addresses>` which
|
||||
// is appended to the address specified in the
|
||||
// :ref:`source_address <envoy_v3_api_field_config.core.v3.BindConfig.source_address>`
|
||||
// field. The extra address should have a different IP version than the address in the
|
||||
// ``source_address`` field. The address which has the same IP
|
||||
// version with the target host's address IP version will be used as bind address.
|
||||
// If there is no same IP version address found, the address in the ``source_address`` field will
|
||||
// be returned.
|
||||
message DefaultLocalAddressSelector {
|
||||
}
|
|
@ -10,6 +10,7 @@ import "google/protobuf/duration.proto";
|
|||
import "google/protobuf/timestamp.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "envoy/annotations/deprecation.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
@ -31,6 +32,23 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// Fields describing *upstream* interaction will explicitly include ``upstream``
|
||||
// in their name.
|
||||
|
||||
enum AccessLogType {
|
||||
NotSet = 0;
|
||||
TcpUpstreamConnected = 1;
|
||||
TcpPeriodic = 2;
|
||||
TcpConnectionEnd = 3;
|
||||
DownstreamStart = 4;
|
||||
DownstreamPeriodic = 5;
|
||||
DownstreamEnd = 6;
|
||||
UpstreamPoolReady = 7;
|
||||
UpstreamPeriodic = 8;
|
||||
UpstreamEnd = 9;
|
||||
DownstreamTunnelSuccessfullyEstablished = 10;
|
||||
UdpTunnelUpstreamConnected = 11;
|
||||
UdpPeriodic = 12;
|
||||
UdpSessionEnd = 13;
|
||||
}
|
||||
|
||||
message TCPAccessLogEntry {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.data.accesslog.v2.TCPAccessLogEntry";
|
||||
|
@ -80,7 +98,7 @@ message ConnectionProperties {
|
|||
}
|
||||
|
||||
// Defines fields that are shared by all Envoy access logs.
|
||||
// [#next-free-field: 28]
|
||||
// [#next-free-field: 34]
|
||||
message AccessLogCommon {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.data.accesslog.v2.AccessLogCommon";
|
||||
|
@ -214,11 +232,46 @@ message AccessLogCommon {
|
|||
// And if it is necessary, unique ID or identifier can be added to the log entry
|
||||
// :ref:`stream_id <envoy_v3_api_field_data.accesslog.v3.AccessLogCommon.stream_id>` to
|
||||
// correlate all these intermediate log entries and final log entry.
|
||||
bool intermediate_log_entry = 27;
|
||||
//
|
||||
// .. attention::
|
||||
//
|
||||
// This field is deprecated in favor of ``access_log_type`` for better indication of the
|
||||
// type of the access log record.
|
||||
bool intermediate_log_entry = 27
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
// If downstream connection in listener failed due to transport socket (e.g. TLS handshake), provides the
|
||||
// failure reason from the transport socket. The format of this field depends on the configured downstream
|
||||
// transport socket. Common TLS failures are in :ref:`TLS trouble shooting <arch_overview_ssl_trouble_shooting>`.
|
||||
string downstream_transport_failure_reason = 28;
|
||||
|
||||
// For HTTP: Total number of bytes sent to the downstream by the http stream.
|
||||
// For TCP: Total number of bytes sent to the downstream by the tcp proxy.
|
||||
uint64 downstream_wire_bytes_sent = 29;
|
||||
|
||||
// For HTTP: Total number of bytes received from the downstream by the http stream. Envoy over counts sizes of received HTTP/1.1 pipelined requests by adding up bytes of requests in the pipeline to the one currently being processed.
|
||||
// For TCP: Total number of bytes received from the downstream by the tcp proxy.
|
||||
uint64 downstream_wire_bytes_received = 30;
|
||||
|
||||
// For HTTP: Total number of bytes sent to the upstream by the http stream. This value accumulates during upstream retries.
|
||||
// For TCP: Total number of bytes sent to the upstream by the tcp proxy.
|
||||
uint64 upstream_wire_bytes_sent = 31;
|
||||
|
||||
// For HTTP: Total number of bytes received from the upstream by the http stream.
|
||||
// For TCP: Total number of bytes sent to the upstream by the tcp proxy.
|
||||
uint64 upstream_wire_bytes_received = 32;
|
||||
|
||||
// The type of the access log, which indicates when the log was recorded.
|
||||
// See :ref:`ACCESS_LOG_TYPE <config_access_log_format_access_log_type>` for the available values.
|
||||
// In case the access log was recorded by a flow which does not correspond to one of the supported
|
||||
// values, then the default value will be ``NotSet``.
|
||||
// For more information about how access log behaves and when it is being recorded,
|
||||
// please refer to :ref:`access logging <arch_overview_access_logs>`.
|
||||
AccessLogType access_log_type = 33;
|
||||
}
|
||||
|
||||
// Flags indicating occurrences during request/response processing.
|
||||
// [#next-free-field: 28]
|
||||
// [#next-free-field: 29]
|
||||
message ResponseFlags {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.data.accesslog.v2.ResponseFlags";
|
||||
|
@ -319,6 +372,9 @@ message ResponseFlags {
|
|||
|
||||
// Indicates a DNS resolution failed.
|
||||
bool dns_resolution_failure = 27;
|
||||
|
||||
// Indicates a downstream remote codec level reset was received on the stream
|
||||
bool downstream_remote_reset = 28;
|
||||
}
|
||||
|
||||
// Properties of a negotiated TLS connection.
|
||||
|
@ -356,6 +412,9 @@ message TLSProperties {
|
|||
|
||||
// The subject field of the certificate.
|
||||
string subject = 2;
|
||||
|
||||
// The issuer field of the certificate.
|
||||
string issuer = 3;
|
||||
}
|
||||
|
||||
// Version of TLS that was negotiated.
|
||||
|
@ -384,7 +443,7 @@ message TLSProperties {
|
|||
string ja3_fingerprint = 7;
|
||||
}
|
||||
|
||||
// [#next-free-field: 14]
|
||||
// [#next-free-field: 16]
|
||||
message HTTPRequestProperties {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.data.accesslog.v2.HTTPRequestProperties";
|
||||
|
@ -438,9 +497,17 @@ message HTTPRequestProperties {
|
|||
|
||||
// Map of additional headers that have been configured to be logged.
|
||||
map<string, string> request_headers = 13;
|
||||
|
||||
// Number of header bytes sent to the upstream by the http stream, including protocol overhead.
|
||||
//
|
||||
// This value accumulates during upstream retries.
|
||||
uint64 upstream_header_bytes_sent = 14;
|
||||
|
||||
// Number of header bytes received from the downstream by the http stream, including protocol overhead.
|
||||
uint64 downstream_header_bytes_received = 15;
|
||||
}
|
||||
|
||||
// [#next-free-field: 7]
|
||||
// [#next-free-field: 9]
|
||||
message HTTPResponseProperties {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.data.accesslog.v2.HTTPResponseProperties";
|
||||
|
@ -451,7 +518,7 @@ message HTTPResponseProperties {
|
|||
// Size of the HTTP response headers in bytes.
|
||||
//
|
||||
// This value is captured from the OSI layer 7 perspective, i.e. it does not
|
||||
// include overhead from framing or encoding at other networking layers.
|
||||
// include protocol overhead or overhead from framing or encoding at other networking layers.
|
||||
uint64 response_headers_bytes = 2;
|
||||
|
||||
// Size of the HTTP response body in bytes.
|
||||
|
@ -468,4 +535,10 @@ message HTTPResponseProperties {
|
|||
|
||||
// The HTTP response code details.
|
||||
string response_code_details = 6;
|
||||
|
||||
// Number of header bytes received from the upstream by the http stream, including protocol overhead.
|
||||
uint64 upstream_header_bytes_received = 7;
|
||||
|
||||
// Number of header bytes sent to the downstream by the http stream, including protocol overhead.
|
||||
uint64 downstream_header_bytes_sent = 8;
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ syntax = "proto3";
|
|||
package envoy.data.core.v3;
|
||||
|
||||
import "envoy/config/core/v3/address.proto";
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
|
@ -34,7 +35,7 @@ enum HealthCheckerType {
|
|||
THRIFT = 4;
|
||||
}
|
||||
|
||||
// [#next-free-field: 10]
|
||||
// [#next-free-field: 13]
|
||||
message HealthCheckEvent {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.data.core.v2alpha.HealthCheckEvent";
|
||||
|
@ -54,6 +55,12 @@ message HealthCheckEvent {
|
|||
// Host addition.
|
||||
HealthCheckAddHealthy add_healthy_event = 5;
|
||||
|
||||
// A health check was successful. Note: a host will be considered healthy either if it is
|
||||
// the first ever health check, or if the healthy threshold is reached. This kind of event
|
||||
// indicates that a health check was successful, but does not indicate that the host is
|
||||
// considered healthy. A host is considered healthy if HealthCheckAddHealthy kind of event is sent.
|
||||
HealthCheckSuccessful successful_health_check_event = 12;
|
||||
|
||||
// Host failure.
|
||||
HealthCheckFailure health_check_failure_event = 7;
|
||||
|
||||
|
@ -66,6 +73,12 @@ message HealthCheckEvent {
|
|||
|
||||
// Timestamp for event.
|
||||
google.protobuf.Timestamp timestamp = 6;
|
||||
|
||||
// Host metadata
|
||||
config.core.v3.Metadata metadata = 10;
|
||||
|
||||
// Host locality
|
||||
config.core.v3.Locality locality = 11;
|
||||
}
|
||||
|
||||
message HealthCheckEjectUnhealthy {
|
||||
|
@ -86,6 +99,9 @@ message HealthCheckAddHealthy {
|
|||
bool first_check = 1;
|
||||
}
|
||||
|
||||
message HealthCheckSuccessful {
|
||||
}
|
||||
|
||||
message HealthCheckFailure {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.data.core.v2alpha.HealthCheckFailure";
|
||||
|
|
|
@ -0,0 +1,24 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package envoy.data.core.v3;
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.data.core.v3";
|
||||
option java_outer_classname = "TlvMetadataProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/data/core/v3;corev3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Proxy Protocol Filter Typed Metadata]
|
||||
// PROXY protocol filter typed metadata.
|
||||
|
||||
message TlvsMetadata {
|
||||
// Typed metadata for :ref:`Proxy protocol filter <envoy_v3_api_msg_extensions.filters.listener.proxy_protocol.v3.ProxyProtocol>`, that represents a map of TLVs.
|
||||
// Each entry in the map consists of a key which corresponds to a configured
|
||||
// :ref:`rule key <envoy_v3_api_field_extensions.filters.listener.proxy_protocol.v3.ProxyProtocol.KeyValuePair.key>` and a value (TLV value in bytes).
|
||||
// When runtime flag ``envoy.reloadable_features.use_typed_metadata_in_proxy_protocol_listener`` is enabled,
|
||||
// :ref:`Proxy protocol filter <envoy_v3_api_msg_extensions.filters.listener.proxy_protocol.v3.ProxyProtocol>`
|
||||
// will populate typed metadata and regular metadata. By default filter will populate typed and untyped metadata.
|
||||
map<string, bytes> typed_metadata = 1;
|
||||
}
|
|
@ -128,7 +128,15 @@ message DnsTable {
|
|||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain";
|
||||
|
||||
// A domain name for which Envoy will respond to query requests
|
||||
// A domain name for which Envoy will respond to query requests.
|
||||
// Wildcard records are supported on the first label only, e.g. ``*.example.com`` or ``*.subdomain.example.com``.
|
||||
// Names such as ``*example.com``, ``subdomain.*.example.com``, ``*subdomain.example.com``, etc
|
||||
// are not valid wildcard names and asterisk will be interpreted as a literal ``*`` character.
|
||||
// Wildcard records match subdomains on any levels, e.g. ``*.example.com`` will match
|
||||
// ``foo.example.com``, ``bar.foo.example.com``, ``baz.bar.foo.example.com``, etc. In case there are multiple
|
||||
// wildcard records, the longest wildcard match will be used, e.g. if there are wildcard records for
|
||||
// ``*.example.com`` and ``*.foo.example.com`` and the query is for ``bar.foo.example.com``, the latter will be used.
|
||||
// Specific records will always take precedence over wildcard records.
|
||||
string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];
|
||||
|
||||
// The configuration containing the method to determine the address of this endpoint
|
||||
|
|
|
@ -2,6 +2,8 @@ syntax = "proto3";
|
|||
|
||||
package envoy.data.tap.v3;
|
||||
|
||||
import "envoy/config/core/v3/address.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
|
||||
|
@ -36,3 +38,14 @@ message Body {
|
|||
// <envoy_v3_api_field_config.tap.v3.OutputConfig.max_buffered_tx_bytes>` settings.
|
||||
bool truncated = 3;
|
||||
}
|
||||
|
||||
// Connection properties.
|
||||
message Connection {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Connection";
|
||||
|
||||
// Local address.
|
||||
config.core.v3.Address local_address = 1;
|
||||
|
||||
// Remote address.
|
||||
config.core.v3.Address remote_address = 2;
|
||||
}
|
||||
|
|
|
@ -5,6 +5,8 @@ package envoy.data.tap.v3;
|
|||
import "envoy/config/core/v3/base.proto";
|
||||
import "envoy/data/tap/v3/common.proto";
|
||||
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
|
||||
|
@ -34,6 +36,9 @@ message HttpBufferedTrace {
|
|||
|
||||
// Message trailers.
|
||||
repeated config.core.v3.HeaderValue trailers = 3;
|
||||
|
||||
// The timestamp after receiving the message headers.
|
||||
google.protobuf.Timestamp headers_received_time = 4;
|
||||
}
|
||||
|
||||
// Request message.
|
||||
|
@ -41,6 +46,9 @@ message HttpBufferedTrace {
|
|||
|
||||
// Response message.
|
||||
Message response = 2;
|
||||
|
||||
// downstream connection
|
||||
Connection downstream_connection = 3;
|
||||
}
|
||||
|
||||
// A streamed HTTP trace segment. Multiple segments make up a full trace.
|
||||
|
|
|
@ -2,7 +2,6 @@ syntax = "proto3";
|
|||
|
||||
package envoy.data.tap.v3;
|
||||
|
||||
import "envoy/config/core/v3/address.proto";
|
||||
import "envoy/data/tap/v3/common.proto";
|
||||
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
@ -20,17 +19,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// Trace format for the tap transport socket extension. This dumps plain text read/write
|
||||
// sequences on a socket.
|
||||
|
||||
// Connection properties.
|
||||
message Connection {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Connection";
|
||||
|
||||
// Local address.
|
||||
config.core.v3.Address local_address = 2;
|
||||
|
||||
// Remote address.
|
||||
config.core.v3.Address remote_address = 3;
|
||||
}
|
||||
|
||||
// Event in a socket trace.
|
||||
message SocketEvent {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.SocketEvent";
|
||||
|
|
|
@ -21,7 +21,8 @@ message ExpressionFilter {
|
|||
// Expressions are based on the set of Envoy :ref:`attributes <arch_overview_attributes>`.
|
||||
// The provided expression must evaluate to true for logging (expression errors are considered false).
|
||||
// Examples:
|
||||
// - ``response.code >= 400``
|
||||
// - ``(connection.mtls && request.headers['x-log-mtls'] == 'true') || request.url_path.contains('v1beta3')``
|
||||
//
|
||||
// * ``response.code >= 400``
|
||||
// * ``(connection.mtls && request.headers['x-log-mtls'] == 'true') || request.url_path.contains('v1beta3')``
|
||||
string expression = 1;
|
||||
}
|
||||
|
|
|
@ -0,0 +1,94 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package envoy.extensions.access_loggers.fluentd.v3;
|
||||
|
||||
import "envoy/config/core/v3/backoff.proto";
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
|
||||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/struct.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.extensions.access_loggers.fluentd.v3";
|
||||
option java_outer_classname = "FluentdProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/fluentd/v3;fluentdv3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Fluentd access log]
|
||||
|
||||
// Configuration for the *envoy.access_loggers.fluentd* :ref:`AccessLog <envoy_v3_api_msg_config.accesslog.v3.AccessLog>`.
|
||||
// This access log extension will send the emitted access logs over a TCP connection to an upstream that is accepting
|
||||
// the Fluentd Forward Protocol as described in: `Fluentd Forward Protocol Specification
|
||||
// <https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1>`_.
|
||||
// [#extension: envoy.access_loggers.fluentd]
|
||||
// [#next-free-field: 9]
|
||||
message FluentdAccessLogConfig {
|
||||
message RetryOptions {
|
||||
// The number of times the logger will attempt to connect to the upstream during reconnects.
|
||||
// By default, there is no limit. The logger will attempt to reconnect to the upstream each time
|
||||
// connecting to the upstream fails or the upstream connection has been closed for any reason.
|
||||
google.protobuf.UInt32Value max_connect_attempts = 1;
|
||||
|
||||
// Sets the backoff strategy. If this value is not set, the default base backoff interval is 500
|
||||
// milliseconds and the default max backoff interval is 5 seconds (10 times the base interval).
|
||||
config.core.v3.BackoffStrategy backoff_options = 2;
|
||||
}
|
||||
|
||||
// The upstream cluster to connect to for streaming the Fluentd messages.
|
||||
string cluster = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// A tag is a string separated with '.' (e.g. log.type) to categorize events.
|
||||
// See: https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1#message-modes
|
||||
string tag = 2 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// The prefix to use when emitting :ref:`statistics <config_access_log_stats>`.
|
||||
string stat_prefix = 3 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// Interval for flushing access logs to the TCP stream. Logger will flush requests every time
|
||||
// this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to
|
||||
// 1 second.
|
||||
google.protobuf.Duration buffer_flush_interval = 4 [(validate.rules).duration = {gt {}}];
|
||||
|
||||
// Soft size limit in bytes for access log entries buffer. The logger will buffer requests until
|
||||
// this limit is hit, or every time the flush interval elapses, whichever comes first. When the buffer
|
||||
// limit is hit, the logger will immediately flush the buffer contents. Setting it to zero effectively
|
||||
// disables the batching. Defaults to 16384.
|
||||
google.protobuf.UInt32Value buffer_size_bytes = 5;
|
||||
|
||||
// A struct that represents the record that is sent for each log entry.
|
||||
// https://github.com/fluent/fluentd/wiki/Forward-Protocol-Specification-v1#entry
|
||||
// Values are rendered as strings, numbers, or boolean values as appropriate.
|
||||
// Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA).
|
||||
// See :ref:`format string<config_access_log_format_strings>` documentation for a specific command operator details.
|
||||
//
|
||||
// .. validated-code-block:: yaml
|
||||
// :type-name: envoy.extensions.access_loggers.fluentd.v3.FluentdAccessLogConfig
|
||||
//
|
||||
// record:
|
||||
// status: "%RESPONSE_CODE%"
|
||||
// message: "%LOCAL_REPLY_BODY%"
|
||||
//
|
||||
// The following msgpack record would be created:
|
||||
//
|
||||
// .. code-block:: json
|
||||
//
|
||||
// {
|
||||
// "status": 500,
|
||||
// "message": "My error message"
|
||||
// }
|
||||
google.protobuf.Struct record = 6 [(validate.rules).message = {required: true}];
|
||||
|
||||
// Optional retry, in case upstream connection has failed. If this field is not set, the default values will be applied,
|
||||
// as specified in the :ref:`RetryOptions <envoy_v3_api_msg_extensions.access_loggers.fluentd.v3.FluentdAccessLogConfig.RetryOptions>`
|
||||
// configuration.
|
||||
RetryOptions retry_options = 7;
|
||||
|
||||
// Specifies a collection of Formatter plugins that can be called from the access log configuration.
|
||||
// See the formatters extensions documentation for details.
|
||||
// [#extension-category: envoy.formatter]
|
||||
repeated config.core.v3.TypedExtensionConfig formatters = 8;
|
||||
}
|
|
@ -2,6 +2,7 @@ syntax = "proto3";
|
|||
|
||||
package envoy.extensions.access_loggers.open_telemetry.v3;
|
||||
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
import "envoy/extensions/access_loggers/grpc/v3/als.proto";
|
||||
|
||||
import "opentelemetry/proto/common/v1/common.proto";
|
||||
|
@ -22,10 +23,15 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// populate `opentelemetry.proto.collector.v1.logs.ExportLogsServiceRequest.resource_logs <https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/collector/logs/v1/logs_service.proto>`_.
|
||||
// In addition, the request start time is set in the dedicated field.
|
||||
// [#extension: envoy.access_loggers.open_telemetry]
|
||||
// [#next-free-field: 8]
|
||||
message OpenTelemetryAccessLogConfig {
|
||||
// [#comment:TODO(itamarkam): add 'filter_state_objects_to_log' to logs.]
|
||||
grpc.v3.CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];
|
||||
|
||||
// If specified, Envoy will not generate built-in resource labels
|
||||
// like ``log_name``, ``zone_name``, ``cluster_name``, ``node_name``.
|
||||
bool disable_builtin_labels = 5;
|
||||
|
||||
// OpenTelemetry `Resource <https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/logs/v1/logs.proto#L51>`_
|
||||
// attributes are filled with Envoy node info.
|
||||
// Example: ``resource_attributes { values { key: "region" value { string_value: "cn-north-7" } } }``.
|
||||
|
@ -41,4 +47,14 @@ message OpenTelemetryAccessLogConfig {
|
|||
// See 'attributes' in the LogResource proto for more details.
|
||||
// Example: ``attributes { values { key: "user_agent" value { string_value: "%REQ(USER-AGENT)%" } } }``.
|
||||
opentelemetry.proto.common.v1.KeyValueList attributes = 3;
|
||||
|
||||
// Optional. Additional prefix to use on OpenTelemetry access logger stats. If empty, the stats will be rooted at
|
||||
// ``access_logs.open_telemetry_access_log.``. If non-empty, stats will be rooted at
|
||||
// ``access_logs.open_telemetry_access_log.<stat_prefix>.``.
|
||||
string stat_prefix = 6;
|
||||
|
||||
// Specifies a collection of Formatter plugins that can be called from the access log configuration.
|
||||
// See the formatters extensions documentation for details.
|
||||
// [#extension-category: envoy.formatter]
|
||||
repeated config.core.v3.TypedExtensionConfig formatters = 7;
|
||||
}
|
||||
|
|
|
@ -4,8 +4,6 @@ package envoy.extensions.bootstrap.internal_listener.v3;
|
|||
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "xds/annotations/v3/status.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
|
@ -14,7 +12,6 @@ option java_outer_classname = "InternalListenerProto";
|
|||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/bootstrap/internal_listener/v3;internal_listenerv3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
option (xds.annotations.v3.file_status).work_in_progress = true;
|
||||
|
||||
// [#protodoc-title: Internal Listener]
|
||||
// Internal Listener :ref:`overview <config_internal_listener>`.
|
||||
|
|
|
@ -2,8 +2,13 @@ syntax = "proto3";
|
|||
|
||||
package envoy.extensions.clusters.dynamic_forward_proxy.v3;
|
||||
|
||||
import "envoy/config/cluster/v3/cluster.proto";
|
||||
import "envoy/config/core/v3/address.proto";
|
||||
import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto";
|
||||
|
||||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
@ -23,11 +28,27 @@ message ClusterConfig {
|
|||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig";
|
||||
|
||||
// The DNS cache configuration that the cluster will attach to. Note this configuration must
|
||||
// match that of associated :ref:`dynamic forward proxy HTTP filter configuration
|
||||
// <envoy_v3_api_field_extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig.dns_cache_config>`.
|
||||
common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1
|
||||
[(validate.rules).message = {required: true}];
|
||||
oneof cluster_implementation_specifier {
|
||||
// The DNS cache configuration that the cluster will attach to. Note this configuration must
|
||||
// match that of associated :ref:`dynamic forward proxy HTTP filter configuration
|
||||
// <envoy_v3_api_field_extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig.dns_cache_config>`.
|
||||
common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1;
|
||||
|
||||
// Configuration for sub clusters, when this configuration is enabled,
|
||||
// Envoy will create an independent sub cluster dynamically for each host:port.
|
||||
// Most of the configuration of a sub cluster is inherited from the current cluster,
|
||||
// i.e. health_checks, dns_resolvers, etc.
|
||||
// And the load_assignment will be set to the single endpoint, host:port.
|
||||
//
|
||||
// Compared to the dns_cache_config, it has the following advantages:
|
||||
//
|
||||
// 1. sub clusters will be created with the STRICT_DNS DiscoveryType,
|
||||
// so that Envoy will use all of the IPs resolved from the host.
|
||||
//
|
||||
// 2. each sub cluster is a full-featured cluster, with lb_policy, health checks, etc. enabled.
|
||||
//
|
||||
SubClustersConfig sub_clusters_config = 4;
|
||||
}
|
||||
|
||||
// If true allow the cluster configuration to disable the auto_sni and auto_san_validation options
|
||||
// in the :ref:`cluster's upstream_http_protocol_options
|
||||
|
@ -39,7 +60,7 @@ message ClusterConfig {
|
|||
// resolved address for the new connection matches the peer address of the connection and
|
||||
// the TLS certificate is also valid for the new hostname. For example, if a connection
|
||||
// has previously been established to foo.example.com at IP 1.2.3.4 with a certificate
|
||||
// that is valid for `*.example.com`, then this connection could be used for requests to
|
||||
// that is valid for ``*.example.com``, then this connection could be used for requests to
|
||||
// bar.example.com if that also resolved to 1.2.3.4.
|
||||
//
|
||||
// .. note::
|
||||
|
@ -54,3 +75,22 @@ message ClusterConfig {
|
|||
//
|
||||
bool allow_coalesced_connections = 3;
|
||||
}
|
||||
|
||||
// Configuration for sub clusters. Hard code STRICT_DNS cluster type now.
|
||||
message SubClustersConfig {
|
||||
// The :ref:`load balancer type <arch_overview_load_balancing_types>` to use
|
||||
// when picking a host in a sub cluster. Note that CLUSTER_PROVIDED is not allowed here.
|
||||
config.cluster.v3.Cluster.LbPolicy lb_policy = 1 [(validate.rules).enum = {defined_only: true}];
|
||||
|
||||
// The maximum number of sub clusters that the DFP cluster will hold. If not specified defaults to 1024.
|
||||
google.protobuf.UInt32Value max_sub_clusters = 2 [(validate.rules).uint32 = {gt: 0}];
|
||||
|
||||
// The TTL for sub clusters that are unused. Sub clusters that have not been used in the configured time
|
||||
// interval will be purged. If not specified defaults to 5m.
|
||||
google.protobuf.Duration sub_cluster_ttl = 3 [(validate.rules).duration = {gt {}}];
|
||||
|
||||
// Sub clusters that should be created & warmed upon creation. This might provide a
|
||||
// performance improvement, in the form of cache hits, for sub clusters that are going to be
|
||||
// warmed during steady state and are known at config load time.
|
||||
repeated config.core.v3.SocketAddress preresolve_clusters = 4;
|
||||
}
|
||||
|
|
|
@ -67,9 +67,9 @@ message DnsCacheConfig {
|
|||
|
||||
// The minimum rate that DNS resolution will occur. Per ``dns_refresh_rate``, once a host is
|
||||
// resolved, the DNS TTL will be used, with a minimum set by ``dns_min_refresh_rate``.
|
||||
// ``dns_min_refresh_rate`` defaults to 5s and must also be >= 5s.
|
||||
// ``dns_min_refresh_rate`` defaults to 5s and must also be >= 1s.
|
||||
google.protobuf.Duration dns_min_refresh_rate = 14
|
||||
[(validate.rules).duration = {gte {seconds: 5}}];
|
||||
[(validate.rules).duration = {gte {seconds: 1}}];
|
||||
|
||||
// The TTL for hosts that are unused. Hosts that have not been used in the configured time
|
||||
// interval will be purged. If not specified defaults to 5m.
|
||||
|
@ -139,7 +139,6 @@ message DnsCacheConfig {
|
|||
// and are then retried using the standard refresh rates. Defaults to 5s if not set.
|
||||
google.protobuf.Duration dns_query_timeout = 11 [(validate.rules).duration = {gt {}}];
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
// Configuration to flush the DNS cache to long term storage.
|
||||
config.common.key_value.v3.KeyValueStoreConfig key_value_config = 13;
|
||||
}
|
||||
|
|
|
@ -5,7 +5,6 @@ package envoy.extensions.common.matching.v3;
|
|||
import "envoy/config/common/matcher/v3/matcher.proto";
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
|
||||
import "xds/annotations/v3/status.proto";
|
||||
import "xds/type/matcher/v3/matcher.proto";
|
||||
|
||||
import "envoy/annotations/deprecation.proto";
|
||||
|
@ -24,8 +23,6 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// decorating an existing extension with a matcher, which can be used to match against
|
||||
// relevant protocol data.
|
||||
message ExtensionWithMatcher {
|
||||
option (xds.annotations.v3.message_status).work_in_progress = true;
|
||||
|
||||
// The associated matcher. This is deprecated in favor of xds_matcher.
|
||||
config.common.matcher.v3.Matcher matcher = 1
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
@ -37,3 +34,9 @@ message ExtensionWithMatcher {
|
|||
config.core.v3.TypedExtensionConfig extension_config = 2
|
||||
[(validate.rules).message = {required: true}];
|
||||
}
|
||||
|
||||
// Extra settings on a per virtualhost/route/weighted-cluster level.
|
||||
message ExtensionWithMatcherPerRoute {
|
||||
// Matcher override.
|
||||
xds.type.matcher.v3.Matcher xds_matcher = 1;
|
||||
}
|
||||
|
|
|
@ -130,3 +130,15 @@ message LocalRateLimitDescriptor {
|
|||
// Token Bucket algorithm for local ratelimiting.
|
||||
type.v3.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}];
|
||||
}
|
||||
|
||||
// Configuration used to enable local cluster level rate limiting where the token buckets
|
||||
// will be shared across all the Envoy instances in the local cluster.
|
||||
// A share will be calculated based on the membership of the local cluster dynamically
|
||||
// and the configuration. When the limiter refilling the token bucket, the share will be
|
||||
// applied. By default, the token bucket will be shared evenly.
|
||||
//
|
||||
// See :ref:`local cluster name
|
||||
// <envoy_v3_api_field_config.bootstrap.v3.ClusterManager.local_cluster_name>` for more context
|
||||
// about local cluster.
|
||||
message LocalClusterRateLimit {
|
||||
}
|
||||
|
|
|
@ -0,0 +1,65 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package envoy.extensions.filters.common.set_filter_state.v3;
|
||||
|
||||
import "envoy/config/core/v3/substitution_format_string.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.extensions.filters.common.set_filter_state.v3";
|
||||
option java_outer_classname = "ValueProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/set_filter_state/v3;set_filter_statev3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Set-Filter-State filter state value]
|
||||
|
||||
// A filter state key and value pair.
|
||||
// [#next-free-field: 7]
|
||||
message FilterStateValue {
|
||||
enum SharedWithUpstream {
|
||||
// Object is not shared with the upstream internal connections.
|
||||
NONE = 0;
|
||||
|
||||
// Object is shared with the upstream internal connection.
|
||||
ONCE = 1;
|
||||
|
||||
// Object is shared with the upstream internal connection and any internal connection upstream from it.
|
||||
TRANSITIVE = 2;
|
||||
}
|
||||
|
||||
oneof key {
|
||||
option (validate.required) = true;
|
||||
|
||||
// Filter state object key. The key is used to lookup the object factory, unless :ref:`factory_key
|
||||
// <envoy_v3_api_field_extensions.filters.common.set_filter_state.v3.FilterStateValue.factory_key>` is set. See
|
||||
// :ref:`the well-known filter state keys <well_known_filter_state>` for a list of valid object keys.
|
||||
string object_key = 1 [(validate.rules).string = {min_len: 1}];
|
||||
}
|
||||
|
||||
// Optional filter object factory lookup key. See :ref:`the well-known filter state keys <well_known_filter_state>`
|
||||
// for a list of valid factory keys.
|
||||
string factory_key = 6;
|
||||
|
||||
oneof value {
|
||||
option (validate.required) = true;
|
||||
|
||||
// Uses the :ref:`format string <config_access_log_format_strings>` to
|
||||
// instantiate the filter state object value.
|
||||
config.core.v3.SubstitutionFormatString format_string = 2;
|
||||
}
|
||||
|
||||
// If marked as read-only, the filter state key value is locked, and cannot
|
||||
// be overridden by any filter, including this filter.
|
||||
bool read_only = 3;
|
||||
|
||||
// Configures the object to be shared with the upstream internal connections. See :ref:`internal upstream
|
||||
// transport <config_internal_upstream_transport>` for more details on the filter state sharing with
|
||||
// the internal connections.
|
||||
SharedWithUpstream shared_with_upstream = 4;
|
||||
|
||||
// Skip the update if the value evaluates to an empty string.
|
||||
// This option can be used to supply multiple alternatives for the same filter state object key.
|
||||
bool skip_if_empty = 5;
|
||||
}
|
|
@ -4,6 +4,7 @@ package envoy.extensions.filters.http.alternate_protocols_cache.v3;
|
|||
|
||||
import "envoy/config/core/v3/protocol.proto";
|
||||
|
||||
import "envoy/annotations/deprecation.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.extensions.filters.http.alternate_protocols_cache.v3";
|
||||
|
@ -17,9 +18,8 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// Configuration for the alternate protocols cache HTTP filter.
|
||||
// [#extension: envoy.filters.http.alternate_protocols_cache]
|
||||
message FilterConfig {
|
||||
// If set, causes the use of the alternate protocols cache, which is responsible for
|
||||
// parsing and caching HTTP Alt-Svc headers. This enables the use of HTTP/3 for upstream
|
||||
// servers that advertise supporting it.
|
||||
// TODO(RyanTheOptimist): Make this field required when HTTP/3 is enabled via auto_http.
|
||||
config.core.v3.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 1;
|
||||
// This field is ignored: the alternate protocols cache filter will use the
|
||||
// cache for the cluster the request is routed to.
|
||||
config.core.v3.AlternateProtocolsCacheOptions alternate_protocols_cache_options = 1
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
}
|
||||
|
|
|
@ -17,6 +17,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// [#extension: envoy.filters.http.aws_lambda]
|
||||
|
||||
// AWS Lambda filter config
|
||||
// [#next-free-field: 7]
|
||||
message Config {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.filter.http.aws_lambda.v2alpha.Config";
|
||||
|
@ -42,6 +43,51 @@ message Config {
|
|||
|
||||
// Determines the way to invoke the Lambda function.
|
||||
InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}];
|
||||
|
||||
// Indicates that before signing headers, the host header will be swapped with
|
||||
// this value. If not set or empty, the original host header value
|
||||
// will be used and no rewrite will happen.
|
||||
//
|
||||
// Note: this rewrite affects both signing and host header forwarding. However, this
|
||||
// option shouldn't be used with
|
||||
// :ref:`HCM host rewrite <envoy_v3_api_field_config.route.v3.RouteAction.host_rewrite_literal>` given that the
|
||||
// value set here would be used for signing whereas the value set in the HCM would be used
|
||||
// for host header forwarding which is not the desired outcome.
|
||||
// Changing the value of the host header can result in a different route to be selected
|
||||
// if an HTTP filter after AWS lambda re-evaluates the route (clears route cache).
|
||||
string host_rewrite = 4;
|
||||
|
||||
// Specifies the credentials profile to be used from the AWS credentials file.
|
||||
// This parameter is optional. If set, it will override the value set in the AWS_PROFILE env variable and
|
||||
// the provider chain is limited to the AWS credentials file Provider.
|
||||
// If credentials configuration is provided, this configuration will be ignored.
|
||||
// If this field is provided, then the default providers chain specified in the documentation will be ignored.
|
||||
// (See :ref:`default credentials providers <config_http_filters_aws_lambda_credentials>`).
|
||||
string credentials_profile = 5;
|
||||
|
||||
// Specifies the credentials to be used. This parameter is optional and if it is set,
|
||||
// it will override other providers and will take precedence over credentials_profile.
|
||||
// The provider chain is limited to the configuration credentials provider.
|
||||
// If this field is provided, then the default providers chain specified in the documentation will be ignored.
|
||||
// (See :ref:`default credentials providers <config_http_filters_aws_lambda_credentials>`).
|
||||
//
|
||||
// .. warning::
|
||||
// Distributing the AWS credentials via this configuration should not be done in production.
|
||||
Credentials credentials = 6;
|
||||
}
|
||||
|
||||
// AWS Lambda Credentials config.
|
||||
message Credentials {
|
||||
// AWS access key id.
|
||||
string access_key_id = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// AWS secret access key.
|
||||
string secret_access_key = 2 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// AWS session token.
|
||||
// This parameter is optional. If it is set to empty string it will not be consider in the request.
|
||||
// It is required if temporary security credentials retrieved directly from AWS STS operations are used.
|
||||
string session_token = 3;
|
||||
}
|
||||
|
||||
// Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different
|
||||
|
|
|
@ -4,6 +4,8 @@ package envoy.extensions.filters.http.aws_request_signing.v3;
|
|||
|
||||
import "envoy/type/matcher/v3/string.proto";
|
||||
|
||||
import "google/protobuf/duration.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
@ -19,11 +21,29 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// [#extension: envoy.filters.http.aws_request_signing]
|
||||
|
||||
// Top level configuration for the AWS request signing filter.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 8]
|
||||
message AwsRequestSigning {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.filter.http.aws_request_signing.v2alpha.AwsRequestSigning";
|
||||
|
||||
enum SigningAlgorithm {
|
||||
// Use SigV4 for signing
|
||||
AWS_SIGV4 = 0;
|
||||
|
||||
// Use SigV4A for signing
|
||||
AWS_SIGV4A = 1;
|
||||
}
|
||||
|
||||
message QueryString {
|
||||
// Optional expiration time for the query string parameters. As query string parameter based requests are replayable, in effect representing
|
||||
// an API call that has already been authenticated, it is recommended to keep this expiration time as short as feasible.
|
||||
// This value will default to 5 seconds and has a maximum value of 3600 seconds (1 hour).
|
||||
google.protobuf.Duration expiration_time = 1 [(validate.rules).duration = {
|
||||
lte {seconds: 3600}
|
||||
gte {seconds: 1}
|
||||
}];
|
||||
}
|
||||
|
||||
// The `service namespace
|
||||
// <https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces>`_
|
||||
// of the HTTP endpoint.
|
||||
|
@ -31,11 +51,24 @@ message AwsRequestSigning {
|
|||
// Example: s3
|
||||
string service_name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// The `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ hosting the HTTP
|
||||
// endpoint.
|
||||
// Optional region string. If region is not provided, the region will be retrieved from the environment
|
||||
// or AWS configuration files. See :ref:`config_http_filters_aws_request_signing_region` for more details.
|
||||
//
|
||||
// When signing_algorithm is set to ``AWS_SIGV4`` the region is a standard AWS `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ string for the service
|
||||
// hosting the HTTP endpoint.
|
||||
//
|
||||
// Example: us-west-2
|
||||
string region = 2 [(validate.rules).string = {min_len: 1}];
|
||||
//
|
||||
// When signing_algorithm is set to ``AWS_SIGV4A`` the region is used as a region set.
|
||||
//
|
||||
// A region set is a comma separated list of AWS regions, such as ``us-east-1,us-east-2`` or wildcard ``*``
|
||||
// or even region strings containing wildcards such as ``us-east-*``
|
||||
//
|
||||
// Example: '*'
|
||||
//
|
||||
// By configuring a region set, a SigV4A signed request can be sent to multiple regions, rather than being
|
||||
// valid for only a single region destination.
|
||||
string region = 2;
|
||||
|
||||
// Indicates that before signing headers, the host header will be swapped with
|
||||
// this value. If not set or empty, the original host header value
|
||||
|
@ -63,4 +96,24 @@ message AwsRequestSigning {
|
|||
// - exact: bar
|
||||
// When applied, all headers that start with "x-envoy" and headers "foo" and "bar" will not be signed.
|
||||
repeated type.matcher.v3.StringMatcher match_excluded_headers = 5;
|
||||
|
||||
// Optional Signing algorithm specifier, either ``AWS_SIGV4`` or ``AWS_SIGV4A``, defaulting to ``AWS_SIGV4``.
|
||||
SigningAlgorithm signing_algorithm = 6;
|
||||
|
||||
// If set, use the query string to store output of SigV4 or SigV4A calculation, rather than HTTP headers. The ``Authorization`` header will not be modified if ``query_string``
|
||||
// is configured.
|
||||
//
|
||||
// Example:
|
||||
// query_string: {}
|
||||
//
|
||||
QueryString query_string = 7;
|
||||
}
|
||||
|
||||
message AwsRequestSigningPerRoute {
|
||||
// Override the global configuration of the filter with this new config.
|
||||
// This overrides the entire message of AwsRequestSigning and not at field level.
|
||||
AwsRequestSigning aws_request_signing = 1;
|
||||
|
||||
// The human readable prefix to use when emitting stats.
|
||||
string stat_prefix = 2 [(validate.rules).string = {min_len: 1}];
|
||||
}
|
||||
|
|
|
@ -0,0 +1,52 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package envoy.extensions.filters.http.basic_auth.v3;
|
||||
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
|
||||
import "udpa/annotations/sensitive.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.extensions.filters.http.basic_auth.v3";
|
||||
option java_outer_classname = "BasicAuthProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/basic_auth/v3;basic_authv3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Basic Auth]
|
||||
// Basic Auth :ref:`configuration overview <config_http_filters_basic_auth>`.
|
||||
// [#extension: envoy.filters.http.basic_auth]
|
||||
|
||||
// Basic HTTP authentication.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// .. code-block:: yaml
|
||||
//
|
||||
// users:
|
||||
// inline_string: |-
|
||||
// user1:{SHA}hashed_user1_password
|
||||
// user2:{SHA}hashed_user2_password
|
||||
//
|
||||
message BasicAuth {
|
||||
// Username-password pairs used to verify user credentials in the "Authorization" header.
|
||||
// The value needs to be the htpasswd format.
|
||||
// Reference to https://httpd.apache.org/docs/2.4/programs/htpasswd.html
|
||||
config.core.v3.DataSource users = 1 [(udpa.annotations.sensitive) = true];
|
||||
|
||||
// This field specifies the header name to forward a successfully authenticated user to
|
||||
// the backend. The header will be added to the request with the username as the value.
|
||||
//
|
||||
// If it is not specified, the username will not be forwarded.
|
||||
string forward_username_header = 2
|
||||
[(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];
|
||||
}
|
||||
|
||||
// Extra settings that may be added to per-route configuration for
|
||||
// a virtual host or a cluster.
|
||||
message BasicAuthPerRoute {
|
||||
// Username-password pairs for this route.
|
||||
config.core.v3.DataSource users = 1
|
||||
[(validate.rules).message = {required: true}, (udpa.annotations.sensitive) = true];
|
||||
}
|
|
@ -6,10 +6,10 @@ import "envoy/config/route/v3/route_components.proto";
|
|||
import "envoy/type/matcher/v3/string.proto";
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3";
|
||||
option java_outer_classname = "CacheProto";
|
||||
|
@ -20,6 +20,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// [#protodoc-title: HTTP Cache Filter]
|
||||
|
||||
// [#extension: envoy.filters.http.cache]
|
||||
// [#next-free-field: 7]
|
||||
message CacheConfig {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.filter.http.cache.v2alpha.CacheConfig";
|
||||
|
@ -49,9 +50,17 @@ message CacheConfig {
|
|||
repeated config.route.v3.QueryParameterMatcher query_parameters_excluded = 4;
|
||||
}
|
||||
|
||||
// Config specific to the cache storage implementation.
|
||||
// Config specific to the cache storage implementation. Required unless ``disabled``
|
||||
// is true.
|
||||
// [#extension-category: envoy.http.cache]
|
||||
google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];
|
||||
google.protobuf.Any typed_config = 1;
|
||||
|
||||
// When true, the cache filter is a no-op filter.
|
||||
//
|
||||
// Possible use-cases for this include:
|
||||
// - Turning a filter on and off with :ref:`ECDS <envoy_v3_api_file_envoy/service/extension/v3/config_discovery.proto>`.
|
||||
// [#comment: once route-specific overrides are implemented, they are the more likely use-case.]
|
||||
google.protobuf.BoolValue disabled = 5;
|
||||
|
||||
// List of matching rules that defines allowed ``Vary`` headers.
|
||||
//
|
||||
|
@ -79,4 +88,9 @@ message CacheConfig {
|
|||
// Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache
|
||||
// storage implementation may have its own limit beyond which it will reject insertions).
|
||||
uint32 max_body_bytes = 4;
|
||||
|
||||
// By default, a ``cache-control: no-cache`` or ``pragma: no-cache`` header in the request
|
||||
// causes the cache to validate with its upstream even if the lookup is a hit. Setting this
|
||||
// to true will ignore these headers.
|
||||
bool ignore_request_cache_control_header = 6;
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue