Compare commits
534 Commits
chart/v8.3
...
master
Author | SHA1 | Date |
---|---|---|
|
9081324e6b | |
|
cc19ca0746 | |
|
db5d38e826 | |
|
a8e8f4aacd | |
|
e6fa8e56e3 | |
|
4f12337556 | |
|
dd98ecd66a | |
|
c815e182b2 | |
|
96a49735a8 | |
|
d25610acbe | |
|
0f94681cfb | |
|
5d1dea8ba8 | |
|
7f3c6a8868 | |
|
214320b2e4 | |
|
433ac459a0 | |
|
79170dbc4a | |
|
2f95c68bf1 | |
|
da250b7cc7 | |
|
08d78948ac | |
|
d14c84c690 | |
|
2ae71716cc | |
|
6c161bd268 | |
|
9b6894249f | |
|
cffdd53f8e | |
|
ccdc52db1d | |
|
600dcaf4b8 | |
|
def2e22bc2 | |
|
1c5819bce5 | |
|
0e1a1d1d9d | |
|
c8f597d7ce | |
|
faf6f7a057 | |
|
672c554e16 | |
|
f0afe10599 | |
|
120314d95b | |
|
368ca59863 | |
|
79917553b1 | |
|
a4933625e8 | |
|
e053f3b716 | |
|
1f8b0cb718 | |
|
2e92aa8a21 | |
|
d784486390 | |
|
c2cd9ddfc6 | |
|
20e3f63e7c | |
|
f30725562c | |
|
f8829ee1d8 | |
|
ab7b539a47 | |
|
b1fb2d6bfb | |
|
46ab826f03 | |
|
88712774f4 | |
|
c5e28b8fbe | |
|
2b124a957d | |
|
d9f94770a3 | |
|
d0e902dceb | |
|
95495a54c2 | |
|
c9a542be33 | |
|
233307cb95 | |
|
a47f7482e1 | |
|
33a9ce8c80 | |
|
c55dad2cf3 | |
|
dfcf01298f | |
|
8e7ee3b7cf | |
|
9f6c9b536e | |
|
02e88319b7 | |
|
f853d23884 | |
|
ac2dc64c66 | |
|
010ac84078 | |
|
30c8adca06 | |
|
4734dff37d | |
|
0bca8b0352 | |
|
b7929aed97 | |
|
3ab7632e37 | |
|
778869f935 | |
|
680e3dc3d0 | |
|
eb817604ec | |
|
4103bc151d | |
|
70cfbfb935 | |
|
5d5eaa0677 | |
|
6521f85a7e | |
|
642c78428c | |
|
d04280e3a8 | |
|
e8ca65046a | |
|
5f7ac30080 | |
|
2947049e06 | |
|
16c971e3d3 | |
|
1d56ae0965 | |
|
1c96a9df06 | |
|
8b343d5989 | |
|
8d75fd48bc | |
|
9292c47470 | |
|
2d36cf21e0 | |
|
084034461e | |
|
99256f1e12 | |
|
3b18d9a636 | |
|
ab531b7a2b | |
|
f88f6e0e45 | |
|
7b7d39e261 | |
|
75b4a5e445 | |
|
31dd587529 | |
|
bd0dbefdb7 | |
|
0fee7f95bd | |
|
f772a0c332 | |
|
76985900e5 | |
|
d781ecfb9e | |
|
a6afd065b6 | |
|
7a2e1b6d67 | |
|
2bf8880e95 | |
|
2c21e72146 | |
|
799c9a5a39 | |
|
53a1631519 | |
|
086914f235 | |
|
42eab8ab69 | |
|
f621c33a4a | |
|
56a235c1a1 | |
|
3de3ff793e | |
|
3b61f68cdc | |
|
ed2aef94f3 | |
|
d8831a40de | |
|
8239684191 | |
|
1026db35ad | |
|
f9e27c94a8 | |
|
c8edb16264 | |
|
628aba93e5 | |
|
c57fc34b3b | |
|
c543058d66 | |
|
90d7afef29 | |
|
14acd8e8c5 | |
|
72bd2c7ae7 | |
|
2a12d65a44 | |
|
cade34b2d9 | |
|
b43c95bd6d | |
|
19a15543c0 | |
|
3ce2ba28c1 | |
|
c11896ba67 | |
|
2cac062f7f | |
|
5a7c9b90c5 | |
|
f76e5d4642 | |
|
43aab8470d | |
|
ee7c77ad01 | |
|
a289c618aa | |
|
819628d48d | |
|
982be874e1 | |
|
d04b904ab6 | |
|
bbfcd578ab | |
|
25e6949fae | |
|
e3944b055c | |
|
0ebded51d0 | |
|
be9403d528 | |
|
b2a901df2c | |
|
deee4153d1 | |
|
2c9cfaa933 | |
|
71b981eeb9 | |
|
85bba5d86f | |
|
7b7be436c5 | |
|
cc9502a511 | |
|
644eba62f2 | |
|
65f43d1fe5 | |
|
f89c3595ed | |
|
576ff7de02 | |
|
512c71f313 | |
|
63222e20cf | |
|
a56bfce8b7 | |
|
229abf0750 | |
|
ec880db9fe | |
|
cf7ad46a89 | |
|
a5a4c688b1 | |
|
66e86429bc | |
|
81de0a26fc | |
|
6b6b628ab9 | |
|
999f292b84 | |
|
0341aa7adb | |
|
ebea5f5a55 | |
|
f4c0535e10 | |
|
3d01d338a5 | |
|
6dc259f448 | |
|
3164b8bf61 | |
|
36b3d213ca | |
|
ccbf179736 | |
|
1eccac57d5 | |
|
ac662313a1 | |
|
7fc11933e4 | |
|
1cd8650cb9 | |
|
d94b4c21a2 | |
|
cac12065fb | |
|
90c31adb0e | |
|
27adba6eab | |
|
9e1ea1c9aa | |
|
8b7723c0a9 | |
|
47c72fa0f7 | |
|
7e23a9b924 | |
|
c5fc4a7ce4 | |
|
ac19ea6609 | |
|
28ff281739 | |
|
c316a95e2a | |
|
9209418d67 | |
|
fe02667366 | |
|
0a620e8abb | |
|
2b55a41868 | |
|
bc148a7620 | |
|
650418f364 | |
|
2820aafdf7 | |
|
89b318f94a | |
|
e7f36a6555 | |
|
57344cd774 | |
|
767c429dc3 | |
|
179ee90179 | |
|
59f9c9185c | |
|
10171ad4ca | |
|
9fe6be4d95 | |
|
12418ed2ff | |
|
ccaf980639 | |
|
ba9e399058 | |
|
772ce1e044 | |
|
5d07d29382 | |
|
4d936bfcdd | |
|
68e7e3ce88 | |
|
4c8cc88f38 | |
|
06a0d7b45c | |
|
2513ae1c13 | |
|
f937f08d63 | |
|
e4d0b43df7 | |
|
4a8a0132dd | |
|
78ee2df1fc | |
|
89228f169a | |
|
585922f950 | |
|
40ad84ce0a | |
|
da9b271c36 | |
|
3d5635fc99 | |
|
dc39f11a17 | |
|
f3ab7183df | |
|
7f56afa587 | |
|
d6a828895a | |
|
3155b60e8a | |
|
9593640f2c | |
|
2a88d45bb6 | |
|
f17e1898c4 | |
|
5dd181f03e | |
|
e9014c6493 | |
|
56ec74de75 | |
|
b987a5c2d3 | |
|
f6a6ac57df | |
|
e5848545c4 | |
|
1ac7b74f29 | |
|
a28ef97c45 | |
|
cb6eea73cb | |
|
172ad72ab4 | |
|
330582b9a4 | |
|
3099436e2d | |
|
30a6ddd292 | |
|
03a32471a3 | |
|
d740937f36 | |
|
a7fc5d9343 | |
|
faf29b6012 | |
|
a666525df7 | |
|
b965751835 | |
|
5c5325ffeb | |
|
60867c4329 | |
|
fb7455dd68 | |
|
adb17e3894 | |
|
74fe4ed608 | |
|
934711a92a | |
|
114e2a3bc7 | |
|
eef86a4afe | |
|
31734e52ef | |
|
40b9d4690d | |
|
cd26813101 | |
|
8489c4067f | |
|
3950d5962e | |
|
f2b7a68586 | |
|
87747d5ad7 | |
|
5107d72b37 | |
|
4008635f93 | |
|
6ab7c1a40d | |
|
a2e71ecc0e | |
|
a13b6cc131 | |
|
d0cb1f7f2d | |
|
658f22ea83 | |
|
04bd2b25bb | |
|
ead6531e22 | |
|
f2299728d9 | |
|
f07f87ad0d | |
|
91b41ffda8 | |
|
2552454023 | |
|
3a61acc77b | |
|
11534472d7 | |
|
aeb71dfced | |
|
f1c7c67d6c | |
|
cc7ca53fbc | |
|
910b0573fb | |
|
dc35488c10 | |
|
9b16792ec0 | |
|
397aadef61 | |
|
4ee26bca64 | |
|
5698251d8f | |
|
1429b4a166 | |
|
671cbe915c | |
|
239dc73f8f | |
|
d48da3019d | |
|
f1e9138549 | |
|
674cec267f | |
|
e135c08387 | |
|
6d065220c0 | |
|
5ddb009343 | |
|
88a5a286fa | |
|
7631b1a55f | |
|
c28d9fa4fc | |
|
998453d2b1 | |
|
55aaa7187a | |
|
d50e69b8e9 | |
|
f7ae900603 | |
|
cc2d8c05fd | |
|
d4ce1e5db7 | |
|
969d44e6ae | |
|
faf305224d | |
|
2deda8724f | |
|
778688a74e | |
|
bfefad2bee | |
|
9db6319897 | |
|
561d0f9667 | |
|
19f9e0069e | |
|
3151e99ec8 | |
|
5d7c00b916 | |
|
51fdee2e28 | |
|
0c5052fcf2 | |
|
34d6008f6f | |
|
e08ee1bfa5 | |
|
390332c114 | |
|
c03d0107e7 | |
|
3ecb1972de | |
|
b32fd681c4 | |
|
b25ba6428e | |
|
1eb324b3ef | |
|
e44c54eed3 | |
|
db9c2fcddc | |
|
d21b3e5cc5 | |
|
b27207eaa1 | |
|
43b612222d | |
|
77af561057 | |
|
350c65708e | |
|
1fc1d04b43 | |
|
b7265c260f | |
|
bc5904f1fe | |
|
04b3841e03 | |
|
7ec36acae5 | |
|
a08532d766 | |
|
c65e8b6109 | |
|
af6ed4bf64 | |
|
70008af3e9 | |
|
5e7b7a6115 | |
|
7afe60257b | |
|
0d8ad37f35 | |
|
35913cd9b9 | |
|
0768ff846f | |
|
eacbb1a300 | |
|
ba882cdf38 | |
|
1854ffdf85 | |
|
6595ae997c | |
|
c62be2cd83 | |
|
16b1173f1b | |
|
7574f1f890 | |
|
3b1036d5b7 | |
|
232a480530 | |
|
9bb6a876c4 | |
|
9b19425da2 | |
|
e9c2058e9b | |
|
31d7fc97a8 | |
|
97fb84d8c8 | |
|
f39bda4e0b | |
|
0b51ef8157 | |
|
34cc83d147 | |
|
17a29d8b34 | |
|
b5cab692c5 | |
|
47ed9a3893 | |
|
5d8570db3a | |
|
37ce239f1c | |
|
097e311a98 | |
|
dc00c0bd3f | |
|
2a6bb29982 | |
|
359d314357 | |
|
54990b2274 | |
|
9e25b379ae | |
|
13c5485d01 | |
|
ec1cd9ef02 | |
|
1bd83d4b14 | |
|
2297a5615a | |
|
0fbb8de9fe | |
|
1e1df278a5 | |
|
d980812241 | |
|
13762b757c | |
|
2a84e9bc0c | |
|
2272b6489d | |
|
de4f9dae69 | |
|
97fd61df30 | |
|
186682b8c5 | |
|
36d69fcb70 | |
|
9d13603872 | |
|
a145496c13 | |
|
e38f25d298 | |
|
97fa2d60ff | |
|
b07ceb249b | |
|
c8bc33d927 | |
|
df88bc4a38 | |
|
fb14b27108 | |
|
8ca5a6f461 | |
|
44b53cee9f | |
|
3f9dc81f35 | |
|
d67aaa5e5b | |
|
6660e9cab0 | |
|
3af77aa59d | |
|
a523af5194 | |
|
5e02cc64f3 | |
|
f8da475721 | |
|
7f5fe57e0a | |
|
13410eb153 | |
|
c9cdb303bb | |
|
fb27220f7a | |
|
210dc8c3df | |
|
0c7cad07b1 | |
|
e611335e74 | |
|
ef18e62bf9 | |
|
e0ffc22e69 | |
|
4d618089b5 | |
|
ea1c2e9836 | |
|
7df31ae570 | |
|
718ff190b6 | |
|
4652aa52bb | |
|
f3d38b6e63 | |
|
7015921c66 | |
|
2e353869cb | |
|
cbf6277341 | |
|
5b4510f2a2 | |
|
215346737e | |
|
eb67c840be | |
|
e395f1f081 | |
|
7a6edf74da | |
|
f70b5ae69c | |
|
aee9d48591 | |
|
e2842d95b7 | |
|
12715b9b3b | |
|
9865a4d99e | |
|
dccda27a6e | |
|
66d66912e1 | |
|
edfd64ac74 | |
|
9a0f074100 | |
|
0911ee65eb | |
|
c4bdb83e08 | |
|
5256926df4 | |
|
d3aa93c9b3 | |
|
0244f2638a | |
|
00b64b8007 | |
|
700c590cff | |
|
f3bf5b92df | |
|
559c6fb994 | |
|
14c09e453f | |
|
dc4bf5f6d3 | |
|
28ada88dd2 | |
|
bee3e8b662 | |
|
fa5b086686 | |
|
86a2afcfc3 | |
|
c7e317efeb | |
|
110320614d | |
|
2d80737988 | |
|
d320e517df | |
|
19321e6ce4 | |
|
b97464f780 | |
|
fb5c1be3f5 | |
|
fbf0865bbd | |
|
ef421ee17f | |
|
a8d1a18069 | |
|
84f3473798 | |
|
f9e3590c6d | |
|
c744a05aee | |
|
e679a55f37 | |
|
e1f21398de | |
|
6c509eb937 | |
|
b31d518847 | |
|
e6f3dd5ee7 | |
|
515cfc948d | |
|
21ed210ed4 | |
|
838277b45c | |
|
185f701eab | |
|
b21dfde558 | |
|
d9b48353f4 | |
|
b8c6a2457a | |
|
22c0f65da3 | |
|
50d9cd0f55 | |
|
11408dec56 | |
|
7dc4e91fd8 | |
|
507bf91f95 | |
|
c0079ba744 | |
|
88f267808a | |
|
64a3dace3a | |
|
388dc0dad7 | |
|
c669cdcca0 | |
|
cffc9c67a9 | |
|
0826019c50 | |
|
a15c0d36f6 | |
|
db619f7eac | |
|
149915d807 | |
|
3b00747690 | |
|
90f9e6aeb4 | |
|
e97b0ae4b6 | |
|
12b79cf47f | |
|
e577e01861 | |
|
7d37953e82 | |
|
27612b2a5c | |
|
cff3129c7d | |
|
d61af3952a | |
|
7c0df5ad05 | |
|
576bcaaa6a | |
|
c6dd19f9b8 | |
|
9210638f41 | |
|
337faac002 | |
|
70c53c50fb | |
|
c0173dddf0 | |
|
bce1b1565a | |
|
cab7c5143b | |
|
9709774d5c | |
|
9fa37ebc58 | |
|
43d5fa8f6a | |
|
9e50c49ce7 | |
|
8fe3f80efc | |
|
5f41fd28aa | |
|
7849c50230 | |
|
1d7ced4cd2 | |
|
53fe79fb66 | |
|
47122ec4ee | |
|
3d1ef9c918 | |
|
10cfe96c8f | |
|
583ebc2f52 | |
|
8d264ab98a | |
|
a87bf9a127 | |
|
239bf149d2 | |
|
98a9aec0d0 | |
|
98741d598d |
|
@ -0,0 +1,929 @@
|
|||
# Developing Emissary-ingress
|
||||
|
||||
Welcome to the Emissary-ingress Community!
|
||||
|
||||
Thank you for contributing, we appreciate small and large contributions and look forward to working with you to make Emissary-ingress better.
|
||||
|
||||
This document is intended for developers looking to contribute to the Emissary-ingress project. In this document you will learn how to get your development environment setup and how to contribute to the project. Also, you will find more information about the internal components of Emissary-ingress and other questions about working on the project.
|
||||
|
||||
> Looking for end user guides for Emissary-ingress? You can check out the end user guides at <https://www.getambassador.io/docs/emissary/>.
|
||||
|
||||
After reading this document if you have questions we encourage you to join us on our [Slack channel](https://communityinviter.com/apps/cloud-native/cncf) in the #emissary-ingress channel.
|
||||
|
||||
- [Code of Conduct](../Community/CODE_OF_CONDUCT.md)
|
||||
- [Governance](../Community/GOVERNANCE.md)
|
||||
- [Maintainers](../Community/MAINTAINERS.md)
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Development Setup](#development-setup)
|
||||
- [Step 1: Install Build Dependencies](#step-1-install-build-dependencies)
|
||||
- [Step 2: Clone Project](#step-2-clone-project)
|
||||
- [Step 3: Configuration](#step-3-configuration)
|
||||
- [Step 4: Building](#step-4-building)
|
||||
- [Step 5: Push](#step-5-push)
|
||||
- [Step 6: Deploy](#step-6-deploy)
|
||||
- [Step 7: Dev-loop](#step-7-dev-loop)
|
||||
- [What should I do next?](#what-should-i-do-next)
|
||||
- [Contributing](#contributing)
|
||||
- [Submitting a Pull Request (PR)](#submitting-a-pull-request-pr)
|
||||
- [Pull Request Review Process](#pull-request-review-process)
|
||||
- [Rebasing a branch under review](#rebasing-a-branch-under-review)
|
||||
- [Fixup commits during PR review](#fixup-commits-during-pr-review)
|
||||
- [Development Workflow](#development-workflow)
|
||||
- [Branching Strategy](#branching-strategy)
|
||||
- [Backport Strategy](#backport-strategy)
|
||||
- [What if I need a patch to land in a previous supported version?](#what-if-i-need-a-patch-to-land-in-a-previous-supported-version)
|
||||
- [What if my patch is only for a previous supported version?](#what-if-my-patch-is-only-for-a-previous-supported-version)
|
||||
- [What if I'm still not sure?](#what-if-im-still-not-sure)
|
||||
- [Merge Strategy](#merge-strategy)
|
||||
- [What about merge commit strategy?](#what-about-merge-commit-strategy)
|
||||
- [Contributing to the Docs](#contributing-to-the-docs)
|
||||
- [Advanced Topics](#advanced-topics)
|
||||
- [Running Emissary-ingress internals locally](#running-emissary-ingress-internals-locally)
|
||||
- [Setting up diagd](#setting-up-diagd)
|
||||
- [Changing the ambassador root](#changing-the-ambassador-root)
|
||||
- [Getting envoy](#getting-envoy)
|
||||
- [Shutting up the pod labels error](#shutting-up-the-pod-labels-error)
|
||||
- [Extra credit](#extra-credit)
|
||||
- [Debugging and Developing Envoy Configuration](#debugging-and-developing-envoy-configuration)
|
||||
- [Making changes to Envoy](#making-changes-to-envoy)
|
||||
- [1. Preparing your machine](#1-preparing-your-machine)
|
||||
- [2. Setting up your workspace to hack on Envoy](#2-setting-up-your-workspace-to-hack-on-envoy)
|
||||
- [3. Hacking on Envoy](#3-hacking-on-envoy)
|
||||
- [4. Building and testing your hacked-up Envoy](#4-building-and-testing-your-hacked-up-envoy)
|
||||
- [5. Test Devloop](#5-test-devloop)
|
||||
- [6. Protobuf changes](#6-protobuf-changes)
|
||||
- [7. Finalizing your changes](#7-finalizing-your-changes)
|
||||
- [8. Final Checklist](#8-final-checklist)
|
||||
- [Developing Emissary-ingress (Maintainers-only advice)](#developing-emissary-ingress-maintainers-only-advice)
|
||||
- [Updating license documentation](#updating-license-documentation)
|
||||
- [Upgrading Python dependencies](#upgrading-python-dependencies)
|
||||
- [FAQ](#faq)
|
||||
- [How do I find out what build targets are available?](#how-do-i-find-out-what-build-targets-are-available)
|
||||
- [How do I develop on a Mac with Apple Silicon?](#how-do-i-develop-on-a-mac-with-apple-silicon)
|
||||
- [How do I develop on Windows using WSL?](#how-do-i-develop-on-windows-using-wsl)
|
||||
- [How do I test using a private Docker repository?](#how-do-i-test-using-a-private-docker-repository)
|
||||
- [How do I change the loglevel at runtime?](#how-do-i-change-the-loglevel-at-runtime)
|
||||
- [Can I build from a docker container instead of on my local computer?](#can-i-build-from-a-docker-container-instead-of-on-my-local-computer)
|
||||
- [How do I clear everything out to make sure my build runs like it will in CI?](#how-do-i-clear-everything-out-to-make-sure-my-build-runs-like-it-will-in-ci)
|
||||
- [My editor is changing `go.mod` or `go.sum`, should I commit that?](#my-editor-is-changing-gomod-or-gosum-should-i-commit-that)
|
||||
- [How do I debug "This should not happen in CI" errors?](#how-do-i-debug-this-should-not-happen-in-ci-errors)
|
||||
- [How do I run Emissary-ingress tests?](#how-do-i-run-emissary-ingress-tests)
|
||||
- [How do I type check my python code?](#how-do-i-type-check-my-python-code)
|
||||
|
||||
## Development Setup
|
||||
|
||||
This section provides the steps for getting started developing on Emissary-ingress. There are a number of prerequisites that need to be setup. In general, our tooling tries to detect any missing requirements and provide a friendly error message. If you ever find that this is not the case please file an issue.
|
||||
|
||||
> **Note:** To enable developers contributing on Macs with Apple Silicon, we ensure that the artifacts are built for `linux/amd64`
|
||||
> rather than the host `linux/arm64` architecture. This can be overridden using the `BUILD_ARCH` environment variable. Pull Requests are welcome :).
|
||||
|
||||
### Step 1: Install Build Dependencies
|
||||
|
||||
Here is a list of tools that are used by the build system to generate the build artifacts, packaging them up into containers, generating crds, helm charts and for running tests.
|
||||
|
||||
- git
|
||||
- make
|
||||
- docker (make sure you can run docker commands as your dev user without sudo)
|
||||
- bash
|
||||
- rsync
|
||||
- golang - `go.mod` for current version
|
||||
- python (>=3.10.9)
|
||||
- kubectl
|
||||
- a kubernetes cluster (you need permissions to create resources, i.e. crds, deployments, services, etc...)
|
||||
- a Docker registry
|
||||
- bsdtar (Provided by libarchive-tools on Ubuntu 19.10 and newer)
|
||||
- gawk
|
||||
- jq
|
||||
- helm
|
||||
|
||||
### Step 2: Clone Project
|
||||
|
||||
If you haven't already then this would be a good time to clone the project running the following commands:
|
||||
|
||||
```bash
|
||||
# clone to your preferred folder
|
||||
git clone https://github.com/emissary-ingress/emissary.git
|
||||
|
||||
# navigate to project
|
||||
cd emissary
|
||||
```
|
||||
|
||||
### Step 3: Configuration
|
||||
|
||||
You can configure the build system using environment variables, two required variables are used for setting the container registry and the kubeconfig used.
|
||||
|
||||
> **Important**: the test and build system perform destructive operations against your cluster. Therefore, we recommend that you
|
||||
> use a development cluster. Setting the DEV_KUBECONFIG variable described below ensures you don't accidentally perform actions on a production cluster.
|
||||
|
||||
Open a terminal in the location where you cloned the repository and run the following commands:
|
||||
|
||||
```bash
|
||||
# set container registry using `export DEV_REGISTRY=<your-registry>`
|
||||
# note: you need to be logged in and have permissions to push
|
||||
# Example:
|
||||
export DEV_REGISTRY=docker.io/parsec86
|
||||
|
||||
# set kube config file using `export DEV_KUBECONFIG=<dev-kubeconfig>`
|
||||
# your cluster needs the ability to read from the configured container registry
|
||||
export DEV_KUBECONFIG="$HOME/.kube/dev-config.yaml"
|
||||
|
||||
```
|
||||
|
||||
### Step 4: Building
|
||||
|
||||
The build system for this project leverages `make` and multi-stage `docker` builds to produce the following containers:
|
||||
|
||||
- `emissary.local/emissary` - single deployable container for Emissary-ingress
|
||||
- `emissary.local/kat-client` - test client container used for testing
|
||||
- `emissary.local/kat-server` - test server container used for testing
|
||||
|
||||
Using the terminal session you opened in step 3, run the following commands
|
||||
|
||||
>
|
||||
|
||||
```bash
|
||||
# This will pull and build the necessary docker containers and produce multiple containers.
|
||||
# If this is the first time running this command it will take a little bit while the base images are built up and cached.
|
||||
make images
|
||||
|
||||
# verify containers were successfully created, you should also see some of the intermediate builder containers as well
|
||||
docker images | grep emissary.local
|
||||
```
|
||||
|
||||
*What just happened?*
|
||||
|
||||
The build system generated a build container that pulled in envoy, the build dependencies, built various binaries from within this project and packaged them into a single deployable container. More information on this can be found in the [Architecture Document](ARCHITECTURE.md).
|
||||
|
||||
### Step 5: Push
|
||||
|
||||
Now that you have successfully built the containers, it's time to push them to your container registry, which you set up in step 3.
|
||||
|
||||
In the same terminal session you can run the following command:
|
||||
|
||||
```bash
|
||||
# re-tags the images and pushes them to your configured container registry
|
||||
# docker must be able to login to your registry and you have to have push permissions
|
||||
make push
|
||||
|
||||
# you can view the newly tag images by running
|
||||
docker images | grep <your-registry>
|
||||
|
||||
# alternatively, we have two make targets that provide information as well
|
||||
make env
|
||||
|
||||
# or in a bash export friendly format
|
||||
make export
|
||||
```
|
||||
|
||||
### Step 6: Deploy
|
||||
|
||||
Now it's time to deploy the container out to your Kubernetes cluster that was configured in step 3. Hopefully, it is already becoming apparent that we love to leverage Make to handle the complexity for you :).
|
||||
|
||||
```bash
|
||||
# generate helm charts and K8's Configs with your container swapped in and apply them to your cluster
|
||||
make deploy
|
||||
|
||||
# check your cluster to see if emissary is running
|
||||
# note: kubectl doesn't know about DEV_KUBECONFIG so you may need to ensure KUBECONFIG is pointing to the correct cluster
|
||||
kubectl get pod -n ambassador
|
||||
```
|
||||
|
||||
🥳 If all has gone well then you should have your development environment setup for building and testing Emissary-ingress.
|
||||
|
||||
### Step 7: Dev-loop
|
||||
|
||||
Now that you are all setup and able to deploy a development container of Emissary-ingress to a cluster, it is time to start making some changes.
|
||||
|
||||
Look up an issue that you want to work on, assign it to yourself, and if you have any questions feel free to ping us on slack in the #emissary-dev channel.
|
||||
|
||||
Make a change to Emissary-ingress and when you want to test it in a live cluster just re-run
|
||||
|
||||
`make deploy`
|
||||
|
||||
This will:
|
||||
|
||||
- recompile the go binary
|
||||
- rebuild containers
|
||||
- push them to the docker registry
|
||||
- rebuild helm charts and manifest
|
||||
- reapply manifest to cluster and re-deploy Emissary-ingress to the cluster
|
||||
|
||||
> *Do I have to run the other make targets `make images` or `make push` ?*
|
||||
> No, you don't have to because `make deploy` will actually run those commands for you. The steps above were meant to introduce you to the various make targets so that you are aware of them and have options when developing.
|
||||
|
||||
### What should I do next?
|
||||
|
||||
Now that you have your dev system up and running, here is some additional content that we recommend you check out:
|
||||
|
||||
- [Emissary-ingress Architecture](ARCHITECTURE.md)
|
||||
- [Contributing Code](#contributing)
|
||||
- [Contributing to Docs](#contributing-to-the-docs)
|
||||
- [Advanced Topics](#advanced-topics)
|
||||
- [Faq](#faq)
|
||||
|
||||
## Contributing
|
||||
|
||||
This section goes over how to contribute code to the project and how to get started contributing. More information on how we manage our branches can be found below in [Development Workflow](#development-workflow).
|
||||
|
||||
Before contributing be sure to read our [Code of Conduct](../Community/CODE_OF_CONDUCT.md) and [Governance](../Community/GOVERNANCE.md) to get an understanding of how our project is structured.
|
||||
|
||||
### Submitting a Pull Request (PR)
|
||||
|
||||
> If you haven't set up your development environment then please see the [Development Setup](#development-setup) section.
|
||||
|
||||
When submitting a Pull Request (PR) here are a set of guidelines to follow:
|
||||
|
||||
1. Search for an [existing issue](https://github.com/emissary-ingress/emissary/issues) or create a [new issue](https://github.com/emissary-ingress/emissary/issues/new/choose).
|
||||
|
||||
2. Be sure to describe your proposed change and any open questions you might have in the issue. This allows us to collect historical context around an issue, provide feedback on the proposed solution and discuss what versions a fix should target.
|
||||
|
||||
3. If you haven't done so already, create a fork of the repository and clone it locally
|
||||
|
||||
```shell
|
||||
git clone <your-fork>
|
||||
```
|
||||
|
||||
4. Cut a new patch branch from `master`:
|
||||
|
||||
```shell
|
||||
git checkout master
|
||||
git checkout -b my-patch-branch master
|
||||
```
|
||||
|
||||
5. Make necessary code changes.
|
||||
|
||||
- Make sure you include test coverage for the change, see [How do I run Tests](#how-do-i-run-emissary-ingress-tests)
|
||||
- Ensure code linting is passing by running `make lint`
|
||||
- Code changes must have associated documentation updates.
|
||||
- Make changes in <https://github.com/datawire/ambassador-docs> as necessary, and include a reference to those changes in the pull request for your code changes.
|
||||
- See [Contributing to Docs](#contributing-to-the-docs) for more details.
|
||||
|
||||
> Smaller pull requests are easier to review and can get merged faster, thus reducing potential for merge conflicts, so it is recommended to keep them small and focused.
|
||||
|
||||
6. Commit your changes using descriptive commit messages.
|
||||
- we **require** that all commits are signed off so please be sure to commit using the `--signoff` flag, e.g. `git commit --signoff`
|
||||
- commit message should summarize the fix and motivation for the proposed fix. Include issue # that the fix looks to address.
|
||||
- we are "ok" with multiple commits but we may ask you to squash some commits during the PR review process
|
||||
|
||||
7. Push your branch to your forked repository:
|
||||
|
||||
> It is good practice to make sure your change is rebased on the latest master to ensure it will merge cleanly so if it has been awhile since you rebased on upstream you should do it now to ensure there are no merge conflicts
|
||||
|
||||
```shell
|
||||
git push origin my-patch-branch
|
||||
```
|
||||
|
||||
8. Submit a Pull Request from your fork targeting upstream `emissary/master`.
|
||||
|
||||
Thanks for your contribution! One of the [Maintainers](../Community/MAINTAINERS.md) will review your PR and discuss any changes that need to be made.
|
||||
|
||||
### Pull Request Review Process
|
||||
|
||||
This is an opportunity for the Maintainers to review the code for accuracy and ensure that it solves the problem outlined in the issue. This is an iterative process and meant to ensure the quality of the code base. During this process we may ask you to break up Pull Request into smaller changes, squash commits, rebase on master, etc...
|
||||
|
||||
Once you have been provided feedback:
|
||||
|
||||
1. Make the required updates to the code per the review discussion
|
||||
2. Retest the code and ensure linting is still passing
|
||||
3. Commit the changes and push to Github
|
||||
- see [Fixup Commits](#fixup-commits-during-pr-review) below
|
||||
4. Repeat these steps as necessary
|
||||
|
||||
Once you have **two approvals** then one of the Maintainers will merge the PR.
|
||||
|
||||
:tada: Thank you for contributing and being a part of the Emissary-ingress Community!
|
||||
|
||||
### Rebasing a branch under review
|
||||
|
||||
Many times the base branch will have new commits added to it which may cause merge conflicts with your open pull request. First, a good rule of thumb is to make pull requests small so that these conflicts are less likely to occur, but this is not always possible when you have multiple people working on similar features. Second, if it is just addressing commit feedback, a `fixup` commit is also a good option so that the reviewers can see what changed since their last review.
|
||||
|
||||
If you need to address merge conflicts then it is preferred that you use **Rebase** on the base branch rather than merging base branch into the feature branch. This ensures that when the PR is merged that it will cleanly replay on top of the base branch ensuring we maintain a clean linear history.
|
||||
|
||||
To do a rebase you can do the following:
|
||||
|
||||
```shell
|
||||
# add emissary.git as a remote repository, only needs to be done once
|
||||
git remote add upstream https://github.com/emissary-ingress/emissary.git
|
||||
|
||||
# fetch upstream master
|
||||
git fetch upstream master
|
||||
|
||||
# checkout local master and update it from upstream master
|
||||
git checkout master
|
||||
git pull --ff-only upstream master
|
||||
|
||||
# rebase patch branch on local master
|
||||
git checkout my-patch-branch
|
||||
git rebase -i master
|
||||
```
|
||||
|
||||
Once the merge conflicts are addressed and you are ready to push the code up you will need to force push your changes because during the rebase process the commit sha's are re-written and it has diverged from what is in your remote fork (Github).
|
||||
|
||||
To force push a branch you can:
|
||||
|
||||
```shell
|
||||
git push --force-with-lease origin my-patch-branch
|
||||
```
|
||||
|
||||
> Note: `--force-with-lease` is recommended over `--force` because it is safer: it will check whether the remote branch had new commits added during your rebase. You can read more detail here: <https://itnext.io/git-force-vs-force-with-lease-9d0e753e8c41>
|
||||
|
||||
### Fixup commits during PR review
|
||||
|
||||
One of the major downsides to rebasing a branch is that it requires force pushing over the remote (Github) which then marks all the existing review history outdated. This makes it hard for a reviewer to figure out whether or not the new changes addressed the feedback.
|
||||
|
||||
One way you can help the reviewer out is by using **fixup** commits. Fixup commits are special git commits that append `fixup!` to the subject of a commit. `Git` provides tools for easily creating these and also squashing them after the PR review process is done.
|
||||
|
||||
Since this is a new commit on top of the other commits, you will not lose your previous review and the new commit can be reviewed independently to determine if the new changes addressed the feedback correctly. Then, once the reviewers are happy, we will ask you to squash them so that when the PR is merged we maintain a clean linear history.
|
||||
|
||||
Here is a quick read on it: <https://jordanelver.co.uk/blog/2020/06/04/fixing-commits-with-git-commit-fixup-and-git-rebase-autosquash/>
|
||||
|
||||
TL;DR;
|
||||
|
||||
```shell
|
||||
# make code change and create new commit
|
||||
git commit --fixup <sha>
|
||||
|
||||
# push to Github for review
|
||||
git push
|
||||
|
||||
# reviewers are happy and ask you to do a final rebase before merging
|
||||
git rebase -i --autosquash master
|
||||
|
||||
# final push before merging
|
||||
git push --force-with-lease
|
||||
```
|
||||
|
||||
## Development Workflow
|
||||
|
||||
This section introduces the development workflow used for this repository. It is recommended that Contributors, Release Engineers, and Maintainers familiarize themselves with this content.
|
||||
|
||||
### Branching Strategy
|
||||
|
||||
This repository follows a trunk based development workflow. Depending on what article you read there are slight nuances to this so this section will outline how this repository interprets that workflow.
|
||||
|
||||
The most important branch is `master`; this is our **Next Release** version and it should always be in a shippable state. This means that CI should be green and at any point we can decide to ship a new release from it. In a traditional trunk based development workflow, developers are encouraged to land partially finished work daily and to keep that work hidden behind feature flags. This repository does **NOT** follow that and instead if code lands on master it is something we are comfortable with shipping.
|
||||
|
||||
We ship release candidate (RC) builds from the `master` branch (current major) and also from `release/v{major.minor}` branches (last major version) during our development cycles. Therefore, it is important that it remains shippable at all times!
|
||||
|
||||
When we do a final release then we will cut a new `release/v{major.minor}` branch. These are long lived release branches which capture a snapshot in time for that release. For example here are some of the current release branches (as of writing this):
|
||||
|
||||
- release/v3.2
|
||||
- release/v3.1
|
||||
- release/v3.0
|
||||
- release/v2.4
|
||||
- release/v2.3
|
||||
- release/v1.14
|
||||
|
||||
These branches contain the codebase as it was at that time when the release was done. These branches have branch protection enabled to ensure that they are not removed or accidentally overwritten. If we needed to do a security fix or bug patch then we may cut a new `.Z` patch release from an existing release branch. For example, the `release/v2.4` branch is currently on `2.4.1`.
|
||||
|
||||
As you can see, we currently support multiple major versions of Emissary-ingress, and you can read more about our [End-of-Life Policy](https://www.getambassador.io/docs/emissary/latest/about/aes-emissary-eol/).
|
||||
|
||||
For more information on our current RC and Release process you can find that in our [Release Wiki](https://github.com/emissary-ingress/emissary/wiki).
|
||||
|
||||
### Backport Strategy
|
||||
|
||||
Since we follow a trunk based development workflow this means that the majority of the time your patch branch will be based off from `master` and that most Pull Requests will target `master`.
|
||||
|
||||
This ensures that we do not miss bug fixes or features for the "Next" shippable release and simplifies the mental-model for deciding how to get started contributing code.
|
||||
|
||||
#### What if I need a patch to land in a previous supported version?
|
||||
|
||||
Let's say I have a bug fix for CRD round trip conversion for AuthService, which is affecting both `v2.y` and `v3.y`.
|
||||
|
||||
First within the issue we should discuss what versions we want to target. This can depend on current cycle work and any upcoming releases we may have.
|
||||
|
||||
The general rules we follow are:
|
||||
|
||||
1. land patch in "next" version which is `master`
|
||||
2. backport patch to any `release/v{major}.{minor}` branches
|
||||
|
||||
So, let's say we discuss it and say that the "next" major version is a long ways away so we want to do a z patch release on our current minor version(`v3.2`) and we also want to do a z patch release on our last supported major version (`v2.4`).
|
||||
|
||||
This means that these patches need to land in three separate branches:
|
||||
|
||||
1. `master` - next release
|
||||
2. `release/v3.2` - patch release
|
||||
3. `release/v2.4` - patch release
|
||||
|
||||
In this scenario, we first ask you to land the patch in the `master` branch and then provide separate PR's with the commits backported onto the `release/v*` branches.
|
||||
|
||||
> Recommendation: using the `git cherry-pick -x` will add the source commit sha to the commit message. This helps with tracing work back to the original commit.
|
||||
|
||||
#### What if my patch is only for a previous supported version?
|
||||
|
||||
Although, this should be an edge case, it does happen where the code has diverged enough that a fix may only be relevant to an existing supported version. In these cases we may need to do a patch release for that older supported version.
|
||||
|
||||
A good example, if we were to find a bug in the Envoy v2 protocol configuration we would only want to target the v2 release.
|
||||
|
||||
In this scenario, the base branch that we would create our feature branch off from would be the latest `minor` version for that release. As of writing this, that would be the `release/v2.4` branch. We would **not** need to target master.
|
||||
|
||||
But, let's say during our fix we notice other things that need to be addressed that would also need to be fixed in `master`. Then you need to submit a **separate Pull Request** that should first land on master and then follow the normal backporting process for the other patches.
|
||||
|
||||
#### What if I'm still not sure?
|
||||
|
||||
This is what the issue discussions and discussions in Slack are for, so that we can help guide you — feel free to ping us in the `#emissary-dev` channel on Slack to discuss directly with us.
|
||||
|
||||
### Merge Strategy
|
||||
|
||||
> The audience for this section is the Maintainers but also beneficial for Contributors so that they are familiar with how the project operates.
|
||||
|
||||
Having a clean linear commit history for a repository makes it easier to understand what is being changed and reduces the mental load for newcomers to the project.
|
||||
|
||||
To maintain a clean linear commit history the following rules should be followed:
|
||||
|
||||
First, always rebase patch branch on to base branch. This means **NO** merge commits from merging base branch into the patch branch. This can be accomplished using git rebase.
|
||||
|
||||
```shell
|
||||
# first, make sure you pull latest upstream changes
|
||||
git fetch upstream
|
||||
git checkout master
|
||||
git pull --ff-only upstream master
|
||||
|
||||
# checkout patch branch and rebase interactive
|
||||
# you may have merge conflicts you need to resolve
|
||||
git checkout my-patch-branch
|
||||
git rebase -i master
|
||||
```
|
||||
|
||||
> Note: this does rewrite your commit shas so be aware when sharing branches with co-workers.
|
||||
|
||||
Once the Pull Request is reviewed and has **two approvals** then a Maintainer can merge. Maintainers should prefer the following merge strategies:
|
||||
|
||||
1. rebase and merge
|
||||
2. squash merge
|
||||
|
||||
When `rebase and merge` is used your commits are played on top of the base branch so that it creates a clean linear history. This will maintain all the commits from the Pull Request. In most cases this should be the **preferred** merge strategy.
|
||||
|
||||
When a Pull Request has lots of fixup commits, or pr feedback fixes then you should ask the Contributor to squash them as part of the PR process.
|
||||
|
||||
If the contributor is unable to squash them then using a `squash merge` in some cases makes sense. **IMPORTANT**, when this does happen it is important that the commit messages are cleaned up and not just blindly accepted the way proposed by Github. Since it is easy to miss that cleanup step, this should be used less frequently compared to `rebase and merge`.
|
||||
|
||||
#### What about merge commit strategy?
|
||||
|
||||
> The audience for this section is the Maintainers but also beneficial for Contributors so that they are familiar with how the project operates.
|
||||
|
||||
When maintaining a linear commit history, each commit tells the story of what was changed in the repository. When using `merge commits` it
|
||||
adds an additional commit to the history that is not necessary because the commit history and PR history already tell the story.
|
||||
|
||||
Now `merge commits` can be useful when you are concerned with not rewriting the commit sha. Based on the current release process which includes using `rel/v` branches that are tagged and merged into `release/v` branches we must use a `merge commit` when merging these branches. This ensures that the commit sha a Git Tag is pointing at still exists once merged into the `release/v` branch.
|
||||
|
||||
## Contributing to the Docs
|
||||
|
||||
The Emissary-ingress community will all benefit from having documentation that is useful and correct. If you have found an issue with the end user documentation, then please help us out by submitting an issue and/or pull request with a fix!
|
||||
|
||||
The end user documentation for Emissary-ingress lives in a different repository and can be found at <https://github.com/datawire/ambassador-docs>.
|
||||
|
||||
See this repository for details on how to contribute to either a `pre-release` or already-released version of Emissary-ingress.
|
||||
|
||||
## Advanced Topics
|
||||
|
||||
This section is for more advanced topics that provide more detailed instructions. Make sure you go through the Development Setup and read the Architecture document before exploring these topics.
|
||||
|
||||
### Running Emissary-ingress internals locally
|
||||
|
||||
The main entrypoint is written in go. It strives to be as compatible as possible
|
||||
with the normal go toolchain. You can run it with:
|
||||
|
||||
```bash
|
||||
go run ./cmd/busyambassador entrypoint
|
||||
```
|
||||
|
||||
Of course just because you can run it this way does not mean it will succeed.
|
||||
The entrypoint needs to launch `diagd` and `envoy` in order to function, and it
|
||||
also expects to be able to write to the `/ambassador` directory.
|
||||
|
||||
#### Setting up diagd
|
||||
|
||||
If you want to hack on diagd, it's easiest to set up a virtualenv with an editable
|
||||
copy and launch your `go run` from within that virtualenv. Note that these
|
||||
instructions depend on the virtualenvwrapper
|
||||
(<https://virtualenvwrapper.readthedocs.io/en/latest/>) package:
|
||||
|
||||
```bash
|
||||
# Create a virtualenv named venv with all the python requirements
|
||||
# installed.
|
||||
python3 -m venv venv
|
||||
. venv/bin/activate
|
||||
# If you're doing this in Datawire's apro.git, then:
|
||||
cd ambassador
|
||||
# Update pip and install dependencies
|
||||
pip install --upgrade pip
|
||||
pip install orjson # see below
|
||||
pip install -r builder/requirements.txt
|
||||
# Create an editable installation of ambassador:
|
||||
pip install -e python/
|
||||
# Check that we do indeed have diagd in our path.
|
||||
which diagd
|
||||
# If you're doing this in Datawire's apro.git, then:
|
||||
cd ..
|
||||
```
|
||||
|
||||
(Note: it shouldn't be necessary to install `orjson` by hand. The fact that it is
|
||||
at the moment is an artifact of the way Ambassador builds currently happen.)
|
||||
|
||||
#### Changing the ambassador root
|
||||
|
||||
You should now be able to launch ambassador if you set the
|
||||
`ambassador_root` environment variable to a writable location:
|
||||
|
||||
ambassador_root=/tmp go run ./cmd/busyambassador entrypoint
|
||||
|
||||
#### Getting envoy
|
||||
|
||||
If you do not have envoy in your path already, the entrypoint will use
|
||||
docker to run it.
|
||||
|
||||
#### Shutting up the pod labels error
|
||||
|
||||
An astute observer of the logs will notice that ambassador complains
|
||||
vociferously that pod labels are not mounted in the ambassador
|
||||
container. To reduce this noise, you can:
|
||||
|
||||
```bash
|
||||
mkdir /tmp/ambassador-pod-info && touch /tmp/ambassador-pod-info/labels
|
||||
```
|
||||
|
||||
#### Extra credit
|
||||
|
||||
When you run ambassador locally it will configure itself exactly as it
|
||||
would in the cluster. That means with two caveats you can actually
|
||||
interact with it and it will function normally:
|
||||
|
||||
1. You need to run `telepresence connect` or equivalent so it can
|
||||
connect to the backend services in its configuration.
|
||||
|
||||
2. You need to supply the host header when you talk to it.
|
||||
|
||||
### Debugging and Developing Envoy Configuration
|
||||
|
||||
Envoy configuration is generated by the ambassador compiler. Debugging
|
||||
the ambassador compiler by running it in kubernetes is very slow since
|
||||
we need to push both the code and any relevant kubernetes resources
|
||||
into the cluster. The following sections will provide tips for improving
|
||||
this development experience.
|
||||
|
||||
### Making changes to Envoy
|
||||
|
||||
Emissary-ingress is built on top of Envoy and leverages a vendored version of Envoy (*we track upstream very closely*). This section will go into how to make changes to the Envoy that is packaged with Emissary-ingress.
|
||||
|
||||
This is a bit more complex than anyone likes, but here goes:
|
||||
|
||||
#### 1. Preparing your machine
|
||||
|
||||
Building and testing Envoy can be very resource intensive. A laptop
|
||||
often can build Envoy... if you plug in an external hard drive, point
|
||||
a fan at it, and leave it running overnight and most of the next day.
|
||||
At Ambassador Labs, we'll often spin up a temporary build machine in GCE, so
|
||||
that we can build it very quickly.
|
||||
|
||||
As of Envoy 1.15.0, we've measured the resource use to build and test
|
||||
it as:
|
||||
|
||||
> | Command | Disk Size | Disk Used | Duration[1] |
|
||||
> |--------------------|-----------|-----------|-------------|
|
||||
> | `make update-base` | 450G | 12GB | ~11m |
|
||||
> | `make check-envoy` | 450G | 424GB | ~45m |
|
||||
>
|
||||
> [1] On a "Machine type: custom (32 vCPUs, 512 GB memory)" VM on GCE,
|
||||
> with the following entry in its `/etc/fstab`:
|
||||
>
|
||||
> ```bash
|
||||
> tmpfs:docker /var/lib/docker tmpfs size=450G 0 0
|
||||
> ```
|
||||
|
||||
If you have the RAM, we've seen huge speed gains from doing the builds
|
||||
and tests on a RAM disk (see the `/etc/fstab` line above).
|
||||
|
||||
#### 2. Setting up your workspace to hack on Envoy
|
||||
|
||||
1. From your `emissary.git` checkout, get Emissary-ingress's current
|
||||
version of the Envoy sources, and create a branch from that:
|
||||
|
||||
```shell
|
||||
make $PWD/_cxx/envoy
|
||||
git -C _cxx/envoy checkout -b YOUR_BRANCHNAME
|
||||
```
|
||||
2. To build Envoy in FIPS mode, set the following variable:
|
||||
|
||||
```shell
|
||||
export FIPS_MODE=true
|
||||
```
|
||||
|
||||
It is important to note that while building Envoy in FIPS mode is
|
||||
required for FIPS compliance, additional steps may be necessary.
|
||||
Emissary does not claim to be FIPS compliant or certified.
|
||||
See [here](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ssl#fips-140-2) for more information on FIPS and Envoy.
|
||||
|
||||
> _NOTE:_ FIPS_MODE is NOT supported by the emissary-ingress maintainers but we provide this for developers as a convenience
|
||||
|
||||
#### 3. Hacking on Envoy
|
||||
|
||||
Modify the sources in `./_cxx/envoy/`. or update the branch and/or `ENVOY_COMMIT` as necessary in `./_cxx/envoy.mk`
|
||||
|
||||
#### 4. Building and testing your hacked-up Envoy
|
||||
|
||||
> See `./_cxx/envoy.mk` for the full list of targets.
|
||||
|
||||
Multiple Phony targets are provided so that developers can run the steps they are interested in when developing; here are a few of the key ones:
|
||||
|
||||
- `make update-base`: will perform all the steps necessary to verify, build envoy, build docker images, push images to the container repository and compile the updated protos.
|
||||
|
||||
- `make build-envoy`: will build the envoy binaries using the same build container as the upstream Envoy project. Build outputs are mounted to the `_cxx/envoy-docker-build` directory and Bazel will write the results there.
|
||||
|
||||
- `make build-base-envoy-image`: will use the release outputs from building envoy to generate a new `base-envoy` container which is then used in the main emissary-ingress container build.
|
||||
|
||||
- `make push-base-envoy`: will push the built container to the remote container repository.
|
||||
|
||||
- `make check-envoy`: will use the build docker container to run the Envoy test suite against the currently checked out envoy in the `_cxx/envoy` folder.
|
||||
|
||||
- `make envoy-shell`: will run the envoy build container and open a bash shell session. The `_cxx/envoy` folder is volume mounted into the container and the user is set to the `envoybuild` user in the container to ensure you are not running as root to ensure hermetic builds.
|
||||
|
||||
#### 5. Test Devloop
|
||||
|
||||
Running the Envoy test suite will compile all the test targets. This is a slow process and can use lots of disk space.
|
||||
|
||||
The Envoy Inner Devloop for build and testing:
|
||||
|
||||
- You can make a change to Envoy code and run the whole test by just calling `make check-envoy`
|
||||
- You can run a specific test instead of the whole test suite by setting the `ENVOY_TEST_LABEL` environment variable.
|
||||
- For example, to run just the unit tests in `test/common/network/listener_impl_test.cc`, you should run:
|
||||
|
||||
```shell
|
||||
ENVOY_TEST_LABEL='//test/common/network:listener_impl_test' make check-envoy
|
||||
```
|
||||
|
||||
- Alternatively, you can run `make envoy-shell` to get a bash shell into the Docker container that does the Envoy builds and you are free to interact with `Bazel` directly.
|
||||
|
||||
Interpreting the test results:
|
||||
|
||||
- If you see the following message, don't worry, it's harmless; the tests still ran:
|
||||
|
||||
```text
|
||||
There were tests whose specified size is too big. Use the --test_verbose_timeout_warnings command line option to see which ones these are.
|
||||
```
|
||||
|
||||
The message means that the test passed, but it passed too
|
||||
quickly, and Bazel is suggesting that you declare it as smaller.
|
||||
Something along the lines of "This test only took 2s, but you
|
||||
declared it as being in the 60s-300s ('moderate') bucket,
|
||||
consider declaring it as being in the 0s-60s ('short')
|
||||
bucket".
|
||||
|
||||
    Don't be confused (as I was) into thinking that it was saying
|
||||
that the test was too big and was skipped and that you need to
|
||||
throw more hardware at it.
|
||||
|
||||
- **Build or test Emissary-ingress** with the usual `make` commands, with
|
||||
the exception that you MUST run `make update-base` first whenever
|
||||
Envoy needs to be recompiled; it won't happen automatically. So
|
||||
`make test` to build-and-test Emissary-ingress would become
|
||||
`make update-base && make test`, and `make images` to just build
|
||||
Emissary-ingress would become `make update-base && make images`.
|
||||
|
||||
The Envoy changes with Emissary-ingress:
|
||||
|
||||
- Either run `make update-base` to build, and push a new base container and then you can run `make test` for the Emissary-ingress test suite.
|
||||
- If you do not want to push the container you can instead:
|
||||
- Build Envoy - `make build-envoy`
|
||||
- Build container - `make build-base-envoy-image`
|
||||
- Test Emissary - `make test`
|
||||
|
||||
#### 6. Protobuf changes
|
||||
|
||||
If you made any changes to the Protocol Buffer files or if you bumped versions of Envoy then you
|
||||
should make sure that you are re-compiling the Protobufs so that they are available and checked-in
|
||||
to the emissary.git repository.
|
||||
|
||||
```sh
|
||||
make compile-envoy-protos
|
||||
```
|
||||
|
||||
This will copy over the raw proto files, compile and copy the generated Go code over to the emissary-ingress repository.
|
||||
|
||||
#### 7. Finalizing your changes
|
||||
|
||||
> NOTE: we are no longer accepting PR's in `datawire/envoy.git`.
|
||||
|
||||
If you have custom changes then land them in your custom envoy repository and update the `ENVOY_COMMIT` and `ENVOY_DOCKER_REPO` variable in `_cxx/envoy.mk` so that the image will be pushed to the correct repository.
|
||||
|
||||
Then run `make update-base`, which does all the necessary steps; assuming that was successful, you are all good.
|
||||
|
||||
**For maintainers:**
|
||||
|
||||
You will want to make sure that the image is pushed to the backup container registries:
|
||||
|
||||
```shell
|
||||
# upload image to the mirror in GCR
|
||||
SHA=GET_THIS_FROM_THE_make_update-base_OUTPUT
|
||||
TAG="envoy-0.$SHA.opt"
|
||||
docker pull "docker.io/emissaryingress/base-envoy:$TAG"
|
||||
docker tag "docker.io/emissaryingress/base-envoy:$TAG" "gcr.io/datawire/ambassador-base:$TAG"
|
||||
docker push "gcr.io/datawire/ambassador-base:$TAG"
|
||||
```
|
||||
|
||||
#### 8. Final Checklist
|
||||
|
||||
**For Maintainers Only**
|
||||
|
||||
Here is a checklist of things to do when bumping the `base-envoy` version:
|
||||
|
||||
- [ ] The image has been pushed to...
|
||||
- [ ] `docker.io/emissaryingress/base-envoy`
|
||||
- [ ] `gcr.io/datawire/ambassador-base`
|
||||
- [ ] The `datawire/envoy.git` commit has been tagged as `datawire-$(git describe --tags --match='v*')`
|
||||
(the `--match` is to prevent `datawire-*` tags from stacking on each other).
|
||||
- [ ] It's been tested with...
|
||||
- [ ] `make check-envoy`
|
||||
|
||||
The `check-envoy-version` CI job will double check all these things, with the exception of running
|
||||
the Envoy tests. If the `check-envoy-version` is failing then double check the above, fix them and
|
||||
re-run the job.
|
||||
|
||||
### Developing Emissary-ingress (Maintainers-only advice)
|
||||
|
||||
At the moment, these techniques will only work internally to Maintainers. Mostly
|
||||
this is because they require credentials to access internal resources at the
|
||||
moment, though in several cases we're working to fix that.
|
||||
|
||||
#### Updating license documentation
|
||||
|
||||
When new dependencies are added or existing ones are updated, run
|
||||
`make generate` and commit changes to `DEPENDENCIES.md` and
|
||||
`DEPENDENCY_LICENSES.md`
|
||||
|
||||
#### Upgrading Python dependencies
|
||||
|
||||
Delete `python/requirements.txt`, then run `make generate`.
|
||||
|
||||
If there are some dependencies you don't want to upgrade, but want to
|
||||
upgrade everything else, then
|
||||
|
||||
1. Remove from `python/requirements.txt` all of the entries except
|
||||
for those you want to pin.
|
||||
2. Delete `python/requirements.in` (if it exists).
|
||||
3. Run `make generate`.
|
||||
|
||||
> **Note**: If you are updating orjson you will need to also update `docker/base-python/Dockerfile` before running `make generate` for the new version. orjson uses rust bindings and the default wheels on PyPI rely on glibc. Because our base python image is Alpine based, it is built from scratch using rustc to build a musl-compatible version.
|
||||
|
||||
> :warning: You may run into an error when running `make generate` where it can't detect the licenses for new or upgraded dependencies, which is needed so that we can properly generate DEPENDENCIES.md and DEPENDENCY_LICENSES.md. If that is the case, you may also have to update `build-aux/tools/src/py-mkopensource/main.go:parseLicenses` for any license changes then run `make generate` again.
|
||||
|
||||
## FAQ
|
||||
|
||||
This section contains a set of Frequently Asked Questions that may answer a question you have. Also, feel free to ping us in Slack.
|
||||
|
||||
### How do I find out what build targets are available?
|
||||
|
||||
Use `make help` and `make targets` to see what build targets are
|
||||
available along with documentation for what each target does.
|
||||
|
||||
### How do I develop on a Mac with Apple Silicon?
|
||||
|
||||
To ensure that developers using a Mac with Apple Silicon can contribute, the build system ensures
|
||||
the build artifacts are `linux/amd64` rather than the host architecture. This behavior can be overridden
|
||||
using the `BUILD_ARCH` environment variable (e.g. `BUILD_ARCH=linux/arm64 make images`).
|
||||
|
||||
### How do I develop on Windows using WSL?
|
||||
|
||||
- [WSL 2](https://learn.microsoft.com/en-us/windows/wsl/)
|
||||
- [Docker Desktop for Windows](https://docs.docker.com/desktop/windows/wsl/)
|
||||
- [VS Code](https://code.visualstudio.com/)
|
||||
|
||||
### How do I test using a private Docker repository?
|
||||
|
||||
If you are pushing your development images to a private Docker repo,
|
||||
then:
|
||||
|
||||
```sh
|
||||
export DEV_USE_IMAGEPULLSECRET=true
|
||||
export DOCKER_BUILD_USERNAME=...
|
||||
export DOCKER_BUILD_PASSWORD=...
|
||||
```
|
||||
|
||||
and the test machinery should create an `imagePullSecret` from those Docker credentials such that it can pull the images.
|
||||
|
||||
### How do I change the loglevel at runtime?
|
||||
|
||||
```console
|
||||
curl localhost:8877/ambassador/v0/diag/?loglevel=debug
|
||||
```
|
||||
|
||||
Note: This affects diagd and Envoy, but NOT the AES `amb-sidecar`.
|
||||
See the AES `CONTRIBUTING.md` for how to do that.
|
||||
|
||||
### Can I build from a docker container instead of on my local computer?
|
||||
|
||||
If you want to build within a container instead of setting up dependencies on your local machine then you can run the build within a docker container and leverage "Docker in Docker" to build it.
|
||||
|
||||
1. `docker pull docker:latest`
|
||||
2. `docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -it docker:latest sh`
|
||||
3. `apk add --update --no-cache bash build-base go curl rsync python3 python2 git libarchive-tools gawk jq`
|
||||
4. `git clone https://github.com/emissary-ingress/emissary.git && cd emissary`
|
||||
5. `make images`
|
||||
|
||||
Steps 1 and 2 are run on your machine, and 3 - 5 are from within the docker container. The base image is a "Docker in Docker" image, run with `-v /var/run/docker.sock:/var/run/docker.sock` in order to connect to your local daemon from the docker inside the container. More info on Docker in Docker [here](https://hub.docker.com/_/docker).
|
||||
|
||||
The images will be created and tagged as defined above, and will be available in docker on your local machine.
|
||||
|
||||
### How do I clear everything out to make sure my build runs like it will in CI?
|
||||
|
||||
Use `make clobber` to completely remove all derived objects, all cached artifacts, everything, and get back to a clean slate. This is recommended if you change branches within a clone, or if you need to `make generate` when you're not *certain* that your last `make generate` was using the same Envoy version.
|
||||
|
||||
Use `make clean` to remove derived objects, but *not* clear the caches.
|
||||
|
||||
### My editor is changing `go.mod` or `go.sum`, should I commit that?
|
||||
|
||||
If you notice this happening, run `make go-mod-tidy`, and commit that.
|
||||
|
||||
(If you're in Ambassador Labs, you should do this from `apro/`, not
|
||||
`apro/ambassador/`, so that apro.git's files are included too.)
|
||||
|
||||
### How do I debug "This should not happen in CI" errors?
|
||||
|
||||
These checks indicate that some output file changed in the middle of a
|
||||
run, when it should only change if a source file has changed. Since
|
||||
CI isn't editing the source files, this shouldn't happen in CI!
|
||||
|
||||
This is problematic because it means that running the build multiple
|
||||
times can give different results, and that the tests are probably not
|
||||
testing the same image that would be released.
|
||||
|
||||
These checks will show you a patch showing how the output file
|
||||
changed; it is up to you to figure out what is happening in the
|
||||
build/test system that would cause that change in the middle of a run.
|
||||
For the most part, this is pretty simple... except when the output
|
||||
file is a Docker image; you just see that one image hash is different
|
||||
than another image hash.
|
||||
|
||||
Fortunately, the failure showing the changed image hash is usually
|
||||
immediately preceded by a `docker build`. Earlier in the CI output,
|
||||
you should find an identical `docker build` command from the first time it
|
||||
ran. In the second `docker build`'s output, each step should say
|
||||
`---> Using cache`; the first few steps will say this, but at some
|
||||
point later steps will stop saying this; find the first step that is
|
||||
missing the `---> Using cache` line, and try to figure out what could
|
||||
have changed between the two runs that would cause it to not use the
|
||||
cache.
|
||||
|
||||
If that step is an `ADD` command that is adding a directory, the
|
||||
problem is probably that you need to add something to `.dockerignore`.
|
||||
To help figure out what you need to add, try adding a `RUN find
|
||||
DIRECTORY -exec ls -ld -- {} +` step after the `ADD` step, so that you
|
||||
can see what it added, and see what is different on that between the
|
||||
first and second `docker build` commands.
|
||||
|
||||
### How do I run Emissary-ingress tests?
|
||||
|
||||
- `export DEV_REGISTRY=<your-dev-docker-registry>` (you need to be logged in and have permission to push)
|
||||
- `export DEV_KUBECONFIG=<your-dev-kubeconfig>`
|
||||
|
||||
If you want to run the Go tests for `cmd/entrypoint`, you'll need `diagd`
|
||||
in your `PATH`. See the instructions below about `Setting up diagd` to do
|
||||
that.
|
||||
|
||||
| Group | Command |
|
||||
| --------------- | ---------------------------------------------------------------------- |
|
||||
| All Tests | `make test` |
|
||||
| All Golang | `make gotest` |
|
||||
| All Python | `make pytest` |
|
||||
| Some/One Golang | `make gotest GOTEST_PKGS=./cmd/entrypoint GOTEST_ARGS="-run TestName"` |
|
||||
| Some/One Python | `make pytest PYTEST_ARGS="-k TestName"` |
|
||||
|
||||
Please note the python tests use a local cache to speed up test
|
||||
results. If you make a code update that changes the generated envoy
|
||||
configuration, those tests will fail and you will need to update the
|
||||
python test cache.
|
||||
|
||||
Note that it is invalid to run one of the `main[Plain.*]` Python tests
|
||||
without running all of the other `main[Plain*]` tests; the test will
|
||||
fail to run (not even showing up as a failure or xfail--it will fail
|
||||
to run at all). For example, `PYTEST_ARGS="-k WebSocket"` would match
|
||||
the `main[Plain.WebSocketMapping-GRPC]` test, and that test would fail
|
||||
to run; one should instead say `PYTEST_ARGS="-k Plain or WebSocket"`
|
||||
to avoid breaking the sub-tests of "Plain".
|
||||
|
||||
### How do I type check my python code?
|
||||
|
||||
Ambassador uses Python 3 type hinting and the `mypy` static type checker to
|
||||
help find bugs before runtime. If you haven't worked with hinting before, a
|
||||
good place to start is
|
||||
[the `mypy` cheat sheet](https://mypy.readthedocs.io/en/latest/cheat_sheet_py3.html).
|
||||
|
||||
New code must be hinted, and the build process will verify that the type
|
||||
check passes when you `make test`. Fair warning: this means that
|
||||
PRs will not pass CI if the type checker fails.
|
||||
|
||||
We strongly recommend using an editor that can do realtime type checking
|
||||
(at Datawire we tend to use PyCharm and VSCode a lot, but many many editors
|
||||
can do this now) and also running the type checker by hand before submitting
|
||||
anything:
|
||||
|
||||
- `make lint/mypy` will check all the Ambassador code
|
||||
|
||||
Ambassador code should produce *no* warnings and *no* errors.
|
||||
|
||||
If you're concerned that the mypy cache is somehow wrong, delete the
|
||||
`.mypy_cache/` directory to clear the cache.
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
name: 'Collect Logs'
|
||||
name: "Collect Logs"
|
||||
description: >-
|
||||
Store any log files as artifacts.
|
||||
inputs:
|
||||
|
@ -47,8 +47,9 @@ runs:
|
|||
tools/bin/kubectl cp xfpredirect:/tmp/ambassador/snapshots /tmp/test-logs/cluster/xfpredirect.snapshots || true
|
||||
fi
|
||||
cp /tmp/*.yaml /tmp/test-logs || true
|
||||
cp /tmp/kat-client-*.log /tmp/test-logs || true
|
||||
- name: "Upload Logs"
|
||||
uses: actions/upload-artifact@v2
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: logs-${{ inputs.jobname }}
|
||||
path: /tmp/test-logs
|
||||
|
|
|
@ -9,21 +9,15 @@ runs:
|
|||
run: |
|
||||
sudo apt-get update -y
|
||||
sudo apt-get install -y libarchive-tools
|
||||
- name: "Install Python requirements with pip"
|
||||
uses: BSFishy/pip-action@v1
|
||||
with:
|
||||
packages: |
|
||||
awscli
|
||||
packaging
|
||||
# Go: Do this first because `Makefile` checks that the `go` version is correct.
|
||||
- name: "Get Go version from builder container"
|
||||
id: step-detect-go
|
||||
shell: bash
|
||||
run: |
|
||||
make "$PWD/build-aux/go-version.txt"
|
||||
echo "::set-output name=go_version::$(cat "$PWD/build-aux/go-version.txt")"
|
||||
echo "go_version=$(cat "$PWD/build-aux/go-version.txt")" >> $GITHUB_OUTPUT
|
||||
- name: "Install Go (${{ steps.step-detect-go.outputs.go_version }})"
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "${{ steps.step-detect-go.outputs.go_version }}"
|
||||
# Python
|
||||
|
@ -32,8 +26,12 @@ runs:
|
|||
shell: bash
|
||||
run: |
|
||||
make "$PWD/build-aux/py-version.txt"
|
||||
echo "::set-output name=py_version::$(cat "$PWD/build-aux/py-version.txt")"
|
||||
echo "py_version=$(cat "$PWD/build-aux/py-version.txt")" >> $GITHUB_OUTPUT
|
||||
- name: "Install Py (${{ steps.step-detect-py.outputs.py_version }})"
|
||||
uses: actions/setup-python@v2
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "${{ steps.step-detect-py.outputs.py_version }}"
|
||||
- name: "Install Python requirements with pip"
|
||||
shell: bash
|
||||
run: python -m pip install awscli packaging
|
||||
|
||||
|
|
|
@ -0,0 +1,110 @@
|
|||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
ignore:
|
||||
- dependency-name: "sigs.k8s.io/gateway-api"
|
||||
- dependency-name: "go.opentelemetry.io/proto/otlp"
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/yq"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/chart-doc-gen"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/crane"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/ct"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/golangci-lint"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/ocibuild"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: pip
|
||||
directory: "/docker/test-auth"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: pip
|
||||
directory: "/docker/test-shadow"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: pip
|
||||
directory: "/docker/test-stats"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: pip
|
||||
directory: "/python"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
ignore:
|
||||
- dependency-name: pytest
|
||||
- dependency-name: urllib3
|
||||
versions:
|
||||
- "<2.0"
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/base-python"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/test-auth"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/test-http"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/test-shadow"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/test-stats"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
open-pull-requests-limit: 10
|
|
@ -1,96 +0,0 @@
|
|||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/yq"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/chart-doc-gen"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/crane"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/ct"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/golangci-lint"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/tools/src/ocibuild"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: pip
|
||||
directory: "/docker/test-auth"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: pip
|
||||
directory: "/docker/test-shadow"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: pip
|
||||
directory: "/docker/test-stats"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: pip
|
||||
directory: "/python"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/base-python"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/test-auth"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/test-http"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/test-shadow"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
- package-ecosystem: docker
|
||||
directory: "/docker/test-stats"
|
||||
schedule:
|
||||
interval: daily
|
||||
open-pull-requests-limit: 10
|
|
@ -46,6 +46,6 @@ A few sentences describing what testing you've done, e.g., manual tests, automat
|
|||
- We should lean on the bulk of code being covered by unit tests, but...
|
||||
- ... an end-to-end test should cover the integration points
|
||||
|
||||
- [ ] **I updated `DEVELOPING.md` with any any special dev tricks I had to use to work on this code efficiently.**
|
||||
- [ ] **I updated `CONTRIBUTING.md` with any special dev tricks I had to use to work on this code efficiently.**
|
||||
|
||||
- [ ] **The changes in this PR have been reviewed for security concerns and adherence to security best practices.**
|
||||
|
|
|
@ -22,7 +22,7 @@ name: Check branch version
|
|||
|
||||
jobs:
|
||||
check-branch-version:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
|
@ -34,7 +34,7 @@ jobs:
|
|||
run: |
|
||||
# Set tag_majorminor
|
||||
tag_majorminor=$(git describe --tags --match='v*'|sed 's/^v//'|cut -d. -f1,2)
|
||||
echo "::set-output name=tag_majorminor::${tag_majorminor}"
|
||||
echo "tag_majorminor=${tag_majorminor}" >> $GITHUB_OUTPUT
|
||||
|
||||
# Set branch_majorminor
|
||||
case "$GITHUB_REF" in
|
||||
|
@ -51,13 +51,13 @@ jobs:
|
|||
else
|
||||
branch_majorminor=${branch#release/v}
|
||||
fi
|
||||
echo "::set-output name=branch_majorminor::${branch_majorminor}"
|
||||
echo "branch_majorminor=${branch_majorminor}" >> $GITHUB_OUTPUT
|
||||
|
||||
# Set relnotes_majorminor
|
||||
make tools/bin/yq
|
||||
relnotes_version=$(tools/bin/yq read docs/releaseNotes.yml items[0].version)
|
||||
relnotes_majorminor=$(cut -d. -f1,2 <<<"$relnotes_version")
|
||||
echo "::set-output name=relnotes_majorminor::${relnotes_majorminor}"
|
||||
echo "relnotes_majorminor=${relnotes_majorminor}" >> $GITHUB_OUTPUT
|
||||
|
||||
declare -p tag_majorminor branch_majorminor relnotes_majorminor
|
||||
- name: Check version numbers
|
||||
|
|
|
@ -6,10 +6,11 @@ name: job-promote-to-passed
|
|||
- master
|
||||
- release/v*
|
||||
pull_request: {}
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
lint: ########################################################################
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
|
@ -26,10 +27,12 @@ jobs:
|
|||
run: |
|
||||
make lint
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: lint
|
||||
if: always()
|
||||
|
||||
generate: ####################################################################
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
|
@ -46,7 +49,7 @@ jobs:
|
|||
install -m600 /dev/stdin ~/.ssh/id_rsa <<<'${{ secrets.GHA_SSH_KEY }}'
|
||||
fi
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.RELEASE_REGISTRY, 'docker.io/')) && secrets.RELEASE_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }}
|
||||
|
@ -55,6 +58,16 @@ jobs:
|
|||
shell: bash
|
||||
run: |
|
||||
make generate
|
||||
- name: "Update dependency information after dependabot change"
|
||||
uses: datawire/go-mkopensource/actions/save-dependabot-changes@v0.0.7
|
||||
id: changed-by-dependabot
|
||||
with:
|
||||
branches_to_skip: master
|
||||
- name: "Abort if dependencies changed"
|
||||
if: steps.changed-by-dependabot.outputs.is_dirty == 'true'
|
||||
run: |
|
||||
echo "Dependabot triggered a dependency update. Aborting workflow."
|
||||
exit 1
|
||||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate'"
|
||||
- name: "'make generate' (again!)"
|
||||
|
@ -64,10 +77,46 @@ jobs:
|
|||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate' (again!)"
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: generate
|
||||
if: always()
|
||||
|
||||
check-envoy-protos: ####################################################################
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Git Login"
|
||||
run: |
|
||||
if [[ -n '${{ secrets.GHA_SSH_KEY }}' ]]; then
|
||||
install -m700 -d ~/.ssh
|
||||
install -m600 /dev/stdin ~/.ssh/id_rsa <<<'${{ secrets.GHA_SSH_KEY }}'
|
||||
fi
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.RELEASE_REGISTRY, 'docker.io/')) && secrets.RELEASE_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_RELEASE_TOKEN }}
|
||||
- name: "'make compile-envoy-protos'"
|
||||
shell: bash
|
||||
run: |
|
||||
make compile-envoy-protos
|
||||
- name: "Check Git not dirty from 'make compile-envoy-protos'"
|
||||
uses: ./.github/actions/git-dirty-check
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: check-envoy-protos
|
||||
if: always()
|
||||
|
||||
check-envoy-version: #########################################################
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
|
@ -85,18 +134,51 @@ jobs:
|
|||
fi
|
||||
- name: "Docker Login"
|
||||
# This is important if ENVOY_DOCKER_REPO is a private repo.
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
- run: make check-envoy-version
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: check-envoy-version
|
||||
if: always()
|
||||
|
||||
# Tests ######################################################################
|
||||
apiext-e2e:
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
APIEXT_E2E: ""
|
||||
APIEXT_BUILD_ARCH: linux/amd64
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
with:
|
||||
driver-opts: |
|
||||
network=host
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: Install k3d
|
||||
shell: bash
|
||||
run: |
|
||||
curl -s https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=v5.6.0 bash
|
||||
k3d --version
|
||||
- name: go mod vendor
|
||||
shell: bash
|
||||
run: |
|
||||
make vendor
|
||||
- name: run apiext-e2e tests
|
||||
shell: bash
|
||||
run: |
|
||||
go test -p 1 -parallel 1 -v -tags=apiext ./test/apiext/... -timeout 15m
|
||||
check-gotest:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
|
@ -107,7 +189,7 @@ jobs:
|
|||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
|
@ -115,13 +197,13 @@ jobs:
|
|||
- name: make gotest
|
||||
shell: bash
|
||||
run: |
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
make gotest
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: check-gotest
|
||||
if: always()
|
||||
check-pytest:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
|
@ -133,17 +215,12 @@ jobs:
|
|||
fail-fast: false
|
||||
matrix:
|
||||
test:
|
||||
- integration
|
||||
- kat-envoy3-1-of-5
|
||||
- kat-envoy3-2-of-5
|
||||
- kat-envoy3-3-of-5
|
||||
- kat-envoy3-4-of-5
|
||||
- kat-envoy3-5-of-5
|
||||
# FIXME(lukeshu): KAT_RUN_MODE=local is disabled because it
|
||||
# needs fixed for a world where annotations are already
|
||||
# unfolded in the snapshot.
|
||||
#
|
||||
#- kat-local
|
||||
- integration-tests
|
||||
- kat-envoy3-tests-1-of-5
|
||||
- kat-envoy3-tests-2-of-5
|
||||
- kat-envoy3-tests-3-of-5
|
||||
- kat-envoy3-tests-4-of-5
|
||||
- kat-envoy3-tests-5-of-5
|
||||
name: pytest-${{ matrix.test }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
|
@ -152,19 +229,26 @@ jobs:
|
|||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
- name: make pytest-${{ matrix.test }}
|
||||
- name: Create integration test cluster
|
||||
run: |
|
||||
export USE_LOCAL_K3S_CLUSTER=1
|
||||
sudo sysctl -w fs.file-max=1600000
|
||||
sudo sysctl -w fs.inotify.max_user_instances=4096
|
||||
|
||||
make ci/setup-k3d K3D_CLUSTER_NAME=amb-ci
|
||||
|
||||
make ci/setup-k3d
|
||||
- name: Setup integration test environment
|
||||
run: |
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
export DEV_REGISTRY=${{ secrets.DEV_REGISTRY }}
|
||||
make python-integration-test-environment
|
||||
- name: Run ${{ matrix.test }}
|
||||
run: |
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
|
@ -175,17 +259,11 @@ jobs:
|
|||
with:
|
||||
jobname: check-pytest-${{ matrix.test }}
|
||||
check-pytest-unit:
|
||||
# pytest-unit is separate from pytests (above) because we know for certain that no cluster is needed.
|
||||
# XXX This is pretty much a crock.
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
strategy:
|
||||
matrix:
|
||||
test:
|
||||
- unit
|
||||
name: pytest-${{ matrix.test }}
|
||||
name: pytest-unit
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
|
@ -193,28 +271,25 @@ jobs:
|
|||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
- name: make pytest-${{ matrix.test }}
|
||||
- name: Create Python virtual environment
|
||||
run: |
|
||||
sudo sysctl -w fs.file-max=1600000
|
||||
sudo sysctl -w fs.inotify.max_user_instances=4096
|
||||
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
export DEV_REGISTRY=${{ secrets.DEV_REGISTRY }}
|
||||
make python-virtual-environment
|
||||
- name: Run Python unit tests
|
||||
run: |
|
||||
export PYTEST_ARGS=' --cov-branch --cov=ambassador --cov-report html:/tmp/cov_html '
|
||||
make pytest-${{ matrix.test }}
|
||||
make pytest-unit-tests
|
||||
- uses: ./.github/actions/after-job
|
||||
if: always()
|
||||
with:
|
||||
jobname: check-pytest-${{ matrix.test }}
|
||||
jobname: check-pytest-unit
|
||||
if: always()
|
||||
check-chart:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
DEV_REGISTRY: ${{ secrets.DEV_REGISTRY }}
|
||||
# See docker/base-python.docker.gen
|
||||
|
@ -224,28 +299,33 @@ jobs:
|
|||
DOCKER_BUILD_USERNAME: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
DOCKER_BUILD_PASSWORD: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
steps:
|
||||
- uses: docker/login-action@v1
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: make test-chart
|
||||
- name: Warn about skip
|
||||
run: |
|
||||
make ci/setup-k3d K3D_CLUSTER_NAME=amb-ci
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
echo "SKIPPING CHART TEST; check the charts manually"
|
||||
# - uses: docker/login-action@v2
|
||||
# with:
|
||||
# registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
# username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
# password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
# - uses: actions/checkout@v3
|
||||
# with:
|
||||
# fetch-depth: 0
|
||||
# ref: ${{ github.event.pull_request.head.sha }}
|
||||
# - name: Install Deps
|
||||
# uses: ./.github/actions/setup-deps
|
||||
# - name: make test-chart
|
||||
# run: |
|
||||
# make ci/setup-k3d
|
||||
# export DEV_KUBECONFIG=~/.kube/config
|
||||
|
||||
make test-chart
|
||||
- uses: ./.github/actions/after-job
|
||||
if: always()
|
||||
# make test-chart
|
||||
# - uses: ./.github/actions/after-job
|
||||
# with:
|
||||
# jobname: check-chart
|
||||
# if: always()
|
||||
|
||||
build: #######################################################################
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
DEV_REGISTRY: ${{ secrets.DEV_REGISTRY }}
|
||||
# See docker/base-python.docker.gen
|
||||
|
@ -260,7 +340,7 @@ jobs:
|
|||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
|
@ -273,18 +353,20 @@ jobs:
|
|||
id: build-image
|
||||
shell: bash
|
||||
run: |
|
||||
echo "::set-output name=image-tag::$(tools/build/version.sh)"
|
||||
echo "image-tag=$(build-aux/version.sh)" >> $GITHUB_OUTPUT
|
||||
- name: "make push-dev"
|
||||
shell: bash
|
||||
run: |
|
||||
make push-dev
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: build
|
||||
if: always()
|
||||
|
||||
######################################################################
|
||||
######################### CVE Scanning ###############################
|
||||
trivy-container-scan:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
needs: [build]
|
||||
steps:
|
||||
# upload of results to github uses git so checkout of code is needed
|
||||
|
@ -314,16 +396,18 @@ jobs:
|
|||
pass:
|
||||
name: "job-promote-to-passed" # This is the job name that the branch protection looks for
|
||||
needs:
|
||||
- apiext-e2e
|
||||
- lint
|
||||
- build
|
||||
- generate
|
||||
- check-envoy-protos
|
||||
- check-envoy-version
|
||||
- check-gotest
|
||||
- check-pytest
|
||||
- check-pytest-unit
|
||||
- check-chart
|
||||
- trivy-container-scan
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- name: No-Op
|
||||
if: ${{ false }}
|
||||
|
|
|
@ -1,44 +1,46 @@
|
|||
name: generate-base-python
|
||||
on:
|
||||
schedule:
|
||||
# run 15 minutes after midnight (UTC) weekly on sundays
|
||||
# run at noon on sundays to prepare for monday
|
||||
# used https://crontab.guru/ to generate
|
||||
- cron: '15 0 * * SUN'
|
||||
- cron: "0 12 * * SUN"
|
||||
jobs:
|
||||
generate: ####################################################################
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Git Login"
|
||||
run: |
|
||||
if [[ -n '${{ secrets.GHA_SSH_KEY }}' ]]; then
|
||||
install -m700 -d ~/.ssh
|
||||
install -m600 /dev/stdin ~/.ssh/id_rsa <<<'${{ secrets.GHA_SSH_KEY }}'
|
||||
fi
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.RELEASE_REGISTRY, 'docker.io/')) && secrets.RELEASE_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_RELEASE_TOKEN }}
|
||||
- name: "'make generate'"
|
||||
shell: bash
|
||||
run: |
|
||||
make generate
|
||||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate'"
|
||||
- name: "'make generate' (again!)"
|
||||
shell: bash
|
||||
run: |
|
||||
make generate
|
||||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate' (again!)"
|
||||
- uses: ./.github/actions/after-job
|
||||
if: always()
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Git Login"
|
||||
run: |
|
||||
if [[ -n '${{ secrets.GHA_SSH_KEY }}' ]]; then
|
||||
install -m700 -d ~/.ssh
|
||||
install -m600 /dev/stdin ~/.ssh/id_rsa <<<'${{ secrets.GHA_SSH_KEY }}'
|
||||
fi
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.RELEASE_REGISTRY, 'docker.io/')) && secrets.RELEASE_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_RELEASE_TOKEN }}
|
||||
- name: "'make generate'"
|
||||
shell: bash
|
||||
run: |
|
||||
make generate
|
||||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate'"
|
||||
- name: "'make generate' (again!)"
|
||||
shell: bash
|
||||
run: |
|
||||
make generate
|
||||
- uses: ./.github/actions/git-dirty-check
|
||||
name: "Check Git not dirty from 'make generate' (again!)"
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: generate-base-python
|
||||
if: always()
|
||||
|
|
|
@ -0,0 +1,74 @@
|
|||
name: k8s-e2e
|
||||
|
||||
"on":
|
||||
pull_request: {}
|
||||
schedule:
|
||||
- cron: "0 7 * * *" # at 7am UTC everyday
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
acceptance_tests:
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
# See docker/base-python.docker.gen
|
||||
BASE_PYTHON_REPO: ${{ secrets.BASE_PYTHON_REPO }}
|
||||
# See pkg/kubeapply/resource_kubeapply.go
|
||||
DEV_USE_IMAGEPULLSECRET: ${{ secrets.DEV_USE_IMAGEPULLSECRET }}
|
||||
DOCKER_BUILD_USERNAME: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
DOCKER_BUILD_PASSWORD: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
k8s:
|
||||
[
|
||||
{ k3s: 1.26.9+k3s1, kubectl: 1.26.9 },
|
||||
{ k3s: 1.27.6+k3s1, kubectl: 1.27.6 },
|
||||
{ k3s: 1.28.2+k3s1, kubectl: 1.28.2 },
|
||||
]
|
||||
test:
|
||||
- integration-tests
|
||||
- kat-envoy3-tests-1-of-5
|
||||
- kat-envoy3-tests-2-of-5
|
||||
- kat-envoy3-tests-3-of-5
|
||||
- kat-envoy3-tests-4-of-5
|
||||
- kat-envoy3-tests-5-of-5
|
||||
name: ${{matrix.k8s.kubectl}}-${{ matrix.test }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Deps
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.DEV_REGISTRY, 'docker.io/')) && secrets.DEV_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_BUILD_USERNAME }}
|
||||
password: ${{ secrets.GH_DOCKER_BUILD_TOKEN }}
|
||||
- name: Create integration test cluster
|
||||
env:
|
||||
K3S_VERSION: ${{matrix.k8s.k3s}}
|
||||
KUBECTL_VERSION: ${{matrix.k8s.kubectl}}
|
||||
run: |
|
||||
sudo sysctl -w fs.file-max=1600000
|
||||
sudo sysctl -w fs.inotify.max_user_instances=4096
|
||||
|
||||
make ci/setup-k3d
|
||||
- name: Setup integration test environment
|
||||
run: |
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
export DEV_REGISTRY=${{ secrets.DEV_REGISTRY }}
|
||||
make python-integration-test-environment
|
||||
- name: Run ${{ matrix.test }}
|
||||
run: |
|
||||
export DEV_KUBE_NO_PVC=yes
|
||||
export KAT_REQ_LIMIT=900
|
||||
export DEV_KUBECONFIG=~/.kube/config
|
||||
export DEV_REGISTRY=${{ secrets.DEV_REGISTRY }}
|
||||
make pytest-${{ matrix.test }}
|
||||
- uses: ./.github/actions/after-job
|
||||
if: always()
|
||||
with:
|
||||
jobname: check-pytest-${{matrix.k8s.kubectl}}-${{ matrix.test }}
|
|
@ -2,10 +2,10 @@ name: promote-to-ga
|
|||
"on":
|
||||
push:
|
||||
tags:
|
||||
- 'v[0-9]+.[0-9]+.[0-9]+'
|
||||
- "v[0-9]+.[0-9]+.[0-9]+"
|
||||
jobs:
|
||||
promote-to-ga:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
name: promote-to-ga
|
||||
env:
|
||||
AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
|
||||
|
@ -19,7 +19,7 @@ jobs:
|
|||
with:
|
||||
fetch-depth: 0
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.RELEASE_REGISTRY, 'docker.io/')) && secrets.RELEASE_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }}
|
||||
|
@ -30,10 +30,12 @@ jobs:
|
|||
run: |
|
||||
make release/promote-oss/to-ga
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: promote-to-ga-1
|
||||
if: always()
|
||||
- id: check-slack-webhook
|
||||
name: Assign slack webhook variable
|
||||
run: echo '::set-output name=slack_webhook_url::${{secrets.SLACK_WEBHOOK_URL}}'
|
||||
run: echo "slack_webhook_url=${{secrets.SLACK_WEBHOOK_URL}}" >> $GITHUB_OUTPUT
|
||||
- name: Slack notification
|
||||
if: steps.check-slack-webhook.outputs.slack_webhook_url && always()
|
||||
uses: edge/simple-slack-notify@master
|
||||
|
@ -41,18 +43,20 @@ jobs:
|
|||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
with:
|
||||
status: ${{ job.status }}
|
||||
success_text: 'Emissary GA for ${env.GITHUB_REF} successfully built'
|
||||
failure_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed'
|
||||
cancelled_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled'
|
||||
success_text: "Emissary GA for ${env.GITHUB_REF} successfully built"
|
||||
failure_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed"
|
||||
cancelled_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled"
|
||||
fields: |
|
||||
[{ "title": "Repository", "value": "${env.GITHUB_REPOSITORY}", "short": true },
|
||||
{ "title": "Branch", "value": "${env.GITHUB_REF}", "short": true },
|
||||
{ "title": "Action URL", "value": "${env.GITHUB_SERVER_URL}/${env.GITHUB_REPOSITORY}/actions/runs/${env.GITHUB_RUN_ID}"}
|
||||
]
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: promote-to-ga-2
|
||||
if: always()
|
||||
create-gh-release:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
needs: [promote-to-ga]
|
||||
name: "Create GitHub release"
|
||||
env:
|
||||
|
@ -65,14 +69,14 @@ jobs:
|
|||
uses: ./.github/actions/setup-deps
|
||||
- name: "gh auth login"
|
||||
run: |
|
||||
echo '${{ secrets.GH_GITHUB_API_KEY }}' | gh auth login --with-token
|
||||
gh auth login --with-token <<<'${{ secrets.GH_GITHUB_API_KEY }}'
|
||||
- name: Create GitHub release
|
||||
id: step-create-gh-release
|
||||
run: |
|
||||
make release/ga/create-gh-release
|
||||
- id: check-slack-webhook
|
||||
name: Assign slack webhook variable
|
||||
run: echo '::set-output name=slack_webhook_url::${{secrets.SLACK_WEBHOOK_URL}}'
|
||||
run: echo "slack_webhook_url=${{secrets.SLACK_WEBHOOK_URL}}" >> $GITHUB_OUTPUT
|
||||
- name: Slack notification
|
||||
if: steps.check-slack-webhook.outputs.slack_webhook_url && always()
|
||||
uses: edge/simple-slack-notify@master
|
||||
|
@ -80,13 +84,15 @@ jobs:
|
|||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
with:
|
||||
status: ${{ job.status }}
|
||||
success_text: 'Emissary GitHub release was created: ${{ steps.step-create-gh-release.outputs.url }}'
|
||||
failure_text: 'Emissary GitHub release failed'
|
||||
cancelled_text: 'Emissary GitHub release was was cancelled'
|
||||
success_text: "Emissary GitHub release was created: ${{ steps.step-create-gh-release.outputs.url }}"
|
||||
failure_text: "Emissary GitHub release failed"
|
||||
cancelled_text: "Emissary GitHub release was was cancelled"
|
||||
fields: |
|
||||
[{ "title": "Repository", "value": "${env.GITHUB_REPOSITORY}", "short": true },
|
||||
{ "title": "Branch", "value": "${env.GITHUB_REF}", "short": true },
|
||||
{ "title": "Action URL", "value": "${env.GITHUB_SERVER_URL}/${env.GITHUB_REPOSITORY}/actions/runs/${env.GITHUB_RUN_ID}"}
|
||||
]
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: create-gh-release
|
||||
if: always()
|
||||
|
|
|
@ -2,10 +2,11 @@ name: promote-to-rc
|
|||
"on":
|
||||
push:
|
||||
tags:
|
||||
- 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-dev"
|
||||
jobs:
|
||||
promote-to-rc:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
name: promote-to-rc
|
||||
env:
|
||||
AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
|
||||
|
@ -21,7 +22,7 @@ jobs:
|
|||
- name: "Install Deps"
|
||||
uses: ./.github/actions/setup-deps
|
||||
- name: "Docker Login"
|
||||
uses: docker/login-action@v1
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
registry: ${{ (!startsWith(secrets.RELEASE_REGISTRY, 'docker.io/')) && secrets.RELEASE_REGISTRY || null }}
|
||||
username: ${{ secrets.GH_DOCKER_RELEASE_USERNAME }}
|
||||
|
@ -29,11 +30,11 @@ jobs:
|
|||
- id: step-main
|
||||
run: |
|
||||
make release/promote-oss/to-rc
|
||||
echo "::set-output name=version::$(go run ./tools/src/goversion | sed s/^v//)"
|
||||
echo "::set-output name=chart_version::$(go run ./tools/src/goversion --dir-prefix=chart | sed s/^v//)"
|
||||
echo "version=$(go run ./tools/src/goversion | sed s/^v//)" >> $GITHUB_OUTPUT
|
||||
echo "chart_version=$(go run ./tools/src/goversion --dir-prefix=chart | sed s/^v//)" >> $GITHUB_OUTPUT
|
||||
- id: check-slack-webhook
|
||||
name: Assign slack webhook variable
|
||||
run: echo '::set-output name=slack_webhook_url::${{secrets.SLACK_WEBHOOK_URL}}'
|
||||
run: echo "slack_webhook_url=${{secrets.SLACK_WEBHOOK_URL}}" >> $GITHUB_OUTPUT
|
||||
- name: Slack notification
|
||||
if: steps.check-slack-webhook.outputs.slack_webhook_url && always()
|
||||
uses: edge/simple-slack-notify@master
|
||||
|
@ -48,12 +49,14 @@ jobs:
|
|||
export AMBASSADOR_MANIFEST_URL=https://app.getambassador.io/yaml/emissary/${{ steps.step-main.outputs.version }}
|
||||
export HELM_CHART_VERSION=${{ steps.step-main.outputs.chart_version }}
|
||||
\`\`\`
|
||||
failure_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed'
|
||||
cancelled_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled'
|
||||
failure_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed"
|
||||
cancelled_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled"
|
||||
fields: |
|
||||
[{ "title": "Repository", "value": "${env.GITHUB_REPOSITORY}", "short": true },
|
||||
{ "title": "Branch", "value": "${env.GITHUB_REF}", "short": true },
|
||||
{ "title": "Action URL", "value": "${env.GITHUB_SERVER_URL}/${env.GITHUB_REPOSITORY}/actions/runs/${env.GITHUB_RUN_ID}"}
|
||||
]
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: promote-to-rc
|
||||
if: always()
|
||||
|
|
|
@ -2,10 +2,10 @@ name: chart-publish
|
|||
"on":
|
||||
push:
|
||||
tags:
|
||||
- 'chart/v*'
|
||||
- "chart/v*"
|
||||
jobs:
|
||||
chart-publish:
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
name: chart-publish
|
||||
env:
|
||||
AWS_S3_BUCKET: ${{ secrets.AWS_S3_BUCKET }}
|
||||
|
@ -25,7 +25,7 @@ jobs:
|
|||
make release/push-chart
|
||||
- id: check-slack-webhook
|
||||
name: Assign slack webhook variable
|
||||
run: echo '::set-output name=slack_webhook_url::${{secrets.SLACK_WEBHOOK_URL}}'
|
||||
run: echo "slack_webhook_url=${{secrets.SLACK_WEBHOOK_URL}}" >> $GITHUB_OUTPUT
|
||||
- name: Slack notification
|
||||
if: steps.check-slack-webhook.outputs.slack_webhook_url && always()
|
||||
uses: edge/simple-slack-notify@master
|
||||
|
@ -34,22 +34,22 @@ jobs:
|
|||
with:
|
||||
status: ${{ job.status }}
|
||||
success_text: "Chart successfully published for ${env.GITHUB_REF}"
|
||||
failure_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed'
|
||||
cancelled_text: '${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled'
|
||||
failure_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build failed"
|
||||
cancelled_text: "${env.GITHUB_WORKFLOW} (${env.GITHUB_RUN_NUMBER}) build was cancelled"
|
||||
fields: |
|
||||
[{ "title": "Repository", "value": "${env.GITHUB_REPOSITORY}", "short": true },
|
||||
{ "title": "Branch", "value": "${env.GITHUB_REF}", "short": true },
|
||||
{ "title": "Action URL", "value": "${env.GITHUB_SERVER_URL}/${env.GITHUB_REPOSITORY}/actions/runs/${env.GITHUB_RUN_ID}"}
|
||||
]
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: chart-publish
|
||||
if: always()
|
||||
chart-create-gh-release:
|
||||
if: ${{ ! contains(github.ref, '-') }}
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ubuntu-24.04
|
||||
needs: [chart-publish]
|
||||
name: "Create GitHub release"
|
||||
env:
|
||||
GIT_TOKEN: ${{ secrets.GH_GITHUB_API_KEY }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
|
@ -58,14 +58,14 @@ jobs:
|
|||
uses: ./.github/actions/setup-deps
|
||||
- name: "gh auth login"
|
||||
run: |
|
||||
echo "${GIT_TOKEN}" | gh auth login --with-token
|
||||
gh auth login --with-token <<<'${{ secrets.GH_GITHUB_API_KEY }}'
|
||||
- name: Create GitHub release
|
||||
id: step-create-gh-release
|
||||
run: |
|
||||
make release/chart-create-gh-release
|
||||
- id: check-slack-webhook
|
||||
name: Assign slack webhook variable
|
||||
run: echo '::set-output name=slack_webhook_url::${{secrets.SLACK_WEBHOOK_URL}}'
|
||||
run: echo "slack_webhook_url=${{secrets.SLACK_WEBHOOK_URL}}" >> $GITHUB_OUTPUT
|
||||
- name: Slack notification
|
||||
if: steps.check-slack-webhook.outputs.slack_webhook_url && always()
|
||||
uses: edge/simple-slack-notify@master
|
||||
|
@ -73,13 +73,15 @@ jobs:
|
|||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
with:
|
||||
status: ${{ job.status }}
|
||||
success_text: 'Chart GitHub release was created: ${{ steps.step-create-gh-release.outputs.url }}'
|
||||
failure_text: 'Chart GitHub release failed'
|
||||
cancelled_text: 'Chart GitHub release was was cancelled'
|
||||
success_text: "Chart GitHub release was created: ${{ steps.step-create-gh-release.outputs.url }}"
|
||||
failure_text: "Chart GitHub release failed"
|
||||
cancelled_text: "Chart GitHub release was was cancelled"
|
||||
fields: |
|
||||
[{ "title": "Repository", "value": "${env.GITHUB_REPOSITORY}", "short": true },
|
||||
{ "title": "Branch", "value": "${env.GITHUB_REF}", "short": true },
|
||||
{ "title": "Action URL", "value": "${env.GITHUB_SERVER_URL}/${env.GITHUB_REPOSITORY}/actions/runs/${env.GITHUB_RUN_ID}"}
|
||||
]
|
||||
- uses: ./.github/actions/after-job
|
||||
with:
|
||||
jobname: chart-create-gh-release
|
||||
if: always()
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
/build-output/
|
||||
/vendor/
|
||||
|
||||
*.tgz
|
||||
*.img.tar
|
||||
|
@ -36,8 +37,6 @@ charts/emissary-ingress/artifacts
|
|||
charts/emissary-ingress/k3dconfig
|
||||
charts/yq
|
||||
|
||||
vendor_bootstrap_hack.go
|
||||
|
||||
/docker/images.sh
|
||||
/docker/images.tar
|
||||
/docker/volume.tar
|
||||
|
@ -89,7 +88,6 @@ tags
|
|||
no-such-file
|
||||
envoy.json
|
||||
intermediate.json
|
||||
gold.no_yaml.json
|
||||
v1.json
|
||||
check-*.json
|
||||
stats.json
|
||||
|
@ -106,6 +104,8 @@ ambassador-secrets-deployment.yaml
|
|||
# Remove the tail of this list when the commit making the change gets
|
||||
# far enough in to the past.
|
||||
|
||||
# 2023-02-20
|
||||
gold.no_yaml.json
|
||||
# 2022-05-19
|
||||
/bin
|
||||
/bin_*/
|
||||
|
|
|
@ -12,16 +12,19 @@ linters-settings:
|
|||
gofmt:
|
||||
simplify: true
|
||||
depguard:
|
||||
list-type: blacklist
|
||||
include-go-root: true
|
||||
packages-with-error-message:
|
||||
- log: "Use `github.com/datawire/dlib/dlog` instead of `log`"
|
||||
- github.com/sirupsen/logrus: "Use `github.com/datawire/dlib/dlog` instead of `github.com/sirupsen/logrus`"
|
||||
- github.com/datawire/dlib/dutil: "Use either `github.com/datawire/dlib/derror` or `github.com/datawire/dlib/dhttp` instead of `github.com/datawire/dlib/dutil`"
|
||||
- github.com/gogo/protobuf: "Use `google.golang.org/protobuf` instead of `github.com/gogo/protobuf`"
|
||||
- github.com/golang/protobuf: "Use `google.golang.org/protobuf` instead of `github.com/golang/protobuf`"
|
||||
- github.com/google/shlex: "Use `github.com/kballard/go-shellquote` instead of `github.com/google/shlex`"
|
||||
- golang.org/x/net/http2/h2c: "Use `github.com/datawire/dlib/dhttp` instead of `golang.org/x/net/http2/h2c`"
|
||||
rules:
|
||||
main:
|
||||
deny:
|
||||
- pkg: "github.com/datawire/dlib/dutil"
|
||||
desc: "Use either `github.com/datawire/dlib/derror` or `github.com/datawire/dlib/dhttp` instead of `github.com/datawire/dlib/dutil`"
|
||||
- pkg: "github.com/gogo/protobuf"
|
||||
desc: "Use `google.golang.org/protobuf` instead of `github.com/gogo/protobuf`"
|
||||
- pkg: "github.com/golang/protobuf"
|
||||
desc: "Use `google.golang.org/protobuf` instead of `github.com/golang/protobuf`"
|
||||
- pkg: "github.com/google/shlex"
|
||||
desc: "Use `github.com/kballard/go-shellquote` instead of `github.com/google/shlex`"
|
||||
- pkg: "golang.org/x/net/http2/h2c"
|
||||
desc: "Use `github.com/datawire/dlib/dhttp` instead of `golang.org/x/net/http2/h2c`"
|
||||
unused:
|
||||
# treat code as a program (not a library) and report unused
|
||||
# exported identifiers
|
||||
|
|
|
@ -0,0 +1,143 @@
|
|||
# .mailmap -- normalize Git user names and email addresses
|
||||
#
|
||||
# Short version of the syntax:
|
||||
#
|
||||
# - Lines starting with "#" are comments
|
||||
#
|
||||
# - Fix just the name associated with an email address:
|
||||
#
|
||||
# New Full Name <email@address>
|
||||
#
|
||||
# Example: to map from "AliceProxy <alicewasko@datawire.io>" to
|
||||
# "Alice Wasko <alicewasko@datawire.io>", I'd write
|
||||
#
|
||||
# Alice Wasko <alicewasko@datawire.io>
|
||||
#
|
||||
# - Adjust an email address (and possibly name as well):
|
||||
#
|
||||
# New Full Name <new.email@address> <old.email@address>
|
||||
#
|
||||
# Example: to map from "AliceProxy <aliceproxy@protonmail.com>" to
|
||||
# "Alice Wasko <alicewasko@datawire.io>", I'd write
|
||||
#
|
||||
# Alice Wasko <alicewasko@datawire.io> <aliceproxy@protonmail.com>
|
||||
#
|
||||
# In `git log` `--format` strings: the uppercase '%cN', '%cE', '%aN',
|
||||
# and '%aE' obey .mailmap; while the lowercase '%cn', '%ce', '%an',
|
||||
# and '%ae' ignore it.
|
||||
#
|
||||
# See the gitmailmap(5) man-page for more information.
|
||||
|
||||
# Current Ambassador Labs employees/contractors
|
||||
# (i.e. @datawire.io is the proper email to use)
|
||||
Alex Gervais <alex@datawire.io>
|
||||
Alex Gervais <alex@datawire.io> <alex.gervais@gmail.com>
|
||||
Alex Gervais <alex@datawire.io> <alexandre.gervais@appdirect.com>
|
||||
Alexis Munoz <amunoz@datawire.io>
|
||||
Alexis Munoz <amunoz@datawire.io> <alexis.munoz@unosquare.com>
|
||||
Alice Wasko <alicewasko@datawire.io>
|
||||
Alice Wasko <alicewasko@datawire.io> <22459179+AliceProxy@users.noreply.github.com>
|
||||
Alice Wasko <alicewasko@datawire.io> <aliceProxy@protonmail.com>
|
||||
Alice Wasko <alicewasko@datawire.io> <aliceproxy@pm.me>
|
||||
Alice Wasko <alicewasko@datawire.io> <aliceproxy@protonmail.com>
|
||||
Andres Osorio <aosorio@datawire.io> <95947712+aosoriodw@users.noreply.github.com>
|
||||
Bjorn Freeman-Benson <bjorn@datawire.io> <157783+bnfb@users.noreply.github.com>
|
||||
Bjorn Freeman-Benson <bjorn@datawire.io> <bjorn@Bjorns-MacBook-Pro.local>
|
||||
Cindy Mullins <cindymullins@datawire.io> <57576741+cindymullins-dw@users.noreply.github.com>
|
||||
Guillaume Veschambre <gveschambre@datawire.io>
|
||||
Kelsey Evans <kelsey@datawire.io>
|
||||
Kevin Lambert <klambert@datawire.io> <kevin.lambert.ca@gmail.com>
|
||||
Lance Austin <laustin@datawire.io> <lanceaustin26@gmail.com>
|
||||
Nick Powell <nickpowell@datawire.io> <35627686+njayp@users.noreply.github.com>
|
||||
Nick Powell <nickpowell@datawire.io> <nickjaypowell@gmail.com>
|
||||
Rafael Schloming <rhs@datawire.io> <rhs@alum.mit.edu>
|
||||
Tenshin Higashi <thigashi@datawire.io>
|
||||
Thomas Hallgren <thomas@datawire.io> <thomas@tada.se>
|
||||
William Hardin <williamhardin@datawire.io> <47009048+w-h37@users.noreply.github.com>
|
||||
|
||||
# Former Ambassador Labs employees/contractors (without a new address)
|
||||
# (i.e. @datawire.io is the proper email to use until we hear back about their preferred email)
|
||||
Matt McClure <mattmcclure@datawire.io> <73489204+mattmcclure-dw@users.noreply.github.com>
|
||||
#Ryan Park <rpark@datawire.io>
|
||||
|
||||
# Former Ambassador Labs employees/contractors (with a new address)
|
||||
# (i.e. @datawire.io is *not* the proper email to use)
|
||||
Abhay Saxena <ark3@email.com> <ark3@datawire.io>
|
||||
Alice Nodelman <alice@nodelman.net>
|
||||
Alice Nodelman <alice@nodelman.net> <anode@boat.local>
|
||||
Alice Nodelman <alice@nodelman.net> <anode@datawire.io>
|
||||
Alix Cook <alix.cook11@gmail.com> <73843761+acookin@users.noreply.github.com>
|
||||
Alix Cook <alix.cook11@gmail.com> <alixcook@datawire.io>
|
||||
Alvaro Saurin <alvaro.saurin@gmail.com> <1841612+inercia@users.noreply.github.com>
|
||||
Alvaro Saurin <alvaro.saurin@gmail.com> <alvaro@datawire.io>
|
||||
Ava Hahn <ava@aidanis.online> <87328374+aidanhahn@users.noreply.github.com>
|
||||
Ava Hahn <ava@aidanis.online> <aidanhahn@datawire.io>
|
||||
Bruce Horn <bruce.horn@mac.com> <bruce@datawire.io>
|
||||
Cynthia Coan <cynthia@coan.dev>
|
||||
Cynthia Coan <cynthia@coan.dev> <SecurityInsanity@users.noreply.github.com>
|
||||
David Dymko <dymkod@gmail.com> <ddymko@datawire.io>
|
||||
Donny Yung <donaldryung@gmail.com> <donaldyung@datawire.io>
|
||||
Douglas Camata <d.camata@gmail.com> <dcamata@datawire.io>
|
||||
Douglas Camata <d.camata@gmail.com> <159076+douglascamata@users.noreply.github.com>
|
||||
Flynn <emissary@flynn.kodachi.com> <flynn+github@kodachi.com>
|
||||
Flynn <emissary@flynn.kodachi.com> <flynn@datawire.io>
|
||||
Flynn <emissary@flynn.kodachi.com> <flynn@ryoohko.local>
|
||||
Flynn <emissary@flynn.kodachi.com> <kflynn@users.noreply.github.com>
|
||||
Gabriel Linden Sagula <gsagula@gmail.com>
|
||||
Itamar Turner-Trauring <itamar@itamarst.org> <itamar@datawire.io>
|
||||
John Esmet <john.esmet@gmail.com> <jesmet@appnexus.com>
|
||||
John Esmet <john.esmet@gmail.com> <johnesmest@datawire.io>
|
||||
John Esmet <john.esmet@gmail.com> <johnesmet@datawire.io>
|
||||
Leonardo Luz Almeida <leonardo.la@gmail.com> <leoluz@users.noreply.github.com>
|
||||
Leonardo Luz Almeida <leonardo.la@gmail.com> <leonardoluz@datawire.io>
|
||||
Luke Shumaker <lukeshu@lukeshu.com> <luke@lukeshu.com>
|
||||
Luke Shumaker <lukeshu@lukeshu.com> <lukeshu@datawire.io>
|
||||
Noah Krause <krausenoah@gmail.com>
|
||||
Noah Krause <krausenoah@gmail.com> <nkrause@MacBook-Pro.local>
|
||||
Noah Krause <krausenoah@gmail.com> <nkrause@Opss-MacBook-Air.local>
|
||||
Noah Krause <krausenoah@gmail.com> <nkrause@datawire.io>
|
||||
Noah Krause <krausenoah@gmail.com> <noahkrause@Noahs-MacBook-Pro.local>
|
||||
Philip Lombardi <plombardi89@gmail.com>
|
||||
Philip Lombardi <plombardi89@gmail.com> <893096+plombardi89@users.noreply.github.com>
|
||||
Philip Lombardi <plombardi89@gmail.com> <plombardi@datawire.io>
|
||||
Shahriar Rostami <srdsecond@gmail.com> <54422255+alphashr@users.noreply.github.com>
|
||||
Stephanie Coyle <stephaniecoyle391@gmail.com>
|
||||
Stephanie Coyle <stephaniecoyle391@gmail.com> <55929576+scoyle391@users.noreply.github.com>
|
||||
Stephanie Coyle <stephaniecoyle391@gmail.com> <scoyle@datawire.io>
|
||||
|
||||
# Outside contributors (i.e. people who never had an @datawire.io email)
|
||||
# (actually, Dan Sipple was a contractor, but he never had an @datawire.io email)
|
||||
|
||||
## inconsistent email -- confirmed preferences
|
||||
Dan Sipple <sipple.dan@gmail.com> <dsipple@etsy.com>
|
||||
Endre Czirbesz <endre@czirbesz.hu> <endre.czirbesz@rungway.com>
|
||||
Endre Czirbesz <endre@czirbesz.hu> <endrec@users.noreply.github.com>
|
||||
Giovanni Gargiulo <gargiulo.gianni@gmail.com> <giovanni.gargiulo@hbc.com>
|
||||
Josue Diaz <josuesdiaz15@msn.com> <josue_diaz@affirmednetworks.com>
|
||||
Niko Kurtti <niko@kurtti.eu>
|
||||
Niko Kurtti <niko@kurtti.eu> <niko.kurtti@interdax.com>
|
||||
Niko Kurtti <niko@kurtti.eu> <niko@kurtti@eu>
|
||||
Phil Peble <philpeble@gmail.com> <ppeble@activecampaign.com>
|
||||
|
||||
## inconsistent email -- one of them undeliverable, so what remains must be preferred
|
||||
Wenhua Zhao <whzhao@gmail.com> <wenhuaz@splunk.com>
|
||||
|
||||
## inconsistent email -- did I choosed the correct email?
|
||||
Brandon Catcho <bcatcho@doubledutch.me> <bcatcho@gmail.com>
|
||||
Christoph Grossegger <christoph.grossegger@gmail.com> <christoph@dust-bit-games.com>
|
||||
John Morrissey <jmorrissey@devoted.com> <jwm@devoted.com>
|
||||
Markus Maga <markus@maga.nu> <markus@maga.se>
|
||||
Romario Maxwell <maxwellra@jncb.com> <romario.maxwell@gmail.com>
|
||||
|
||||
## inconsistent name
|
||||
Kevin Dagostino <kevin@tonkworks.com>
|
||||
Kowalczyk Bartek <bkowalczyyk@gmail.com>
|
||||
Mohit Sharma <imoisharma@icloud.com>
|
||||
|
||||
## just @users.norely.github.com substitutions
|
||||
Jordan Neufeld <jordan.neufeld@nfl.com> <neufeldtech@users.noreply.github.com>
|
||||
Mark Davydov <mark@monday.com> <markrity@users.noreply.github.com>
|
||||
Markus Jevring <markus.jevring@sesamecare.com> <58325443+markusjevringsesame@users.noreply.github.com>
|
||||
|
||||
# Bots
|
||||
<dev@datawire.io> <dev@datawire.ioservices@datawire.io>
|
|
@ -1,3 +0,0 @@
|
|||
# Emissary-Ingress Architecture
|
||||
|
||||
WIP - we are working to get this updated. Check back soon!
|
|
@ -1,4 +0,0 @@
|
|||
Building Ambassador
|
||||
===================
|
||||
|
||||
The content in this document has been moved to [DEVELOPING.md].
|
235
CHANGELOG.md
235
CHANGELOG.md
|
@ -85,7 +85,236 @@ it will be removed; but as it won't be user-visible this isn't considered a brea
|
|||
|
||||
## RELEASE NOTES
|
||||
|
||||
## [3.3.0] TBD
|
||||
## [3.10.0] July 29, 2025
|
||||
[3.10.0]: https://github.com/emissary-ingress/emissary/compare/v3.9.0...v3.10.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.28.0 which provides security,
|
||||
performance and feature enhancements. You can read more about them here: <a
|
||||
href="https://www.envoyproxy.io/docs/envoy/v1.28.0/version_history/version_history">Envoy Proxy
|
||||
1.28.0 Release Notes</a>
|
||||
|
||||
- Change: Emissary-ingress will no longer publish YAML manifest with the Ambassador Agent being
|
||||
installed by default. This is an optional component that provides additional features on top of
|
||||
Emissary-ingress and we recommend installing it using the instructions found in the <a
|
||||
href="https://github.com/datawire/ambassador-agenty">Ambassador Agent Repo</a>.
|
||||
|
||||
- Change: Upgraded Emissary-ingress to the latest release of Golang as part of our general
|
||||
dependency upgrade process.
|
||||
|
||||
- Bugfix: Emissary-ingress was incorrectly caching Mappings with regex headers using the header name
|
||||
instead of the Mapping name, which could reduce the cache's effectiveness. This has been fixed so
|
||||
that the correct key is used. ([Incorrect Cache Key for Mapping])
|
||||
|
||||
- Feature: Emissary-ingress now supports resolving Endpoints from EndpointSlices in addition to the
|
||||
existing support for Endpoints, supporting Services with more than 1000 endpoints.
|
||||
|
||||
- Feature: Emissary-ingress now passes the client TLS certificate and SNI, if any, to the external
|
||||
auth service. These are available in the `source.certificate` and `tls_session.sni` fields, as
|
||||
described in the <a
|
||||
href="https://www.envoyproxy.io/docs/envoy/latest/api-v3/service/auth/v3/attribute_context.proto">
|
||||
Envoy extauth documentation</a>.
|
||||
|
||||
- Change: The `ambex` component of Emissary-ingress now uses `xxhash64` instead of `md5`, since
|
||||
`md5` can cause problems in crypto-restricted environments (e.g. FIPS) ([Remove usage of md5])
|
||||
|
||||
[Incorrect Cache Key for Mapping]: https://github.com/emissary-ingress/emissary/issues/5714
|
||||
[Remove usage of md5]: https://github.com/emissary-ingress/emissary/pull/5794
|
||||
|
||||
## [3.9.0] November 13, 2023
|
||||
[3.9.0]: https://github.com/emissary-ingress/emissary/compare/v3.8.0...v3.9.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.27.2 which provides security,
|
||||
performance and feature enhancements. You can read more about them here: <a
|
||||
href="https://www.envoyproxy.io/docs/envoy/v1.27.2/version_history/version_history">Envoy Proxy
|
||||
1.27.2 Release Notes</a>
|
||||
|
||||
- Feature: By default, Emissary-ingress will return an `UNAVAILABLE` code when a request using gRPC
|
||||
is rate limited. The `RateLimitService` resource now exposes a new
|
||||
`grpc.use_resource_exhausted_code` field that when set to `true`, Emissary-ingress will return a
|
||||
`RESOURCE_EXHAUSTED` gRPC code instead. Thanks to <a href="https://github.com/jeromefroe">Jerome
|
||||
Froelich</a> for contributing this feature!
|
||||
|
||||
- Feature: Envoy runtime fields that were provided to mitigate the recent HTTP/2 rapid reset
|
||||
vulnerability can now be configured via the Module resource so the configuration will persist
|
||||
between restarts. This configuration is added to the Envoy bootstrap config, so restarting
|
||||
Emissary is necessary after changing these fields for the configuration to take effect.
|
||||
|
||||
- Change: APIExt would previously allow for TLS 1.0 connections. We have updated it to now only use
|
||||
a minimum TLS version of 1.3 to resolve security concerns.
|
||||
|
||||
- Change: - Update default image to Emissary-ingress v3.9.0. <br/>
|
||||
|
||||
- Bugfix: The APIExt server provides CRD conversion between the stored version v2 and the version
|
||||
watched for by Emissary-ingress v3alpha1. Since this component is required to operate
|
||||
Emissary-ingress, we have introduced an init container that will ensure it is available before
|
||||
starting. This will help address some of the intermittent issues seen during install and upgrades.
|
||||
|
||||
## [3.8.0] August 29, 2023
|
||||
[3.8.0]: https://github.com/emissary-ingress/emissary/compare/v3.7.2...v3.8.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Bugfix: As of v2.2.2, if two mappings were associated with different Hosts through host
|
||||
mappingSelector labels but share the same prefix, the labels were not taken into account which
|
||||
would cause one Mapping to be correctly routed but the other not.
|
||||
This change fixes this issue so
|
||||
that Mappings sharing the same prefix but associated with different Hosts will be correctly
|
||||
routed. ([Canary grouping must take labels into account])
|
||||
|
||||
- Bugfix: In previous versions, if multiple Headers/QueryParameters where used in a v3alpha1
|
||||
mapping, these values would duplicate and cause all the Headers/QueryParameters to have the same
|
||||
value. This is no longer the case and the expected values for unique Headers/QueryParameters will
|
||||
apply.
|
||||
This issue was only present in v3alpha1 Mappings. For users who may have this issue, please
|
||||
be sure to re-apply any v3alpha1 Mappings in order to update the stored v2 Mapping and resolve the
|
||||
issue.
|
||||
|
||||
- Change: When the Ambassador agent is being used, it will no longer attempt to collect and report
|
||||
Envoy metrics. In previous versions, Emissary-ingress would always create an Envoy stats sink for
|
||||
the agent as long as the AMBASSADOR_GRPC_METRICS_SINK environmet variable was provided. This
|
||||
environment variable was hardcoded on the release manifests and has now been removed and an Envoy
|
||||
stats sink for the agent is no longer created.
|
||||
|
||||
[Canary grouping must take labels into account]: https://github.com/emissary-ingress/emissary/issues/4170
|
||||
|
||||
## [3.7.2] July 25, 2023
|
||||
[3.7.2]: https://github.com/emissary-ingress/emissary/compare/v3.7.1...v3.7.2
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Security: This upgrades Emissary-ingress to be built on Envoy v1.26.4 which includes a security
|
||||
fixes for CVE-2023-35942, CVE-2023-35943, VE-2023-35944.
|
||||
|
||||
## [3.7.1] July 13, 2023
|
||||
[3.7.1]: https://github.com/emissary-ingress/emissary/compare/v3.7.0...v3.7.1
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Security: This upgrades Emissary-ingress to be built on Envoy v1.26.3 which includes a security
|
||||
fix for CVE-2023-35945.
|
||||
|
||||
## [3.7.0] June 20, 2023
|
||||
[3.7.0]: https://github.com/emissary-ingress/emissary/compare/v3.6.0...v3.7.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Security: Upgrading to the latest release of Golang as part of our general dependency upgrade
|
||||
process. This includes security fixes for CVE-2023-24539, CVE-2023-24540, CVE-2023-29400.
|
||||
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.26.1. This provides
|
||||
Emissary-ingress with the latest security patches, performances enhancments, and features offered
|
||||
by the envoy proxy.
|
||||
|
||||
- Feature: By default, Envoy will return an `UNAVAILABLE` gRPC code when a request is rate limited.
|
||||
The `RateLimitService` resource now exposes the <a
|
||||
href="https://www.envoyproxy.io/docs/envoy/v1.26.0/configuration/http/http_filters/rate_limit_filter">use_resource_exhausted_code</a>
|
||||
option. Set `grpc.use_resource_exhausted_code: true` so Envoy will return a `RESOURCE_EXHAUSTED`
|
||||
gRPC code instead.
|
||||
|
||||
## [3.6.0] April 17, 2023
|
||||
[3.6.0]: https://github.com/emissary-ingress/emissary/compare/v3.5.0...v3.6.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.25.4. This provides
|
||||
Emissary-ingress with the latest security patches, performances enhancments, and features offered
|
||||
by the envoy proxy.
|
||||
|
||||
## [3.5.0] February 15, 2023
|
||||
[3.5.0]: https://github.com/emissary-ingress/emissary/compare/v3.4.0...v3.5.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Security: Upgrading to the latest release of Golang as part of our general dependency upgrade
|
||||
process. This includes security fixes for CVE-2022-41725, CVE-2022-41723.
|
||||
|
||||
- Feature: In Envoy 1.24, experimental support for a native OpenTelemetry tracing driver was
|
||||
introduced that allows exporting spans in the otlp format. Many Observability platforms accept
|
||||
that format and is the recommend replacement for the LightStep driver. Emissary-ingress now
|
||||
supports setting the `TracingService.spec.driver=opentelemetry` to export spans in otlp
|
||||
format.<br/><br/>
|
||||
Thanks to <a href="https://github.com/psalaberria002">Paul</a> for helping us
|
||||
get this tested and implemented!
|
||||
|
||||
- Bugfix: When wanting to expose traffic to clients on ports other than 80/443, users will set a
|
||||
port in the Host.hostname (eg.`Host.hostname=example.com:8500`. The config generated allowed
|
||||
matching on the :authority header. This worked in v1.Y series due to the way emissary was
|
||||
generating Envoy configuration under a single wild-card virtual_host and matching on
|
||||
:authority.
|
||||
|
||||
In v2.Y/v3.Y+, the way emissary generates Envoy configuration changed to address
|
||||
memory pressure and improve route lookup speed in Envoy. However, when including a port in the
|
||||
hostname, an incorrect configuration was generated with an sni match including the port. This has
|
||||
been fixed and the correct envoy configuration is being generated. ([fix: hostname port issue])
|
||||
|
||||
- Change: Previously, specifying backend ports by name in Ingress was not supported and would result
|
||||
in defaulting to port 80. This allows emissary-ingress to now resolve port names for backend
|
||||
services. If the port number cannot be resolved by the name (e.g named port in the Service doesn't
|
||||
exist) then it defaults back to the original behavior. (Thanks to <a
|
||||
href="https://github.com/antonu17">Anton Ustyuzhanin</a>!). ([#4809])
|
||||
|
||||
- Change: The `emissary-apiext` server is a Kubernetes Conversion Webhook that converts between the
|
||||
Emissary-ingress CRD versions. On startup, it ensures that a self-signed cert is available so that
|
||||
K8s API Server can talk to the conversion webhook (*TLS is required by K8s*). We have introduced a
|
||||
startupProbe to ensure that emissary-apiext server has enough time to configure the webhooks
|
||||
before running liveness and readiness probes. This is to ensure slow startup doesn't cause K8s to
|
||||
needlessly restart the pod.
|
||||
|
||||
[fix: hostname port issue]: https://github.com/emissary-ingress/emissary/pull/4816
|
||||
[#4809]: https://github.com/emissary-ingress/emissary/pull/4809
|
||||
|
||||
## [3.4.0] January 03, 2023
|
||||
[3.4.0]: https://github.com/emissary-ingress/emissary/compare/v3.3.0...v3.4.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
||||
- Feature: Support for the `getambassador.io/v1` apiVersion has been re-introduced, in order to
|
||||
facilitate smoother migrations from Emissary-ingress 1.y. Previously, in order to make migrations
|
||||
possible, an "unserved" `v1` version was declared to Kubernetes, but was unsupported by
|
||||
Emissary-ingress. That unserved `v1` could cause an excess of errors to be logged by the
|
||||
Kubernetes Nodes (regardless of whether the installation was migrated from 1.y or was a fresh 2.y
|
||||
install); fully supporting `v1` again should resolve these errors.
|
||||
|
||||
- Feature: It is now possible to configure active healhchecking for upstreams within a `Mapping`. If
|
||||
the upstream fails its configured health check then Envoy will mark the upstream as unhealthy and
|
||||
no longer send traffic to that upstream. Single pods within a group may can be marked as
|
||||
unhealthy. The healthy pods will continue to receive traffic normally while the unhealthy pods
|
||||
will not receive any traffic until they recover by passing the health check.
|
||||
|
||||
- Feature: The healthcheck server's bind address, bind port and IP family can now be configured
|
||||
using environment variables:
|
||||
- `AMBASSADOR_HEALTHCHECK_BIND_ADDRESS`: The address to bind the
|
||||
healthcheck server to.
|
||||
- `AMBASSADOR_HEALTHCHECK_BIND_PORT`: The port to bind the healthcheck
|
||||
server to.
|
||||
- `AMBASSADOR_HEALTHCHECK_IP_FAMILY`: The IP family to use for the healthcheck
|
||||
server.
|
||||
This allows the healthcheck server to be configured to use IPv6-only k8s environments.
|
||||
(Thanks to <a href="https://github.com/TimonOmsk">Dmitry Golushko</a>!).
|
||||
|
||||
- Feature: This upgrades Emissary-ingress to be built on Envoy v1.24.1. One notable change is that
|
||||
the team at LightStep and Envoy Maintainers have decided to no longer support the native
|
||||
*LightStep* tracing driver in favor of using the Open Telemetry driver. The code for LightStep
|
||||
driver has been completely removed from Envoy code base so Emissary-ingress will no longer support
|
||||
it either.
|
||||
The recommended upgrade path is to leverage a supported Tracing driver such as `Zipkin`
|
||||
and use the [Open Telemetry Collector](https://opentelemetry.io/docs/collector/) to collect and
|
||||
forward Observabity data to LightStep.
|
||||
|
||||
- Feature: /ready endpoint used by emissary is using the admin port (8001 by default). This
|
||||
generates a problem during config reloads with large configs as the admin thread is blocking so
|
||||
the /ready endpoint can be very slow to answer (in the order of several seconds, even more). The
|
||||
new feature allows to enable a specific envoy listener that can answer /ready calls from the
|
||||
workers so the endpoint is always fast and it does not suffers from single threaded admin thread
|
||||
slowness on config reloads and other slow endpoints handled by the admin thread Configure the
|
||||
listener port using AMBASSADOR_READY_PORT and enable access log using AMBASSADOR_READY_LOG
|
||||
environment variables.
|
||||
|
||||
## [3.3.0] November 02, 2022
|
||||
[3.3.0]: https://github.com/emissary-ingress/emissary/compare/v3.2.0...v3.3.0
|
||||
|
||||
### Emissary-ingress and Ambassador Edge Stack
|
||||
|
@ -185,7 +414,7 @@ it will be removed; but as it won't be user-visible this isn't considered a brea
|
|||
releases, or a `Host` with or without a `TLSContext` as in prior 2.y releases.
|
||||
|
||||
- Bugfix: Prior releases of Emissary-ingress had the arbitrary limitation that a `TCPMapping` cannot
|
||||
be used on the same port that HTTP is served on, even if TLS+SNI would make this possible.
|
||||
be used on the same port that HTTP is served on, even if TLS+SNI would make this possible.
|
||||
Emissary-ingress now allows `TCPMappings` to be used on the same `Listener` port as HTTP `Hosts`,
|
||||
as long as that `Listener` terminates TLS.
|
||||
|
||||
|
@ -351,7 +580,7 @@ it will be removed; but as it won't be user-visible this isn't considered a brea
|
|||
releases, or a `Host` with or without a `TLSContext` as in prior 2.y releases.
|
||||
|
||||
- Bugfix: Prior releases of Emissary-ingress had the arbitrary limitation that a `TCPMapping` cannot
|
||||
be used on the same port that HTTP is served on, even if TLS+SNI would make this possible.
|
||||
be used on the same port that HTTP is served on, even if TLS+SNI would make this possible.
|
||||
Emissary-ingress now allows `TCPMappings` to be used on the same `Listener` port as HTTP `Hosts`,
|
||||
as long as that `Listener` terminates TLS.
|
||||
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
# Emissary Ingress Governance
|
||||
|
||||
This document defines the project governance for Emissary Ingress.
|
||||
|
||||
## Overview
|
||||
|
||||
**Emissary Ingress** is an open source project that is committed to building a thriving community. This document outlines how the community governs itself. All community members must adhere to the [Code of Conduct](CODE_OF_CONDUCT.md)
|
||||
|
||||
## Community Roles
|
||||
|
||||
* **Users:** Members that engage with the Emissary Ingress community via any medium (Slack, GitHub, mailing lists, etc.).
|
||||
* **Contributors:** Regular contributions to projects (documentation, code reviews, responding to issues, participation in proposal discussions, contributing code, etc.).
|
||||
* **Maintainers**: The Emissary Ingress project leaders. They are responsible for the overall health and direction of the project; final reviewers of PRs and responsible for releases. Maintainers are expected to triage issues, proactively fix bugs, review PRs to ensure code quality, and contribute documentation.
|
||||
|
||||
### Maintainers
|
||||
|
||||
New maintainers must be nominated by an existing maintainer and must be elected by a supermajority of existing maintainers. Likewise, maintainers can be removed by a supermajority of the existing maintainers or can resign by notifying one of the maintainers.
|
||||
|
||||
If you're interested in becoming a maintainer, contact an existing maintainer to express your interest. A good way to start is to fix some small issues (if you haven't already), working with one or more existing maintainers. As you build up a representative body of contributions, the maintainers will provide regular feedback on your progress towards maintainer status. After you have built up that representative body of contributions (usually over a period of 3-4 months), the maintainers will meet to discuss and vote on granting maintainer status.
|
||||
|
||||
### Decision Making
|
||||
|
||||
Ideally, all project decisions are resolved by consensus. If impossible, any maintainer may call a vote. Unless otherwise specified in this document, any vote will be decided by a majority of maintainers.
|
||||
|
||||
### Supermajority
|
||||
|
||||
A supermajority is defined as two-thirds of members in the group.
|
||||
|
||||
A supermajority of [Maintainers](#maintainers) is required for adding or removing maintainers.
|
||||
|
||||
### Voting Process
|
||||
|
||||
Voting on decisions will be conducted using GitHub:
|
||||
|
||||
- Open an issue, if an appropriate issue is not already present.
|
||||
- Write a description of the issue at hand in a comment on the issue. The description must include:
|
||||
- A summary of the vote to be taken;
|
||||
- Whether the vote requires a majority or a supermajority; and
|
||||
- The meaning of a yay vote and a nay vote, if not obvious.
|
||||
|
||||
For example, when voting to add a maintainer, the meanings of yay and nay are straightforward. On the other hand, for a choice between two alternatives, the comment should spell out which alternative is supported by a yay vote, and which by a nay vote.
|
||||
- Maintainers vote by placing emoji on the comment: :thumbsup: for yay, :thumbsdown: for nay.
|
||||
|
||||
## Updating Governance
|
||||
|
||||
All substantive changes in Governance require a supermajority agreement by all maintainers.
|
|
@ -0,0 +1,39 @@
|
|||
# Emissary Maintainers
|
||||
|
||||
[GOVERNANCE.md](GOVERNANCE.md) describes governance guidelines and
|
||||
maintainer responsibilities.
|
||||
|
||||
## Maintainers
|
||||
|
||||
Maintainers are listed in alphabetical order.
|
||||
|
||||
| Maintainer | GitHub ID | Affiliation |
|
||||
| ---------------- | ------------------------------------------------------ | --------------------------------------------------- |
|
||||
| Alice Wasko | [aliceproxy](https://github.com/aliceproxy) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| David Dymko | [ddymko](https://github.com/ddymko) | [CoreWeave](https://www.coreweave.com) |
|
||||
| Flynn | [kflynn](https://github.com/kflynn) | [Buoyant](https://www.buoyant.io) |
|
||||
| Hamzah Qudsi | [haq204](https://github.com/haq204) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Mark Schlachter | [the-wondersmith](https://github.com/the-wondersmith) | [Shuttle](https://www.shuttle.rs) |
|
||||
| Phil Peble | [ppeble](https://github.com/ppeble) | [ActiveCampaign](https://www.activecampaign.com/) |
|
||||
| Rafael Schloming | [rhs](https://github.com/rhs) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
|
||||
|
||||
In addition to the maintainers, Emissary releases may be created by any
|
||||
of the following (also listed in alphabetical order):
|
||||
|
||||
| Releaser | GitHub ID | Affiliation |
|
||||
| ------------ | ----------------------------------- | --------------------------------------------------- |
|
||||
| Will Hardin | [w-h37](https://github.com/w-h37) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
|
||||
## Maintainers Emeriti
|
||||
|
||||
* Ava Hahn ([aidanhahn](https://github.com/aidanhahn))
|
||||
* Alix Cook ([acookin](https://github.com/acookin))
|
||||
* John Esmet ([esmet](https://github.com/esmet))
|
||||
* Luke T. Shumaker ([lukeshu](https://github.com/lukeshu))
|
||||
* Alex Gervais ([alexgervais](https://github.com/alexgervais))
|
||||
* Lance Austin ([LanceEa](https://github.com/LanceEa))
|
||||
|
||||
## Releasers Emeriti
|
||||
|
||||
* Noah Krause ([iNoahNothing](https://github.com/iNoahNothing))
|
|
@ -0,0 +1,11 @@
|
|||
# Community Meeting Schedule
|
||||
|
||||
## Monthly Contributors Meeting
|
||||
|
||||
The Emissary-ingress Contributors Meeting is held on the first Wednesday of every month at 3:30pm Eastern. The focus of this meeting is discussion of technical issues related to development of Emissary-ingress.
|
||||
|
||||
New contributors are always welcome! Check out our [contributor's guide](../DevDocumentation/CONTRIBUTING.md) to learn how you can help make Emissary-ingress better.
|
||||
|
||||
**Zoom Meeting Link**: [https://ambassadorlabs.zoom.us/j/81589589470?pwd=U8qNvZSqjQx7abIzwRtGryFU35pi3T.1](https://ambassadorlabs.zoom.us/j/81589589470?pwd=U8qNvZSqjQx7abIzwRtGryFU35pi3T.1)
|
||||
- Meeting ID: 815 8958 9470
|
||||
- Passcode: 199217
|
|
@ -0,0 +1,12 @@
|
|||
## Support for deploying and using Emissary
|
||||
|
||||
Welcome to Emissary! The Emissary community is the best current resource for
|
||||
Emissary support, with the best options being:
|
||||
|
||||
- Checking out the [documentation] at https://emissary-ingress.dev/
|
||||
- Joining the `#emissary-ingress` channel in the [CNCF Slack]
|
||||
- [Opening an issue][GitHub] in [GitHub]
|
||||
|
||||
[CNCF Slack]: https://communityinviter.com/apps/cloud-native/cncf
|
||||
[documentation]: https://emissary-ingress.dev/
|
||||
[GitHub]: https://github.com/emissary-ingress/emissary/issues
|
404
DEPENDENCIES.md
404
DEPENDENCIES.md
|
@ -1,199 +1,219 @@
|
|||
The Go module "github.com/emissary-ingress/emissary/v3" incorporates the
|
||||
following Free and Open Source software:
|
||||
|
||||
Name Version License(s)
|
||||
---- ------- ----------
|
||||
the Go language standard library ("std") v1.19.2 3-clause BSD license
|
||||
cloud.google.com/go/compute v1.2.0 Apache License 2.0
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 MIT license
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible Apache License 2.0
|
||||
github.com/Azure/go-autorest/autorest v0.11.24 Apache License 2.0
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.18 Apache License 2.0
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 Apache License 2.0
|
||||
github.com/Azure/go-autorest/logger v0.2.1 Apache License 2.0
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 Apache License 2.0
|
||||
github.com/MakeNowJust/heredoc v1.0.0 MIT license
|
||||
github.com/Masterminds/goutils v1.1.1 Apache License 2.0
|
||||
github.com/Masterminds/semver v1.5.0 MIT license
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible MIT license
|
||||
github.com/PuerkitoBio/purell v1.1.1 3-clause BSD license
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 3-clause BSD license
|
||||
github.com/armon/go-metrics v0.3.10 MIT license
|
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d MIT license
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0 Apache License 2.0
|
||||
github.com/cncf/xds/go v0.0.0-20220121163655-4a2b9fdd466b Apache License 2.0
|
||||
github.com/datawire/dlib v1.3.0 Apache License 2.0
|
||||
github.com/datawire/dtest v0.0.0-20210928162311-722b199c4c2f Apache License 2.0
|
||||
github.com/datawire/go-mkopensource v0.0.0-20220218163159-cc298d9fabc4 Apache License 2.0
|
||||
github.com/davecgh/go-spew v1.1.1 ISC license
|
||||
github.com/docker/distribution v2.8.1+incompatible Apache License 2.0
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.7 Apache License 2.0
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible 3-clause BSD license
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f MIT license
|
||||
github.com/fatih/camelcase v1.0.0 MIT license
|
||||
github.com/fatih/color v1.13.0 MIT license
|
||||
github.com/fsnotify/fsnotify v1.5.4 3-clause BSD license
|
||||
github.com/go-errors/errors v1.4.2 MIT license
|
||||
github.com/go-logr/logr v0.4.0 Apache License 2.0
|
||||
github.com/go-openapi/jsonpointer v0.19.5 Apache License 2.0
|
||||
github.com/go-openapi/jsonreference v0.19.6 Apache License 2.0
|
||||
github.com/go-openapi/spec v0.20.4 Apache License 2.0
|
||||
github.com/go-openapi/swag v0.21.1 Apache License 2.0
|
||||
github.com/gobuffalo/flect v0.2.3 MIT license
|
||||
github.com/gogo/protobuf v1.3.2 3-clause BSD license
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0 MIT license
|
||||
github.com/golang/protobuf v1.5.2 3-clause BSD license
|
||||
github.com/google/btree v1.0.1 Apache License 2.0
|
||||
github.com/google/go-cmp v0.5.8 3-clause BSD license
|
||||
github.com/google/gofuzz v1.2.0 Apache License 2.0
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 Apache License 2.0
|
||||
github.com/google/uuid v1.3.0 3-clause BSD license
|
||||
github.com/googleapis/gnostic v0.5.5 Apache License 2.0
|
||||
github.com/gorilla/websocket v1.5.0 2-clause BSD license
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 MIT license
|
||||
github.com/hashicorp/consul/api v1.12.0 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-hclog v1.1.0 MIT license
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 Mozilla Public License 2.0
|
||||
github.com/hashicorp/golang-lru v0.5.4 Mozilla Public License 2.0
|
||||
github.com/hashicorp/serf v0.9.7 Mozilla Public License 2.0
|
||||
github.com/huandu/xstrings v1.3.2 MIT license
|
||||
github.com/imdario/mergo v0.3.12 3-clause BSD license
|
||||
github.com/inconshreveable/mousetrap v1.0.0 Apache License 2.0
|
||||
github.com/josharian/intern v1.0.1-0.20211109044230-42b52b674af5 MIT license
|
||||
github.com/json-iterator/go v1.1.12 MIT license
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 MIT license
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de 3-clause BSD license
|
||||
github.com/mailru/easyjson v0.7.7 MIT license
|
||||
github.com/mattn/go-colorable v0.1.12 MIT license
|
||||
github.com/mattn/go-isatty v0.0.14 MIT license
|
||||
github.com/mitchellh/copystructure v1.2.0 MIT license
|
||||
github.com/mitchellh/go-homedir v1.1.0 MIT license
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 MIT license
|
||||
github.com/mitchellh/mapstructure v1.4.3 MIT license
|
||||
github.com/mitchellh/reflectwalk v1.0.2 MIT license
|
||||
github.com/moby/spdystream v0.2.0 Apache License 2.0
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 Apache License 2.0
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd Apache License 2.0
|
||||
github.com/modern-go/reflect2 v1.0.2 Apache License 2.0
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 MIT license
|
||||
github.com/opencontainers/go-digest v1.0.0 Apache License 2.0
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible MIT license
|
||||
github.com/pkg/errors v0.9.1 2-clause BSD license
|
||||
github.com/pmezard/go-difflib v1.0.0 3-clause BSD license
|
||||
github.com/prometheus/client_model v0.2.0 Apache License 2.0
|
||||
github.com/russross/blackfriday v1.6.0 2-clause BSD license
|
||||
github.com/sirupsen/logrus v1.9.0 MIT license
|
||||
github.com/spf13/cobra v1.5.0 Apache License 2.0
|
||||
github.com/spf13/pflag v1.0.5 3-clause BSD license
|
||||
github.com/stretchr/testify v1.8.0 MIT license
|
||||
github.com/xlab/treeprint v1.1.0 MIT license
|
||||
go.opentelemetry.io/proto/otlp v0.18.0 Apache License 2.0
|
||||
go.starlark.net v0.0.0-20220203230714-bb14e151c28f 3-clause BSD license
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa 3-clause BSD license
|
||||
golang.org/x/mod v0.5.1 3-clause BSD license
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd 3-clause BSD license
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 3-clause BSD license
|
||||
golang.org/x/sys v0.0.0-20220804214406-8e32c043e418 3-clause BSD license
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 3-clause BSD license
|
||||
golang.org/x/text v0.3.7 3-clause BSD license
|
||||
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 3-clause BSD license
|
||||
golang.org/x/tools v0.1.5 3-clause BSD license
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 3-clause BSD license
|
||||
google.golang.org/appengine v1.6.7 Apache License 2.0
|
||||
google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e Apache License 2.0
|
||||
google.golang.org/grpc v1.44.0 Apache License 2.0
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 Apache License 2.0
|
||||
google.golang.org/protobuf v1.28.1 3-clause BSD license
|
||||
gopkg.in/inf.v0 v0.9.1 3-clause BSD license
|
||||
gopkg.in/yaml.v2 v2.4.0 Apache License 2.0, MIT license
|
||||
gopkg.in/yaml.v3 v3.0.1 Apache License 2.0, MIT license
|
||||
k8s.io/api v0.21.9 Apache License 2.0
|
||||
k8s.io/apiextensions-apiserver v0.21.9 Apache License 2.0
|
||||
k8s.io/apimachinery v0.21.9 3-clause BSD license, Apache License 2.0
|
||||
k8s.io/apiserver v0.21.9 Apache License 2.0
|
||||
k8s.io/cli-runtime v0.21.9 Apache License 2.0
|
||||
k8s.io/client-go v0.21.9 3-clause BSD license, Apache License 2.0
|
||||
github.com/emissary-ingress/code-generator (modified from k8s.io/code-generator) v0.21.10-rc.0.0.20220204004229-4708b255a33a Apache License 2.0
|
||||
k8s.io/component-base v0.21.9 Apache License 2.0
|
||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 Apache License 2.0
|
||||
k8s.io/klog/v2 v2.10.0 Apache License 2.0
|
||||
k8s.io/kube-openapi v0.0.0-20211110012726-3cc51fd1e909 Apache License 2.0
|
||||
k8s.io/kubectl v0.21.9 Apache License 2.0
|
||||
k8s.io/kubernetes v1.21.9 Apache License 2.0
|
||||
k8s.io/metrics v0.21.9 Apache License 2.0
|
||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176 3-clause BSD license, Apache License 2.0
|
||||
sigs.k8s.io/controller-runtime v0.9.7 Apache License 2.0
|
||||
github.com/emissary-ingress/controller-tools (modified from sigs.k8s.io/controller-tools) v0.6.3-0.20220204053320-db507acbb466 Apache License 2.0
|
||||
sigs.k8s.io/gateway-api v0.2.0 Apache License 2.0
|
||||
sigs.k8s.io/kustomize/api v0.8.8 Apache License 2.0
|
||||
sigs.k8s.io/kustomize/kyaml v0.10.17 Apache License 2.0, MIT license
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 Apache License 2.0
|
||||
sigs.k8s.io/yaml v1.3.0 3-clause BSD license, MIT license
|
||||
Name Version License(s)
|
||||
---- ------- ----------
|
||||
the Go language standard library ("std") v1.23.3 3-clause BSD license
|
||||
cel.dev/expr v0.19.2 Apache License 2.0
|
||||
dario.cat/mergo v1.0.1 3-clause BSD license
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c MIT license
|
||||
github.com/MakeNowJust/heredoc v1.0.0 MIT license
|
||||
github.com/Masterminds/goutils v1.1.1 Apache License 2.0
|
||||
github.com/Masterminds/semver v1.5.0 MIT license
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible MIT license
|
||||
github.com/Microsoft/go-winio v0.6.2 MIT license
|
||||
github.com/ProtonMail/go-crypto v1.1.5 3-clause BSD license
|
||||
github.com/antlr4-go/antlr/v4 v4.13.1 3-clause BSD license
|
||||
github.com/armon/go-metrics v0.4.1 MIT license
|
||||
github.com/beorn7/perks v1.0.1 MIT license
|
||||
github.com/blang/semver/v4 v4.0.0 MIT license
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 MIT license
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 Apache License 2.0
|
||||
github.com/cespare/xxhash/v2 v2.3.0 MIT license
|
||||
github.com/chai2010/gettext-go v1.0.3 3-clause BSD license
|
||||
github.com/cloudflare/circl v1.6.0 3-clause BSD license
|
||||
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 Apache License 2.0
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 3-clause BSD license
|
||||
github.com/datawire/dlib v1.3.1 Apache License 2.0
|
||||
github.com/datawire/dtest v0.0.0-20210928162311-722b199c4c2f Apache License 2.0
|
||||
github.com/LukeShu/go-mkopensource (modified from github.com/datawire/go-mkopensource) v0.0.0-20250206080114-4ff6b660d8d4 Apache License 2.0
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ISC license
|
||||
github.com/distribution/reference v0.6.0 Apache License 2.0
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 MIT license
|
||||
github.com/emirpasic/gods v1.18.1 2-clause BSD license, ISC license
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 Apache License 2.0
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 3-clause BSD license
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f MIT license
|
||||
github.com/fatih/camelcase v1.0.0 MIT license
|
||||
github.com/fatih/color v1.18.0 MIT license
|
||||
github.com/fsnotify/fsnotify v1.8.0 3-clause BSD license
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 MIT license
|
||||
github.com/go-errors/errors v1.5.1 MIT license
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 3-clause BSD license
|
||||
github.com/go-git/go-billy/v5 v5.6.2 Apache License 2.0
|
||||
github.com/go-git/go-git/v5 v5.13.2 Apache License 2.0
|
||||
github.com/go-logr/logr v1.4.2 Apache License 2.0
|
||||
github.com/go-logr/zapr v1.3.0 Apache License 2.0
|
||||
github.com/go-openapi/jsonpointer v0.21.0 Apache License 2.0
|
||||
github.com/go-openapi/jsonreference v0.21.0 Apache License 2.0
|
||||
github.com/go-openapi/swag v0.23.0 Apache License 2.0
|
||||
github.com/gobuffalo/flect v1.0.3 MIT license
|
||||
github.com/gogo/protobuf v1.3.2 3-clause BSD license
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 Apache License 2.0
|
||||
github.com/golang/protobuf v1.5.4 3-clause BSD license
|
||||
github.com/google/btree v1.1.3 Apache License 2.0
|
||||
github.com/google/cel-go v0.23.2 3-clause BSD license, Apache License 2.0
|
||||
github.com/google/gnostic-models v0.6.9 Apache License 2.0
|
||||
github.com/google/go-cmp v0.6.0 3-clause BSD license
|
||||
github.com/google/gofuzz v1.2.0 Apache License 2.0
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 Apache License 2.0
|
||||
github.com/google/uuid v1.6.0 3-clause BSD license
|
||||
github.com/gorilla/websocket v1.5.3 2-clause BSD license
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 MIT license
|
||||
github.com/hashicorp/consul/api v1.31.0 Mozilla Public License 2.0
|
||||
github.com/hashicorp/errwrap v1.1.0 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-hclog v1.6.3 MIT license
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-metrics v0.5.4 MIT license
|
||||
github.com/hashicorp/go-multierror v1.1.1 Mozilla Public License 2.0
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 Mozilla Public License 2.0
|
||||
github.com/hashicorp/golang-lru v1.0.2 Mozilla Public License 2.0
|
||||
github.com/hashicorp/hcl v1.0.0 Mozilla Public License 2.0
|
||||
github.com/hashicorp/serf v0.10.2 Mozilla Public License 2.0
|
||||
github.com/huandu/xstrings v1.5.0 MIT license
|
||||
github.com/imdario/mergo v0.3.16 3-clause BSD license
|
||||
github.com/inconshreveable/mousetrap v1.1.0 Apache License 2.0
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 MIT license
|
||||
github.com/josharian/intern v1.0.1-0.20211109044230-42b52b674af5 MIT license
|
||||
github.com/json-iterator/go v1.1.12 MIT license
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 MIT license
|
||||
github.com/kevinburke/ssh_config v1.2.0 MIT license
|
||||
github.com/klauspost/compress v1.17.11 3-clause BSD license, Apache License 2.0, MIT license
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de 3-clause BSD license
|
||||
github.com/magiconair/properties v1.8.9 2-clause BSD license
|
||||
github.com/mailru/easyjson v0.9.0 MIT license
|
||||
github.com/mattn/go-colorable v0.1.14 MIT license
|
||||
github.com/mattn/go-isatty v0.0.20 MIT license
|
||||
github.com/mitchellh/copystructure v1.2.0 MIT license
|
||||
github.com/mitchellh/go-homedir v1.1.0 MIT license
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 MIT license
|
||||
github.com/mitchellh/mapstructure v1.5.0 MIT license
|
||||
github.com/mitchellh/reflectwalk v1.0.2 MIT license
|
||||
github.com/moby/spdystream v0.5.0 Apache License 2.0
|
||||
github.com/moby/term v0.5.2 Apache License 2.0
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd Apache License 2.0
|
||||
github.com/modern-go/reflect2 v1.0.2 Apache License 2.0
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 MIT license
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 3-clause BSD license
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f 3-clause BSD license
|
||||
github.com/opencontainers/go-digest v1.0.0 Apache License 2.0
|
||||
github.com/pelletier/go-toml/v2 v2.2.3 MIT license
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible MIT license
|
||||
github.com/pjbgf/sha1cd v0.3.2 Apache License 2.0
|
||||
github.com/pkg/errors v0.9.1 2-clause BSD license
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 3-clause BSD license
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 3-clause BSD license
|
||||
github.com/prometheus/client_golang v1.20.5 3-clause BSD license, Apache License 2.0
|
||||
github.com/prometheus/client_model v0.6.1 Apache License 2.0
|
||||
github.com/prometheus/common v0.62.0 Apache License 2.0
|
||||
github.com/prometheus/procfs v0.15.1 Apache License 2.0
|
||||
github.com/russross/blackfriday/v2 v2.1.0 2-clause BSD license
|
||||
github.com/sagikazarmark/locafero v0.7.0 MIT license
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 3-clause BSD license
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 MIT license
|
||||
github.com/sirupsen/logrus v1.9.3 MIT license
|
||||
github.com/skeema/knownhosts v1.3.1 Apache License 2.0
|
||||
github.com/sourcegraph/conc v0.3.0 MIT license
|
||||
github.com/spf13/afero v1.12.0 Apache License 2.0
|
||||
github.com/spf13/cast v1.7.1 MIT license
|
||||
github.com/spf13/cobra v1.8.1 Apache License 2.0
|
||||
github.com/spf13/pflag v1.0.6 3-clause BSD license
|
||||
github.com/spf13/viper v1.19.0 MIT license
|
||||
github.com/stoewer/go-strcase v1.3.0 MIT license
|
||||
github.com/stretchr/testify v1.10.0 MIT license
|
||||
github.com/subosito/gotenv v1.6.0 MIT license
|
||||
github.com/vladimirvivien/gexe v0.4.1 MIT license
|
||||
github.com/x448/float16 v0.8.4 MIT license
|
||||
github.com/xanzy/ssh-agent v0.3.3 Apache License 2.0
|
||||
github.com/xlab/treeprint v1.2.0 MIT license
|
||||
go.opentelemetry.io/otel v1.34.0 Apache License 2.0
|
||||
go.opentelemetry.io/otel/trace v1.34.0 Apache License 2.0
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 Apache License 2.0
|
||||
go.uber.org/goleak v1.3.0 MIT license
|
||||
go.uber.org/multierr v1.11.0 MIT license
|
||||
go.uber.org/zap v1.27.0 MIT license
|
||||
golang.org/x/crypto v0.32.0 3-clause BSD license
|
||||
golang.org/x/exp v0.0.0-20250128182459-e0ece0dbea4c 3-clause BSD license
|
||||
golang.org/x/mod v0.23.0 3-clause BSD license
|
||||
golang.org/x/net v0.34.0 3-clause BSD license
|
||||
golang.org/x/oauth2 v0.26.0 3-clause BSD license
|
||||
golang.org/x/sync v0.11.0 3-clause BSD license
|
||||
golang.org/x/sys v0.30.0 3-clause BSD license
|
||||
golang.org/x/term v0.29.0 3-clause BSD license
|
||||
golang.org/x/text v0.22.0 3-clause BSD license
|
||||
golang.org/x/time v0.10.0 3-clause BSD license
|
||||
golang.org/x/tools v0.29.0 3-clause BSD license
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 Apache License 2.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250204164813-702378808489 Apache License 2.0
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250204164813-702378808489 Apache License 2.0
|
||||
google.golang.org/grpc v1.70.0 Apache License 2.0
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 Apache License 2.0
|
||||
google.golang.org/protobuf v1.36.4 3-clause BSD license
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 3-clause BSD license
|
||||
gopkg.in/inf.v0 v0.9.1 3-clause BSD license
|
||||
gopkg.in/ini.v1 v1.67.0 Apache License 2.0
|
||||
gopkg.in/warnings.v0 v0.1.2 2-clause BSD license
|
||||
gopkg.in/yaml.v2 v2.4.0 Apache License 2.0, MIT license
|
||||
gopkg.in/yaml.v3 v3.0.1 Apache License 2.0, MIT license
|
||||
k8s.io/api v0.32.1 Apache License 2.0
|
||||
k8s.io/apiextensions-apiserver v0.32.1 Apache License 2.0
|
||||
k8s.io/apimachinery v0.32.1 3-clause BSD license, Apache License 2.0
|
||||
k8s.io/apiserver v0.32.1 Apache License 2.0
|
||||
k8s.io/cli-runtime v0.32.1 Apache License 2.0
|
||||
k8s.io/client-go v0.32.1 3-clause BSD license, Apache License 2.0
|
||||
github.com/emissary-ingress/code-generator (modified from k8s.io/code-generator) v0.32.2-0.20250205235421-4d5bf4656f71 Apache License 2.0
|
||||
k8s.io/component-base v0.32.1 Apache License 2.0
|
||||
k8s.io/component-helpers v0.32.1 Apache License 2.0
|
||||
k8s.io/controller-manager v0.32.1 Apache License 2.0
|
||||
k8s.io/gengo/v2 v2.0.0-20250130153323-76c5745d3511 Apache License 2.0
|
||||
k8s.io/klog/v2 v2.130.1 Apache License 2.0
|
||||
k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 3-clause BSD license, Apache License 2.0, MIT license
|
||||
k8s.io/kubectl v0.32.1 Apache License 2.0
|
||||
k8s.io/kubernetes v1.32.1 Apache License 2.0
|
||||
k8s.io/metrics v0.32.1 Apache License 2.0
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 3-clause BSD license, Apache License 2.0
|
||||
sigs.k8s.io/controller-runtime v0.20.1 Apache License 2.0
|
||||
sigs.k8s.io/controller-tools v0.17.1 Apache License 2.0
|
||||
sigs.k8s.io/e2e-framework v0.6.0 Apache License 2.0
|
||||
sigs.k8s.io/gateway-api v0.2.0 Apache License 2.0
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 3-clause BSD license, Apache License 2.0
|
||||
sigs.k8s.io/kustomize/api v0.19.0 Apache License 2.0
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0 Apache License 2.0
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.5.0 Apache License 2.0
|
||||
sigs.k8s.io/yaml v1.4.0 3-clause BSD license, Apache License 2.0, MIT license
|
||||
|
||||
The Emissary-ingress Python code makes use of the following Free and Open Source
|
||||
libraries:
|
||||
|
||||
Name Version License(s)
|
||||
---- ------- ----------
|
||||
CacheControl 0.12.10 Apache License 2.0
|
||||
Cython 0.29.24 Apache License 2.0
|
||||
Flask 2.2.1 3-clause BSD license
|
||||
Jinja2 3.1.2 3-clause BSD license
|
||||
MarkupSafe 2.1.1 3-clause BSD license
|
||||
PyYAML 5.4.1 MIT license
|
||||
Werkzeug 2.2.1 3-clause BSD license
|
||||
appdirs 1.4.4 MIT license
|
||||
attrs 21.4.0 MIT license
|
||||
cachetools 5.0.0 MIT license
|
||||
certifi 2021.10.8 Mozilla Public License 2.0
|
||||
charset-normalizer 2.0.11 MIT license
|
||||
click 8.0.3 3-clause BSD license
|
||||
colorama 0.4.4 3-clause BSD license
|
||||
contextlib2 21.6.0 Python Software Foundation license
|
||||
distlib 0.3.3 Python Software Foundation license
|
||||
distro 1.6.0 Apache License 2.0
|
||||
dpath 2.0.6 MIT license
|
||||
durationpy 0.5 MIT license
|
||||
expiringdict 1.2.1 Apache License 2.0
|
||||
google-auth 2.6.0 Apache License 2.0
|
||||
gunicorn 20.1.0 MIT license
|
||||
html5lib 1.1 MIT license
|
||||
idna 3.3 3-clause BSD license
|
||||
importlib-metadata 4.12.0 Apache License 2.0
|
||||
itsdangerous 2.0.1 3-clause BSD license
|
||||
jsonpatch 1.32 3-clause BSD license
|
||||
jsonpointer 2.2 3-clause BSD license
|
||||
jsonschema 4.4.0 MIT license
|
||||
kubernetes 21.7.0 Apache License 2.0
|
||||
lockfile 0.12.2 MIT license
|
||||
msgpack 1.0.2 Apache License 2.0
|
||||
oauthlib 3.2.0 3-clause BSD license
|
||||
ordered-set 4.0.2 MIT license
|
||||
orjson 3.6.6 Apache License 2.0, MIT license
|
||||
packaging 20.9 2-clause BSD license, Apache License 2.0
|
||||
pep517 0.12.0 MIT license
|
||||
pip-tools 6.3.1 3-clause BSD license
|
||||
progress 1.6 ISC license
|
||||
prometheus-client 0.13.1 Apache License 2.0
|
||||
protobuf 3.19.4 3-clause BSD license
|
||||
pyasn1 0.4.8 2-clause BSD license
|
||||
pyasn1-modules 0.2.8 2-clause BSD license
|
||||
pyparsing 2.4.7 MIT license
|
||||
pyrsistent 0.18.1 MIT license
|
||||
python-dateutil 2.8.2 3-clause BSD license, Apache License 2.0
|
||||
python-json-logger 2.0.2 2-clause BSD license
|
||||
requests 2.27.1 Apache License 2.0
|
||||
requests-oauthlib 1.3.1 ISC license
|
||||
retrying 1.3.3 Apache License 2.0
|
||||
rsa 4.8 Apache License 2.0
|
||||
semantic-version 2.8.5 2-clause BSD license
|
||||
six 1.16.0 MIT license
|
||||
toml 0.10.2 MIT license
|
||||
tomli 1.2.2 MIT license
|
||||
typing-extensions 4.2.0 Python Software Foundation license
|
||||
urllib3 1.26.8 MIT license
|
||||
webencodings 0.5.1 3-clause BSD license
|
||||
websocket-client 1.2.3 Apache License 2.0
|
||||
zipp 3.8.1 MIT license
|
||||
Name Version License(s)
|
||||
---- ------- ----------
|
||||
Cython 0.29.37 Apache License 2.0
|
||||
Flask 3.1.0 3-clause BSD license
|
||||
Jinja2 3.1.6 3-clause BSD license
|
||||
MarkupSafe 3.0.2 2-clause BSD license
|
||||
PyYAML 6.0.1 MIT license
|
||||
Werkzeug 3.1.3 3-clause BSD license
|
||||
blinker 1.9.0 MIT license
|
||||
build 1.2.2.post1 MIT license
|
||||
certifi 2025.1.31 Mozilla Public License 2.0
|
||||
charset-normalizer 3.4.1 MIT license
|
||||
click 8.1.8 3-clause BSD license
|
||||
durationpy 0.9 MIT license
|
||||
expiringdict 1.2.2 Apache License 2.0
|
||||
gunicorn 23.0.0 MIT license
|
||||
idna 3.10 3-clause BSD license
|
||||
itsdangerous 2.2.0 3-clause BSD license
|
||||
jsonpatch 1.33 3-clause BSD license
|
||||
jsonpointer 3.0.0 3-clause BSD license
|
||||
orjson 3.10.15 Apache License 2.0, MIT license
|
||||
packaging 23.1 2-clause BSD license, Apache License 2.0
|
||||
pip-tools 7.3.0 3-clause BSD license
|
||||
prometheus_client 0.21.1 Apache License 2.0
|
||||
pyparsing 3.0.9 MIT license
|
||||
pyproject_hooks 1.2.0 MIT license
|
||||
python-json-logger 3.2.1 2-clause BSD license
|
||||
requests 2.32.3 Apache License 2.0
|
||||
semantic-version 2.10.0 2-clause BSD license
|
||||
typing_extensions 4.12.2 Python Software Foundation license
|
||||
urllib3 2.3.0 MIT license
|
||||
|
|
1087
DEVELOPING.md
1087
DEVELOPING.md
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,224 @@
|
|||
# Emissary-Ingress Architecture
|
||||
|
||||
In this document you will find information about the internal design and architecture of the Emissary-ingress (formerly known as Ambassador API Gateway). Emissary-ingress provides a Kubernetes-native load balancer, API gateway and ingress controller that is built on top of [Envoy Proxy](https://www.envoyproxy.io).
|
||||
|
||||
> Looking for end user guides for Emissary-ingress? You can check out the end user guides at <https://www.getambassador.io/docs/emissary/>.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Overview](#overview)
|
||||
- [Custom Resource Definitions (CRD)](#custom-resource-definitions-crd)
|
||||
- [Apiext](#apiext)
|
||||
- [Additional Reading](#additional-reading)
|
||||
- [Emissary-ingress Container](#emissary-ingress-container)
|
||||
- [Startup and Busyambassador](#startup-and-busyambassador)
|
||||
- [Entrypoint](#entrypoint)
|
||||
- [Watch All The Things (Watt)](#watch-all-the-things-watt)
|
||||
- [Diagd](#diagd)
|
||||
- [Ambex](#ambex)
|
||||
- [Envoy](#envoy)
|
||||
- [Testing Components](#testing-components)
|
||||
- [kat-client](#kat-client)
|
||||
- [kat-server](#kat-server)
|
||||
|
||||
## Overview
|
||||
|
||||
Emissary-ingress is a Kubernetes native API Gateway built on top of Envoy Proxy. We utilize Kubernetes CRDs to provide an expressive API to configure Envoy Proxy to handle routing traffic into your cluster.
|
||||
|
||||
Check [this blog post](https://blog.getambassador.io/building-ambassador-an-open-source-api-gateway-on-kubernetes-and-envoy-ed01ed520844) for additional context around the motivations and architecture decisions made for Emissary-ingress.
|
||||
|
||||
At the core of Emissary-ingress is Envoy Proxy which has very extensive configuration and extensions points. Getting this right can be challenging so Emissary-ingress provides Kubernetes Administrators and Developers a cloud-native way to configure Envoy using declarative yaml files. Here are the core components of Emissary-Ingress:
|
||||
|
||||
- CRDs - extend K8s to enable Emissary-ingress's abstractions (*generated yaml*)
|
||||
- Apiext - A server that implements the Webhook Conversion interface for CRD's (**own container**)
|
||||
- Diagd - provides diagnostic ui, translates snapshots/ir into envoy configuration (*in-process*)
|
||||
- Ambex - gRPC server implementation of envoy xDS for dynamic envoy configration (*in-process*)
|
||||
- Envoy Proxy - Proxy that handles routing all user traffic (*in-process*)
|
||||
- Ambassador Agent - provides connectivity between cluster and Ambassador Cloud. (*own container*)
|
||||
|
||||
## Custom Resource Definitions (CRD)
|
||||
|
||||
Kubernetes allows extending its API through the use of [Custom Resource Definitions](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) (aka CRDs) which allow solutions like Emissary-ingress to add custom resources to K8s and allow developers to treat them like any other K8s resource. CRDs provide validation, strong typing, structured data, versioning and are persisted in `etcd` along with the core Kubernetes resources.
|
||||
|
||||
Emissary-ingress provides a set of CRD's that are applied to a cluster and then are watched by Emissary-ingress. Emissary-ingress then uses the data from these CRD's along with the standard K8's resources (services, endpoints, etc...) to dynamically generate Envoy Proxy configuration. Depending on the version of Emissary-ingress there might be multiple versions of the CRD's that are supported.
|
||||
|
||||
You can read the user documentation (see additional reading below) to find out more about all the various CRDs that are used and how to configure them. For understanding, how they are defined you can take a look in `pkg/getambassador.io/*` directory. In this directory, you will find a directory per version of the CRDs and for each version you will see the `Golang` structs that define the data structures that are used for each of the Emissary-ingress custom resources. It's recommended to read the `doc.go` file for information about API guidelines followed and how the comment markers are used by the build system.
|
||||
|
||||
The build system (`make`) uses [controller-gen](https://book.kubebuilder.io/reference/controller-gen.html) to generate the required YAML representation for the custom resources that can be found at `pkg/getambassador.io/crds.yaml`. This file is auto-generated and checked into the repository. This is the file that is applied to a cluster extending the Kubernetes API. If any changes are made to the custom resources then it needs to be re-generated and checked-in as part of your PR. Running `make generate` will trigger the generation of this file and other generated files (`protobufs`) that are checked into the repository as well. If you want to see more about the build process take a look at `build-aux/generate.mk`.
|
||||
|
||||
> **Annotations**: K8s allows developers to provide Annotations as well to the standard K8s Resources (Services, Ingress, etc...). Annotations were the preferred method of configuring early versions of Emissary-ingress but annotations did not provide validation and can be error prone. However, with the introduction of CRD's these are now the preferred method and annotations are only supported for backwards compatibility. We won't discuss the annotations much here due to this but rather making you aware that they exist.
|
||||
|
||||
### Apiext
|
||||
|
||||
Kubernetes provides the ability to have multiple versions of Custom Resources similar to the core K8s resources but it is only capable of having a single `storage` version that is persisted in `etcd`. Custom Resource Definitions can define a `ConversionWebHook` that Kubernetes will call whenever it receives a version that is not the storage version.
|
||||
|
||||
You can check the current storage version by looking at `pkg/getambassador.io/crds.yaml` and searching for the `storage: true` field and seeing which version is the storage version of the custom resource (*at the time of writing this it is `v2`*).
|
||||
|
||||
The `apiext` container is the Emissary-ingress's server implementation for the conversion webhook that is registered with our custom resources. Each custom resource will have a section similar to the following in the `pkg/getambassador.io/crds.yaml`:
|
||||
|
||||
```yaml
|
||||
conversion:
|
||||
strategy: Webhook
|
||||
webhook:
|
||||
clientConfig:
|
||||
service:
|
||||
name: emissary-apiext
|
||||
namespace: emissary-system
|
||||
conversionReviewVersions:
|
||||
- v1
|
||||
```
|
||||
|
||||
This is telling the Kubernetes API Server to call a WebHook using a `Service` within the cluster that is called `emissary-apiext` that can be found in the `emissary-system` namespace. It also states that our server implementation supports the `v1` version of the WebHook protocol so the K8s API Server will send the request and expect the response in the format for `v1`.
|
||||
|
||||
The implementation of the `apiext` server can be found in `cmd/apiext` and it leverages the [controller-runtime](https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2) library which is vendored in `vendor/sigs.k8s.io/controller-runtime`. When this process starts up it will do the following:
|
||||
|
||||
1. Register the Emissary-Ingress CRD schemas using the Go structs described previously
|
||||
2. Ensure a self-signed certificate is generated that our server can register with for `https`.
|
||||
3. Kick off a Go routine that handles watching our CRD's and enriching the WebHook Conversion section (*outlined in yaml above*) so that it includes our self-signed certs, port and path that the apiext server is listening on.
|
||||
4. Starts up our two servers one for container liveness/readiness probes and one for the WebHook implementation that performs the conversion between CRD versions.
|
||||
|
||||
### Additional Reading
|
||||
|
||||
- [Ambassador Labs Docs - Custom Resources](https://www.getambassador.io/docs/emissary/latest/topics/running/host-crd/)
|
||||
- [Ambassador Labs Docs - Declarative Configuration](https://www.getambassador.io/docs/emissary/latest/topics/concepts/gitops-continuous-delivery/#policies-declarative-configuration-and-custom-resource-definitions)
|
||||
- [K8s Docs - Custom Resource Definitions](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/)
|
||||
- [K8s Docs - Version CRD's](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/)
|
||||
- [K8s Docs - Webhook Conversion](<https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion>)
|
||||
|
||||
## Emissary-ingress Container
|
||||
|
||||
One of the major goals of the Emissary-ingress is to simplify the deployment of Envoy Proxy in a cloud-native friendly way using containers and declarative CRD's. To honor this goal Emissary-ingress is packaged up into a single image with all the necessary components.
|
||||
|
||||
This section will give a high-level overview of each of these components and will help provide you direction on where you can find more information on each of the components.
|
||||
|
||||
### Startup and Busyambassador
|
||||
|
||||
Emissary-ingress has evolved over many years, many contributors and many versions of Kubernetes which has led to the internal components being implemented in different programming languages. Some of the components are pre-built binaries like envoy, first-party python programs and first-party golang binaries. To provide a single entrypoint for the container startup the Golang binary called `busyambassador` was introduced.
|
||||
|
||||
/buildroot/ambassador/python/entrypoint.sh
|
||||
|
||||
The `busyambassador` binary provides a busybox like interface that dispatches the CMD's that are provided to a container for the various configured Golang binaries. This enables a single image to support multiple binaries on startup that are declaratively set within a `deployment` in the `command` field when setting the image for a deployment. An example of this can be seen in the `ambassador-agent` deployment.
|
||||
|
||||
The image takes advantage of the `ENTRYPOINT` and `CMD` fields within a docker image manifest. You can see this in `builder/Dockerfile` in the final optimized image on the last line there is `ENTRYPOINT [ "bash", "/buildroot/ambassador/python/entrypoint.sh" ]`. This entrypoint cannot be overridden by the user and will run that bash script. By default the bash script will run the `entrypoint` binary which will be discussed in the next section but if passed a known binary name such as the `agent` example then `busyambassador` will run the correct command.
|
||||
|
||||
To learn more about `busyambassador` the code can be found:
|
||||
|
||||
- `cmd/busyambassador`
|
||||
- `pkg/busy`
|
||||
|
||||
> Note: the bash script will just exec into the `busyambassador` Golang binary in most cases and is still around for historical reasons and advanced debugging scenarios.
|
||||
|
||||
> Additional Reading: If you want to know more about how containers work with entrypoint and commands then take a look at this blogpost. <https://www.bmc.com/blogs/docker-cmd-vs-entrypoint/>
|
||||
|
||||
### Entrypoint
|
||||
|
||||
The `entrypoint` Golang binary is the default binary that `busyambassador` will run on container startup. It is the parent process for all the other processes that are run within the single `ambassador` image for Emissary-Ingress. At a high-level it starts and manages multiple go-routines, starts other child processes such as `diagd` (python program) and `envoy` (c++ compiled binary).
|
||||
|
||||
Here is a list of everything managed by the `entrypoint` binary. Each one is indicated by whether it's a child OS process that is started or a goroutine (*note: some of the OS processes are started/managed in goroutines but the core logic resides within the child process thus they are marked as such*).
|
||||
|
||||
| Description | Goroutine | OS.Exec |
|
||||
| ------------------------------------------------------------------------- | :----------------: | :----------------: |
|
||||
| `diagd` - admin ui & config processor | | :white_check_mark: |
|
||||
| `ambex` - the Envoy ADS Server | :white_check_mark: | |
|
||||
| `envoy` - proxy routing data | | :white_check_mark: |
|
||||
| SnapshotServer - expose in-memory snapshot over localhost | :white_check_mark: | |
|
||||
| ExternalSnapshotServer - Ambassador Cloud friendly exposed over localhost | :white_check_mark: | |
|
||||
| HealthCheck - endpoints for K8s liveness/readiness probes | :white_check_mark: | |
|
||||
| Watt - Watch k8s, consul & files for cluster changes | :white_check_mark: | |
|
||||
| Sidecar Processes - start various side car processes | | :white_check_mark: |
|
||||
|
||||
Some of these items will be discussed in more detail but the best places to get started looking at the `entrypoint` is by looking at `cmd/entrypoint/entrypoint.go`.
|
||||
|
||||
> To see how the container passes `entrypoint` as the default binary to run on container startup you can look at `python/entrypoint.sh` where it calls `exec busyambassador entrypoint "$@"` which will drop the shell process and will run the entrypoint process via busyambassador.
|
||||
|
||||
#### Watch All The Things (Watt)
|
||||
|
||||
Watch All The Things (aka Watt) is tasked with watching a lot of things, hence the name :smile:. Specifically, its job is to watch for changes in the K8s Cluster and potentially Consul and file system changes. Watt is the beginning point for the end-to-end data flow from developer applying the configuration to envoy being configured. You can find the code for this in the `cmd/entrypoint/watcher.go` file.
|
||||
|
||||
The watching of the K8s Cluster changes is where Emissary-ingress will get most of its configuration by looking for K8s Resources (e.g. services, ingresses, etc...) as well as the Emissary-ingress CRD Resources (e.g. Host, Mapping, Listeners, etc...). A `consulWatcher` will be started if a user has configured a Mapping to use the `ConsulResolver`. You can find this code in `cmd/entrypoint/consul.go`. The filesystem is also watched for changes to support `istio` and how it mounts certificates to the filesystem.
|
||||
|
||||
Here is the general flow:
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
subgraph watchers
|
||||
K8s & consulW(Consul) & file(FileSystem)
|
||||
K8s -.ConsulResolver.-> consulW
|
||||
K8s & consulW(Consul) & file(FileSystem) --> sgen
|
||||
end
|
||||
API[K8s API] --> K8s
|
||||
consul[Consul Manager] --> consulW
|
||||
istio[Pod Filesystem] --> file
|
||||
|
||||
sgen("Snapshot \n Generation") --> update("Update Snapshot \n(in-memory)") --> diagd(Notify diagD);
|
||||
update -.isEdgeStack?.-> AES(Notify AES)
|
||||
```
|
||||
|
||||
## Diagd
|
||||
|
||||
Provides two main functions:
|
||||
|
||||
1. A Diagnostic Admin UI for viewing the current state of Emissary-Ingress
|
||||
2. Processing Cluster changes into Envoy ready configuration
|
||||
   1. This process has all the steps I'm outlining below
|
||||
|
||||
- receives "CONFIG" event and pushes on queue
|
||||
- event queue loop listens for commands and pops them off
|
||||
- on CONFIG event it calls back to emissary Snapshot Server to grab current snapshot stored in-memory
|
||||
- It is serialized and stored in `/ambassador/snapshots/snapshot-tmp.yaml`.
|
||||
- A SecretHandler and Config is initialized
|
||||
- A ResourceFetcher (aka, parse the snapshot into an in-memory representation)
|
||||
- Generate IR and envoy configs (load_ir function)
|
||||
- Take each Resource generated in ResourceFetcher and add it to the Config object as strongly typed objects
|
||||
- Store Config Object in `/ambassador/snapshots/aconf-tmp.json`
|
||||
- Check Deltas for Mappings cache and determine if it needs to be reset
|
||||
- Create IR with a Config, Cache, and invalidated items
|
||||
- IR is generated which basically just converts our stuff to strongly typed generic "envoy" items (handling filters, clusters, listeners, removing duplicates, etc...)
|
||||
- IR is updated in-memory for diagd process
|
||||
- IR is persisted to temp storage in `/ambassador/snapshots/ir-tmp.json`
|
||||
- generate envoy config from IR and cache
|
||||
- Split envoy config into bootstrap config, ads_config and clustermap config
|
||||
- Validate econfig
|
||||
- Rotate Snapshots for each of the files `aconf`, `econf`, `ir`, `snapshot` that get persisted in the snapshot path `/ambassador/snapshots`.
|
||||
- Rotating them allows for seeing the history of snapshots up to a limit and then they are dropped
|
||||
  - this also renames the `-tmp` files written above into their final names
|
||||
- Persist bootstrap, envoy ads config and clustermap config to base directory:
|
||||
  - `/ambassador/bootstrap-ads.json` # this is used by envoy during startup to initially configure itself and let it know about the static ADS Service
|
||||
  - `/ambassador/envoy/envoy.json` # this is used in `ambex` to generate the ADS snapshots along with the fastPath items
|
||||
- `/ambassador/clustermap.json` # this might not be used either...
|
||||
- Notify `envoy` and `ambex` that a new snapshot has been persisted using signal SIGHUP
|
||||
- the Goroutine within `entrypoint` that starts up `envoy` is blocking waiting for this signal to start envoy
|
||||
- the `ambex` process continuously listens for this signal and it triggers a configuration update for ambex.
|
||||
- Update the appropriate status fields with metadata by making calls to the `kubestatus` binary found in `cmd/kubestatus` which handles the communication to the cluster
|
||||
|
||||
## Ambex
|
||||
|
||||
This is the gRPC server implementation of the envoy xDS v2 and v3 api's based on ...
|
||||
|
||||
- listens for SIGHUP from diagd
|
||||
- converts `envoy.json` into in-memory snapshots that are cached for v2/v3
|
||||
- implements ADS v2/v3 Apis that envoy is configured to listen to
|
||||
|
||||
## Envoy
|
||||
|
||||
We maintain our own [fork](https://github.com/datawire/envoy) of Envoy that includes some additional commits for implementing some features in Emissary-Ingress.
|
||||
|
||||
Envoy does all the heavy-lifting
|
||||
|
||||
- does all routing, filtering, TLS termination, metrics collection, tracing, etc...
|
||||
- It is bootstrapped from the output of diagd
|
||||
- It is dynamically updated using the xDS services and specifically the ADS service
|
||||
- Our implementation of this is `ambex`
|
||||
|
||||
## Testing Components
|
||||
|
||||
TODO: talk about testing performed by kat-client/kat-server.
|
||||
|
||||
### kat-client
|
||||
|
||||
TODO: discuss the purpose of kat-client
|
||||
|
||||
### kat-server
|
||||
|
||||
TODO: discuss the purpose of kat-server
|
|
@ -0,0 +1,4 @@
|
|||
Building Ambassador
|
||||
===================
|
||||
|
||||
The content in this document has been moved to [CONTRIBUTING.md](CONTRIBUTING.md).
|
|
@ -0,0 +1,929 @@
|
|||
# Developing Emissary-ingress
|
||||
|
||||
Welcome to the Emissary-ingress Community!
|
||||
|
||||
Thank you for contributing, we appreciate small and large contributions and look forward to working with you to make Emissary-ingress better.
|
||||
|
||||
This document is intended for developers looking to contribute to the Emissary-ingress project. In this document you will learn how to get your development environment setup and how to contribute to the project. Also, you will find more information about the internal components of Emissary-ingress and other questions about working on the project.
|
||||
|
||||
> Looking for end user guides for Emissary-ingress? You can check out the end user guides at <https://www.getambassador.io/docs/emissary/>.
|
||||
|
||||
After reading this document if you have questions we encourage you to join us on our [Slack channel](https://communityinviter.com/apps/cloud-native/cncf) in the #emissary-ingress channel.
|
||||
|
||||
- [Code of Conduct](../Community/CODE_OF_CONDUCT.md)
|
||||
- [Governance](../Community/GOVERNANCE.md)
|
||||
- [Maintainers](../Community/MAINTAINERS.md)
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Table of Contents](#table-of-contents)
|
||||
- [Development Setup](#development-setup)
|
||||
- [Step 1: Install Build Dependencies](#step-1-install-build-dependencies)
|
||||
- [Step 2: Clone Project](#step-2-clone-project)
|
||||
- [Step 3: Configuration](#step-3-configuration)
|
||||
- [Step 4: Building](#step-4-building)
|
||||
- [Step 5: Push](#step-5-push)
|
||||
- [Step 6: Deploy](#step-6-deploy)
|
||||
- [Step 7: Dev-loop](#step-7-dev-loop)
|
||||
- [What should I do next?](#what-should-i-do-next)
|
||||
- [Contributing](#contributing)
|
||||
- [Submitting a Pull Request (PR)](#submitting-a-pull-request-pr)
|
||||
- [Pull Request Review Process](#pull-request-review-process)
|
||||
- [Rebasing a branch under review](#rebasing-a-branch-under-review)
|
||||
- [Fixup commits during PR review](#fixup-commits-during-pr-review)
|
||||
- [Development Workflow](#development-workflow)
|
||||
- [Branching Strategy](#branching-strategy)
|
||||
- [Backport Strategy](#backport-strategy)
|
||||
- [What if I need a patch to land in a previous supported version?](#what-if-i-need-a-patch-to-land-in-a-previous-supported-version)
|
||||
- [What if my patch is only for a previous supported version?](#what-if-my-patch-is-only-for-a-previous-supported-version)
|
||||
- [What if I'm still not sure?](#what-if-im-still-not-sure)
|
||||
- [Merge Strategy](#merge-strategy)
|
||||
- [What about merge commit strategy?](#what-about-merge-commit-strategy)
|
||||
- [Contributing to the Docs](#contributing-to-the-docs)
|
||||
- [Advanced Topics](#advanced-topics)
|
||||
- [Running Emissary-ingress internals locally](#running-emissary-ingress-internals-locally)
|
||||
- [Setting up diagd](#setting-up-diagd)
|
||||
- [Changing the ambassador root](#changing-the-ambassador-root)
|
||||
- [Getting envoy](#getting-envoy)
|
||||
- [Shutting up the pod labels error](#shutting-up-the-pod-labels-error)
|
||||
- [Extra credit](#extra-credit)
|
||||
- [Debugging and Developing Envoy Configuration](#debugging-and-developing-envoy-configuration)
|
||||
- [Making changes to Envoy](#making-changes-to-envoy)
|
||||
- [1. Preparing your machine](#1-preparing-your-machine)
|
||||
- [2. Setting up your workspace to hack on Envoy](#2-setting-up-your-workspace-to-hack-on-envoy)
|
||||
- [3. Hacking on Envoy](#3-hacking-on-envoy)
|
||||
- [4. Building and testing your hacked-up Envoy](#4-building-and-testing-your-hacked-up-envoy)
|
||||
- [5. Test Devloop](#5-test-devloop)
|
||||
- [6. Protobuf changes](#6-protobuf-changes)
|
||||
- [7. Finalizing your changes](#7-finalizing-your-changes)
|
||||
- [8. Final Checklist](#8-final-checklist)
|
||||
- [Developing Emissary-ingress (Maintainers-only advice)](#developing-emissary-ingress-maintainers-only-advice)
|
||||
- [Updating license documentation](#updating-license-documentation)
|
||||
- [Upgrading Python dependencies](#upgrading-python-dependencies)
|
||||
- [FAQ](#faq)
|
||||
- [How do I find out what build targets are available?](#how-do-i-find-out-what-build-targets-are-available)
|
||||
- [How do I develop on a Mac with Apple Silicon?](#how-do-i-develop-on-a-mac-with-apple-silicon)
|
||||
- [How do I develop on Windows using WSL?](#how-do-i-develop-on-windows-using-wsl)
|
||||
- [How do I test using a private Docker repository?](#how-do-i-test-using-a-private-docker-repository)
|
||||
- [How do I change the loglevel at runtime?](#how-do-i-change-the-loglevel-at-runtime)
|
||||
- [Can I build from a docker container instead of on my local computer?](#can-i-build-from-a-docker-container-instead-of-on-my-local-computer)
|
||||
- [How do I clear everything out to make sure my build runs like it will in CI?](#how-do-i-clear-everything-out-to-make-sure-my-build-runs-like-it-will-in-ci)
|
||||
- [My editor is changing `go.mod` or `go.sum`, should I commit that?](#my-editor-is-changing-gomod-or-gosum-should-i-commit-that)
|
||||
- [How do I debug "This should not happen in CI" errors?](#how-do-i-debug-this-should-not-happen-in-ci-errors)
|
||||
- [How do I run Emissary-ingress tests?](#how-do-i-run-emissary-ingress-tests)
|
||||
- [How do I type check my python code?](#how-do-i-type-check-my-python-code)
|
||||
|
||||
## Development Setup
|
||||
|
||||
This section provides the steps for getting started developing on Emissary-ingress. There are a number of prerequisites that need to be setup. In general, our tooling tries to detect any missing requirements and provide a friendly error message. If you ever find that this is not the case please file an issue.
|
||||
|
||||
> **Note:** To enable developers contributing on Macs with Apple Silicon, we ensure that the artifacts are built for `linux/amd64`
|
||||
> rather than the host `linux/arm64` architecture. This can be overridden using the `BUILD_ARCH` environment variable. Pull Request are welcome :).
|
||||
|
||||
### Step 1: Install Build Dependencies
|
||||
|
||||
Here is a list of tools that are used by the build system to generate the build artifacts, packaging them up into containers, generating crds, helm charts and for running tests.
|
||||
|
||||
- git
|
||||
- make
|
||||
- docker (make sure you can run docker commands as your dev user without sudo)
|
||||
- bash
|
||||
- rsync
|
||||
- golang - `go.mod` for current version
|
||||
- python (>=3.10.9)
|
||||
- kubectl
|
||||
- a kubernetes cluster (you need permissions to create resources, i.e. crds, deployments, services, etc...)
|
||||
- a Docker registry
|
||||
- bsdtar (Provided by libarchive-tools on Ubuntu 19.10 and newer)
|
||||
- gawk
|
||||
- jq
|
||||
- helm
|
||||
|
||||
### Step 2: Clone Project
|
||||
|
||||
If you haven't already then this would be a good time to clone the project running the following commands:
|
||||
|
||||
```bash
|
||||
# clone to your preferred folder
|
||||
git clone https://github.com/emissary-ingress/emissary.git
|
||||
|
||||
# navigate to project
|
||||
cd emissary
|
||||
```
|
||||
|
||||
### Step 3: Configuration
|
||||
|
||||
You can configure the build system using environment variables, two required variables are used for setting the container registry and the kubeconfig used.
|
||||
|
||||
> **Important**: the test and build system perform destructive operations against your cluster. Therefore, we recommend that you
|
||||
> use a development cluster. Setting the DEV_KUBECONFIG variable described below ensures you don't accidentally perform actions on a production cluster.
|
||||
|
||||
Open a terminal in the location where you cloned the repository and run the following commands:
|
||||
|
||||
```bash
|
||||
# set container registry using `export DEV_REGISTRY=<your-registry>`
|
||||
# note: you need to be logged in and have permissions to push
|
||||
# Example:
|
||||
export DEV_REGISTRY=docker.io/parsec86
|
||||
|
||||
# set kube config file using `export DEV_KUBECONFIG=<dev-kubeconfig>`
|
||||
# your cluster needs the ability to read from the configured container registry
|
||||
export DEV_KUBECONFIG="$HOME/.kube/dev-config.yaml"
|
||||
|
||||
```
|
||||
|
||||
### Step 4: Building
|
||||
|
||||
The build system for this project leverages `make` and multi-stage `docker` builds to produce the following containers:
|
||||
|
||||
- `emissary.local/emissary` - single deployable container for Emissary-ingress
|
||||
- `emissary.local/kat-client` - test client container used for testing
|
||||
- `emissary.local/kat-server` - test server container used for testing
|
||||
|
||||
Using the terminal session you opened in step 2, run the following commands
|
||||
|
||||
>
|
||||
|
||||
```bash
|
||||
# This will pull and build the necessary docker containers and produce multiple containers.
|
||||
# If this is the first time running this command it will take a little bit while the base images are built up and cached.
|
||||
make images
|
||||
|
||||
# verify containers were successfully created, you should also see some of the intermediate builder containers as well
|
||||
docker images | grep emissary.local
|
||||
```
|
||||
|
||||
*What just happened?*
|
||||
|
||||
The build system generated a build container that pulled in envoy, the build dependencies, built various binaries from within this project and packaged them into a single deployable container. More information on this can be found in the [Architecture Document](ARCHITECTURE.md).
|
||||
|
||||
### Step 5: Push
|
||||
|
||||
Now that you have successfully built the containers it's time to push them to your container registry, which you set up in step 3.
|
||||
|
||||
In the same terminal session you can run the following command:
|
||||
|
||||
```bash
|
||||
# re-tags the images and pushes them to your configured container registry
|
||||
# docker must be able to login to your registry and you have to have push permissions
|
||||
make push
|
||||
|
||||
# you can view the newly tag images by running
|
||||
docker images | grep <your-registry>
|
||||
|
||||
# alternatively, we have two make targets that provide information as well
|
||||
make env
|
||||
|
||||
# or in a bash export friendly format
|
||||
make export
|
||||
```
|
||||
|
||||
### Step 6: Deploy
|
||||
|
||||
Now it's time to deploy the container out to your Kubernetes cluster that was configured in step 3. Hopefully, it is already becoming apparent that we love to leverage Make to handle the complexity for you :).
|
||||
|
||||
```bash
|
||||
# generate helm charts and K8's Configs with your container swapped in and apply them to your cluster
|
||||
make deploy
|
||||
|
||||
# check your cluster to see if emissary is running
|
||||
# note: kubectl doesn't know about DEV_KUBECONFIG so you may need to ensure KUBECONFIG is pointing to the correct cluster
|
||||
kubectl get pod -n ambassador
|
||||
```
|
||||
|
||||
🥳 If all has gone well then you should have your development environment setup for building and testing Emissary-ingress.
|
||||
|
||||
### Step 7: Dev-loop
|
||||
|
||||
Now that you are all setup and able to deploy a development container of Emissary-ingress to a cluster, it is time to start making some changes.
|
||||
|
||||
Lookup an issue that you want to work on, assign it to yourself and if you have any questions feel free to ping us on slack in the #emissary-dev channel.
|
||||
|
||||
Make a change to Emissary-ingress and when you want to test it in a live cluster just re-run
|
||||
|
||||
`make deploy`
|
||||
|
||||
This will:
|
||||
|
||||
- recompile the go binary
|
||||
- rebuild containers
|
||||
- push them to the docker registry
|
||||
- rebuild helm charts and manifest
|
||||
- reapply manifest to cluster and re-deploy Emissary-ingress to the cluster
|
||||
|
||||
> *Do I have to run the other make targets `make images` or `make push` ?*
|
||||
> No, you don't have to because `make deploy` will actually run those commands for you. The steps above were meant to introduce you to the various make targets so that you are aware of them and have options when developing.
|
||||
|
||||
### What should I do next?
|
||||
|
||||
Now that you have your dev system up and running, here is some additional content that we recommend you check out:
|
||||
|
||||
- [Emissary-ingress Architecture](ARCHITECTURE.md)
|
||||
- [Contributing Code](#contributing)
|
||||
- [Contributing to Docs](#contributing-to-the-docs)
|
||||
- [Advanced Topics](#advanced-topics)
|
||||
- [Faq](#faq)
|
||||
|
||||
## Contributing
|
||||
|
||||
This section goes over how to contribute code to the project and how to get started contributing. More information on how we manage our branches can be found below in [Development Workflow](#development-workflow).
|
||||
|
||||
Before contributing be sure to read our [Code of Conduct](../Community/CODE_OF_CONDUCT.md) and [Governance](../Community/GOVERNANCE.md) to get an understanding of how our project is structured.
|
||||
|
||||
### Submitting a Pull Request (PR)
|
||||
|
||||
> If you haven't set up your development environment then please see the [Development Setup](#development-setup) section.
|
||||
|
||||
When submitting a Pull Request (PR) here are a set of guidelines to follow:
|
||||
|
||||
1. Search for an [existing issue](https://github.com/emissary-ingress/emissary/issues) or create a [new issue](https://github.com/emissary-ingress/emissary/issues/new/choose).
|
||||
|
||||
2. Be sure to describe your proposed change and any open questions you might have in the issue. This allows us to collect historical context around an issue, provide feedback on the proposed solution and discuss what versions a fix should target.
|
||||
|
||||
3. If you haven't done so already create a fork of the repository and clone it locally
|
||||
|
||||
```shell
|
||||
git clone <your-fork>
|
||||
```
|
||||
|
||||
4. Cut a new patch branch from `master`:
|
||||
|
||||
```shell
|
||||
git checkout master
|
||||
git checkout -b my-patch-branch master
|
||||
```
|
||||
|
||||
5. Make necessary code changes.
|
||||
|
||||
- Make sure you include test coverage for the change, see [How do I run Tests](#how-do-i-run-emissary-ingress-tests)
|
||||
- Ensure code linting is passing by running `make lint`
|
||||
- Code changes must have associated documentation updates.
|
||||
- Make changes in <https://github.com/datawire/ambassador-docs> as necessary, and include a reference to those changes the pull request for your code changes.
|
||||
- See [Contributing to Docs](#contributing-to-the-docs) for more details.
|
||||
|
||||
> Smaller pull requests are easier to review and can get merged faster, thus reducing potential for merge conflicts, so it is recommended to keep them small and focused.
|
||||
|
||||
6. Commit your changes using descriptive commit messages.
|
||||
- we **require** that all commits are signed off so please be sure to commit using the `--signoff` flag, e.g. `git commit --signoff`
|
||||
- commit message should summarize the fix and motivation for the proposed fix. Include issue # that the fix looks to address.
|
||||
- we are "ok" with multiple commits but we may ask you to squash some commits during the PR review process
|
||||
|
||||
7. Push your branch to your forked repository:
|
||||
|
||||
> It is good practice to make sure your change is rebased on the latest master to ensure it will merge cleanly, so if it has been a while since you rebased on upstream you should do it now to ensure there are no merge conflicts
|
||||
|
||||
```shell
|
||||
git push origin my-patch-branch
|
||||
```
|
||||
|
||||
8. Submit a Pull Request from your fork targeting upstream `emissary/master`.
|
||||
|
||||
Thanks for your contribution! One of the [Maintainers](../Community/MAINTAINERS.md) will review your PR and discuss any changes that need to be made.
|
||||
|
||||
### Pull Request Review Process
|
||||
|
||||
This is an opportunity for the Maintainers to review the code for accuracy and ensure that it solves the problem outlined in the issue. This is an iterative process and meant to ensure the quality of the code base. During this process we may ask you to break up Pull Request into smaller changes, squash commits, rebase on master, etc...
|
||||
|
||||
Once you have been provided feedback:
|
||||
|
||||
1. Make the required updates to the code per the review discussion
|
||||
2. Retest the code and ensure linting is still passing
|
||||
3. Commit the changes and push to Github
|
||||
- see [Fixup Commits](#fixup-commits-during-pr-review) below
|
||||
4. Repeat these steps as necessary
|
||||
|
||||
Once you have **two approvals** then one of the Maintainers will merge the PR.
|
||||
|
||||
:tada: Thank you for contributing and being a part of the Emissary-ingress Community!
|
||||
|
||||
### Rebasing a branch under review
|
||||
|
||||
Many times the base branch will have new commits added to it which may cause merge conflicts with your open pull request. First, a good rule of thumb is to make pull requests small so that these conflicts are less likely to occur, but this is not always possible when you have multiple people working on similar features. Second, if it is just addressing commit feedback a `fixup` commit is also a good option so that the reviewers can see what changed since their last review.
|
||||
|
||||
If you need to address merge conflicts then it is preferred that you use **Rebase** on the base branch rather than merging base branch into the feature branch. This ensures that when the PR is merged that it will cleanly replay on top of the base branch ensuring we maintain a clean linear history.
|
||||
|
||||
To do a rebase you can do the following:
|
||||
|
||||
```shell
|
||||
# add emissary.git as a remote repository, only needs to be done once
|
||||
git remote add upstream https://github.com/emissary-ingress/emissary.git
|
||||
|
||||
# fetch upstream master
|
||||
git fetch upstream master
|
||||
|
||||
# checkout local master and update it from upstream master
|
||||
git checkout master
|
||||
git pull --ff-only upstream master
|
||||
|
||||
# rebase patch branch on local master
|
||||
git checkout my-patch-branch
|
||||
git rebase -i master
|
||||
```
|
||||
|
||||
Once the merge conflicts are addressed and you are ready to push the code up you will need to force push your changes because during the rebase process the commit sha's are re-written and it has diverged from what is in your remote fork (Github).
|
||||
|
||||
To force push a branch you can:
|
||||
|
||||
```shell
|
||||
git push origin my-patch-branch --force-with-lease
|
||||
```
|
||||
|
||||
> Note: `--force-with-lease` is recommended over `--force` because it is safer: it will check whether the remote branch had new commits added during your rebase. You can read more detail here: <https://itnext.io/git-force-vs-force-with-lease-9d0e753e8c41>
|
||||
|
||||
### Fixup commits during PR review
|
||||
|
||||
One of the major downsides to rebasing a branch is that it requires force pushing over the remote (Github) which then marks all the existing review history outdated. This makes it hard for a reviewer to figure out whether or not the new changes addressed the feedback.
|
||||
|
||||
One way you can help the reviewer out is by using **fixup** commits. Fixup commits are special git commits that append `fixup!` to the subject of a commit. `Git` provides tools for easily creating these and also squashing them after the PR review process is done.
|
||||
|
||||
Since this is a new commit on top of the other commits, you will not lose your previous review and the new commit can be reviewed independently to determine if the new changes addressed the feedback correctly. Then once the reviewers are happy we will ask you to squash them so that when it is merged we maintain a clean linear history.
|
||||
|
||||
Here is a quick read on it: <https://jordanelver.co.uk/blog/2020/06/04/fixing-commits-with-git-commit-fixup-and-git-rebase-autosquash/>
|
||||
|
||||
TL;DR;
|
||||
|
||||
```shell
|
||||
# make code change and create new commit
|
||||
git commit --fixup <sha>
|
||||
|
||||
# push to Github for review
|
||||
git push
|
||||
|
||||
# reviewers are happy and ask you to do a final rebase before merging
|
||||
git rebase -i --autosquash master
|
||||
|
||||
# final push before merging
|
||||
git push --force-with-lease
|
||||
```
|
||||
|
||||
## Development Workflow
|
||||
|
||||
This section introduces the development workflow used for this repository. It is recommended that Contributors, Release Engineers, and Maintainers all familiarize themselves with this content.
|
||||
|
||||
### Branching Strategy
|
||||
|
||||
This repository follows a trunk based development workflow. Depending on what article you read there are slight nuances to this so this section will outline how this repository interprets that workflow.
|
||||
|
||||
The most important branch is `master`; this is our **Next Release** version and it should always be in a shippable state. This means that CI should be green and at any point we can decide to ship a new release from it. In a traditional trunk based development workflow, developers are encouraged to land partially finished work daily and to keep that work hidden behind feature flags. This repository does **NOT** follow that and instead if code lands on master it is something we are comfortable with shipping.
|
||||
|
||||
We ship release candidate (RC) builds from the `master` branch (current major) and also from `release/v{major.minor}` branches (last major version) during our development cycles. Therefore, it is important that it remains shippable at all times!
|
||||
|
||||
When we do a final release then we will cut a new `release/v{major.minor}` branch. These are long lived release branches which capture a snapshot in time for that release. For example here are some of the current release branches (as of writing this):
|
||||
|
||||
- release/v3.2
|
||||
- release/v3.1
|
||||
- release/v3.0
|
||||
- release/v2.4
|
||||
- release/v2.3
|
||||
- release/v1.14
|
||||
|
||||
These branches contain the codebase as it was at that time when the release was done. These branches have branch protection enabled to ensure that they are not removed or accidentally overwritten. If we needed to do a security fix or bug patch then we may cut a new `.Z` patch release from an existing release branch. For example, the `release/v2.4` branch is currently on `2.4.1`.
|
||||
|
||||
As you can see we currently support multiple major versions of Emissary-ingress and you can read more about our [End-of-Life Policy](https://www.getambassador.io/docs/emissary/latest/about/aes-emissary-eol/).
|
||||
|
||||
For more information on our current RC and Release process you can find that in our [Release Wiki](https://github.com/emissary-ingress/emissary/wiki).
|
||||
|
||||
### Backport Strategy
|
||||
|
||||
Since we follow a trunk based development workflow this means that the majority of the time your patch branch will be based off from `master` and that most Pull Request will target `master`.
|
||||
|
||||
This ensures that we do not miss bug fixes or features for the "Next" shippable release and simplifies the mental-model for deciding how to get started contributing code.
|
||||
|
||||
#### What if I need a patch to land in a previous supported version?
|
||||
|
||||
Let's say I have a bug fix for CRD round trip conversion for AuthService, which is affecting both `v2.y` and `v3.y`.
|
||||
|
||||
First within the issue we should discuss what versions we want to target. This can depend on current cycle work and any upcoming releases we may have.
|
||||
|
||||
The general rules we follow are:
|
||||
|
||||
1. land patch in "next" version which is `master`
|
||||
2. backport patch to any `release/v{major}.{minor}` branches
|
||||
|
||||
So, let's say we discuss it and say that the "next" major version is a long ways away so we want to do a z patch release on our current minor version(`v3.2`) and we also want to do a z patch release on our last supported major version (`v2.4`).
|
||||
|
||||
This means that these patches need to land in three separate branches:
|
||||
|
||||
1. `master` - next release
|
||||
2. `release/v3.2` - patch release
|
||||
3. `release/v2.4` - patch release
|
||||
|
||||
In this scenario, we first ask you to land the patch in the `master` branch and then provide separate PR's with the commits backported onto the `release/v*` branches.
|
||||
|
||||
> Recommendation: using the `git cherry-pick -x` will add the source commit sha to the commit message. This helps with tracing work back to the original commit.
|
||||
|
||||
#### What if my patch is only for a previous supported version?
|
||||
|
||||
Although, this should be an edge case, it does happen where the code has diverged enough that a fix may only be relevant to an existing supported version. In these cases we may need to do a patch release for that older supported version.
|
||||
|
||||
A good example, if we were to find a bug in the Envoy v2 protocol configuration we would only want to target the v2 release.
|
||||
|
||||
In this scenario, the base branch that we would create our feature branch off from would be the latest `minor` version for that release. As of writing this, that would be the `release/v2.4` branch. We would **not** need to target master.
|
||||
|
||||
But, let's say during our fix we notice other things that need to be addressed that would also need to be fixed in `master`. Then you need to submit a **separate Pull Request** that should first land on master and then follow the normal backporting process for the other patches.
|
||||
|
||||
#### What if I'm still not sure?
|
||||
|
||||
This is what the issue discussions and discussions in Slack are for, so feel free to ping us in the `#emissary-dev` channel on Slack to discuss directly with us and we can help guide you.
|
||||
|
||||
### Merge Strategy
|
||||
|
||||
> The audience for this section is the Maintainers but also beneficial for Contributors so that they are familiar with how the project operates.
|
||||
|
||||
Having a clean linear commit history for a repository makes it easier to understand what is being changed and reduces the mental load for newcomers to the project.
|
||||
|
||||
To maintain a clean linear commit history the following rules should be followed:
|
||||
|
||||
First, always rebase patch branch on to base branch. This means **NO** merge commits from merging base branch into the patch branch. This can be accomplished using git rebase.
|
||||
|
||||
```shell
|
||||
# first, make sure you pull latest upstream changes
|
||||
git fetch upstream
|
||||
git checkout master
|
||||
git pull --ff-only upstream master
|
||||
|
||||
# checkout patch branch and rebase interactive
|
||||
# you may have merge conflicts you need to resolve
|
||||
git checkout my-patch-branch
|
||||
git rebase -i master
|
||||
```
|
||||
|
||||
> Note: this does rewrite your commit shas so be aware when sharing branches with co-workers.
|
||||
|
||||
Once the Pull Request is reviewed and has **two approvals** then a Maintainer can merge. Maintainers should prefer the following merge strategies:
|
||||
|
||||
1. rebase and merge
|
||||
2. squash merge
|
||||
|
||||
When `rebase and merge` is used your commits are played on top of the base branch so that it creates a clean linear history. This will maintain all the commits from the Pull Request. In most cases this should be the **preferred** merge strategy.
|
||||
|
||||
When a Pull Request has lots of fixup commits, or pr feedback fixes then you should ask the Contributor to squash them as part of the PR process.
|
||||
|
||||
If the contributor is unable to squash them then using a `squash merge` in some cases makes sense. **IMPORTANT**, when this does happen it is important that the commit messages are cleaned up and not just blindly accepted as proposed by GitHub. Since it is easy to miss that cleanup step, this should be used less frequently compared to `rebase and merge`.
|
||||
|
||||
#### What about merge commit strategy?
|
||||
|
||||
> The audience for this section is the Maintainers but also beneficial for Contributors so that they are familiar with how the project operates.
|
||||
|
||||
When maintaining a linear commit history, each commit tells the story of what was changed in the repository. When using `merge commits` it
|
||||
adds an additional commit to the history that is not necessary because the commit history and PR history already tell the story.
|
||||
|
||||
Now `merge commits` can be useful when you are concerned with not rewriting the commit sha. Based on the current release process which includes using `rel/v` branches that are tagged and merged into `release/v` branches we must use a `merge commit` when merging these branches. This ensures that the commit sha a Git Tag is pointing at still exists once merged into the `release/v` branch.
|
||||
|
||||
## Contributing to the Docs
|
||||
|
||||
The Emissary-ingress community will all benefit from having documentation that is useful and correct. If you have found an issue with the end user documentation, then please help us out by submitting an issue and/or pull request with a fix!
|
||||
|
||||
The end user documentation for Emissary-ingress lives in a different repository and can be found at <https://github.com/datawire/ambassador-docs>.
|
||||
|
||||
See this repository for details on how to contribute to either a `pre-release` or already-released version of Emissary-ingress.
|
||||
|
||||
## Advanced Topics
|
||||
|
||||
This section is for more advanced topics that provide more detailed instructions. Make sure you go through the Development Setup and read the Architecture document before exploring these topics.
|
||||
|
||||
### Running Emissary-ingress internals locally
|
||||
|
||||
The main entrypoint is written in go. It strives to be as compatible as possible
|
||||
with the normal go toolchain. You can run it with:
|
||||
|
||||
```bash
|
||||
go run ./cmd/busyambassador entrypoint
|
||||
```
|
||||
|
||||
Of course just because you can run it this way does not mean it will succeed.
|
||||
The entrypoint needs to launch `diagd` and `envoy` in order to function, and it
|
||||
also expects to be able to write to the `/ambassador` directory.
|
||||
|
||||
#### Setting up diagd
|
||||
|
||||
If you want to hack on diagd, it's easiest to set up a virtualenv with an editable
|
||||
copy and launch your `go run` from within that virtualenv. Note that these
|
||||
instructions depend on the virtualenvwrapper
|
||||
(<https://virtualenvwrapper.readthedocs.io/en/latest/>) package:
|
||||
|
||||
```bash
|
||||
# Create a virtualenv named venv with all the python requirements
|
||||
# installed.
|
||||
python3 -m venv venv
|
||||
. venv/bin/activate
|
||||
# If you're doing this in Datawire's apro.git, then:
|
||||
cd ambassador
|
||||
# Update pip and install dependencies
|
||||
pip install --upgrade pip
|
||||
pip install orjson # see below
|
||||
pip install -r builder/requirements.txt
|
||||
# Create an editable installation of ambassador:
|
||||
pip install -e python/
|
||||
# Check that we do indeed have diagd in our path.
|
||||
which diagd
|
||||
# If you're doing this in Datawire's apro.git, then:
|
||||
cd ..
|
||||
```
|
||||
|
||||
(Note: it shouldn't be necessary to install `orjson` by hand. The fact that it is
|
||||
at the moment is an artifact of the way Ambassador builds currently happen.)
|
||||
|
||||
#### Changing the ambassador root
|
||||
|
||||
You should now be able to launch ambassador if you set the
|
||||
`ambassador_root` environment variable to a writable location:
|
||||
|
||||
ambassador_root=/tmp go run ./cmd/busyambassador entrypoint
|
||||
|
||||
#### Getting envoy
|
||||
|
||||
If you do not have envoy in your path already, the entrypoint will use
|
||||
docker to run it.
|
||||
|
||||
#### Shutting up the pod labels error
|
||||
|
||||
An astute observer of the logs will notice that ambassador complains
|
||||
vociferously that pod labels are not mounted in the ambassador
|
||||
container. To reduce this noise, you can:
|
||||
|
||||
```bash
|
||||
mkdir /tmp/ambassador-pod-info && touch /tmp/ambassador-pod-info/labels
|
||||
```
|
||||
|
||||
#### Extra credit
|
||||
|
||||
When you run ambassador locally it will configure itself exactly as it
|
||||
would in the cluster. That means with two caveats you can actually
|
||||
interact with it and it will function normally:
|
||||
|
||||
1. You need to run `telepresence connect` or equivalent so it can
|
||||
connect to the backend services in its configuration.
|
||||
|
||||
2. You need to supply the host header when you talk to it.
|
||||
|
||||
### Debugging and Developing Envoy Configuration
|
||||
|
||||
Envoy configuration is generated by the ambassador compiler. Debugging
|
||||
the ambassador compiler by running it in kubernetes is very slow since
|
||||
we need to push both the code and any relevant kubernetes resources
|
||||
into the cluster. The following sections will provide tips for improving
|
||||
this development experience.
|
||||
|
||||
### Making changes to Envoy
|
||||
|
||||
Emissary-ingress is built on top of Envoy and leverages a vendored version of Envoy (*we track upstream very closely*). This section will go into how to make changes to the Envoy that is packaged with Emissary-ingress.
|
||||
|
||||
This is a bit more complex than anyone likes, but here goes:
|
||||
|
||||
#### 1. Preparing your machine
|
||||
|
||||
Building and testing Envoy can be very resource intensive. A laptop
|
||||
often can build Envoy... if you plug in an external hard drive, point
|
||||
a fan at it, and leave it running overnight and most of the next day.
|
||||
At Ambassador Labs, we'll often spin up a temporary build machine in GCE, so
|
||||
that we can build it very quickly.
|
||||
|
||||
As of Envoy 1.15.0, we've measured the resource use to build and test
|
||||
it as:
|
||||
|
||||
> | Command | Disk Size | Disk Used | Duration[1] |
|
||||
> |--------------------|-----------|-----------|-------------|
|
||||
> | `make update-base` | 450G | 12GB | ~11m |
|
||||
> | `make check-envoy` | 450G | 424GB | ~45m |
|
||||
>
|
||||
> [1] On a "Machine type: custom (32 vCPUs, 512 GB memory)" VM on GCE,
|
||||
> with the following entry in its `/etc/fstab`:
|
||||
>
|
||||
> ```bash
|
||||
> tmpfs:docker /var/lib/docker tmpfs size=450G 0 0
|
||||
> ```
|
||||
|
||||
If you have the RAM, we've seen huge speed gains from doing the builds
|
||||
and tests on a RAM disk (see the `/etc/fstab` line above).
|
||||
|
||||
#### 2. Setting up your workspace to hack on Envoy
|
||||
|
||||
1. From your `emissary.git` checkout, get Emissary-ingress's current
|
||||
version of the Envoy sources, and create a branch from that:
|
||||
|
||||
```shell
|
||||
make $PWD/_cxx/envoy
|
||||
git -C _cxx/envoy checkout -b YOUR_BRANCHNAME
|
||||
```
|
||||
2. To build Envoy in FIPS mode, set the following variable:
|
||||
|
||||
```shell
|
||||
export FIPS_MODE=true
|
||||
```
|
||||
|
||||
It is important to note that while building Envoy in FIPS mode is
|
||||
required for FIPS compliance, additional steps may be necessary.
|
||||
Emissary does not claim to be FIPS compliant or certified.
|
||||
See [here](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ssl#fips-140-2) for more information on FIPS and Envoy.
|
||||
|
||||
> _NOTE:_ FIPS_MODE is NOT supported by the emissary-ingress maintainers, but we provide it for developers as a convenience
|
||||
|
||||
#### 3. Hacking on Envoy
|
||||
|
||||
Modify the sources in `./_cxx/envoy/`, or update the branch and/or `ENVOY_COMMIT` as necessary in `./_cxx/envoy.mk`
|
||||
|
||||
#### 4. Building and testing your hacked-up Envoy
|
||||
|
||||
> See `./_cxx/envoy.mk` for the full list of targets.
|
||||
|
||||
Multiple phony targets are provided so that developers can run the steps they are interested in when developing; here are a few of the key ones:
|
||||
|
||||
- `make update-base`: will perform all the steps necessary to verify, build envoy, build docker images, push images to the container repository and compile the updated protos.
|
||||
|
||||
- `make build-envoy`: will build the envoy binaries using the same build container as the upstream Envoy project. Build outputs are mounted to the `_cxx/envoy-docker-build` directory and Bazel will write the results there.
|
||||
|
||||
- `make build-base-envoy-image`: will use the release outputs from building envoy to generate a new `base-envoy` container which is then used in the main emissary-ingress container build.
|
||||
|
||||
- `make push-base-envoy`: will push the built container to the remote container repository.
|
||||
|
||||
- `make check-envoy`: will use the build docker container to run the Envoy test suite against the currently checked out envoy in the `_cxx/envoy` folder.
|
||||
|
||||
- `make envoy-shell`: will run the envoy build container and open a bash shell session. The `_cxx/envoy` folder is volume mounted into the container, and the user is set to the `envoybuild` user in the container so that you are not running as root, which helps keep builds hermetic.
|
||||
|
||||
#### 5. Test Devloop
|
||||
|
||||
Running the Envoy test suite will compile all the test targets. This is a slow process and can use lots of disk space.
|
||||
|
||||
The Envoy Inner Devloop for build and testing:
|
||||
|
||||
- You can make a change to Envoy code and run the whole test by just calling `make check-envoy`
|
||||
- You can run a specific test instead of the whole test suite by setting the `ENVOY_TEST_LABEL` environment variable.
|
||||
- For example, to run just the unit tests in `test/common/network/listener_impl_test.cc`, you should run:
|
||||
|
||||
```shell
|
||||
ENVOY_TEST_LABEL='//test/common/network:listener_impl_test' make check-envoy
|
||||
```
|
||||
|
||||
- Alternatively, you can run `make envoy-shell` to get a bash shell into the Docker container that does the Envoy builds and you are free to interact with `Bazel` directly.
|
||||
|
||||
Interpreting the test results:
|
||||
|
||||
- If you see the following message, don't worry, it's harmless; the tests still ran:
|
||||
|
||||
```text
|
||||
There were tests whose specified size is too big. Use the --test_verbose_timeout_warnings command line option to see which ones these are.
|
||||
```
|
||||
|
||||
The message means that the test passed, but it passed too
|
||||
quickly, and Bazel is suggesting that you declare it as smaller.
|
||||
Something along the lines of "This test only took 2s, but you
|
||||
declared it as being in the 60s-300s ('moderate') bucket,
|
||||
consider declaring it as being in the 0s-60s ('short')
|
||||
bucket".
|
||||
|
||||
   Don't be confused (as I was) into thinking that it was saying
|
||||
that the test was too big and was skipped and that you need to
|
||||
throw more hardware at it.
|
||||
|
||||
- **Build or test Emissary-ingress** with the usual `make` commands, with
|
||||
the exception that you MUST run `make update-base` first whenever
|
||||
Envoy needs to be recompiled; it won't happen automatically. So
|
||||
`make test` to build-and-test Emissary-ingress would become
|
||||
`make update-base && make test`, and `make images` to just build
|
||||
Emissary-ingress would become `make update-base && make images`.
|
||||
|
||||
The Envoy changes with Emissary-ingress:
|
||||
|
||||
- Either run `make update-base` to build and push a new base container, and then run `make test` for the Emissary-ingress test suite.
|
||||
- If you do not want to push the container you can instead:
|
||||
- Build Envoy - `make build-envoy`
|
||||
- Build container - `make build-base-envoy-image`
|
||||
- Test Emissary - `make test`
|
||||
|
||||
#### 6. Protobuf changes
|
||||
|
||||
If you made any changes to the Protocol Buffer files or if you bumped versions of Envoy then you
|
||||
should make sure that you are re-compiling the Protobufs so that they are available and checked-in
|
||||
to the emissary.git repository.
|
||||
|
||||
```sh
|
||||
make compile-envoy-protos
|
||||
```
|
||||
|
||||
This will copy over the raw proto files, compile them, and copy the generated Go code over to the emissary-ingress repository.
|
||||
|
||||
#### 7. Finalizing your changes
|
||||
|
||||
> NOTE: we are no longer accepting PR's in `datawire/envoy.git`.
|
||||
|
||||
If you have custom changes then land them in your custom envoy repository and update the `ENVOY_COMMIT` and `ENVOY_DOCKER_REPO` variable in `_cxx/envoy.mk` so that the image will be pushed to the correct repository.
|
||||
|
||||
Then run `make update-base`, which does all the necessary steps; assuming that was successful, you are all good.
|
||||
|
||||
**For maintainers:**
|
||||
|
||||
You will want to make sure that the image is pushed to the backup container registries:
|
||||
|
||||
```shell
|
||||
# upload image to the mirror in GCR
|
||||
SHA=GET_THIS_FROM_THE_make_update-base_OUTPUT
|
||||
TAG="envoy-0.$SHA.opt"
|
||||
docker pull "docker.io/emissaryingress/base-envoy:$TAG"
|
||||
docker tag "docker.io/emissaryingress/base-envoy:$TAG" "gcr.io/datawire/ambassador-base:$TAG"
|
||||
docker push "gcr.io/datawire/ambassador-base:$TAG"
|
||||
```
|
||||
|
||||
#### 8. Final Checklist
|
||||
|
||||
**For Maintainers Only**
|
||||
|
||||
Here is a checklist of things to do when bumping the `base-envoy` version:
|
||||
|
||||
- [ ] The image has been pushed to...
|
||||
- [ ] `docker.io/emissaryingress/base-envoy`
|
||||
- [ ] `gcr.io/datawire/ambassador-base`
|
||||
- [ ] The `datawire/envoy.git` commit has been tagged as `datawire-$(git describe --tags --match='v*')`
|
||||
(the `--match` is to prevent `datawire-*` tags from stacking on each other).
|
||||
- [ ] It's been tested with...
|
||||
- [ ] `make check-envoy`
|
||||
|
||||
The `check-envoy-version` CI job will double check all these things, with the exception of running
|
||||
the Envoy tests. If the `check-envoy-version` is failing then double check the above, fix them and
|
||||
re-run the job.
|
||||
|
||||
### Developing Emissary-ingress (Maintainers-only advice)
|
||||
|
||||
At the moment, these techniques will only work internally to Maintainers. Mostly
|
||||
this is because they require credentials to access internal resources at the
|
||||
moment, though in several cases we're working to fix that.
|
||||
|
||||
#### Updating license documentation
|
||||
|
||||
When new dependencies are added or existing ones are updated, run
|
||||
`make generate` and commit changes to `DEPENDENCIES.md` and
|
||||
`DEPENDENCY_LICENSES.md`
|
||||
|
||||
#### Upgrading Python dependencies
|
||||
|
||||
Delete `python/requirements.txt`, then run `make generate`.
|
||||
|
||||
If there are some dependencies you don't want to upgrade, but want to
|
||||
upgrade everything else, then
|
||||
|
||||
1. Remove from `python/requirements.txt` all of the entries except
|
||||
for those you want to pin.
|
||||
2. Delete `python/requirements.in` (if it exists).
|
||||
3. Run `make generate`.
|
||||
|
||||
> **Note**: If you are updating orjson you will need to also update `docker/base-python/Dockerfile` before running `make generate` for the new version. orjson uses rust bindings and the default wheels on PyPI rely on glibc. Because our base python image is Alpine based, it is built from scratch using rustc to build a musl-compatible version.
|
||||
|
||||
> :warning: You may run into an error when running `make generate` where it can't detect the licenses for new or upgraded dependencies, which is needed so that we can properly generate DEPENDENCIES.md and DEPENDENCY_LICENSES.md. If that is the case, you may also have to update `build-aux/tools/src/py-mkopensource/main.go:parseLicenses` for any license changes, then run `make generate` again.
|
||||
|
||||
## FAQ
|
||||
|
||||
This section contains a set of Frequently Asked Questions that may answer a question you have. Also, feel free to ping us in Slack.
|
||||
|
||||
### How do I find out what build targets are available?
|
||||
|
||||
Use `make help` and `make targets` to see what build targets are
|
||||
available along with documentation for what each target does.
|
||||
|
||||
### How do I develop on a Mac with Apple Silicon?
|
||||
|
||||
To ensure that developers using a Mac with Apple Silicon can contribute, the build system ensures
|
||||
the build artifacts are `linux/amd64` rather than the host architecture. This behavior can be overridden
|
||||
using the `BUILD_ARCH` environment variable (e.g. `BUILD_ARCH=linux/arm64 make images`).
|
||||
|
||||
### How do I develop on Windows using WSL?
|
||||
|
||||
- [WSL 2](https://learn.microsoft.com/en-us/windows/wsl/)
|
||||
- [Docker Desktop for Windows](https://docs.docker.com/desktop/windows/wsl/)
|
||||
- [VS Code](https://code.visualstudio.com/)
|
||||
|
||||
### How do I test using a private Docker repository?
|
||||
|
||||
If you are pushing your development images to a private Docker repo,
|
||||
then:
|
||||
|
||||
```sh
|
||||
export DEV_USE_IMAGEPULLSECRET=true
|
||||
export DOCKER_BUILD_USERNAME=...
|
||||
export DOCKER_BUILD_PASSWORD=...
|
||||
```
|
||||
|
||||
and the test machinery should create an `imagePullSecret` from those Docker credentials such that it can pull the images.
|
||||
|
||||
### How do I change the loglevel at runtime?
|
||||
|
||||
```console
|
||||
curl localhost:8877/ambassador/v0/diag/?loglevel=debug
|
||||
```
|
||||
|
||||
Note: This affects diagd and Envoy, but NOT the AES `amb-sidecar`.
|
||||
See the AES `CONTRIBUTING.md` for how to do that.
|
||||
|
||||
### Can I build from a docker container instead of on my local computer?
|
||||
|
||||
If you want to build within a container instead of setting up dependencies on your local machine then you can run the build within a docker container and leverage "Docker in Docker" to build it.
|
||||
|
||||
1. `docker pull docker:latest`
|
||||
2. `docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -it docker:latest sh`
|
||||
3. `apk add --update --no-cache bash build-base go curl rsync python3 python2 git libarchive-tools gawk jq`
|
||||
4. `git clone https://github.com/emissary-ingress/emissary.git && cd emissary`
|
||||
5. `make images`
|
||||
|
||||
Steps 1 and 2 are run on your machine, and 3 - 5 are from within the docker container. The base image is a "Docker in Docker" image, run with `-v /var/run/docker.sock:/var/run/docker.sock` in order to connect to your local daemon from the docker inside the container. More info on Docker in Docker [here](https://hub.docker.com/_/docker).
|
||||
|
||||
The images will be created and tagged as defined above, and will be available in docker on your local machine.
|
||||
|
||||
### How do I clear everything out to make sure my build runs like it will in CI?
|
||||
|
||||
Use `make clobber` to completely remove all derived objects, all cached artifacts, everything, and get back to a clean slate. This is recommended if you change branches within a clone, or if you need to `make generate` when you're not *certain* that your last `make generate` was using the same Envoy version.
|
||||
|
||||
Use `make clean` to remove derived objects, but *not* clear the caches.
|
||||
|
||||
### My editor is changing `go.mod` or `go.sum`, should I commit that?
|
||||
|
||||
If you notice this happening, run `make go-mod-tidy`, and commit that.
|
||||
|
||||
(If you're in Ambassador Labs, you should do this from `apro/`, not
|
||||
`apro/ambassador/`, so that apro.git's files are included too.)
|
||||
|
||||
### How do I debug "This should not happen in CI" errors?
|
||||
|
||||
These checks indicate that some output file changed in the middle of a
|
||||
run, when it should only change if a source file has changed. Since
|
||||
CI isn't editing the source files, this shouldn't happen in CI!
|
||||
|
||||
This is problematic because it means that running the build multiple
|
||||
times can give different results, and that the tests are probably not
|
||||
testing the same image that would be released.
|
||||
|
||||
These checks will show you a patch showing how the output file
|
||||
changed; it is up to you to figure out what is happening in the
|
||||
build/test system that would cause that change in the middle of a run.
|
||||
For the most part, this is pretty simple... except when the output
|
||||
file is a Docker image; you just see that one image hash is different
|
||||
than another image hash.
|
||||
|
||||
Fortunately, the failure showing the changed image hash is usually
|
||||
immediately preceded by a `docker build`. Earlier in the CI output,
|
||||
you should find an identical `docker build` command from the first time it
|
||||
ran. In the second `docker build`'s output, each step should say
|
||||
`---> Using cache`; the first few steps will say this, but at some
|
||||
point later steps will stop saying this; find the first step that is
|
||||
missing the `---> Using cache` line, and try to figure out what could
|
||||
have changed between the two runs that would cause it to not use the
|
||||
cache.
|
||||
|
||||
If that step is an `ADD` command that is adding a directory, the
|
||||
problem is probably that you need to add something to `.dockerignore`.
|
||||
To help figure out what you need to add, try adding a `RUN find
|
||||
DIRECTORY -exec ls -ld -- {} +` step after the `ADD` step, so that you
|
||||
can see what it added, and see what is different on that between the
|
||||
first and second `docker build` commands.
|
||||
|
||||
### How do I run Emissary-ingress tests?
|
||||
|
||||
- `export DEV_REGISTRY=<your-dev-docker-registry>` (you need to be logged in and have permission to push)
|
||||
- `export DEV_KUBECONFIG=<your-dev-kubeconfig>`
|
||||
|
||||
If you want to run the Go tests for `cmd/entrypoint`, you'll need `diagd`
|
||||
in your `PATH`. See the instructions below about `Setting up diagd` to do
|
||||
that.
|
||||
|
||||
| Group | Command |
|
||||
| --------------- | ---------------------------------------------------------------------- |
|
||||
| All Tests | `make test` |
|
||||
| All Golang | `make gotest` |
|
||||
| All Python | `make pytest` |
|
||||
| Some/One Golang | `make gotest GOTEST_PKGS=./cmd/entrypoint GOTEST_ARGS="-run TestName"` |
|
||||
| Some/One Python | `make pytest PYTEST_ARGS="-k TestName"` |
|
||||
|
||||
Please note the python tests use a local cache to speed up test
|
||||
results. If you make a code update that changes the generated envoy
|
||||
configuration, those tests will fail and you will need to update the
|
||||
python test cache.
|
||||
|
||||
Note that it is invalid to run one of the `main[Plain.*]` Python tests
|
||||
without running all of the other `main[Plain*]` tests; the test will
|
||||
fail to run (not even showing up as a failure or xfail--it will fail
|
||||
to run at all). For example, `PYTEST_ARGS="-k WebSocket"` would match
|
||||
the `main[Plain.WebSocketMapping-GRPC]` test, and that test would fail
|
||||
to run; one should instead say `PYTEST_ARGS="-k Plain or WebSocket"`
|
||||
to avoid breaking the sub-tests of "Plain".
|
||||
|
||||
### How do I type check my python code?
|
||||
|
||||
Ambassador uses Python 3 type hinting and the `mypy` static type checker to
|
||||
help find bugs before runtime. If you haven't worked with hinting before, a
|
||||
good place to start is
|
||||
[the `mypy` cheat sheet](https://mypy.readthedocs.io/en/latest/cheat_sheet_py3.html).
|
||||
|
||||
New code must be hinted, and the build process will verify that the type
|
||||
check passes when you `make test`. Fair warning: this means that
|
||||
PRs will not pass CI if the type checker fails.
|
||||
|
||||
We strongly recommend using an editor that can do realtime type checking
|
||||
(at Datawire we tend to use PyCharm and VSCode a lot, but many many editors
|
||||
can do this now) and also running the type checker by hand before submitting
|
||||
anything:
|
||||
|
||||
- `make lint/mypy` will check all the Ambassador code
|
||||
|
||||
Ambassador code should produce *no* warnings and *no* errors.
|
||||
|
||||
If you're concerned that the mypy cache is somehow wrong, delete the
|
||||
`.mypy_cache/` directory to clear the cache.
|
||||
|
|
@ -15,7 +15,7 @@ The Emissary Security Team (EMST) is responsible for organizing the entire respo
|
|||
communication and external disclosure but will need help from relevant developers to successfully
|
||||
run this process.
|
||||
|
||||
The initial Emissary Security Team will consist of all [maintainers](MAINTAINERS.md), with communications
|
||||
The initial Emissary Security Team will consist of all [maintainers](../Community/MAINTAINERS.md), with communications
|
||||
initially handled via email to [secalert@datawire.io](mailto:secalert@datawire.io). In the future,
|
||||
we may change the membership of the EMST, or the communication mechanism.
|
||||
|
||||
|
@ -58,8 +58,8 @@ best protect our users.
|
|||
### Released versions and the `master` branch
|
||||
|
||||
If the vulnerability affects a supported version (typically the _most recent_ minor release, e.g.
|
||||
1.13), then the full security release process described in this document will be activated. A
|
||||
patch release will be created (e.g. 1.13.10) with the fix, and the fix will also be made on
|
||||
1.13), then the full security release process described in this document will be activated. A
|
||||
patch release will be created (e.g. 1.13.10) with the fix, and the fix will also be made on
|
||||
`master`.
|
||||
|
||||
If a vulnerability affects only `master`, the fix will be incorporated into the next release.
|
||||
|
@ -89,7 +89,7 @@ example:
|
|||
Note that while we generally consider the installation mechanisms provided by the Emissary-ingress
|
||||
project (our published Helm charts and manifests) "safe", there is no way to guarantee that the
|
||||
published installation mechanisms will always work in any specific setting. Ultimately, Emissary
|
||||
operators need to understand the impact of their own configurations, especially in larger
|
||||
operators need to understand the impact of their own configurations, especially in larger
|
||||
installations.
|
||||
|
||||
### Fix Team Organization
|
||||
|
@ -111,9 +111,9 @@ These steps should be completed within the 1-7 days of Disclosure.
|
|||
[CVSS](https://www.first.org/cvss/specification-document) using the [CVSS
|
||||
Calculator](https://www.first.org/cvss/calculator/3.0). The Fix Lead makes the final call on the
|
||||
calculated CVSS; it is better to move quickly than to spend time making the CVSS perfect.
|
||||
- The Fix Team will work per the usual [Emissary Development Process](DEVELOPING.md), including
|
||||
- The Fix Team will work per the usual [Emissary Development Process](CONTRIBUTING.md), including
|
||||
fix branches, PRs, reviews, etc.
|
||||
- The Fix Team will notify the Fix Lead that work on the fix branch is complete once the fix is
|
||||
- The Fix Team will notify the Fix Lead that work on the fix branch is complete once the fix is
|
||||
present in the relevant release branch(es) in the private security repo.
|
||||
|
||||
If the CVSS score is under 4.0 ([a low severity score](https://www.first.org/cvss/specification-document#i5))
|
||||
|
@ -144,7 +144,7 @@ patches, understand exact mitigation steps, etc.
|
|||
potentially conflicts as the fix is cherry-picked around branches.
|
||||
- The Fix Lead will request a CVE from [DWF](https://github.com/distributedweaknessfiling/DWF-Documentation)
|
||||
and include the CVSS and release details.
|
||||
- The Fix Lead will announce in `#emissary` and `#general` on the [Emissary Slack](https://a8r.io/slack)
|
||||
- The Fix Lead will announce in `#emissary` and `#general` on the [Emissary Slack](https://a8r.io/slack)
|
||||
stating the new releases, the CVE number, and the relevant merged PRs to get wide distribution and
|
||||
user action. As much as possible this message should be actionable and include links on how to apply
|
||||
the fix to user's environments; this can include links to external distributor documentation.
|
||||
|
@ -161,5 +161,5 @@ These steps should be completed 1-3 days after the Release Date. The retrospecti
|
|||
and any critiques of the response and release process.
|
||||
- Maintainers and Fix Team are also encouraged to send their own feedback on the process to
|
||||
[secalert@datawire.io](mailto:secalert@datawire.io), or to discuss it in `#emissary-dev`
|
||||
on the [Emissary Slack](https://a8r.io/slack). Honest critique is the only way we will
|
||||
on the [Emissary Slack](https://a8r.io/slack). Honest critique is the only way we will
|
||||
improve as a community.
|
|
@ -0,0 +1,100 @@
|
|||
A quick primer on GNU Make syntax
|
||||
=================================
|
||||
|
||||
This tries to cover the syntax that is hard to ctrl-f for in
|
||||
<https://www.gnu.org/software/make/manual/make.html> (err, hard to
|
||||
C-s for in `M-: (info "Make")`).
|
||||
|
||||
At the core is a "rule":
|
||||
|
||||
target: dependency1 dependency2
|
||||
command to run
|
||||
|
||||
If `target` is something that isn't a real file (like 'build', 'lint', or
|
||||
'test'), then it should be marked as "phony":
|
||||
|
||||
target: dependency1 dependency2
|
||||
command to run
|
||||
.PHONY: target
|
||||
|
||||
You can write reusable "pattern" rules:
|
||||
|
||||
%.o: %.c
|
||||
command to run
|
||||
|
||||
Of course, if you don't have variables for the inputs and outputs,
|
||||
it's hard to write a "command to run" for a pattern rule. The
|
||||
variables that you should know are:
|
||||
|
||||
$@ = the target
|
||||
$^ = the list of dependencies (space separated)
|
||||
$< = the first (left-most) dependency
|
||||
$* = the value of the % glob in a pattern rule
|
||||
|
||||
Each of these has `$(@D)` and `$(@F)` variants that are the
|
||||
directory-part and file-part of each value, respectively.
|
||||
|
||||
I think those are easy enough to remember mnemonically:
|
||||
- $@ is where you should direct the output at.
|
||||
- $^ points up at the dependency list
|
||||
- $< points at the left-most member of the dependency list
|
||||
- $* is the % glob; "*" is well-known as the glob char in other languages
|
||||
|
||||
Make will do its best to guess whether to apply a pattern rule for a
|
||||
given file. Or, you can explicitly tell it by using a 3-field
|
||||
(2-colon) version:
|
||||
|
||||
foo.o bar.o: %.o: %.c
|
||||
command to run
|
||||
|
||||
In a non-pattern rule, if there are multiple targets listed, then it
|
||||
is as if the rule were duplicated for each target:
|
||||
|
||||
target1 target2: deps
|
||||
command to run
|
||||
|
||||
# is the same as
|
||||
|
||||
target1: deps
|
||||
command to run
|
||||
target2: deps
|
||||
command to run
|
||||
|
||||
Because of this, if you have a command that generates multiple
|
||||
outputs, it _must_ be a pattern rule:
|
||||
|
||||
%.c %.h: %.y
|
||||
command to run
|
||||
|
||||
Normally, Make crawls the entire tree of dependencies, updating a file
|
||||
if any of its dependencies have been updated. There's a really poorly
|
||||
named feature called "order-only" dependencies:
|
||||
|
||||
target: normal-deps | order-only-deps
|
||||
|
||||
Dependencies after the `|` are created if they don't exist, but if
|
||||
they already exist, then don't bother updating them.
|
||||
|
||||
Tips:
|
||||
-----
|
||||
|
||||
- Use absolute filenames; it's silly, but can often reduce headaches.
|
||||
Use `$(OSS_HOME)` to spell the absolute filenames.
|
||||
|
||||
Though, this isn't so helpful in Emissary anymore. The main case
|
||||
where this helps is when the same directory-specific Makefile might
|
||||
be `include`d from several different top-level directories; as the
|
||||
Helm chart Makefile used to be from the main Emissary Makefile, or
|
||||
the Emissary Makefile used to be from Edge Stack. But Emissary no
|
||||
longer does this.
|
||||
|
||||
- If you have a multiple-output command where the output files have
|
||||
dissimilar names, have `%` be just the directory (the above tip
|
||||
about using absolute filenames makes this easier--this is a real
|
||||
pain if you have such a target in the top-level directory and
|
||||
aren't using absolute filenames).
|
||||
|
||||
- It can be useful to use the 2-colon form of a pattern rule when
|
||||
writing a rule for just one file; it lets you use `%` and `$*` to
|
||||
avoid repeating yourself, which can be especially useful with long
|
||||
filenames.
|
|
@ -1,46 +0,0 @@
|
|||
# Emissary Ingress Governance
|
||||
|
||||
This document defines the project governance for Emissary Ingress.
|
||||
|
||||
## Overview
|
||||
|
||||
**Emissary Ingress** is an open source project that is committed to building a thriving community. This document outlines how the community governs itself. All community members must adhere to the [Code of Conduct](https://github.com/emissary-ingress/community/blob/main/CODE_OF_CONDUCT.md)
|
||||
|
||||
## Community Roles
|
||||
|
||||
* **Users:** Members that engage with the Emissary Ingress community via any medium (Slack, GitHub, mailing lists, etc.).
|
||||
* **Contributors:** Regular contributions to projects (documentation, code reviews, responding to issues, participation in proposal discussions, contributing code, etc.).
|
||||
* **Maintainers**: The Emissary Ingress project leaders. They are responsible for the overall health and direction of the project; final reviewers of PRs and responsible for releases. Maintainers are expected to triage issues, proactively fix bugs, review PRs to ensure code quality, and contribute documentation.
|
||||
|
||||
### Maintainers
|
||||
|
||||
New maintainers must be nominated by an existing maintainer and must be elected by a supermajority of existing maintainers. Likewise, maintainers can be removed by a supermajority of the existing maintainers or can resign by notifying one of the maintainers.
|
||||
|
||||
If you're interested in becoming a maintainer, contact an existing maintainer to express your interest. A good way to start is to fix some small issues (if you haven't already), working with one or more existing maintainers. As you build up a representative body of contributions, the maintainers will provide regular feedback on your progress towards maintainer status. After you have built up that representative body of contributions (usually over a period of 3-4 months), the maintainers will meet to discuss and vote on granting maintainer status.
|
||||
|
||||
### Decision Making
|
||||
|
||||
Ideally, all project decisions are resolved by consensus. If impossible, any maintainer may call a vote. Unless otherwise specified in this document, any vote will be decided by a majority of maintainers.
|
||||
|
||||
### Supermajority
|
||||
|
||||
A supermajority is defined as two-thirds of members in the group.
|
||||
|
||||
A supermajority of [Maintainers](#maintainers) is required for adding or removing maintainers.
|
||||
|
||||
### Voting Process
|
||||
|
||||
Voting on decisions will be conducted using GitHub:
|
||||
|
||||
- Open an issue, if an appropriate issue is not already present.
|
||||
- Write a description of the issue at hand in a comment on the issue. The description must include:
|
||||
- A summary of the vote to be taken;
|
||||
- Whether the vote requires a majority or a supermajority; and
|
||||
- The meaning of a yay vote and a nay vote, if not obvious.
|
||||
|
||||
For example, when voting to add a maintainer, the meanings of yay and nay are straightforward. On the other hand, for a choice between two alternatives, the comment should spell out which alternative is supported by a yay vote, and which by a nay vote.
|
||||
- Maintainers vote by placing emoji on the comment: :thumbsup: for yay, :thumbsdown: for nay.
|
||||
|
||||
## Updating Governance
|
||||
|
||||
All substantive changes in Governance require a supermajority agreement by all maintainers.
|
|
@ -1,38 +0,0 @@
|
|||
# Emissary Maintainers
|
||||
|
||||
[GOVERNANCE.md](https://github.com/emissary-ingress/community/blob/main/GOVERNANCE.md)
|
||||
describes governance guidelines and maintainer responsibilities.
|
||||
|
||||
## Maintainers
|
||||
|
||||
Maintainers are listed in alphabetical order.
|
||||
|
||||
| Maintainer | GitHub ID | Affiliation |
|
||||
| ---------------- | --------------------------------------------- | --------------------------------------------------- |
|
||||
| Alex Gervais | [alexgervais](https://github.com/alexgervais) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Alice Wasko | [aliceproxy](https://github.com/aliceproxy) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| David Dymko | [ddymko](https://github.com/ddymko) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Flynn | [kflynn](https://github.com/kflynn) | [Buoyant](https://www.buoyant.io) |
|
||||
| Hamzah Qudsi | [haq204](https://github.com/haq204) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Lance Austin | [lanceea](https://github.com/lanceea) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Luke Shumaker | [lukeshu](https://github.com/lukeshu) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
| Rafael Schloming | [rhs](https://github.com/rhs) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
|
||||
|
||||
|
||||
In addition to the maintainers, Emissary releases may be created by any
|
||||
of the following (also listed in alphabetical order):
|
||||
|
||||
| Releaser | GitHub ID | Affiliation |
|
||||
| ------------ | ----------------------------------- | --------------------------------------------------- |
|
||||
| Will Hardin | [w-h37](https://github.com/w-h37) | [Ambassador Labs](https://www.github.com/datawire/) |
|
||||
|
||||
## Maintainers Emeriti
|
||||
|
||||
* Aidan Hahn ([aidanhahn](https://github.com/aidanhahn))
|
||||
* Alix Cook ([acookin](https://github.com/acookin))
|
||||
* John Esmet ([esmet](https://github.com/esmet))
|
||||
|
||||
## Releasers Emeriti
|
||||
|
||||
* Noah Krause ([iNoahNothing](https://github.com/iNoahNothing))
|
|
@ -1,16 +0,0 @@
|
|||
# Community Meeting Schedule
|
||||
|
||||
## Weekly Troubleshooting
|
||||
|
||||
We hold troubleshooting sessions once a week on Thursdays, at 2:30 pm Eastern. These sessions are a way to connect in person with project maintainers and get help with any problems you might be encountering while using Emissary-ingress.
|
||||
|
||||
**Zoom Meeting Link**: https://us02web.zoom.us/j/83032365622
|
||||
|
||||
|
||||
## Monthly Contributors Meeting
|
||||
|
||||
The Emissary-ingress Contributors Meeting is held on the first Wednesday of every month at 1pm Eastern. The focus of this meeting is discussion of technical issues related to development of Emissary-ingress.
|
||||
|
||||
New contributors are always welcome! Check out our [contributor's guide](https://github.com/emissary-ingress/emissary/blob/master/DEVELOPING.md) to learn how you can help make Emissary-ingress better.
|
||||
|
||||
**Zoom Meeting Link**: https://us02web.zoom.us/j/85154302860
|
20
Makefile
20
Makefile
|
@ -12,9 +12,9 @@ BUILD_ARCH ?= linux/amd64
|
|||
# Bootstrapping the build env
|
||||
#
|
||||
# _go-version/deps and _go-version/cmd should mostly only be used via
|
||||
# go-version.txt (in generate.mk), but we have declared them early
|
||||
# here for bootstrapping the build env. Don't use them directly (not
|
||||
# via go-version.txt) except for bootstrapping.
|
||||
# go-version.txt (in deps.mk), but we have declared them early here
|
||||
# for bootstrapping the build env. Don't use them directly (not via
|
||||
# go-version.txt) except for bootstrapping.
|
||||
_go-version/deps = docker/base-python/Dockerfile
|
||||
_go-version/cmd = sed -En 's,.*https://dl\.google\.com/go/go([0-9a-z.-]*)\.linux-amd64\.tar\.gz.*,\1,p' < $(_go-version/deps)
|
||||
ifneq ($(MAKECMDGOALS),$(OSS_HOME)/build-aux/go-version.txt)
|
||||
|
@ -56,14 +56,16 @@ endif
|
|||
|
||||
# Everything else...
|
||||
|
||||
NAME ?= emissary
|
||||
EMISSARY_NAME ?= emissary
|
||||
|
||||
_git_remote_urls := $(shell git remote | xargs -n1 git remote get-url --all)
|
||||
IS_PRIVATE ?= $(findstring private,$(_git_remote_urls))
|
||||
|
||||
include $(OSS_HOME)/build-aux/ci.mk
|
||||
include $(OSS_HOME)/build-aux/deps.mk
|
||||
include $(OSS_HOME)/build-aux/main.mk
|
||||
include $(OSS_HOME)/build-aux/builder.mk
|
||||
include $(OSS_HOME)/build-aux/check.mk
|
||||
include $(OSS_HOME)/builder/builder.mk
|
||||
include $(OSS_HOME)/_cxx/envoy.mk
|
||||
include $(OSS_HOME)/releng/release.mk
|
||||
|
||||
|
@ -155,3 +157,11 @@ python-dev-setup:
|
|||
|
||||
# activate venv
|
||||
@echo "run 'source ./venv/bin/activate' to activate venv in local shell"
|
||||
|
||||
# re-generate docs
|
||||
.PHONY: clean-changelog
|
||||
clean-changelog:
|
||||
rm -f CHANGELOG.md
|
||||
|
||||
.PHONY: generate-changelog
|
||||
generate-changelog: clean-changelog $(PWD)/CHANGELOG.md
|
||||
|
|
|
@ -0,0 +1,176 @@
|
|||
# Emissary-ingress 3.10 Quickstart
|
||||
|
||||
**We recommend using Helm** to install Emissary.
|
||||
|
||||
### Installing if you're starting fresh
|
||||
|
||||
**If you are already running Emissary and just want to upgrade, DO NOT FOLLOW
|
||||
THESE DIRECTIONS.** Instead, check out "Upgrading from an earlier Emissary"
|
||||
below.
|
||||
|
||||
If you're starting from scratch and you don't need to worry about older CRD
|
||||
versions, install using `--set enableLegacyVersions=false` to avoid installing
|
||||
the old versions of the CRDs and the conversion webhook:
|
||||
|
||||
```bash
|
||||
helm install emissary-crds \
|
||||
--namespace emissary --create-namespace \
|
||||
oci://ghcr.io/emissary-ingress/emissary-crds-chart --version=3.10.0 \
|
||||
--set enableLegacyVersions=false \
|
||||
--wait
|
||||
```
|
||||
|
||||
This will install only v3alpha1 CRDs and skip the conversion webhook entirely.
|
||||
It will create the `emissary` namespace for you, but there won't be anything
|
||||
in it at this point.
|
||||
|
||||
Next up, install Emissary itself, with `--set waitForApiext.enabled=false` to
|
||||
tell Emissary not to wait for the conversion webhook to be ready:
|
||||
|
||||
```bash
|
||||
helm install emissary \
|
||||
--namespace emissary \
|
||||
oci://ghcr.io/emissary-ingress/emissary-ingress --version=3.10.0 \
|
||||
--set waitForApiext.enabled=false \
|
||||
--wait
|
||||
```
|
||||
|
||||
### Upgrading from an earlier Emissary
|
||||
|
||||
First, install the CRDs and the conversion webhook:
|
||||
|
||||
```bash
|
||||
helm install emissary-crds \
|
||||
--namespace emissary-system --create-namespace \
|
||||
oci://ghcr.io/emissary-ingress/emissary-crds-chart --version=3.10.0 \
|
||||
--wait
|
||||
```
|
||||
|
||||
This will install all the versions of the CRDs (v1, v2, and v3alpha1) and the
|
||||
conversion webhook into the `emissary-system` namespace. Once that's done, you'll install Emissary itself:
|
||||
|
||||
```bash
|
||||
helm install emissary \
|
||||
--namespace emissary --create-namespace \
|
||||
oci://ghcr.io/emissary-ingress/emissary-ingress --version=3.10.0 \
|
||||
--wait
|
||||
```
|
||||
|
||||
### Using Emissary
|
||||
|
||||
In either case above, you should have a running Emissary behind the Service
|
||||
named `emissary-emissary-ingress` in the `emissary` namespace. How exactly you
|
||||
connect to that Service will vary with your cluster provider, but you can
|
||||
start with
|
||||
|
||||
```bash
|
||||
kubectl get svc -n emissary emissary-emissary-ingress
|
||||
```
|
||||
|
||||
and that should get you started. Or, of course, you can use something like
|
||||
|
||||
```bash
|
||||
kubectl port-forward -n emissary svc/emissary-emissary-ingress 8080:80
|
||||
```
|
||||
|
||||
(after you configure a Listener!) and then talk to localhost:8080 with any
|
||||
kind of cluster.
|
||||
|
||||
## Using Faces for a sanity check
|
||||
|
||||
[Faces Demo]: https://github.com/buoyantio/faces-demo
|
||||
|
||||
If you like, you can continue by using the [Faces Demo] as a quick sanity
|
||||
check. First, install Faces itself using Helm:
|
||||
|
||||
```bash
|
||||
helm install faces \
|
||||
--namespace faces --create-namespace \
|
||||
oci://ghcr.io/buoyantio/faces-chart --version 2.0.0-rc.4 \
|
||||
--wait
|
||||
```
|
||||
|
||||
Next, you'll need to configure Emissary to route to Faces. First, we'll do the
|
||||
basic configuration to tell Emissary to listen for HTTP traffic:
|
||||
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
---
|
||||
apiVersion: getambassador.io/v3alpha1
|
||||
kind: Listener
|
||||
metadata:
|
||||
name: ambassador-https-listener
|
||||
spec:
|
||||
port: 8443
|
||||
protocol: HTTPS
|
||||
securityModel: XFP
|
||||
hostBinding:
|
||||
namespace:
|
||||
from: ALL
|
||||
---
|
||||
apiVersion: getambassador.io/v3alpha1
|
||||
kind: Listener
|
||||
metadata:
|
||||
name: ambassador-http-listener
|
||||
spec:
|
||||
port: 8080
|
||||
protocol: HTTP
|
||||
securityModel: XFP
|
||||
hostBinding:
|
||||
namespace:
|
||||
from: ALL
|
||||
---
|
||||
apiVersion: getambassador.io/v3alpha1
|
||||
kind: Host
|
||||
metadata:
|
||||
name: wildcard-host
|
||||
spec:
|
||||
hostname: "*"
|
||||
requestPolicy:
|
||||
insecure:
|
||||
action: Route
|
||||
EOF
|
||||
```
|
||||
|
||||
(This actually supports both HTTPS and HTTP, but since we haven't set up TLS
|
||||
certificates, we'll just stick with HTTP.)
|
||||
|
||||
Next, we need two Mappings:
|
||||
|
||||
| Prefix | Routes to Service | in Namespace |
|
||||
| --------- | ----------------- | ------------ |
|
||||
| `/faces/` | `faces-gui` | `faces` |
|
||||
| `/face/` | `face` | `faces` |
|
||||
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
---
|
||||
apiVersion: getambassador.io/v3alpha1
|
||||
kind: Mapping
|
||||
metadata:
|
||||
name: gui-mapping
|
||||
namespace: faces
|
||||
spec:
|
||||
hostname: "*"
|
||||
prefix: /faces/
|
||||
service: faces-gui.faces
|
||||
rewrite: /
|
||||
timeout_ms: 0
|
||||
---
|
||||
apiVersion: getambassador.io/v3alpha1
|
||||
kind: Mapping
|
||||
metadata:
|
||||
name: face-mapping
|
||||
namespace: faces
|
||||
spec:
|
||||
hostname: "*"
|
||||
prefix: /face/
|
||||
service: face.faces
|
||||
timeout_ms: 0
|
||||
EOF
|
||||
```
|
||||
|
||||
Once that's done, then you'll be able to access the Faces Demo at `/faces/`,
|
||||
on whatever IP address or hostname your cluster provides for the
|
||||
`emissary-emissary-ingress` Service. Or you can port-forward as above and
|
||||
access it at `http://localhost:8080/faces/`.
|
137
README.md
137
README.md
|
@ -6,81 +6,134 @@ Emissary-ingress
|
|||
[![Docker Repository][badge-docker-img]][badge-docker-link]
|
||||
[![Join Slack][badge-slack-img]][badge-slack-link]
|
||||
[![Core Infrastructure Initiative: Best Practices][badge-cii-img]][badge-cii-link]
|
||||
[![Artifact HUB][badge-artifacthub-img]][badge-artifacthub-link]
|
||||
|
||||
[badge-version-img]: https://img.shields.io/docker/v/emissaryingress/emissary?sort=semver
|
||||
[badge-version-link]: https://github.com/emissary-ingress/emissary/releases
|
||||
[badge-docker-img]: https://img.shields.io/docker/pulls/emissaryingress/emissary
|
||||
[badge-docker-link]: https://hub.docker.com/r/emissaryingress/emissary
|
||||
[badge-slack-img]: https://img.shields.io/badge/slack-join-orange.svg
|
||||
[badge-slack-link]: https://a8r.io/slack
|
||||
[badge-slack-link]: https://communityinviter.com/apps/cloud-native/cncf
|
||||
[badge-cii-img]: https://bestpractices.coreinfrastructure.org/projects/1852/badge
|
||||
[badge-cii-link]: https://bestpractices.coreinfrastructure.org/projects/1852
|
||||
[badge-artifacthub-img]: https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/emissary-ingress
|
||||
[badge-artifacthub-link]: https://artifacthub.io/packages/helm/datawire/emissary-ingress
|
||||
|
||||
<!-- Links are (mostly) at the end of this document, for legibility. -->
|
||||
|
||||
[Emissary-Ingress](https://www.getambassador.io) is an open-source Kubernetes-native API Gateway +
|
||||
Layer 7 load balancer + Kubernetes Ingress built on [Envoy Proxy](https://www.envoyproxy.io).
|
||||
Emissary-ingress is a CNCF incubation project (and was formerly known as Ambassador API Gateway).
|
||||
---
|
||||
|
||||
Emissary-ingress enables its users to:
|
||||
* Manage ingress traffic with [load balancing], support for multiple protocols ([gRPC and HTTP/2], [TCP], and [web sockets]), and Kubernetes integration
|
||||
* Manage changes to routing with an easy to use declarative policy engine and [self-service configuration], via Kubernetes [CRDs] or annotations
|
||||
* Secure microservices with [authentication], [rate limiting], and [TLS]
|
||||
* Ensure high availability with [sticky sessions], [rate limiting], and [circuit breaking]
|
||||
* Leverage observability with integrations with [Grafana], [Prometheus], and [Datadog], and comprehensive [metrics] support
|
||||
* Enable progressive delivery with [canary releases]
|
||||
* Connect service meshes including [Consul], [Linkerd], and [Istio]
|
||||
## QUICKSTART
|
||||
|
||||
Looking to get started as quickly as possible? Check out [the
|
||||
QUICKSTART](https://emissary-ingress.dev/docs/3.10/quick-start/)!
|
||||
|
||||
### Latest Release
|
||||
|
||||
The latest production version of Emissary is **3.10.0**.
|
||||
|
||||
**Note well** that there is also an Ambassador Edge Stack 3.10.0, but
|
||||
**Emissary 3.10 and Edge Stack 3.10 are not equivalent**. Their codebases have
|
||||
diverged and will continue to do so.
|
||||
|
||||
---
|
||||
|
||||
Emissary-ingress
|
||||
================
|
||||
|
||||
[Emissary-ingress](https://www.getambassador.io/docs/open-source) is an
|
||||
open-source, developer-centric, Kubernetes-native API gateway built on [Envoy
|
||||
Proxy]. Emissary-ingress is a CNCF incubating project (and was formerly known
|
||||
as Ambassador API Gateway).
|
||||
|
||||
### Design Goals
|
||||
|
||||
The first problem faced by any organization trying to develop cloud-native
|
||||
applications is the _ingress problem_: allowing users outside the cluster to
|
||||
access the application running inside the cluster. Emissary is built around
|
||||
the idea that the application developers should be able to solve the ingress
|
||||
problem themselves, without needing to become Kubernetes experts and without
|
||||
needing dedicated operations staff: a self-service, developer-centric workflow
|
||||
is necessary to develop at scale.
|
||||
|
||||
Emissary is open-source, developer-centric, role-oriented, opinionated, and
|
||||
Kubernetes-native.
|
||||
|
||||
- open-source: Emissary is licensed under the Apache 2 license, permitting use
|
||||
or modification by anyone.
|
||||
- developer-centric: Emissary is designed taking the application developer
|
||||
into account first.
|
||||
- role-oriented: Emissary's configuration deliberately tries to separate
|
||||
elements to allow separation of concerns between developers and operations.
|
||||
- opinionated: Emissary deliberately tries to make easy things easy, even if
|
||||
that comes at the cost of not allowing some uncommon features.
|
||||
|
||||
### Features
|
||||
|
||||
Emissary supports all the table-stakes features needed for a modern API
|
||||
gateway:
|
||||
|
||||
* Per-request [load balancing]
|
||||
* Support for routing [gRPC], [HTTP/2], [TCP], and [web sockets]
|
||||
* Declarative configuration via Kubernetes [custom resources]
|
||||
* Fine-grained [authentication] and [authorization]
|
||||
* Advanced routing features like [canary releases], [A/B testing], [dynamic routing], and [sticky sessions]
|
||||
* Resilience features like [retries], [rate limiting], and [circuit breaking]
|
||||
* Observability features including comprehensive [metrics] support using the [Prometheus] stack
|
||||
* Easy service mesh integration with [Linkerd], [Istio], [Consul], etc.
|
||||
* [Knative serverless integration]
|
||||
|
||||
See the full list of [features](https://www.getambassador.io/features/) here.
|
||||
See the full list of [features](https://www.getambassador.io/docs/emissary) here.
|
||||
|
||||
Branches
|
||||
========
|
||||
### Branches
|
||||
|
||||
(If you are looking at this list on a branch other than `master`, it
|
||||
may be out of date.)
|
||||
|
||||
- [`master`](https://github.com/emissary-ingress/emissary/tree/master) - branch for Emissary-ingress 3.3.z work (:heavy_check_mark: upcoming release)
|
||||
- [`release/v3.2`](https://github.com/emissary-ingress/emissary/tree/release/v3.2) - branch for Emissary-ingress 3.2.z work
|
||||
- [`release/v2.5`](https://github.com/emissary-ingress/emissary/tree/release/v2.5) - branch for Emissary-ingress 2.5.z work (:heavy_check_mark: upcoming release)
|
||||
- [`release/v1.14`](https://github.com/emissary-ingress/emissary/tree/release/v1.14) - branch for Emissary-ingress 1.14.z work (:heavy_check_mark: maintenance, supported through September 2022)
|
||||
- [`main`](https://github.com/emissary-ingress/emissary/tree/main): Emissary 4 development work
|
||||
|
||||
Architecture
|
||||
============
|
||||
**No further development is planned on any branches listed below.**
|
||||
|
||||
Emissary is configured via Kubernetes CRDs, or via annotations on Kubernetes `Service`s. Internally,
|
||||
it uses the [Envoy Proxy] to actually handle routing data; externally, it relies on Kubernetes for
|
||||
scaling and resiliency. For more on Emissary's architecture and motivation, read [this blog post](https://blog.getambassador.io/building-ambassador-an-open-source-api-gateway-on-kubernetes-and-envoy-ed01ed520844).
|
||||
- [`master`](https://github.com/emissary-ingress/emissary/tree/master) - **Frozen** at Emissary 3.10.0
|
||||
- [`release/v3.10`](https://github.com/emissary-ingress/emissary/tree/release/v3.10) - Emissary-ingress 3.10.0 release branch
|
||||
- [`release/v3.9`](https://github.com/emissary-ingress/emissary/tree/release/v3.9)
|
||||
- Emissary-ingress 3.9.1 release branch
|
||||
- [`release/v2.5`](https://github.com/emissary-ingress/emissary/tree/release/v2.5) - Emissary-ingress 2.5.1 release branch
|
||||
|
||||
Getting Started
|
||||
===============
|
||||
**Note well** that there is also an Ambassador Edge Stack 3.10.0, but
|
||||
**Emissary 3.10 and Edge Stack 3.10 are not equivalent**. Their codebases have
|
||||
diverged and will continue to do so.
|
||||
|
||||
You can get Emissary up and running in just three steps. Follow the instructions here: https://www.getambassador.io/docs/emissary/latest/tutorials/getting-started/
|
||||
#### Community
|
||||
|
||||
If you are looking for a Kubernetes ingress controller, Emissary provides a superset of the functionality of a typical ingress controller. (It does the traditional routing, and layers on a raft of configuration options.) This blog post covers [Kubernetes ingress](https://blog.getambassador.io/kubernetes-ingress-nodeport-load-balancers-and-ingress-controllers-6e29f1c44f2d).
|
||||
Emissary-ingress is a CNCF Incubating project and welcomes any and all
|
||||
contributors.
|
||||
|
||||
For other common questions, view this [FAQ page](https://www.getambassador.io/docs/emissary/latest/about/faq/).
|
||||
Check out the [`Community/`](Community/) directory for information on
|
||||
the way the community is run, including:
|
||||
|
||||
You can also use Helm to install Emissary. For more information, see the instructions in the [Helm installation documentation](https://www.getambassador.io/docs/emissary/latest/topics/install/helm/)
|
||||
- the [`CODE_OF_CONDUCT.md`](Community/CODE_OF_CONDUCT.md)
|
||||
- the [`GOVERNANCE.md`](Community/GOVERNANCE.md) structure
|
||||
- the list of [`MAINTAINERS.md`](Community/MAINTAINERS.md)
|
||||
- the [`MEETING_SCHEDULE.md`](Community/MEETING_SCHEDULE.md) of
|
||||
regular trouble-shooting meetings and contributor meetings
|
||||
- how to get [`SUPPORT.md`](Community/SUPPORT.md).
|
||||
|
||||
Community
|
||||
=========
|
||||
|
||||
Emissary-ingress is a CNCF Incubating project and welcomes any and all contributors. To get started:
|
||||
|
||||
* Join our [Slack channel](https://a8r.io/slack)
|
||||
* Check out the [Emissary documentation](https://www.getambassador.io/docs/emissary/)
|
||||
* Read the [Contributor's Guide](https://github.com/emissary-ingress/emissary/blob/master/DEVELOPING.md).
|
||||
The best way to join the community is to join the `#emissary-ingress` channel
|
||||
in the [CNCF Slack]. This is also the best place for technical information
|
||||
about Emissary's architecture or development.
|
||||
|
||||
If you're interested in contributing, here are some ways:
|
||||
|
||||
* Write a blog post for [our blog](https://blog.getambassador.io)
|
||||
* Investigate an [open issue](https://github.com/emissary-ingress/emissary/issues)
|
||||
* Add [more tests](https://github.com/emissary-ingress/emissary/tree/master/ambassador/tests)
|
||||
|
||||
The Ambassador Edge Stack is a superset of Emissary-ingress that provides additional functionality including OAuth/OpenID Connect, advanced rate limiting, Swagger/OpenAPI support, integrated ACME support for automatic TLS certificate management, and a cloud-based UI. For more information, visit https://www.getambassador.io/editions/.
|
||||
* Add [more tests](https://github.com/emissary-ingress/emissary/tree/main/ambassador/tests)
|
||||
|
||||
<!-- Please keep this list sorted. -->
|
||||
[CNCF Slack]: https://communityinviter.com/apps/cloud-native/cncf
|
||||
[Envoy Proxy]: https://www.envoyproxy.io
|
||||
|
||||
<!-- Legacy: clean up these links! -->
|
||||
|
||||
[authentication]: https://www.getambassador.io/docs/emissary/latest/topics/running/services/auth-service/
|
||||
[canary releases]: https://www.getambassador.io/docs/emissary/latest/topics/using/canary/
|
||||
[circuit breaking]: https://www.getambassador.io/docs/emissary/latest/topics/using/circuit-breakers/
|
||||
|
|
16
SUPPORT.md
16
SUPPORT.md
|
@ -1,16 +0,0 @@
|
|||
## Support for deploying and using Ambassador
|
||||
|
||||
Welcome to Ambassador! We use GitHub for tracking bugs and feature requests. If you need support, the following resources are available. Thanks for understanding.
|
||||
|
||||
### Documentation
|
||||
|
||||
* [User Documentation](https://www.getambassador.io/docs)
|
||||
* [Troubleshooting Guide](https://www.getambassador.io/reference/debugging)
|
||||
|
||||
### Real-time Chat
|
||||
|
||||
* [Slack](https://d6e.co/slack): The `#ambassador` channel is a good place to start.
|
||||
|
||||
### Commercial Support
|
||||
|
||||
* Commercial Support is available as part of [Ambassador Pro](https://www.getambassador.io/pro/).
|
|
@ -4,3 +4,6 @@
|
|||
/envoy-build-container.txt
|
||||
|
||||
/go-control-plane/
|
||||
|
||||
# folder is mounted to envoy build container and build outputs are copied here
|
||||
/envoy-docker-build
|
543
_cxx/envoy.mk
543
_cxx/envoy.mk
|
@ -1,31 +1,33 @@
|
|||
#
|
||||
# Variables that the dev might set in the env or CLI
|
||||
|
||||
# Set to non-empty to enable compiling Envoy as-needed.
|
||||
YES_I_AM_OK_WITH_COMPILING_ENVOY ?=
|
||||
|
||||
# Adjust to run just a subset of the tests.
|
||||
ENVOY_TEST_LABEL ?= //test/...
|
||||
# Set RSYNC_EXTRAS=Pv or something to increase verbosity.
|
||||
RSYNC_EXTRAS ?=
|
||||
ENVOY_TEST_LABEL ?= //contrib/golang/... //test/...
|
||||
export ENVOY_TEST_LABEL
|
||||
|
||||
#
|
||||
# Variables that are meant to be set by editing this file
|
||||
|
||||
# IF YOU MESS WITH ANY OF THESE VALUES, YOU MUST RUN `make update-base`.
|
||||
ENVOY_REPO ?= $(if $(IS_PRIVATE),git@github.com:datawire/envoy-private.git,https://github.com/datawire/envoy.git)
|
||||
ENVOY_COMMIT ?= 82fe811db6c54ef801e9b94d23eb2fcf2d2153f0
|
||||
ENVOY_COMPILATION_MODE ?= opt
|
||||
# Increment BASE_ENVOY_RELVER on changes to `docker/base-envoy/Dockerfile`, or Envoy recipes.
|
||||
# You may reset BASE_ENVOY_RELVER when adjusting ENVOY_COMMIT.
|
||||
BASE_ENVOY_RELVER ?= 0
|
||||
ENVOY_REPO ?= https://github.com/datawire/envoy.git
|
||||
|
||||
# Set to non-empty to enable compiling Envoy in FIPS mode.
|
||||
FIPS_MODE ?=
|
||||
# https://github.com/datawire/envoy/tree/rebase/release/v1.31.3
|
||||
ENVOY_COMMIT ?= 628f5afc75a894a08504fa0f416269ec50c07bf9
|
||||
|
||||
ENVOY_DOCKER_REPO ?= $(if $(IS_PRIVATE),quay.io/datawire-private/base-envoy,docker.io/emissaryingress/base-envoy)
|
||||
ENVOY_DOCKER_VERSION ?= $(BASE_ENVOY_RELVER).$(ENVOY_COMMIT).$(ENVOY_COMPILATION_MODE)$(if $(FIPS_MODE),.FIPS)
|
||||
ENVOY_DOCKER_TAG ?= $(ENVOY_DOCKER_REPO):envoy-$(ENVOY_DOCKER_VERSION)
|
||||
ENVOY_FULL_DOCKER_TAG ?= $(ENVOY_DOCKER_REPO):envoy-full-$(ENVOY_DOCKER_VERSION)
|
||||
ENVOY_COMPILATION_MODE ?= opt
|
||||
# Increment BASE_ENVOY_RELVER on changes to `docker/base-envoy/Dockerfile`, or Envoy recipes.
|
||||
# You may reset BASE_ENVOY_RELVER when adjusting ENVOY_COMMIT.
|
||||
BASE_ENVOY_RELVER ?= 0
|
||||
|
||||
# Set to non-empty to enable compiling Envoy in FIPS mode.
|
||||
FIPS_MODE ?=
|
||||
export FIPS_MODE
|
||||
|
||||
# ENVOY_DOCKER_REPO ?= docker.io/emissaryingress/base-envoy
|
||||
ENVOY_DOCKER_REPO ?= gcr.io/datawire/ambassador-base
|
||||
ENVOY_DOCKER_VERSION ?= $(BASE_ENVOY_RELVER).$(ENVOY_COMMIT).$(ENVOY_COMPILATION_MODE)$(if $(FIPS_MODE),.FIPS)
|
||||
ENVOY_DOCKER_TAG ?= $(ENVOY_DOCKER_REPO):envoy-$(ENVOY_DOCKER_VERSION)
|
||||
# END LIST OF VARIABLES REQUIRING `make update-base`.
|
||||
|
||||
# How to set ENVOY_GO_CONTROL_PLANE_COMMIT: In github.com/envoyproxy/go-control-plane.git, the majority
|
||||
|
@ -36,27 +38,209 @@ RSYNC_EXTRAS ?=
|
|||
# which commits are ancestors, I added `make guess-envoy-go-control-plane-commit` to do that in an
|
||||
# automated way! Still look at the commit yourself to make sure it seems sane; blindly trusting
|
||||
# machines is bad, mmkay?
|
||||
ENVOY_GO_CONTROL_PLANE_COMMIT = 8bcd7ee0191add0cec98e58202bf2950f8ac25b0
|
||||
ENVOY_GO_CONTROL_PLANE_COMMIT = f888b4f71207d0d268dee7cb824de92848da9ede
|
||||
|
||||
# Set ENVOY_DOCKER_REPO to the list of mirrors that we should
|
||||
# sanity-check that things get pushed to.
|
||||
ifneq ($(IS_PRIVATE),)
|
||||
# If $(IS_PRIVATE), then just the private repo...
|
||||
ENVOY_DOCKER_REPOS = $(ENVOY_DOCKER_REPO)
|
||||
else
|
||||
# ...otherwise, this list of repos:
|
||||
ENVOY_DOCKER_REPOS = docker.io/emissaryingress/base-envoy
|
||||
ENVOY_DOCKER_REPOS += gcr.io/datawire/ambassador-base
|
||||
endif
|
||||
# Set ENVOY_DOCKER_REPO to the list of mirrors to check
|
||||
# ENVOY_DOCKER_REPOS = docker.io/emissaryingress/base-envoy
|
||||
# ENVOY_DOCKER_REPOS += gcr.io/datawire/ambassador-base
|
||||
|
||||
#
|
||||
# Intro
|
||||
|
||||
include $(OSS_HOME)/build-aux/prelude.mk
|
||||
|
||||
# for builder.mk...
|
||||
export ENVOY_DOCKER_TAG
|
||||
|
||||
|
||||
#
|
||||
#################### Envoy cxx and build image targets #####################
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy: FORCE
|
||||
@echo "Getting Envoy sources..."
|
||||
# Ensure that GIT_DIR and GIT_WORK_TREE are unset so that `git bisect`
|
||||
# and friends work properly.
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
git init $@; \
|
||||
cd $@; \
|
||||
if git remote get-url origin &>/dev/null; then \
|
||||
git remote set-url origin $(ENVOY_REPO); \
|
||||
else \
|
||||
git remote add origin $(ENVOY_REPO); \
|
||||
fi; \
|
||||
if [[ $(ENVOY_REPO) == http://github.com/* || $(ENVOY_REPO) == https://github.com/* || $(ENVOY_REPO) == git://github.com/* ]]; then \
|
||||
git remote set-url --push origin git@github.com:$(word 3,$(subst /, ,$(ENVOY_REPO)))/$(patsubst %.git,%,$(word 4,$(subst /, ,$(ENVOY_REPO)))).git; \
|
||||
fi; \
|
||||
git fetch --tags origin; \
|
||||
if [ $(ENVOY_COMMIT) != '-' ]; then \
|
||||
git checkout $(ENVOY_COMMIT); \
|
||||
elif ! git rev-parse HEAD >/dev/null 2>&1; then \
|
||||
git checkout origin/master; \
|
||||
fi; \
|
||||
}
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy.clean: %.clean:
|
||||
$(if $(filter-out -,$(ENVOY_COMMIT)),rm -rf $*)
|
||||
clobber: $(OSS_HOME)/_cxx/envoy.clean
|
||||
|
||||
# cleanup existing build outputs
|
||||
$(OSS_HOME)/_cxx/envoy-docker-build.clean: %.clean:
|
||||
$(if $(filter-out -,$(ENVOY_COMMIT)),sudo rm -rf $*)
|
||||
clobber: $(OSS_HOME)/_cxx/envoy-docker-build.clean
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy-build-image.txt: $(OSS_HOME)/_cxx/envoy $(tools/write-ifchanged) FORCE
|
||||
@PS4=; set -ex -o pipefail; { \
|
||||
pushd $</ci; \
|
||||
echo "$$(pwd)"; \
|
||||
. envoy_build_sha.sh; \
|
||||
popd; \
|
||||
echo docker.io/envoyproxy/envoy-build-ubuntu:$$ENVOY_BUILD_SHA | $(tools/write-ifchanged) $@; \
|
||||
}
|
||||
clean: $(OSS_HOME)/_cxx/envoy-build-image.txt.rm
|
||||
|
||||
# cleanup build artifacts
|
||||
clean: $(OSS_HOME)/docker/base-envoy/envoy-static.rm
|
||||
clean: $(OSS_HOME)/docker/base-envoy/envoy-static-stripped.rm
|
||||
clean: $(OSS_HOME)/docker/base-envoy/envoy-static.dwp.rm
|
||||
|
||||
################################# Compile Custom Envoy Protos ######################################
|
||||
|
||||
# copy raw protos and compiled go protos into emissary-ingress
|
||||
.PHONY compile-envoy-protos:
|
||||
compile-envoy-protos: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
$(OSS_HOME)/_cxx/tools/compile-protos.sh
|
||||
|
||||
################################# Envoy Build PhonyTargets #########################################
|
||||
|
||||
# helper to trigger the clone of the datawire/envoy repository
|
||||
.PHONY: clone-envoy
|
||||
clone-envoy: $(OSS_HOME)/_cxx/envoy
|
||||
|
||||
# clean up envoy resources
|
||||
.PHONY: clean-envoy
|
||||
clean-envoy:
|
||||
cd $(OSS_HOME)/_cxx/envoy && ./ci/run_envoy_docker.sh "./ci/do_ci.sh 'clean'"
|
||||
|
||||
# Check to see if we have already built and push an image for the
|
||||
.PHONY: verify-base-envoy
|
||||
verify-base-envoy:
|
||||
@PS4=; set -ex; { \
|
||||
if docker pull $(ENVOY_DOCKER_TAG); then \
|
||||
echo 'Already up-to-date: $(ENVOY_DOCKER_TAG)'; \
|
||||
ENVOY_VERSION_OUTPUT=$$(docker run --platform="$(BUILD_ARCH)" --rm -it --entrypoint envoy-static-stripped $(ENVOY_DOCKER_TAG) --version | grep "version:"); \
|
||||
ENVOY_VERSION_EXPECTED="envoy-static-stripped .*version:.* $(ENVOY_COMMIT)/.*"; \
|
||||
if ! echo "$$ENVOY_VERSION_OUTPUT" | grep "$$ENVOY_VERSION_EXPECTED"; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo "error: Envoy base image $(ENVOY_DOCKER_TAG) contains envoy-static-stripped binary that reported an unexpected version string!" \
|
||||
"See ENVOY_VERSION_OUTPUT and ENVOY_VERSION_EXPECTED in the output above. This error is usually not recoverable." \
|
||||
"You may need to rebuild the Envoy base image after either updating ENVOY_COMMIT or bumping BASE_ENVOY_RELVER" \
|
||||
"(or both, depending on what you are doing)."; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
echo "Nothing to build at this time"; \
|
||||
exit 0; \
|
||||
fi; \
|
||||
}
|
||||
|
||||
# builds envoy using release settings, see https://github.com/envoyproxy/envoy/blob/main/ci/README.md for additional
|
||||
# details on configuring builds
|
||||
.PHONY: build-envoy
|
||||
build-envoy: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
$(OSS_HOME)/_cxx/tools/build-envoy.sh
|
||||
|
||||
# build the base-envoy containers and tags them locally, this requires running `build-envoy` first.
|
||||
.PHONY: build-base-envoy-image
|
||||
build-base-envoy-image: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
docker build --platform="$(BUILD_ARCH)" -f $(OSS_HOME)/docker/base-envoy/Dockerfile.stripped -t $(ENVOY_DOCKER_TAG) $(OSS_HOME)/docker/base-envoy
|
||||
|
||||
# Allows pushing the docker image independent of building envoy and docker containers
|
||||
# Note, bump the BASE_ENVOY_RELVER and re-build before pushing when making non-commit changes to have a unique image tag.
|
||||
.PHONY: push-base-envoy-image
|
||||
push-base-envoy-image:
|
||||
docker push $(ENVOY_DOCKER_TAG)
|
||||
|
||||
|
||||
# `make update-base`: Recompile Envoy and do all of the related things.
|
||||
.PHONY: update-base
|
||||
update-base: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
$(MAKE) verify-base-envoy
|
||||
$(MAKE) build-envoy
|
||||
$(MAKE) build-base-envoy-image
|
||||
$(MAKE) push-base-envoy-image
|
||||
$(MAKE) compile-envoy-protos
|
||||
|
||||
.PHONY: check-envoy
|
||||
check-envoy: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
$(OSS_HOME)/_cxx/tools/test-envoy.sh;
|
||||
|
||||
.PHONY: envoy-shell
|
||||
envoy-shell: $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
cd $(OSS_HOME)/_cxx/envoy && ./ci/run_envoy_docker.sh bash || true;
|
||||
|
||||
################################# Go-control-plane Targets ####################################
|
||||
#
|
||||
# Recipes used by `make generate`; files that get checked into Git (i.e. protobufs and Go code)
|
||||
#
|
||||
# These targets are depended on by `make generate` in `build-aux/generate.mk`.
|
||||
|
||||
|
||||
# See the comment on ENVOY_GO_CONTROL_PLANE_COMMIT at the top of the file for more explanation on how this target works.
|
||||
guess-envoy-go-control-plane-commit: # Have the computer suggest a value for ENVOY_GO_CONTROL_PLANE_COMMIT
|
||||
guess-envoy-go-control-plane-commit: $(OSS_HOME)/_cxx/envoy $(OSS_HOME)/_cxx/go-control-plane
|
||||
@echo
|
||||
@echo '######################################################################'
|
||||
@echo
|
||||
@set -e; { \
|
||||
(cd $(OSS_HOME)/_cxx/go-control-plane && git log --format='%H %s' origin/main) | sed -n 's, Mirrored from envoyproxy/envoy @ , ,p' | \
|
||||
while read -r go_commit cxx_commit; do \
|
||||
if (cd $(OSS_HOME)/_cxx/envoy && git merge-base --is-ancestor "$$cxx_commit" $(ENVOY_COMMIT) 2>/dev/null); then \
|
||||
echo "ENVOY_GO_CONTROL_PLANE_COMMIT = $$go_commit"; \
|
||||
break; \
|
||||
fi; \
|
||||
done; \
|
||||
}
|
||||
.PHONY: guess-envoy-go-control-plane-commit
|
||||
|
||||
# The unmodified go-control-plane
|
||||
$(OSS_HOME)/_cxx/go-control-plane: FORCE
|
||||
@echo "Getting Envoy go-control-plane sources..."
|
||||
# Ensure that GIT_DIR and GIT_WORK_TREE are unset so that `git bisect`
|
||||
# and friends work properly.
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
git init $@; \
|
||||
cd $@; \
|
||||
if git remote get-url origin &>/dev/null; then \
|
||||
git remote set-url origin https://github.com/envoyproxy/go-control-plane; \
|
||||
else \
|
||||
git remote add origin https://github.com/envoyproxy/go-control-plane; \
|
||||
fi; \
|
||||
git fetch --tags origin; \
|
||||
git checkout $(ENVOY_GO_CONTROL_PLANE_COMMIT); \
|
||||
}
|
||||
|
||||
# The go-control-plane patched for our version of the protobufs
|
||||
$(OSS_HOME)/pkg/envoy-control-plane: $(OSS_HOME)/_cxx/go-control-plane FORCE
|
||||
rm -rf $@
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
tmpdir=$$(mktemp -d); \
|
||||
trap 'rm -rf "$$tmpdir"' EXIT; \
|
||||
cd "$$tmpdir"; \
|
||||
cd $(OSS_HOME)/_cxx/go-control-plane; \
|
||||
cp -r $$(git ls-files ':[A-Z]*' ':!Dockerfile*' ':!Makefile') pkg/* ratelimit "$$tmpdir"; \
|
||||
find "$$tmpdir" -name '*.go' -exec sed -E -i.bak \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/pkg,github.com/emissary-ingress/emissary/v3/pkg/envoy-control-plane,g' \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/envoy,github.com/emissary-ingress/emissary/v3/pkg/api/envoy,g' \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/ratelimit,github.com/emissary-ingress/emissary/v3/pkg/envoy-control-plane/ratelimit,g' \
|
||||
-- {} +; \
|
||||
sed -i.bak -e 's/^package/\n&/' "$$tmpdir/log/log_test.go"; \
|
||||
find "$$tmpdir" -name '*.bak' -delete; \
|
||||
mv "$$tmpdir" $(abspath $@); \
|
||||
}
|
||||
cd $(OSS_HOME) && gofmt -w -s ./pkg/envoy-control-plane/
|
||||
|
||||
######################### Envoy Version and Mirror Check #######################
|
||||
|
||||
old_envoy_commits = $(shell { \
|
||||
{ \
|
||||
git log --patch --format='' -G'^ *ENVOY_COMMIT' -- _cxx/envoy.mk; \
|
||||
|
@ -66,8 +250,10 @@ old_envoy_commits = $(shell { \
|
|||
git log --patch --format='' -G'^ *ENVOY_BASE_IMAGE' 511ca54c3004019758980ba82f708269c373ba28 -- Makefile | sed -n 's/^. *ENVOY_BASE_IMAGE.*-g//p'; \
|
||||
git log --patch --format='' -G'FROM.*envoy.*:' 7593e7dca9aea2f146ddfd5a3676bcc30ee25aff -- Dockerfile | sed -n '/FROM.*envoy.*:/s/.*://p' | sed -e 's/ .*//' -e 's/.*-g//' -e 's/.*-//' -e '/^latest$$/d'; \
|
||||
} | uniq)
|
||||
|
||||
lost_history += 251b7d345 # mentioned in a605b62ee (wip - patched and fixed authentication, Gabriel, 2019-04-04)
|
||||
lost_history += 27770bf3d # mentioned in 026dc4cd4 (updated envoy image, Gabriel, 2019-04-04)
|
||||
|
||||
check-envoy-version: ## Check that Envoy version has been pushed to the right places
|
||||
check-envoy-version: $(OSS_HOME)/_cxx/envoy
|
||||
# First, we're going to check whether the Envoy commit is tagged, which
|
||||
|
@ -100,301 +286,4 @@ check-envoy-version: $(OSS_HOME)/_cxx/envoy
|
|||
# them... except that gcr.io doesn't allow `manifest inspect`.
|
||||
# So just go ahead and do the `pull` :(
|
||||
$(foreach ENVOY_DOCKER_REPO,$(ENVOY_DOCKER_REPOS), docker pull $(ENVOY_DOCKER_TAG) >/dev/null$(NL))
|
||||
$(foreach ENVOY_DOCKER_REPO,$(ENVOY_DOCKER_REPOS), docker pull $(ENVOY_FULL_DOCKER_TAG) >/dev/null$(NL))
|
||||
.PHONY: check-envoy-version
|
||||
|
||||
# See the comment on ENVOY_GO_CONTROL_PLANE_COMMIT at the top of the file for more explanation on how this target works.
|
||||
guess-envoy-go-control-plane-commit: # Have the computer suggest a value for ENVOY_GO_CONTROL_PLANE_COMMIT
|
||||
guess-envoy-go-control-plane-commit: $(OSS_HOME)/_cxx/envoy $(OSS_HOME)/_cxx/go-control-plane
|
||||
@echo
|
||||
@echo '######################################################################'
|
||||
@echo
|
||||
@set -e; { \
|
||||
(cd $(OSS_HOME)/_cxx/go-control-plane && git log --format='%H %s' origin/main) | sed -n 's, Mirrored from envoyproxy/envoy @ , ,p' | \
|
||||
while read -r go_commit cxx_commit; do \
|
||||
if (cd $(OSS_HOME)/_cxx/envoy && git merge-base --is-ancestor "$$cxx_commit" $(ENVOY_COMMIT) 2>/dev/null); then \
|
||||
echo "ENVOY_GO_CONTROL_PLANE_COMMIT = $$go_commit"; \
|
||||
break; \
|
||||
fi; \
|
||||
done; \
|
||||
}
|
||||
.PHONY: guess-envoy-go-control-plane-commit
|
||||
|
||||
#
|
||||
# Envoy sources and build container
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy: FORCE
|
||||
@echo "Getting Envoy sources..."
|
||||
# Ensure that GIT_DIR and GIT_WORK_TREE are unset so that `git bisect`
|
||||
# and friends work properly.
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
git init $@; \
|
||||
cd $@; \
|
||||
if git remote get-url origin &>/dev/null; then \
|
||||
git remote set-url origin $(ENVOY_REPO); \
|
||||
else \
|
||||
git remote add origin $(ENVOY_REPO); \
|
||||
fi; \
|
||||
if [[ $(ENVOY_REPO) == http://github.com/* || $(ENVOY_REPO) == https://github.com/* || $(ENVOY_REPO) == git://github.com/* ]]; then \
|
||||
git remote set-url --push origin git@github.com:$(word 3,$(subst /, ,$(ENVOY_REPO)))/$(patsubst %.git,%,$(word 4,$(subst /, ,$(ENVOY_REPO)))).git; \
|
||||
fi; \
|
||||
git fetch --tags origin; \
|
||||
if [ $(ENVOY_COMMIT) != '-' ]; then \
|
||||
git checkout $(ENVOY_COMMIT); \
|
||||
elif ! git rev-parse HEAD >/dev/null 2>&1; then \
|
||||
git checkout origin/master; \
|
||||
fi; \
|
||||
}
|
||||
$(OSS_HOME)/_cxx/envoy.clean: %.clean:
|
||||
$(if $(filter-out -,$(ENVOY_COMMIT)),rm -rf $*)
|
||||
clobber: $(OSS_HOME)/_cxx/envoy.clean
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy-build-image.txt: $(OSS_HOME)/_cxx/envoy $(tools/write-ifchanged) FORCE
|
||||
@PS4=; set -ex -o pipefail; { \
|
||||
pushd $</ci; \
|
||||
echo "$$(pwd)"; \
|
||||
. envoy_build_sha.sh; \
|
||||
popd; \
|
||||
echo docker.io/envoyproxy/envoy-build-ubuntu:$$ENVOY_BUILD_SHA | $(tools/write-ifchanged) $@; \
|
||||
}
|
||||
clean: $(OSS_HOME)/_cxx/envoy-build-image.txt.rm
|
||||
|
||||
$(OSS_HOME)/_cxx/envoy-build-container.txt: $(OSS_HOME)/_cxx/envoy-build-image.txt FORCE
|
||||
@PS4=; set -ex; { \
|
||||
if [ $@ -nt $< ] && docker exec $$(cat $@) true; then \
|
||||
exit 0; \
|
||||
fi; \
|
||||
if [ -e $@ ]; then \
|
||||
docker kill $$(cat $@) || true; \
|
||||
fi; \
|
||||
docker run --detach --rm --privileged --volume=envoy-build:/root:rw $$(cat $<) tail -f /dev/null > $@; \
|
||||
}
|
||||
$(OSS_HOME)/_cxx/envoy-build-container.txt.clean: %.clean:
|
||||
if [ -e $* ]; then docker kill $$(cat $*) || true; fi
|
||||
rm -f $*
|
||||
if docker volume inspect envoy-build &>/dev/null; then docker volume rm envoy-build >/dev/null; fi
|
||||
clean: $(OSS_HOME)/_cxx/envoy-build-container.txt.clean
|
||||
|
||||
#
|
||||
# Things that run in the Envoy build container
|
||||
#
|
||||
# We do everything with rsync and a persistent build-container
|
||||
# (instead of using a volume), because
|
||||
# 1. Docker for Mac's osxfs is very slow, so volumes are bad for
|
||||
# macOS users.
|
||||
# 2. Volumes mounts just straight-up don't work for people who use
|
||||
# Minikube's dockerd.
|
||||
ENVOY_SYNC_HOST_TO_DOCKER = rsync -a$(RSYNC_EXTRAS) --partial --delete --blocking-io -e "docker exec -i" $(OSS_HOME)/_cxx/envoy/ $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt):/root/envoy
|
||||
ENVOY_SYNC_DOCKER_TO_HOST = rsync -a$(RSYNC_EXTRAS) --partial --delete --blocking-io -e "docker exec -i" $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt):/root/envoy/ $(OSS_HOME)/_cxx/envoy/
|
||||
|
||||
ENVOY_BASH.cmd = bash -c 'PS4=; set -ex; $(ENVOY_SYNC_HOST_TO_DOCKER); trap '\''$(ENVOY_SYNC_DOCKER_TO_HOST)'\'' EXIT; '$(call quote.shell,$1)
|
||||
ENVOY_BASH.deps = $(OSS_HOME)/_cxx/envoy-build-container.txt
|
||||
|
||||
ENVOY_DOCKER.env += PATH=/opt/llvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
||||
ENVOY_DOCKER.env += CC=clang
|
||||
ENVOY_DOCKER.env += CXX=clang++
|
||||
ENVOY_DOCKER.env += CLANG_FORMAT=/opt/llvm/bin/clang-format
|
||||
ENVOY_DOCKER_EXEC = docker exec --workdir=/root/envoy $(foreach e,$(ENVOY_DOCKER.env), --env=$e ) $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt)
|
||||
|
||||
$(OSS_HOME)/docker/base-envoy/envoy-static: $(ENVOY_BASH.deps) FORCE
|
||||
mkdir -p $(@D)
|
||||
@PS4=; set -ex; { \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ] && docker run --rm --entrypoint=true $(ENVOY_FULL_DOCKER_TAG); then \
|
||||
rsync -a$(RSYNC_EXTRAS) --partial --blocking-io -e 'docker run --rm -i' $$(docker image inspect $(ENVOY_FULL_DOCKER_TAG) --format='{{.Id}}' | sed 's/^sha256://'):/usr/local/bin/envoy-static $@; \
|
||||
else \
|
||||
if [ -z '$(YES_I_AM_OK_WITH_COMPILING_ENVOY)' ]; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo 'error: Envoy compilation triggered, but $$YES_I_AM_OK_WITH_COMPILING_ENVOY is not set'; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
$(call ENVOY_BASH.cmd, \
|
||||
$(ENVOY_DOCKER_EXEC) bazel build $(if $(FIPS_MODE), --define boringssl=fips) --verbose_failures -c $(ENVOY_COMPILATION_MODE) --config=clang //source/exe:envoy-static; \
|
||||
rsync -a$(RSYNC_EXTRAS) --partial --blocking-io -e 'docker exec -i' $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt):/root/envoy/bazel-bin/source/exe/envoy-static $@; \
|
||||
); \
|
||||
fi; \
|
||||
}
|
||||
$(OSS_HOME)/docker/base-envoy/envoy-static-stripped: %-stripped: % FORCE
|
||||
@PS4=; set -ex; { \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ] && docker run --rm --entrypoint=true $(ENVOY_FULL_DOCKER_TAG); then \
|
||||
rsync -a$(RSYNC_EXTRAS) --partial --blocking-io -e 'docker run --rm -i' $$(docker image inspect $(ENVOY_FULL_DOCKER_TAG) --format='{{.Id}}' | sed 's/^sha256://'):/usr/local/bin/$(@F) $@; \
|
||||
else \
|
||||
if [ -z '$(YES_I_AM_OK_WITH_COMPILING_ENVOY)' ]; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo 'error: Envoy compilation triggered, but $$YES_I_AM_OK_WITH_COMPILING_ENVOY is not set'; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
rsync -a$(RSYNC_EXTRAS) --partial --blocking-io -e 'docker exec -i' $< $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt):/tmp/$(<F); \
|
||||
docker exec $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt) strip /tmp/$(<F) -o /tmp/$(@F); \
|
||||
rsync -a$(RSYNC_EXTRAS) --partial --blocking-io -e 'docker exec -i' $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt):/tmp/$(@F) $@; \
|
||||
fi; \
|
||||
}
|
||||
clobber: $(OSS_HOME)/docker/base-envoy/envoy-static.rm $(OSS_HOME)/docker/base-envoy/envoy-static-stripped.rm
|
||||
|
||||
check-envoy: ## Run the Envoy test suite
|
||||
check-envoy: $(ENVOY_BASH.deps)
|
||||
@echo 'Testing envoy with Bazel label: "$(ENVOY_TEST_LABEL)"'; \
|
||||
$(call ENVOY_BASH.cmd, \
|
||||
$(ENVOY_DOCKER_EXEC) bazel test --config=clang --test_output=errors --verbose_failures -c dbg --test_env=ENVOY_IP_TEST_VERSIONS=v4only $(ENVOY_TEST_LABEL); \
|
||||
)
|
||||
.PHONY: check-envoy
|
||||
|
||||
envoy-shell: ## Run a shell in the Envoy build container
|
||||
envoy-shell: $(ENVOY_BASH.deps)
|
||||
$(call ENVOY_BASH.cmd, \
|
||||
docker exec -it --workdir=/root/envoy $(foreach e,$(ENVOY_DOCKER.env), --env=$e ) $$(cat $(OSS_HOME)/_cxx/envoy-build-container.txt) /bin/bash || true; \
|
||||
)
|
||||
.PHONY: envoy-shell
|
||||
|
||||
#
|
||||
# Recipes used by `make generate`; files that get checked in to Git (i.e. protobufs and Go code)
|
||||
#
|
||||
# These targets are depended on by `make generate` in `build-aux/generate.mk`.
|
||||
|
||||
# Raw protobufs
|
||||
$(OSS_HOME)/api/envoy: $(OSS_HOME)/api/%: $(OSS_HOME)/_cxx/envoy
|
||||
rsync --recursive --delete --delete-excluded --prune-empty-dirs --include='*/' --include='*.proto' --exclude='*' $</api/$*/ $@
|
||||
|
||||
# Go generated from the protobufs
|
||||
$(OSS_HOME)/_cxx/envoy/build_go: $(ENVOY_BASH.deps) FORCE
|
||||
$(call ENVOY_BASH.cmd, \
|
||||
$(ENVOY_DOCKER_EXEC) git config --global --add safe.directory /root/envoy; \
|
||||
$(ENVOY_DOCKER_EXEC) python3 -c 'from tools.api.generate_go_protobuf import generate_protobufs; generate_protobufs("@envoy_api//...", "/root/envoy/build_go", "envoy_api")'; \
|
||||
)
|
||||
test -d $@ && touch $@
|
||||
$(OSS_HOME)/pkg/api/envoy: $(OSS_HOME)/pkg/api/%: $(OSS_HOME)/_cxx/envoy/build_go
|
||||
rm -rf $@
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
tmpdir=$$(mktemp -d); \
|
||||
trap 'rm -rf "$$tmpdir"' EXIT; \
|
||||
cp -r $</$* "$$tmpdir"; \
|
||||
find "$$tmpdir" -type f \
|
||||
-exec chmod 644 {} + \
|
||||
-exec sed -E -i.bak \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/envoy,github.com/emissary-ingress/emissary/v3/pkg/api/envoy,g' \
|
||||
-- {} +; \
|
||||
find "$$tmpdir" -name '*.bak' -delete; \
|
||||
mv "$$tmpdir/$*" $@; \
|
||||
}
|
||||
# Envoy's build system still uses an old `protoc-gen-go` that emits
|
||||
# code that Go 1.19's `gofmt` isn't happy with. Even generated code
|
||||
# should be gofmt-clean, so gofmt it as a post-processing step.
|
||||
gofmt -w -s ./pkg/api/envoy
|
||||
|
||||
# The unmodified go-control-plane
|
||||
$(OSS_HOME)/_cxx/go-control-plane: FORCE
|
||||
@echo "Getting Envoy go-control-plane sources..."
|
||||
# Ensure that GIT_DIR and GIT_WORK_TREE are unset so that `git bisect`
|
||||
# and friends work properly.
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
git init $@; \
|
||||
cd $@; \
|
||||
if git remote get-url origin &>/dev/null; then \
|
||||
git remote set-url origin https://github.com/envoyproxy/go-control-plane; \
|
||||
else \
|
||||
git remote add origin https://github.com/envoyproxy/go-control-plane; \
|
||||
fi; \
|
||||
git fetch --tags origin; \
|
||||
git checkout $(ENVOY_GO_CONTROL_PLANE_COMMIT); \
|
||||
}
|
||||
|
||||
# The go-control-plane patched for our version of the protobufs
|
||||
$(OSS_HOME)/pkg/envoy-control-plane: $(OSS_HOME)/_cxx/go-control-plane FORCE
|
||||
rm -rf $@
|
||||
@PS4=; set -ex; { \
|
||||
unset GIT_DIR GIT_WORK_TREE; \
|
||||
tmpdir=$$(mktemp -d); \
|
||||
trap 'rm -rf "$$tmpdir"' EXIT; \
|
||||
cd "$$tmpdir"; \
|
||||
cd $(OSS_HOME)/_cxx/go-control-plane; \
|
||||
cp -r $$(git ls-files ':[A-Z]*' ':!Dockerfile*' ':!Makefile') pkg/* "$$tmpdir"; \
|
||||
find "$$tmpdir" -name '*.go' -exec sed -E -i.bak \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/pkg,github.com/emissary-ingress/emissary/v3/pkg/envoy-control-plane,g' \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/envoy,github.com/emissary-ingress/emissary/v3/pkg/api/envoy,g' \
|
||||
-- {} +; \
|
||||
sed -i.bak -e 's/^package/\n&/' "$$tmpdir/log/log_test.go"; \
|
||||
find "$$tmpdir" -name '*.bak' -delete; \
|
||||
mv "$$tmpdir" $(abspath $@); \
|
||||
}
|
||||
cd $(OSS_HOME) && gofmt -w -s ./pkg/envoy-control-plane/
|
||||
|
||||
#
|
||||
# `make update-base`: Recompile Envoy and do all of the related things.
|
||||
|
||||
update-base: $(OSS_HOME)/docker/base-envoy/envoy-static $(OSS_HOME)/docker/base-envoy/envoy-static-stripped $(OSS_HOME)/_cxx/envoy-build-image.txt
|
||||
@PS4=; set -ex; { \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ] && docker pull $(ENVOY_FULL_DOCKER_TAG); then \
|
||||
echo 'Already up-to-date: $(ENVOY_FULL_DOCKER_TAG)'; \
|
||||
ENVOY_VERSION_OUTPUT=$$(docker run --rm -it --entrypoint envoy-static $(ENVOY_FULL_DOCKER_TAG) --version | grep "version:"); \
|
||||
ENVOY_VERSION_EXPECTED="envoy-static .*version:.* $(ENVOY_COMMIT)/.*"; \
|
||||
if ! echo "$$ENVOY_VERSION_OUTPUT" | grep "$$ENVOY_VERSION_EXPECTED"; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo "error: Envoy base image $(ENVOY_FULL_DOCKER_TAG) contains envoy-static binary that reported an unexpected version string!" \
|
||||
"See ENVOY_VERSION_OUTPUT and ENVOY_VERSION_EXPECTED in the output above. This error is usually not recoverable." \
|
||||
"You may need to rebuild the Envoy base image after either updating ENVOY_COMMIT or bumping BASE_ENVOY_RELVER" \
|
||||
"(or both, depending on what you are doing)."; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
if [ -z '$(YES_I_AM_OK_WITH_COMPILING_ENVOY)' ]; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo 'error: Envoy compilation triggered, but $$YES_I_AM_OK_WITH_COMPILING_ENVOY is not set'; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
docker build --build-arg=base=$$(cat $(OSS_HOME)/_cxx/envoy-build-image.txt) -f $(OSS_HOME)/docker/base-envoy/Dockerfile -t $(ENVOY_FULL_DOCKER_TAG) $(OSS_HOME)/docker/base-envoy; \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ]; then \
|
||||
ENVOY_VERSION_OUTPUT=$$(docker run --rm -it --entrypoint envoy-static $(ENVOY_FULL_DOCKER_TAG) --version | grep "version:"); \
|
||||
ENVOY_VERSION_EXPECTED="envoy-static .*version:.* $(ENVOY_COMMIT)/.*"; \
|
||||
if ! echo "$$ENVOY_VERSION_OUTPUT" | grep "$$ENVOY_VERSION_EXPECTED"; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo "error: Envoy base image $(ENVOY_FULL_DOCKER_TAG) contains envoy-static binary that reported an unexpected version string!" \
|
||||
"See ENVOY_VERSION_OUTPUT and ENVOY_VERSION_EXPECTED in the output above. This error is usually not recoverable." \
|
||||
"You may need to rebuild the Envoy base image after either updating ENVOY_COMMIT or bumping BASE_ENVOY_RELVER" \
|
||||
"(or both, depending on what you are doing)."; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
docker push $(ENVOY_FULL_DOCKER_TAG); \
|
||||
fi; \
|
||||
fi; \
|
||||
}
|
||||
@PS4=; set -ex; { \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ] && docker pull $(ENVOY_DOCKER_TAG); then \
|
||||
echo 'Already up-to-date: $(ENVOY_DOCKER_TAG)'; \
|
||||
ENVOY_VERSION_OUTPUT=$$(docker run --rm -it --entrypoint envoy-static-stripped $(ENVOY_DOCKER_TAG) --version | grep "version:"); \
|
||||
ENVOY_VERSION_EXPECTED="envoy-static-stripped .*version:.* $(ENVOY_COMMIT)/.*"; \
|
||||
if ! echo "$$ENVOY_VERSION_OUTPUT" | grep "$$ENVOY_VERSION_EXPECTED"; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo "error: Envoy base image $(ENVOY_DOCKER_TAG) contains envoy-static-stripped binary that reported an unexpected version string!" \
|
||||
"See ENVOY_VERSION_OUTPUT and ENVOY_VERSION_EXPECTED in the output above. This error is usually not recoverable." \
|
||||
"You may need to rebuild the Envoy base image after either updating ENVOY_COMMIT or bumping BASE_ENVOY_RELVER" \
|
||||
"(or both, depending on what you are doing)."; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
else \
|
||||
if [ -z '$(YES_I_AM_OK_WITH_COMPILING_ENVOY)' ]; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo 'error: Envoy compilation triggered, but $$YES_I_AM_OK_WITH_COMPILING_ENVOY is not set'; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
docker build -f $(OSS_HOME)/docker/base-envoy/Dockerfile.stripped -t $(ENVOY_DOCKER_TAG) $(OSS_HOME)/docker/base-envoy; \
|
||||
if [ '$(ENVOY_COMMIT)' != '-' ]; then \
|
||||
ENVOY_VERSION_OUTPUT=$$(docker run --rm -it --entrypoint envoy-static-stripped $(ENVOY_DOCKER_TAG) --version | grep "version:"); \
|
||||
ENVOY_VERSION_EXPECTED="envoy-static-stripped .*version:.* $(ENVOY_COMMIT)/.*"; \
|
||||
if ! echo "$$ENVOY_VERSION_OUTPUT" | grep "$$ENVOY_VERSION_EXPECTED"; then \
|
||||
{ set +x; } &>/dev/null; \
|
||||
echo "error: Envoy base image $(ENVOY_DOCKER_TAG) contains envoy-static-stripped binary that reported an unexpected version string!" \
|
||||
"See ENVOY_VERSION_OUTPUT and ENVOY_VERSION_EXPECTED in the output above. This error is usually not recoverable." \
|
||||
"You may need to rebuild the Envoy base image after either updating ENVOY_COMMIT or bumping BASE_ENVOY_RELVER" \
|
||||
"(or both, depending on what you are doing)."; \
|
||||
exit 1; \
|
||||
fi; \
|
||||
docker push $(ENVOY_DOCKER_TAG); \
|
||||
fi; \
|
||||
fi; \
|
||||
}
|
||||
# `make generate` has to come *after* the above, because builder.sh will
|
||||
# try to use the images that the above create.
|
||||
$(MAKE) generate
|
||||
.PHONY: update-base
|
||||
|
|
|
@ -0,0 +1,74 @@
|
|||
#!/bin/bash
|
||||
|
||||
# The phony make targets have been exported when calling from Make.
|
||||
FIPS_MODE=${FIPS_MODE:-}
|
||||
BUILD_ARCH=${BUILD_ARCH:-linux/amd64}
|
||||
|
||||
# base directory vars
|
||||
OSS_SOURCE="$PWD"
|
||||
BASE_ENVOY_DIR="$PWD/_cxx/envoy"
|
||||
ENVOY_DOCKER_BUILD_DIR="$PWD/_cxx/envoy-docker-build"
|
||||
export ENVOY_DOCKER_BUILD_DIR
|
||||
|
||||
# container vars
|
||||
DOCKER_OPTIONS=(
|
||||
"--platform=${BUILD_ARCH}"
|
||||
"--env=ENVOY_DELIVERY_DIR=/build/envoy/x64/contrib/exe/envoy"
|
||||
"--env=ENVOY_BUILD_TARGET=//contrib/exe:envoy-static"
|
||||
"--env=ENVOY_BUILD_DEBUG_INFORMATION=//contrib/exe:envoy-static.dwp"
|
||||
# "--env=BAZEL_BUILD_OPTIONS=\-\-define tcmalloc=gperftools"
|
||||
)
|
||||
|
||||
# unset ssh auth sock because we don't need it in the container and
|
||||
# the `run_envoy_docker.sh` adds it by default. This causes issues
|
||||
# if trying to run builds on docker for mac.
|
||||
SSH_AUTH_SOCK=""
|
||||
export SSH_AUTH_SOCK
|
||||
|
||||
BAZEL_BUILD_EXTRA_OPTIONS=()
|
||||
if [ -n "$FIPS_MODE" ]; then
|
||||
BAZEL_BUILD_EXTRA_OPTIONS+=(--define boringssl=fips)
|
||||
fi;
|
||||
|
||||
if [ ! -d "$BASE_ENVOY_DIR" ]; then
|
||||
echo "Looks like Envoy hasn't been cloned locally yet, run clone-envoy target to ensure it is cloned";
|
||||
exit 1;
|
||||
fi;
|
||||
|
||||
ENVOY_DOCKER_OPTIONS="${DOCKER_OPTIONS[*]}"
|
||||
export ENVOY_DOCKER_OPTIONS
|
||||
|
||||
echo "Building custom build of Envoy using the following parameters:"
|
||||
echo " FIPS_MODE: ${FIPS_MODE}"
|
||||
echo " BUILD_ARCH: ${BUILD_ARCH}"
|
||||
echo " ENVOY_DOCKER_BUILD_DIR: ${ENVOY_DOCKER_BUILD_DIR}"
|
||||
echo " ENVOY_DOCKER_OPTIONS: ${ENVOY_DOCKER_OPTIONS}"
|
||||
echo " SSH_AUTH_SOCK: ${SSH_AUTH_SOCK}"
|
||||
echo " "
|
||||
|
||||
ci_cmd="./ci/do_ci.sh 'release.server_only'"
|
||||
|
||||
if [ ${#BAZEL_BUILD_EXTRA_OPTIONS[@]} -gt 0 ]; then
|
||||
ci_cmd="BAZEL_BUILD_EXTRA_OPTIONS='${BAZEL_BUILD_EXTRA_OPTIONS[*]}' $ci_cmd"
|
||||
fi;
|
||||
|
||||
echo "cleaning up any old build binaries"
|
||||
rm -rf "$ENVOY_DOCKER_BUILD_DIR/envoy";
|
||||
|
||||
# build envoy
|
||||
cd "${BASE_ENVOY_DIR}" || exit
|
||||
./ci/run_envoy_docker.sh "${ci_cmd}"
|
||||
cd "${OSS_SOURCE}" || exit
|
||||
|
||||
echo "Untar release distribution which includes static builds"
|
||||
tar -xvf "${ENVOY_DOCKER_BUILD_DIR}/envoy/x64/bin/release.tar.zst" -C "${ENVOY_DOCKER_BUILD_DIR}/envoy/x64/bin";
|
||||
|
||||
echo "Copying envoy-static and envoy-static-stripped to 'docker/envoy-build'";
|
||||
cp "${ENVOY_DOCKER_BUILD_DIR}/envoy/x64/bin/dbg/envoy-contrib" "${PWD}/docker/base-envoy/envoy-static"
|
||||
chmod +x "${PWD}/docker/base-envoy/envoy-static"
|
||||
|
||||
cp "${ENVOY_DOCKER_BUILD_DIR}/envoy/x64/bin/dbg/envoy-contrib.dwp" "${PWD}/docker/base-envoy/envoy-static.dwp"
|
||||
chmod +x "${PWD}/docker/base-envoy/envoy-static.dwp"
|
||||
|
||||
cp "${ENVOY_DOCKER_BUILD_DIR}/envoy/x64/bin/envoy-contrib" "${PWD}/docker/base-envoy/envoy-static-stripped"
|
||||
chmod +x "${PWD}/docker/base-envoy/envoy-static-stripped"
|
|
@ -0,0 +1,103 @@
|
|||
#!/bin/bash
|
||||
|
||||
BLUE='\033[0;34m'
|
||||
GREEN='\033[0;32m'
|
||||
NC='\033[0m'
|
||||
|
||||
OSS_SOURCE="${PWD}"
|
||||
|
||||
# envoy directories
|
||||
BASE_ENVOY_DIR="${OSS_SOURCE}/_cxx/envoy"
|
||||
ENVOY_PROTO_API_BASE="${BASE_ENVOY_DIR}/api"
|
||||
ENVOY_COMPILED_GO_BASE="${BASE_ENVOY_DIR}/build_go"
|
||||
|
||||
# Emissary directories
|
||||
EMISSARY_PROTO_API_BASE="${OSS_SOURCE}/api"
|
||||
EMISSARY_COMPILED_PROTO_GO_BASE="${OSS_SOURCE}/pkg/api"
|
||||
|
||||
|
||||
|
||||
# envoy build container settings
|
||||
ENVOY_DOCKER_OPTIONS="--platform=${BUILD_ARCH}"
|
||||
export ENVOY_DOCKER_OPTIONS
|
||||
|
||||
# unset ssh auth sock because we don't need it in the container and
|
||||
# the `run_envoy_docker.sh` adds it by default.
|
||||
SSH_AUTH_SOCK=""
|
||||
export SSH_AUTH_SOCK
|
||||
|
||||
############### copy raw protos into emissary repo ######################
|
||||
|
||||
echo -e "${BLUE}removing existing Envoy Protobuf API from:${GREEN} $EMISSARY_PROTO_API_BASE/envoy";
|
||||
rm -rf "${EMISSARY_PROTO_API_BASE}/envoy"
|
||||
|
||||
echo -e "${BLUE}copying Envoy Protobuf API from ${GREEN} ${ENVOY_PROTO_API_BASE}/envoy ${NC}into ${GREEN}${EMISSARY_PROTO_API_BASE}/envoy";
|
||||
rsync --recursive --delete --delete-excluded --prune-empty-dirs --include='*/' \
|
||||
--include='*.proto' --exclude='*' \
|
||||
"${ENVOY_PROTO_API_BASE}/envoy" "${EMISSARY_PROTO_API_BASE}"
|
||||
|
||||
echo -e "${BLUE}removing existing Envoy Contrib Protobuf API from:${GREEN} ${EMISSARY_PROTO_API_BASE}/contrib";
|
||||
rm -rf "${EMISSARY_PROTO_API_BASE}/contrib"
|
||||
mkdir -p "${EMISSARY_PROTO_API_BASE}/contrib/envoy/extensions/filters/http"
|
||||
|
||||
echo -e "${BLUE}copying Envoy Contrib Protobuf API from ${GREEN} ${ENVOY_PROTO_API_BASE}/contrib ${NC}into ${GREEN}${EMISSARY_PROTO_API_BASE}/contrib";
|
||||
rsync --recursive --delete --delete-excluded --prune-empty-dirs \
|
||||
--include='*/' \
|
||||
--include='*.proto' \
|
||||
--exclude='*' \
|
||||
"${ENVOY_PROTO_API_BASE}/contrib/envoy/extensions/filters/http/golang" "${EMISSARY_PROTO_API_BASE}/contrib/envoy/extensions/filters/http"
|
||||
|
||||
############### compile go protos ######################
|
||||
|
||||
echo -e "${BLUE}compiling go-protobufs in envoy build container${NC}";
|
||||
rm -rf "${ENVOY_COMPILED_GO_BASE}"
|
||||
|
||||
cd "${BASE_ENVOY_DIR}" || exit;
|
||||
./ci/run_envoy_docker.sh "./ci/do_ci.sh 'api.go'";
|
||||
cd "${OSS_SOURCE}" || exit;
|
||||
|
||||
|
||||
############## moving envoy compiled protos to emissary #################
|
||||
echo -e "${BLUE}removing existing compiled protos from: ${GREEN} $EMISSARY_COMPILED_PROTO_GO_BASE/envoy${NC}";
|
||||
rm -rf "${EMISSARY_COMPILED_PROTO_GO_BASE}/envoy"
|
||||
|
||||
echo -e "${BLUE}copying compiled protos from: ${GREEN} ${ENVOY_COMPILED_GO_BASE}/envoy${NC} into ${GREEN}${EMISSARY_COMPILED_PROTO_GO_BASE}/envoy${NC}";
|
||||
rsync --recursive --delete --delete-excluded --prune-empty-dirs \
|
||||
--include='*/' \
|
||||
--include='*.go' \
|
||||
--exclude='*' \
|
||||
"${ENVOY_COMPILED_GO_BASE}/envoy" "${EMISSARY_COMPILED_PROTO_GO_BASE}"
|
||||
|
||||
echo -e "${BLUE}Updating import pkg references from: ${GREEN}github.com/envoyproxy/go-control-plane/envoy ${NC}--> ${GREEN}github.com/emissary-ingress/emissary/v3/pkg/api/envoy${NC}"
|
||||
find "${EMISSARY_COMPILED_PROTO_GO_BASE}/envoy" -type f \
|
||||
-exec chmod 644 {} + \
|
||||
-exec sed -E -i.bak \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/envoy,github.com/emissary-ingress/emissary/v3/pkg/api/envoy,g' \
|
||||
-- {} +;
|
||||
|
||||
find "${EMISSARY_COMPILED_PROTO_GO_BASE}/envoy" -name '*.bak' -delete;
|
||||
|
||||
gofmt -w -s "${EMISSARY_COMPILED_PROTO_GO_BASE}/envoy"
|
||||
|
||||
############## moving contrib compiled protos to emissary #################
|
||||
echo -e "${BLUE}removing existing compiled protos from: ${GREEN} $EMISSARY_COMPILED_PROTO_GO_BASE/contrib${NC}";
|
||||
rm -rf "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib"
|
||||
mkdir -p "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib/envoy/extensions/filters/http"
|
||||
|
||||
echo -e "${BLUE}copying compiled protos from: ${GREEN} ${ENVOY_COMPILED_GO_BASE}/contrib${NC} into ${GREEN}${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib${NC}";
|
||||
rsync --recursive --delete --delete-excluded --prune-empty-dirs \
|
||||
--include='*/' \
|
||||
--include='*.go' \
|
||||
--exclude='*' \
|
||||
"${ENVOY_COMPILED_GO_BASE}/contrib/envoy/extensions/filters/http/golang" "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib/envoy/extensions/filters/http"
|
||||
|
||||
echo -e "${BLUE}Updating import pkg references from: ${GREEN}github.com/envoyproxy/go-control-plane/envoy ${NC}--> ${GREEN}github.com/emissary-ingress/emissary/v3/pkg/api/envoy${NC}"
|
||||
find "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib" -type f \
|
||||
-exec chmod 644 {} + \
|
||||
-exec sed -E -i.bak \
|
||||
-e 's,github\.com/envoyproxy/go-control-plane/envoy,github.com/emissary-ingress/emissary/v3/pkg/api/envoy,g' \
|
||||
-- {} +;
|
||||
|
||||
find "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib" -name '*.bak' -delete;
|
||||
|
||||
gofmt -w -s "${EMISSARY_COMPILED_PROTO_GO_BASE}/contrib"
|
|
@ -0,0 +1,62 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Input Args capture from Environement Variables
|
||||
# The phone make targets have been configured to pass these along when using Make.
|
||||
default_test_targets="//contrib/golang/... //test/..."
|
||||
FIPS_MODE=${FIPS_MODE:-}
|
||||
BUILD_ARCH=${BUILD_ARCH:-linux/amd64}
|
||||
ENVOY_TEST_LABEL=${ENVOY_TEST_LABEL:-$default_test_targets}
|
||||
|
||||
# static vars
|
||||
OSS_SOURCE="$PWD"
|
||||
BASE_ENVOY_DIR="$PWD/_cxx/envoy"
|
||||
ENVOY_DOCKER_BUILD_DIR="$PWD/_cxx/envoy-docker-build"
|
||||
export ENVOY_DOCKER_BUILD_DIR
|
||||
|
||||
# Dynamic variables
|
||||
DOCKER_OPTIONS=(
|
||||
"--platform=${BUILD_ARCH}"
|
||||
"--network=host"
|
||||
)
|
||||
|
||||
ENVOY_DOCKER_OPTIONS="${DOCKER_OPTIONS[*]}"
|
||||
export ENVOY_DOCKER_OPTIONS
|
||||
|
||||
# unset ssh auth sock because we don't need it in the container and
|
||||
# the `run_envoy_docker.sh` adds it by default.
|
||||
SSH_AUTH_SOCK=""
|
||||
export SSH_AUTH_SOCK
|
||||
|
||||
BAZEL_BUILD_EXTRA_OPTIONS=()
|
||||
if [ -n "$FIPS_MODE" ]; then
|
||||
BAZEL_BUILD_EXTRA_OPTIONS+=(--define boringssl=fips)
|
||||
fi;
|
||||
|
||||
if [ ! -d "$BASE_ENVOY_DIR" ]; then
|
||||
echo "Looks like Envoy hasn't been cloned locally yet, run clone-envoy target to ensure it is cloned";
|
||||
exit 1;
|
||||
fi;
|
||||
|
||||
|
||||
echo "Running Envoy Tests with the following parameters set:"
|
||||
echo " ENVOY_TEST_LABEL: ${ENVOY_TEST_LABEL}"
|
||||
echo " FIPS_MODE: ${FIPS_MODE}"
|
||||
echo " BUILD_ARCH: ${BUILD_ARCH}"
|
||||
echo " ENVOY_DOCKER_BUILD_DIR: ${ENVOY_DOCKER_BUILD_DIR}"
|
||||
echo " ENVOY_DOCKER_OPTIONS: ${ENVOY_DOCKER_OPTIONS}"
|
||||
echo " SSH_AUTH_SOCK: ${SSH_AUTH_SOCK}"
|
||||
echo " BAZEL_BUILD_EXTRA_OPTIONS: ${BAZEL_BUILD_EXTRA_OPTIONS[*]}"
|
||||
echo " "
|
||||
echo " "
|
||||
|
||||
ci_cmd="bazel test --test_output=errors \
|
||||
--verbose_failures -c dbg --test_env=ENVOY_IP_TEST_VERSIONS=v4only \
|
||||
${ENVOY_TEST_LABEL}";
|
||||
|
||||
if [ ${#BAZEL_BUILD_EXTRA_OPTIONS[@]} -gt 0 ]; then
|
||||
ci_cmd="BAZEL_BUILD_EXTRA_OPTIONS='${BAZEL_BUILD_EXTRA_OPTIONS[*]}' $ci_cmd"
|
||||
fi;
|
||||
|
||||
cd "${BASE_ENVOY_DIR}" || exit;
|
||||
./ci/run_envoy_docker.sh "${ci_cmd}";
|
||||
cd "${OSS_SOURCE}" || exit;
|
|
@ -0,0 +1,99 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package envoy.extensions.filters.http.golang.v3alpha;
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
import "xds/annotations/v3/status.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.extensions.filters.http.golang.v3alpha";
|
||||
option java_outer_classname = "GolangProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/http/golang/v3alpha";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
option (xds.annotations.v3.file_status).work_in_progress = true;
|
||||
|
||||
// [#protodoc-title: Golang HTTP filter]
|
||||
//
|
||||
// For an overview of the Golang HTTP filter please see the :ref:`configuration reference documentation <config_http_filters_golang>`.
|
||||
// [#extension: envoy.filters.http.golang]
|
||||
|
||||
// [#next-free-field: 6]
|
||||
// Configuration for the Golang HTTP filter: identifies the dynamic library to
// load, the registered plugin inside it, and the plugin's own configuration.
// [#next-free-field: 6]
message Config {
  // Controls how virtual-host-, route-, and filter-level plugin configuration
  // is combined before being handed to the Go plugin.
  // The meanings are as follows:
  //
  // :``MERGE_VIRTUALHOST_ROUTER_FILTER``: Pass all configuration into Go plugin.
  // :``MERGE_VIRTUALHOST_ROUTER``: Pass merged Virtual host and Router configuration into Go plugin.
  // :``OVERRIDE``: Pass merged Virtual host, Router, and plugin configuration into Go plugin.
  //
  // [#not-implemented-hide:]
  enum MergePolicy {
    MERGE_VIRTUALHOST_ROUTER_FILTER = 0;

    MERGE_VIRTUALHOST_ROUTER = 1;

    // Value 2 is skipped in this enum; reserve it so it cannot be
    // accidentally reused later with different semantics.
    reserved 2;

    OVERRIDE = 3;
  }

  // Globally unique ID for a dynamic library file.
  string library_id = 1 [(validate.rules).string = {min_len: 1}];

  // Path to a dynamic library implementing the
  // :repo:`StreamFilter API <contrib/golang/common/go/api.StreamFilter>`
  // interface.
  // [#comment:TODO(wangfakang): Support for downloading libraries from remote repositories.]
  string library_path = 2 [(validate.rules).string = {min_len: 1}];

  // Globally unique name of the Go plugin.
  //
  // This name **must** be consistent with the name registered in ``http::RegisterHttpFilterConfigFactory``,
  // and can be used to associate :ref:`route and virtual host plugin configuration
  // <envoy_v3_api_field_extensions.filters.http.golang.v3alpha.ConfigsPerRoute.plugins_config>`.
  //
  string plugin_name = 3 [(validate.rules).string = {min_len: 1}];

  // Configuration for the Go plugin.
  //
  // .. note::
  //   This configuration is only parsed in the go plugin, and is therefore not validated
  //   by Envoy.
  //
  // See the :repo:`StreamFilter API <contrib/golang/common/go/api/filter.go>`
  // for more information about how the plugin's configuration data can be accessed.
  //
  google.protobuf.Any plugin_config = 4;

  // Merge policy for plugin configuration.
  //
  // The Go plugin configuration supports three dimensions:
  //
  // * Virtual host’s :ref:`typed_per_filter_config <envoy_v3_api_field_config.route.v3.VirtualHost.typed_per_filter_config>`
  // * Route’s :ref:`typed_per_filter_config <envoy_v3_api_field_config.route.v3.Route.typed_per_filter_config>`
  // * The filter's :ref:`plugin_config <envoy_v3_api_field_extensions.filters.http.golang.v3alpha.Config.plugin_config>`
  //
  // [#not-implemented-hide:]
  MergePolicy merge_policy = 5 [(validate.rules).enum = {defined_only: true}];
}
|
||||
|
||||
// Per-route / per-virtual-host override for a single Go plugin. Exactly one
// of ``disabled`` or ``config`` must be set (``validate.required`` on the
// oneof enforces this).
message RouterPlugin {
  oneof override {
    option (validate.required) = true;

    // [#not-implemented-hide:]
    // Disable the filter for this particular vhost or route.
    // If disabled is specified in multiple per-filter-configs, the most specific one will be used.
    bool disabled = 1 [(validate.rules).bool = {const: true}];

    // The config field is used for setting per-route and per-virtualhost plugin config.
    google.protobuf.Any config = 2;
  }
}
|
||||
|
||||
// Route- and virtual-host-level plugin configuration, installed via
// ``typed_per_filter_config``.
message ConfigsPerRoute {
  // Configuration of the Go plugin at the per-route or per-virtual-host level,
  // keyed on the :ref:`plugin_name <envoy_v3_api_field_extensions.filters.http.golang.v3alpha.Config.plugin_name>`
  // of the Go plugin.
  //
  map<string, RouterPlugin> plugins_config = 1;
}
|
|
@ -30,7 +30,7 @@ message Clusters {
|
|||
}
|
||||
|
||||
// Details an individual cluster's current status.
|
||||
// [#next-free-field: 8]
|
||||
// [#next-free-field: 9]
|
||||
message ClusterStatus {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClusterStatus";
|
||||
|
||||
|
@ -84,6 +84,9 @@ message ClusterStatus {
|
|||
|
||||
// Observability name of the cluster.
|
||||
string observability_name = 7;
|
||||
|
||||
// The :ref:`EDS service name <envoy_v3_api_field_config.cluster.v3.Cluster.EdsClusterConfig.service_name>` if the cluster is an EDS cluster.
|
||||
string eds_service_name = 8;
|
||||
}
|
||||
|
||||
// Current state of a particular host.
|
||||
|
|
|
@ -32,6 +32,9 @@ message ConfigDump {
|
|||
//
|
||||
// * ``bootstrap``: :ref:`BootstrapConfigDump <envoy_v3_api_msg_admin.v3.BootstrapConfigDump>`
|
||||
// * ``clusters``: :ref:`ClustersConfigDump <envoy_v3_api_msg_admin.v3.ClustersConfigDump>`
|
||||
// * ``ecds_filter_http``: :ref:`EcdsConfigDump <envoy_v3_api_msg_admin.v3.EcdsConfigDump>`
|
||||
// * ``ecds_filter_quic_listener``: :ref:`EcdsConfigDump <envoy_v3_api_msg_admin.v3.EcdsConfigDump>`
|
||||
// * ``ecds_filter_tcp_listener``: :ref:`EcdsConfigDump <envoy_v3_api_msg_admin.v3.EcdsConfigDump>`
|
||||
// * ``endpoints``: :ref:`EndpointsConfigDump <envoy_v3_api_msg_admin.v3.EndpointsConfigDump>`
|
||||
// * ``listeners``: :ref:`ListenersConfigDump <envoy_v3_api_msg_admin.v3.ListenersConfigDump>`
|
||||
// * ``scoped_routes``: :ref:`ScopedRoutesConfigDump <envoy_v3_api_msg_admin.v3.ScopedRoutesConfigDump>`
|
||||
|
@ -40,6 +43,9 @@ message ConfigDump {
|
|||
//
|
||||
// EDS Configuration will only be dumped by using parameter ``?include_eds``
|
||||
//
|
||||
// Currently ECDS is supported in HTTP and listener filters. Note, ECDS configuration for
|
||||
// either HTTP or listener filter will only be dumped if it is actually configured.
|
||||
//
|
||||
// You can filter output with the resource and mask query parameters.
|
||||
// See :ref:`/config_dump?resource={} <operations_admin_interface_config_dump_by_resource>`,
|
||||
// :ref:`/config_dump?mask={} <operations_admin_interface_config_dump_by_mask>`,
|
||||
|
|
|
@ -370,3 +370,43 @@ message EndpointsConfigDump {
|
|||
// The dynamically loaded endpoint configs.
|
||||
repeated DynamicEndpointConfig dynamic_endpoint_configs = 3;
|
||||
}
|
||||
|
||||
// Envoy's ECDS service fills this message with all current extension
|
||||
// configuration. Extension configuration information can be used to recreate
|
||||
// an Envoy ECDS listener and HTTP filters as static filters or by returning
|
||||
// them in ECDS response.
|
||||
message EcdsConfigDump {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.EcdsConfigDump";
|
||||
|
||||
// [#next-free-field: 6]
|
||||
message EcdsFilterConfig {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.admin.v2alpha.EcdsConfigDump.EcdsFilterConfig";
|
||||
|
||||
// This is the per-resource version information. This version is currently
|
||||
// taken from the :ref:`version_info
|
||||
// <envoy_v3_api_field_service.discovery.v3.DiscoveryResponse.version_info>`
|
||||
// field at the time that the ECDS filter was loaded.
|
||||
string version_info = 1;
|
||||
|
||||
// The ECDS filter config.
|
||||
google.protobuf.Any ecds_filter = 2;
|
||||
|
||||
// The timestamp when the ECDS filter was last updated.
|
||||
google.protobuf.Timestamp last_updated = 3;
|
||||
|
||||
// Set if the last update failed, cleared after the next successful update.
|
||||
// The ``error_state`` field contains the rejected version of this
|
||||
// particular resource along with the reason and timestamp. For successfully
|
||||
// updated or acknowledged resource, this field should be empty.
|
||||
// [#not-implemented-hide:]
|
||||
UpdateFailureState error_state = 4;
|
||||
|
||||
// The client status of this resource.
|
||||
// [#not-implemented-hide:]
|
||||
ClientResourceStatus client_status = 5;
|
||||
}
|
||||
|
||||
// The ECDS filter configs.
|
||||
repeated EcdsFilterConfig ecds_filters = 1;
|
||||
}
|
||||
|
|
|
@ -37,6 +37,5 @@ message ListenerStatus {
|
|||
|
||||
// The additional addresses the listener is listening on as specified via the :ref:`additional_addresses <envoy_v3_api_field_config.listener.v3.Listener.additional_addresses>`
|
||||
// configuration.
|
||||
// [#not-implemented-hide:]
|
||||
repeated config.core.v3.Address additional_local_addresses = 3;
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@ option java_multiple_files = true;
|
|||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/admin/v3;adminv3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Server State]
|
||||
// [#protodoc-title: Server state]
|
||||
|
||||
// Proto representation of the value returned by /server_info, containing
|
||||
// server version/server status information.
|
||||
|
@ -59,7 +59,7 @@ message ServerInfo {
|
|||
config.core.v3.Node node = 7;
|
||||
}
|
||||
|
||||
// [#next-free-field: 39]
|
||||
// [#next-free-field: 41]
|
||||
message CommandLineOptions {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.admin.v2alpha.CommandLineOptions";
|
||||
|
@ -98,6 +98,12 @@ message CommandLineOptions {
|
|||
// See :option:`--use-dynamic-base-id` for details.
|
||||
bool use_dynamic_base_id = 31;
|
||||
|
||||
// See :option:`--skip-hot-restart-on-no-parent` for details.
|
||||
bool skip_hot_restart_on_no_parent = 39;
|
||||
|
||||
// See :option:`--skip-hot-restart-parent-stats` for details.
|
||||
bool skip_hot_restart_parent_stats = 40;
|
||||
|
||||
// See :option:`--base-id-path` for details.
|
||||
string base_id_path = 32;
|
||||
|
||||
|
|
|
@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.api.v2";
|
|||
option java_outer_classname = "CdsProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/api/v2;apiv2";
|
||||
option java_generic_services = true;
|
||||
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.cluster.v3";
|
||||
option (udpa.annotations.file_status).package_version_status = FROZEN;
|
||||
|
||||
|
|
|
@ -24,9 +24,9 @@ option (udpa.annotations.file_status).package_version_status = FROZEN;
|
|||
// more information on outlier detection.
|
||||
// [#next-free-field: 21]
|
||||
message OutlierDetection {
|
||||
// The number of consecutive 5xx responses or local origin errors that are mapped
|
||||
// to 5xx error codes before a consecutive 5xx ejection
|
||||
// occurs. Defaults to 5.
|
||||
// The number of consecutive server-side error responses (for HTTP traffic,
|
||||
// 5xx responses; for TCP traffic, connection failures; for Redis, failure to
|
||||
// respond PONG; etc.) before a consecutive 5xx ejection occurs. Defaults to 5.
|
||||
google.protobuf.UInt32Value consecutive_5xx = 1;
|
||||
|
||||
// The time interval between ejection analysis sweeps. This can result in
|
||||
|
|
|
@ -100,7 +100,7 @@ message GrpcService {
|
|||
message StsService {
|
||||
// URI of the token exchange service that handles token exchange requests.
|
||||
// [#comment:TODO(asraa): Add URI validation when implemented. Tracked by
|
||||
// https://github.com/envoyproxy/protoc-gen-validate/issues/303]
|
||||
// https://github.com/bufbuild/protoc-gen-validate/issues/303]
|
||||
string token_exchange_service_uri = 1;
|
||||
|
||||
// Location of the target service or resource where the client
|
||||
|
|
|
@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.api.v2";
|
|||
option java_outer_classname = "EdsProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/api/v2;apiv2";
|
||||
option java_generic_services = true;
|
||||
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.endpoint.v3";
|
||||
option (udpa.annotations.file_status).package_version_status = FROZEN;
|
||||
|
||||
|
|
|
@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.api.v2";
|
|||
option java_outer_classname = "LdsProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/api/v2;apiv2";
|
||||
option java_generic_services = true;
|
||||
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.listener.v3";
|
||||
option (udpa.annotations.file_status).package_version_status = FROZEN;
|
||||
|
||||
|
|
|
@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.api.v2";
|
|||
option java_outer_classname = "RdsProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/api/v2;apiv2";
|
||||
option java_generic_services = true;
|
||||
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3";
|
||||
option (udpa.annotations.file_status).package_version_status = FROZEN;
|
||||
|
||||
|
|
|
@ -859,8 +859,9 @@ message RouteAction {
|
|||
// Indicates that during forwarding, the host header will be swapped with
|
||||
// the hostname of the upstream host chosen by the cluster manager. This
|
||||
// option is applicable only when the destination cluster for a route is of
|
||||
// type *strict_dns* or *logical_dns*. Setting this to true with other cluster
|
||||
// types has no effect.
|
||||
// type ``STRICT_DNS``, ``LOGICAL_DNS`` or ``STATIC``. For ``STATIC`` clusters, the
|
||||
// hostname attribute of the endpoint must be configured. Setting this to true
|
||||
// with other cluster types has no effect.
|
||||
google.protobuf.BoolValue auto_host_rewrite = 7;
|
||||
|
||||
// Indicates that during forwarding, the host header will be swapped with the content of given
|
||||
|
@ -1264,7 +1265,7 @@ message Tracing {
|
|||
// Target percentage of requests managed by this HTTP connection manager that will be force
|
||||
// traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`
|
||||
// header is set. This field is a direct analog for the runtime variable
|
||||
// 'tracing.client_sampling' in the :ref:`HTTP Connection Manager
|
||||
// 'tracing.client_enabled' in the :ref:`HTTP Connection Manager
|
||||
// <config_http_conn_man_runtime>`.
|
||||
// Default: 100%
|
||||
type.FractionalPercent client_sampling = 1;
|
||||
|
|
|
@ -39,7 +39,7 @@ option (udpa.annotations.file_status).package_version_status = FROZEN;
|
|||
// fragments:
|
||||
// - header_value_extractor:
|
||||
// name: X-Route-Selector
|
||||
// element_separator: ,
|
||||
// element_separator: ","
|
||||
// element:
|
||||
// separator: =
|
||||
// key: vip
|
||||
|
|
|
@ -16,7 +16,6 @@ option java_package = "io.envoyproxy.envoy.api.v2";
|
|||
option java_outer_classname = "SrdsProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/api/v2;apiv2";
|
||||
option java_generic_services = true;
|
||||
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3";
|
||||
option (udpa.annotations.file_status).package_version_status = FROZEN;
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@ package envoy.config.accesslog.v3;
|
|||
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
import "envoy/config/route/v3/route_components.proto";
|
||||
import "envoy/data/accesslog/v3/accesslog.proto";
|
||||
import "envoy/type/matcher/v3/metadata.proto";
|
||||
import "envoy/type/v3/percent.proto";
|
||||
|
||||
|
@ -43,7 +44,7 @@ message AccessLog {
|
|||
}
|
||||
}
|
||||
|
||||
// [#next-free-field: 13]
|
||||
// [#next-free-field: 14]
|
||||
message AccessLogFilter {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.filter.accesslog.v2.AccessLogFilter";
|
||||
|
@ -87,6 +88,9 @@ message AccessLogFilter {
|
|||
|
||||
// Metadata Filter
|
||||
MetadataFilter metadata_filter = 12;
|
||||
|
||||
// Log Type Filter
|
||||
LogTypeFilter log_type_filter = 13;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -122,7 +126,10 @@ message StatusCodeFilter {
|
|||
ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];
|
||||
}
|
||||
|
||||
// Filters on total request duration in milliseconds.
|
||||
// Filters based on the duration of the request or stream, in milliseconds.
|
||||
// For end of stream access logs, the total duration of the stream will be used.
|
||||
// For :ref:`periodic access logs<arch_overview_access_log_periodic>`,
|
||||
// the duration of the stream at the time of log recording will be used.
|
||||
message DurationFilter {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.filter.accesslog.v2.DurationFilter";
|
||||
|
@ -247,6 +254,9 @@ message ResponseFlagFilter {
|
|||
in: "UPE"
|
||||
in: "NC"
|
||||
in: "OM"
|
||||
in: "DF"
|
||||
in: "DO"
|
||||
in: "DR"
|
||||
}
|
||||
}
|
||||
}];
|
||||
|
@ -307,6 +317,17 @@ message MetadataFilter {
|
|||
google.protobuf.BoolValue match_if_key_not_found = 2;
|
||||
}
|
||||
|
||||
// Filters based on access log type.
|
||||
message LogTypeFilter {
|
||||
// Only logs records whose type is one of the types defined in this field.
|
||||
repeated data.accesslog.v3.AccessLogType types = 1
|
||||
[(validate.rules).repeated = {items {enum {defined_only: true}}}];
|
||||
|
||||
// If this field is set to true, the filter will instead block all records
|
||||
// with an access log type in the ``types`` field, and allow all other records.
|
||||
bool exclude = 2;
|
||||
}
|
||||
|
||||
// Extension filter is statically registered at runtime.
|
||||
message ExtensionFilter {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
|
|
|
@ -41,7 +41,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// <config_overview_bootstrap>` for more detail.
|
||||
|
||||
// Bootstrap :ref:`configuration overview <config_overview_bootstrap>`.
|
||||
// [#next-free-field: 35]
|
||||
// [#next-free-field: 42]
|
||||
message Bootstrap {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.bootstrap.v2.Bootstrap";
|
||||
|
@ -101,6 +101,48 @@ message Bootstrap {
|
|||
core.v3.ApiConfigSource ads_config = 3;
|
||||
}
|
||||
|
||||
message ApplicationLogConfig {
|
||||
message LogFormat {
|
||||
oneof log_format {
|
||||
option (validate.required) = true;
|
||||
|
||||
// Flush application logs in JSON format. The configured JSON struct can
|
||||
// support all the format flags specified in the :option:`--log-format`
|
||||
// command line options section, except for the ``%v`` and ``%_`` flags.
|
||||
google.protobuf.Struct json_format = 1;
|
||||
|
||||
// Flush application log in a format defined by a string. The text format
|
||||
// can support all the format flags specified in the :option:`--log-format`
|
||||
// command line option section.
|
||||
string text_format = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Optional field to set the application logs format. If this field is set, it will override
|
||||
// the default log format. Setting both this field and :option:`--log-format` command line
|
||||
// option is not allowed, and will cause a bootstrap error.
|
||||
LogFormat log_format = 1;
|
||||
}
|
||||
|
||||
message DeferredStatOptions {
|
||||
// When the flag is enabled, Envoy will lazily initialize a subset of the stats (see below).
|
||||
// This will save memory and CPU cycles when creating the objects that own these stats, if those
|
||||
// stats are never referenced throughout the lifetime of the process. However, it will incur additional
|
||||
// memory overhead for these objects, and a small increase of CPU usage when at least one of the stats
|
||||
// is updated for the first time.
|
||||
// Groups of stats that will be lazily initialized:
|
||||
// - Cluster traffic stats: a subgroup of the :ref:`cluster statistics <config_cluster_manager_cluster_stats>`
|
||||
// that are used when requests are routed to the cluster.
|
||||
bool enable_deferred_creation_stats = 1;
|
||||
}
|
||||
|
||||
message GrpcAsyncClientManagerConfig {
|
||||
// Optional field to set the expiration time for the cached gRPC client object.
|
||||
// The minimal value is 5s and the default is 50s.
|
||||
google.protobuf.Duration max_cached_entry_idle_duration = 1
|
||||
[(validate.rules).duration = {gte {seconds: 5}}];
|
||||
}
|
||||
|
||||
reserved 10, 11;
|
||||
|
||||
reserved "runtime";
|
||||
|
@ -163,6 +205,9 @@ message Bootstrap {
|
|||
// Optional set of stats sinks.
|
||||
repeated metrics.v3.StatsSink stats_sinks = 6;
|
||||
|
||||
// Options to control behaviors of deferred creation compatible stats.
|
||||
DeferredStatOptions deferred_stat_options = 39;
|
||||
|
||||
// Configuration for internal processing of stats.
|
||||
metrics.v3.StatsConfig stats_config = 13;
|
||||
|
||||
|
@ -335,6 +380,41 @@ message Bootstrap {
|
|||
// If the value is not specified, Google RE2 will be used by default.
|
||||
// [#extension-category: envoy.regex_engines]
|
||||
core.v3.TypedExtensionConfig default_regex_engine = 34;
|
||||
|
||||
// Optional XdsResourcesDelegate configuration, which allows plugging custom logic into both
|
||||
// fetch and load events during xDS processing.
|
||||
// If a value is not specified, no XdsResourcesDelegate will be used.
|
||||
// TODO(abeyad): Add public-facing documentation.
|
||||
// [#not-implemented-hide:]
|
||||
core.v3.TypedExtensionConfig xds_delegate_extension = 35;
|
||||
|
||||
// Optional XdsConfigTracker configuration, which allows tracking xDS responses in external components,
|
||||
// e.g., external tracer or monitor. It provides the process point when receive, ingest, or fail to
|
||||
// process xDS resources and messages. If a value is not specified, no XdsConfigTracker will be used.
|
||||
//
|
||||
// .. note::
|
||||
//
|
||||
// There are no in-repo extensions currently, and the :repo:`XdsConfigTracker <envoy/config/xds_config_tracker.h>`
|
||||
// interface should be implemented before using.
|
||||
// See :repo:`xds_config_tracker_integration_test <test/integration/xds_config_tracker_integration_test.cc>`
|
||||
// for an example usage of the interface.
|
||||
core.v3.TypedExtensionConfig xds_config_tracker_extension = 36;
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
// This controls the type of listener manager configured for Envoy. Currently
|
||||
// Envoy only supports ListenerManager for this field and Envoy Mobile
|
||||
// supports ApiListenerManager.
|
||||
core.v3.TypedExtensionConfig listener_manager = 37;
|
||||
|
||||
// Optional application log configuration.
|
||||
ApplicationLogConfig application_log_config = 38;
|
||||
|
||||
// Optional gRPC async manager config.
|
||||
GrpcAsyncClientManagerConfig grpc_async_client_manager_config = 40;
|
||||
|
||||
// Optional configuration for memory allocation manager.
|
||||
// Memory releasing is only supported for `tcmalloc allocator <https://github.com/google/tcmalloc>`_.
|
||||
MemoryAllocatorManager memory_allocator_manager = 41;
|
||||
}
|
||||
|
||||
// Administration interface :ref:`operations documentation
|
||||
|
@ -372,6 +452,7 @@ message Admin {
|
|||
}
|
||||
|
||||
// Cluster manager :ref:`architecture overview <arch_overview_cluster_manager>`.
|
||||
// [#next-free-field: 6]
|
||||
message ClusterManager {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.bootstrap.v2.ClusterManager";
|
||||
|
@ -412,6 +493,11 @@ message ClusterManager {
|
|||
// <envoy_v3_api_field_config.core.v3.ApiConfigSource.api_type>` :ref:`GRPC
|
||||
// <envoy_v3_api_enum_value_config.core.v3.ApiConfigSource.ApiType.GRPC>`.
|
||||
core.v3.ApiConfigSource load_stats_config = 4;
|
||||
|
||||
// Whether the ClusterManager will create clusters on the worker threads
|
||||
// inline during requests. This will save memory and CPU cycles in cases where
|
||||
// there are lots of inactive clusters and > 1 worker thread.
|
||||
bool enable_deferred_cluster_creation = 5;
|
||||
}
|
||||
|
||||
// Allows you to specify different watchdog configs for different subsystems.
|
||||
|
@ -652,3 +738,14 @@ message CustomInlineHeader {
|
|||
// The type of the header that is expected to be set as the inline header.
|
||||
InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}];
|
||||
}
|
||||
|
||||
message MemoryAllocatorManager {
|
||||
// Configures tcmalloc to release the configured amount of free memory (in bytes) in the background, once per ``memory_release_interval``.
|
||||
// If equals to ``0``, no memory release will occur. Defaults to ``0``.
|
||||
uint64 bytes_to_release = 1;
|
||||
|
||||
// Interval in milliseconds for memory releasing. If specified, during every
|
||||
// interval Envoy will try to release ``bytes_to_release`` of free memory back to operating system for reuse.
|
||||
// Defaults to 1000 milliseconds.
|
||||
google.protobuf.Duration memory_release_interval = 2;
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@ import "envoy/config/core/v3/health_check.proto";
|
|||
import "envoy/config/core/v3/protocol.proto";
|
||||
import "envoy/config/core/v3/resolver.proto";
|
||||
import "envoy/config/endpoint/v3/endpoint.proto";
|
||||
import "envoy/type/metadata/v3/metadata.proto";
|
||||
import "envoy/type/v3/percent.proto";
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
@ -44,7 +45,7 @@ message ClusterCollection {
|
|||
}
|
||||
|
||||
// Configuration for a single upstream cluster.
|
||||
// [#next-free-field: 57]
|
||||
// [#next-free-field: 58]
|
||||
message Cluster {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster";
|
||||
|
||||
|
@ -167,7 +168,7 @@ message Cluster {
|
|||
// The name of the match, used in stats generation.
|
||||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// Optional endpoint metadata match criteria.
|
||||
// Optional metadata match criteria.
|
||||
// The connection to the endpoint with metadata matching what is set in this field
|
||||
// will use the transport socket configuration specified here.
|
||||
// The endpoint's metadata entry in ``envoy.transport_socket_match`` is used to match
|
||||
|
@ -209,7 +210,7 @@ message Cluster {
|
|||
|
||||
// Optionally divide the endpoints in this cluster into subsets defined by
|
||||
// endpoint metadata and selected by route and weighted cluster metadata.
|
||||
// [#next-free-field: 8]
|
||||
// [#next-free-field: 9]
|
||||
message LbSubsetConfig {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.Cluster.LbSubsetConfig";
|
||||
|
@ -225,6 +226,52 @@ message Cluster {
|
|||
DEFAULT_SUBSET = 2;
|
||||
}
|
||||
|
||||
enum LbSubsetMetadataFallbackPolicy {
|
||||
// No fallback. Route metadata will be used as-is.
|
||||
METADATA_NO_FALLBACK = 0;
|
||||
|
||||
// A special metadata key ``fallback_list`` will be used to provide variants of metadata to try.
|
||||
// Value of ``fallback_list`` key has to be a list. Every list element has to be a struct - it will
|
||||
// be merged with route metadata, overriding keys that appear in both places.
|
||||
// ``fallback_list`` entries will be used in order until a host is found.
|
||||
//
|
||||
// ``fallback_list`` key itself is removed from metadata before subset load balancing is performed.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// for metadata:
|
||||
//
|
||||
// .. code-block:: yaml
|
||||
//
|
||||
// version: 1.0
|
||||
// fallback_list:
|
||||
// - version: 2.0
|
||||
// hardware: c64
|
||||
// - hardware: c32
|
||||
// - version: 3.0
|
||||
//
|
||||
// at first, metadata:
|
||||
//
|
||||
// .. code-block:: json
|
||||
//
|
||||
// {"version": "2.0", "hardware": "c64"}
|
||||
//
|
||||
// will be used for load balancing. If no host is found, metadata:
|
||||
//
|
||||
// .. code-block:: json
|
||||
//
|
||||
// {"version": "1.0", "hardware": "c32"}
|
||||
//
|
||||
// is next to try. If it still results in no host, finally metadata:
|
||||
//
|
||||
// .. code-block:: json
|
||||
//
|
||||
// {"version": "3.0"}
|
||||
//
|
||||
// is used.
|
||||
FALLBACK_LIST = 1;
|
||||
}
|
||||
|
||||
// Specifications for subsets.
|
||||
message LbSubsetSelector {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
|
@ -260,10 +307,7 @@ message Cluster {
|
|||
// Selects a mode of operation in which each subset has only one host. This mode uses the same rules for
|
||||
// choosing a host, but updating hosts is faster, especially for large numbers of hosts.
|
||||
//
|
||||
// If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy.
|
||||
//
|
||||
// Currently, this mode is only supported if ``subset_selectors`` has only one entry, and ``keys`` contains
|
||||
// only one entry.
|
||||
// If a match is found to a host, that host will be used regardless of priority levels.
|
||||
//
|
||||
// When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in ``keys``
|
||||
// will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge
|
||||
|
@ -349,6 +393,16 @@ message Cluster {
|
|||
// endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value
|
||||
// and any of the elements in the list matches the criteria.
|
||||
bool list_as_any = 7;
|
||||
|
||||
// Fallback mechanism that allows to try different route metadata until a host is found.
|
||||
// If load balancing process, including all its mechanisms (like
|
||||
// :ref:`fallback_policy<envoy_v3_api_field_config.cluster.v3.Cluster.LbSubsetConfig.fallback_policy>`)
|
||||
// fails to select a host, this policy decides if and how the process is repeated using another metadata.
|
||||
//
|
||||
// The value defaults to
|
||||
// :ref:`METADATA_NO_FALLBACK<envoy_v3_api_enum_value_config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetMetadataFallbackPolicy.METADATA_NO_FALLBACK>`.
|
||||
LbSubsetMetadataFallbackPolicy metadata_fallback_policy = 8
|
||||
[(validate.rules).enum = {defined_only: true}];
|
||||
}
|
||||
|
||||
// Configuration for :ref:`slow start mode <arch_overview_load_balancing_slow_start>`.
|
||||
|
@ -462,8 +516,8 @@ message Cluster {
|
|||
// Specific configuration for the :ref:`Maglev<arch_overview_load_balancing_types_maglev>`
|
||||
// load balancing policy.
|
||||
message MaglevLbConfig {
|
||||
// The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee.
|
||||
// Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same
|
||||
// The table size for Maglev hashing. Maglev aims for "minimal disruption" rather than an absolute guarantee.
|
||||
// Minimal disruption means that when the set of upstream hosts change, a connection will likely be sent to the same
|
||||
// upstream as it was before. Increasing the table size reduces the amount of disruption.
|
||||
// The table size must be prime number limited to 5000011. If it is not specified, the default is 65537.
|
||||
google.protobuf.UInt64Value table_size = 1 [(validate.rules).uint64 = {lte: 5000011}];
|
||||
|
@ -472,6 +526,7 @@ message Cluster {
|
|||
// Specific configuration for the
|
||||
// :ref:`Original Destination <arch_overview_load_balancing_types_original_destination>`
|
||||
// load balancing policy.
|
||||
// [#extension: envoy.clusters.original_dst]
|
||||
message OriginalDstLbConfig {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.Cluster.OriginalDstLbConfig";
|
||||
|
@ -493,6 +548,14 @@ message Cluster {
|
|||
// The http header to override destination address if :ref:`use_http_header <envoy_v3_api_field_config.cluster.v3.Cluster.OriginalDstLbConfig.use_http_header>`.
|
||||
// is set to true. If the value is empty, :ref:`x-envoy-original-dst-host <config_http_conn_man_headers_x-envoy-original-dst-host>` will be used.
|
||||
string http_header_name = 2;
|
||||
|
||||
// The port to override for the original dst address. This port
|
||||
// will take precedence over filter state and header override ports
|
||||
google.protobuf.UInt32Value upstream_port_override = 3 [(validate.rules).uint32 = {lte: 65535}];
|
||||
|
||||
// The dynamic metadata key to override destination address.
|
||||
// First the request metadata is considered, then the connection one.
|
||||
type.metadata.v3.MetadataKey metadata_key = 4;
|
||||
}
|
||||
|
||||
// Common configuration for all load balancer implementations.
|
||||
|
@ -661,7 +724,7 @@ message Cluster {
|
|||
google.protobuf.DoubleValue per_upstream_preconnect_ratio = 1
|
||||
[(validate.rules).double = {lte: 3.0 gte: 1.0}];
|
||||
|
||||
// Indicates how many many streams (rounded up) can be anticipated across a cluster for each
|
||||
// Indicates how many streams (rounded up) can be anticipated across a cluster for each
|
||||
// stream, useful for low QPS services. This is currently supported for a subset of
|
||||
// deterministic non-hash-based load-balancing algorithms (weighted round robin, random).
|
||||
// Unlike ``per_upstream_preconnect_ratio`` this preconnects across the upstream instances in a
|
||||
|
@ -691,12 +754,14 @@ message Cluster {
|
|||
|
||||
reserved "hosts", "tls_context", "extension_protocol_options";
|
||||
|
||||
// Configuration to use different transport sockets for different endpoints.
|
||||
// The entry of ``envoy.transport_socket_match`` in the
|
||||
// :ref:`LbEndpoint.Metadata <envoy_v3_api_field_config.endpoint.v3.LbEndpoint.metadata>`
|
||||
// is used to match against the transport sockets as they appear in the list. The first
|
||||
// :ref:`match <envoy_v3_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>` is used.
|
||||
// For example, with the following match
|
||||
// Configuration to use different transport sockets for different endpoints. The entry of
|
||||
// ``envoy.transport_socket_match`` in the :ref:`LbEndpoint.Metadata
|
||||
// <envoy_v3_api_field_config.endpoint.v3.LbEndpoint.metadata>` is used to match against the
|
||||
// transport sockets as they appear in the list. If a match is not found, the search continues in
|
||||
// :ref:`LocalityLbEndpoints.Metadata
|
||||
// <envoy_v3_api_field_config.endpoint.v3.LocalityLbEndpoints.metadata>`. The first :ref:`match
|
||||
// <envoy_v3_api_msg_config.cluster.v3.Cluster.TransportSocketMatch>` is used. For example, with
|
||||
// the following match
|
||||
//
|
||||
// .. code-block:: yaml
|
||||
//
|
||||
|
@ -720,8 +785,9 @@ message Cluster {
|
|||
// socket match in case above.
|
||||
//
|
||||
// If an endpoint metadata's value under ``envoy.transport_socket_match`` does not match any
|
||||
// ``TransportSocketMatch``, socket configuration fallbacks to use the ``tls_context`` or
|
||||
// ``transport_socket`` specified in this cluster.
|
||||
// ``TransportSocketMatch``, the locality metadata is then checked for a match. Barring any
|
||||
// matches in the endpoint or locality metadata, the socket configuration fallbacks to use the
|
||||
// ``tls_context`` or ``transport_socket`` specified in this cluster.
|
||||
//
|
||||
// This field allows gradual and flexible transport socket configuration changes.
|
||||
//
|
||||
|
@ -952,7 +1018,8 @@ message Cluster {
|
|||
|
||||
// Optional configuration for having cluster readiness block on warm-up. Currently, only applicable for
|
||||
// :ref:`STRICT_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.STRICT_DNS>`,
|
||||
// or :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`.
|
||||
// or :ref:`LOGICAL_DNS<envoy_v3_api_enum_value_config.cluster.v3.Cluster.DiscoveryType.LOGICAL_DNS>`,
|
||||
// or :ref:`Redis Cluster<arch_overview_redis>`.
|
||||
// If true, cluster readiness blocks on warm-up. If false, the cluster will complete
|
||||
// initialization whether or not warm-up has completed. Defaults to true.
|
||||
google.protobuf.BoolValue wait_for_warm_on_init = 54;
|
||||
|
@ -1084,6 +1151,22 @@ message Cluster {
|
|||
// from the LRS stream here.]
|
||||
core.v3.ConfigSource lrs_server = 42;
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
// A list of metric names from ORCA load reports to propagate to LRS.
|
||||
//
|
||||
// For map fields in the ORCA proto, the string will be of the form ``<map_field_name>.<map_key>``.
|
||||
// For example, the string ``named_metrics.foo`` will mean to look for the key ``foo`` in the ORCA
|
||||
// ``named_metrics`` field.
|
||||
//
|
||||
// The special map key ``*`` means to report all entries in the map (e.g., ``named_metrics.*`` means to
|
||||
// report all entries in the ORCA named_metrics field). Note that this should be used only with trusted
|
||||
// backends.
|
||||
//
|
||||
// The metric names in LRS will follow the same semantics as this field. In other words, if this field
|
||||
// contains ``named_metrics.foo``, then the LRS load report will include the data with that same string
|
||||
// as the key.
|
||||
repeated string lrs_report_endpoint_metrics = 57;
|
||||
|
||||
// If track_timeout_budgets is true, the :ref:`timeout budget histograms
|
||||
// <config_cluster_manager_cluster_stats_timeout_budgets>` will be published for each
|
||||
// request. These show what percentage of a request's per try and global timeout was used. A value
|
||||
|
@ -1158,6 +1241,7 @@ message LoadBalancingPolicy {
|
|||
|
||||
reserved "config", "name", "typed_config";
|
||||
|
||||
// [#extension-category: envoy.load_balancing_policies]
|
||||
core.v3.TypedExtensionConfig typed_extension_config = 4;
|
||||
}
|
||||
|
||||
|
@ -1167,19 +1251,30 @@ message LoadBalancingPolicy {
|
|||
repeated Policy policies = 1;
|
||||
}
|
||||
|
||||
// An extensible structure containing the address Envoy should bind to when
|
||||
// establishing upstream connections.
|
||||
message UpstreamBindConfig {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.UpstreamBindConfig";
|
||||
|
||||
// The address Envoy should bind to when establishing upstream connections.
|
||||
core.v3.Address source_address = 1;
|
||||
}
|
||||
|
||||
message UpstreamConnectionOptions {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.UpstreamConnectionOptions";
|
||||
|
||||
enum FirstAddressFamilyVersion {
|
||||
// respect the native ranking of destination ip addresses returned from dns
|
||||
// resolution
|
||||
DEFAULT = 0;
|
||||
|
||||
V4 = 1;
|
||||
|
||||
V6 = 2;
|
||||
}
|
||||
|
||||
message HappyEyeballsConfig {
|
||||
// Specify the IP address family to attempt connection first in happy
|
||||
// eyeballs algorithm according to RFC8305#section-4.
|
||||
FirstAddressFamilyVersion first_address_family_version = 1;
|
||||
|
||||
// Specify the number of addresses of the first_address_family_version being
|
||||
// attempted for connection before the other address family.
|
||||
google.protobuf.UInt32Value first_address_family_count = 2 [(validate.rules).uint32 = {gte: 1}];
|
||||
}
|
||||
|
||||
// If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.
|
||||
core.v3.TcpKeepalive tcp_keepalive = 1;
|
||||
|
||||
|
@ -1187,6 +1282,11 @@ message UpstreamConnectionOptions {
|
|||
// This can be used by extensions during processing of requests. The association mechanism is
|
||||
// implementation specific. Defaults to false due to performance concerns.
|
||||
bool set_local_interface_name_on_upstream_connections = 2;
|
||||
|
||||
// Configurations for happy eyeballs algorithm.
|
||||
// Add configs for first_address_family_version and first_address_family_count
|
||||
// when sorting destination ip addresses.
|
||||
HappyEyeballsConfig happy_eyeballs_config = 3;
|
||||
}
|
||||
|
||||
message TrackClusterStats {
|
||||
|
@ -1201,4 +1301,19 @@ message TrackClusterStats {
|
|||
// <config_cluster_manager_cluster_stats_request_response_sizes>` tracking header and body sizes
|
||||
// of requests and responses will be published.
|
||||
bool request_response_sizes = 2;
|
||||
|
||||
// If true, some stats will be emitted per-endpoint, similar to the stats in admin ``/clusters``
|
||||
// output.
|
||||
//
|
||||
// This does not currently output correct stats during a hot-restart.
|
||||
//
|
||||
// This is not currently implemented by all stat sinks.
|
||||
//
|
||||
// These stats do not honor filtering or tag extraction rules in :ref:`StatsConfig
|
||||
// <envoy_v3_api_msg_config.metrics.v3.StatsConfig>` (but fixed-value tags are supported). Admin
|
||||
// endpoint filtering is supported.
|
||||
//
|
||||
// This may not be used at the same time as
|
||||
// :ref:`load_stats_config <envoy_v3_api_field_config.bootstrap.v3.ClusterManager.load_stats_config>`.
|
||||
bool per_endpoint_stats = 3;
|
||||
}
|
||||
|
|
|
@ -2,6 +2,8 @@ syntax = "proto3";
|
|||
|
||||
package envoy.config.cluster.v3;
|
||||
|
||||
import "envoy/config/core/v3/config_source.proto";
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
|
@ -14,8 +16,8 @@ option java_multiple_files = true;
|
|||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3;clusterv3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Upstream filters]
|
||||
// Upstream filters apply to the connections to the upstream cluster hosts.
|
||||
// [#protodoc-title: Upstream network filters]
|
||||
// Upstream network filters apply to the connections to the upstream cluster hosts.
|
||||
|
||||
message Filter {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.Filter";
|
||||
|
@ -26,6 +28,13 @@ message Filter {
|
|||
// Filter specific configuration which depends on the filter being
|
||||
// instantiated. See the supported filters for further documentation.
|
||||
// Note that Envoy's :ref:`downstream network
|
||||
// filters <config_network_filters>` are not valid upstream filters.
|
||||
// filters <config_network_filters>` are not valid upstream network filters.
|
||||
// Only one of typed_config or config_discovery can be used.
|
||||
google.protobuf.Any typed_config = 2;
|
||||
|
||||
// Configuration source specifier for an extension configuration discovery
|
||||
// service. In case of a failure and without the default configuration, the
|
||||
// listener closes the connections.
|
||||
// Only one of typed_config or config_discovery can be used.
|
||||
core.v3.ExtensionConfigSource config_discovery = 3;
|
||||
}
|
||||
|
|
|
@ -2,6 +2,8 @@ syntax = "proto3";
|
|||
|
||||
package envoy.config.cluster.v3;
|
||||
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
|
||||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
|
@ -19,14 +21,14 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// See the :ref:`architecture overview <arch_overview_outlier_detection>` for
|
||||
// more information on outlier detection.
|
||||
// [#next-free-field: 23]
|
||||
// [#next-free-field: 26]
|
||||
message OutlierDetection {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.cluster.OutlierDetection";
|
||||
|
||||
// The number of consecutive 5xx responses or local origin errors that are mapped
|
||||
// to 5xx error codes before a consecutive 5xx ejection
|
||||
// occurs. Defaults to 5.
|
||||
// The number of consecutive server-side error responses (for HTTP traffic,
|
||||
// 5xx responses; for TCP traffic, connection failures; for Redis, failure to
|
||||
// respond PONG; etc.) before a consecutive 5xx ejection occurs. Defaults to 5.
|
||||
google.protobuf.UInt32Value consecutive_5xx = 1;
|
||||
|
||||
// The time interval between ejection analysis sweeps. This can result in
|
||||
|
@ -40,8 +42,8 @@ message OutlierDetection {
|
|||
// Defaults to 30000ms or 30s.
|
||||
google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];
|
||||
|
||||
// The maximum % of an upstream cluster that can be ejected due to outlier
|
||||
// detection. Defaults to 10% but will eject at least one host regardless of the value.
|
||||
// The maximum % of an upstream cluster that can be ejected due to outlier detection. Defaults to 10% .
|
||||
// Will eject at least one host regardless of the value if :ref:`always_eject_one_host<envoy_v3_api_field_config.cluster.v3.OutlierDetection.always_eject_one_host>` is enabled.
|
||||
google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];
|
||||
|
||||
// The % chance that a host will be actually ejected when an outlier status
|
||||
|
@ -161,4 +163,18 @@ message OutlierDetection {
|
|||
// See :ref:`max_ejection_time_jitter<envoy_v3_api_field_config.cluster.v3.OutlierDetection.base_ejection_time>`
|
||||
// Defaults to 0s.
|
||||
google.protobuf.Duration max_ejection_time_jitter = 22;
|
||||
|
||||
// If active health checking is enabled and a host is ejected by outlier detection, a successful active health check
|
||||
// unejects the host by default and considers it as healthy. Unejection also clears all the outlier detection counters.
|
||||
// To change this default behavior set this config to ``false`` where active health checking will not uneject the host.
|
||||
// Defaults to true.
|
||||
google.protobuf.BoolValue successful_active_health_check_uneject_host = 23;
|
||||
|
||||
// Set of host's passive monitors.
|
||||
// [#not-implemented-hide:]
|
||||
repeated core.v3.TypedExtensionConfig monitors = 24;
|
||||
|
||||
// If enabled, at least one host is ejected regardless of the value of :ref:`max_ejection_percent<envoy_v3_api_field_config.cluster.v3.OutlierDetection.max_ejection_percent>`.
|
||||
// Defaults to false.
|
||||
google.protobuf.BoolValue always_eject_one_host = 25;
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@ option java_multiple_files = true;
|
|||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/common/key_value/v3;key_valuev3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Key Value Store storage plugin]
|
||||
// [#protodoc-title: Key/value store storage plugin]
|
||||
|
||||
// This shared configuration for Envoy key value stores.
|
||||
message KeyValueStoreConfig {
|
||||
|
|
|
@ -6,8 +6,6 @@ import "envoy/config/core/v3/extension.proto";
|
|||
import "envoy/config/route/v3/route_components.proto";
|
||||
import "envoy/type/matcher/v3/string.proto";
|
||||
|
||||
import "xds/annotations/v3/status.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
|
@ -24,9 +22,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// is found the action specified by the most specific on_no_match will be evaluated.
|
||||
// As an on_no_match might result in another matching tree being evaluated, this process
|
||||
// might repeat several times until the final OnMatch (or no match) is decided.
|
||||
//
|
||||
// .. note::
|
||||
// Please use the syntactically equivalent :ref:`matching API <envoy_v3_api_msg_.xds.type.matcher.v3.Matcher>`
|
||||
message Matcher {
|
||||
option (xds.annotations.v3.message_status).work_in_progress = true;
|
||||
|
||||
// What to do if a match is successful.
|
||||
message OnMatch {
|
||||
oneof on_match {
|
||||
|
|
|
@ -2,11 +2,13 @@ syntax = "proto3";
|
|||
|
||||
package envoy.config.common.mutation_rules.v3;
|
||||
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
import "envoy/type/matcher/v3/regex.proto";
|
||||
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.config.common.mutation_rules.v3";
|
||||
option java_outer_classname = "MutationRulesProto";
|
||||
|
@ -14,7 +16,7 @@ option java_multiple_files = true;
|
|||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/common/mutation_rules/v3;mutation_rulesv3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Header Mutation Rules]
|
||||
// [#protodoc-title: Header mutation rules]
|
||||
|
||||
// The HeaderMutationRules structure specifies what headers may be
|
||||
// manipulated by a processing filter. This set of rules makes it
|
||||
|
@ -84,3 +86,18 @@ message HeaderMutationRules {
|
|||
// Default is false.
|
||||
google.protobuf.BoolValue disallow_is_error = 7;
|
||||
}
|
||||
|
||||
// The HeaderMutation structure specifies an action that may be taken on HTTP
|
||||
// headers.
|
||||
message HeaderMutation {
|
||||
oneof action {
|
||||
option (validate.required) = true;
|
||||
|
||||
// Remove the specified header if it exists.
|
||||
string remove = 1
|
||||
[(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
|
||||
|
||||
// Append new header by the specified HeaderValueOption.
|
||||
core.v3.HeaderValueOption append = 2;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,10 +2,12 @@ syntax = "proto3";
|
|||
|
||||
package envoy.config.core.v3;
|
||||
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
import "envoy/config/core/v3/socket_option.proto";
|
||||
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "envoy/annotations/deprecation.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
@ -32,15 +34,20 @@ message Pipe {
|
|||
}
|
||||
|
||||
// The address represents an envoy internal listener.
|
||||
// [#comment: TODO(lambdai): Make this address available for listener and endpoint.
|
||||
// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.]
|
||||
// [#comment: TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.]
|
||||
message EnvoyInternalAddress {
|
||||
oneof address_name_specifier {
|
||||
option (validate.required) = true;
|
||||
|
||||
// [#not-implemented-hide:] The :ref:`listener name <envoy_v3_api_field_config.listener.v3.Listener.name>` of the destination internal listener.
|
||||
// Specifies the :ref:`name <envoy_v3_api_field_config.listener.v3.Listener.name>` of the
|
||||
// internal listener.
|
||||
string server_listener_name = 1;
|
||||
}
|
||||
|
||||
// Specifies an endpoint identifier to distinguish between multiple endpoints for the same internal listener in a
|
||||
// single upstream pool. Only used in the upstream addresses for tracking changes to individual endpoints. This, for
|
||||
// example, may be set to the final destination IP for the target internal listener.
|
||||
string endpoint_id = 2;
|
||||
}
|
||||
|
||||
// [#next-free-field: 7]
|
||||
|
@ -110,15 +117,30 @@ message TcpKeepalive {
|
|||
google.protobuf.UInt32Value keepalive_interval = 3;
|
||||
}
|
||||
|
||||
message ExtraSourceAddress {
|
||||
// The additional address to bind.
|
||||
SocketAddress address = 1 [(validate.rules).message = {required: true}];
|
||||
|
||||
// Additional socket options that may not be present in Envoy source code or
|
||||
// precompiled binaries. If specified, this will override the
|
||||
// :ref:`socket_options <envoy_v3_api_field_config.core.v3.BindConfig.socket_options>`
|
||||
// in the BindConfig. If specified with no
|
||||
// :ref:`socket_options <envoy_v3_api_field_config.core.v3.SocketOptionsOverride.socket_options>`
|
||||
// or an empty list of :ref:`socket_options <envoy_v3_api_field_config.core.v3.SocketOptionsOverride.socket_options>`,
|
||||
// it means no socket option will apply.
|
||||
SocketOptionsOverride socket_options = 2;
|
||||
}
|
||||
|
||||
// [#next-free-field: 7]
|
||||
message BindConfig {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BindConfig";
|
||||
|
||||
// The address to bind to when creating a socket.
|
||||
SocketAddress source_address = 1 [(validate.rules).message = {required: true}];
|
||||
SocketAddress source_address = 1;
|
||||
|
||||
// Whether to set the ``IP_FREEBIND`` option when creating the socket. When this
|
||||
// flag is set to true, allows the :ref:`source_address
|
||||
// <envoy_v3_api_field_config.cluster.v3.UpstreamBindConfig.source_address>` to be an IP address
|
||||
// <envoy_v3_api_field_config.core.v3.BindConfig.source_address>` to be an IP address
|
||||
// that is not configured on the system running Envoy. When this flag is set
|
||||
// to false, the option ``IP_FREEBIND`` is disabled on the socket. When this
|
||||
// flag is not set (default), the socket is not modified, i.e. the option is
|
||||
|
@ -128,6 +150,23 @@ message BindConfig {
|
|||
// Additional socket options that may not be present in Envoy source code or
|
||||
// precompiled binaries.
|
||||
repeated SocketOption socket_options = 3;
|
||||
|
||||
// Extra source addresses appended to the address specified in the ``source_address``
|
||||
// field. This enables to specify multiple source addresses.
|
||||
// The source address selection is determined by :ref:`local_address_selector
|
||||
// <envoy_v3_api_field_config.core.v3.BindConfig.local_address_selector>`.
|
||||
repeated ExtraSourceAddress extra_source_addresses = 5;
|
||||
|
||||
// Deprecated by
|
||||
// :ref:`extra_source_addresses <envoy_v3_api_field_config.core.v3.BindConfig.extra_source_addresses>`
|
||||
repeated SocketAddress additional_source_addresses = 4
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
// Custom local address selector to override the default (i.e.
|
||||
// :ref:`DefaultLocalAddressSelector
|
||||
// <envoy_v3_api_msg_config.upstream.local_address_selector.v3.DefaultLocalAddressSelector>`).
|
||||
// [#extension-category: envoy.upstream.local_address_selector]
|
||||
TypedExtensionConfig local_address_selector = 6;
|
||||
}
|
||||
|
||||
// Addresses specify either a logical or physical address and port, which are
|
||||
|
@ -143,7 +182,8 @@ message Address {
|
|||
|
||||
Pipe pipe = 2;
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
// Specifies a user-space address handled by :ref:`internal listeners
|
||||
// <envoy_v3_api_field_config.listener.v3.Listener.internal_listener>`.
|
||||
EnvoyInternalAddress envoy_internal_address = 3;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ option java_multiple_files = true;
|
|||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Backoff Strategy]
|
||||
// [#protodoc-title: Backoff strategy]
|
||||
|
||||
// Configuration defining a jittered exponential back off strategy.
|
||||
message BackoffStrategy {
|
||||
|
|
|
@ -245,7 +245,8 @@ message Metadata {
|
|||
// :ref:`typed_filter_metadata <envoy_v3_api_field_config.core.v3.Metadata.typed_filter_metadata>`
|
||||
// fields are present in the metadata with same keys,
|
||||
// only ``typed_filter_metadata`` field will be parsed.
|
||||
map<string, google.protobuf.Struct> filter_metadata = 1;
|
||||
map<string, google.protobuf.Struct> filter_metadata = 1
|
||||
[(validate.rules).map = {keys {string {min_len: 1}}}];
|
||||
|
||||
// Key is the reverse DNS filter name, e.g. com.acme.widget. The ``envoy.*``
|
||||
// namespace is reserved for Envoy's built-in filters.
|
||||
|
@ -253,7 +254,8 @@ message Metadata {
|
|||
// If both :ref:`filter_metadata <envoy_v3_api_field_config.core.v3.Metadata.filter_metadata>`
|
||||
// and ``typed_filter_metadata`` fields are present in the metadata with same keys,
|
||||
// only ``typed_filter_metadata`` field will be parsed.
|
||||
map<string, google.protobuf.Any> typed_filter_metadata = 2;
|
||||
map<string, google.protobuf.Any> typed_filter_metadata = 2
|
||||
[(validate.rules).map = {keys {string {min_len: 1}}}];
|
||||
}
|
||||
|
||||
// Runtime derived uint32 with a default when not specified.
|
||||
|
@ -301,6 +303,59 @@ message RuntimeFeatureFlag {
|
|||
string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
|
||||
}
|
||||
|
||||
message KeyValue {
|
||||
// The key of the key/value pair.
|
||||
string key = 1 [(validate.rules).string = {min_len: 1 max_bytes: 16384}];
|
||||
|
||||
// The value of the key/value pair.
|
||||
bytes value = 2;
|
||||
}
|
||||
|
||||
// Key/value pair plus option to control append behavior. This is used to specify
|
||||
// key/value pairs that should be appended to a set of existing key/value pairs.
|
||||
message KeyValueAppend {
|
||||
// Describes the supported actions types for key/value pair append action.
|
||||
enum KeyValueAppendAction {
|
||||
// If the key already exists, this action will result in the following behavior:
|
||||
//
|
||||
// - Comma-concatenated value if multiple values are not allowed.
|
||||
// - New value added to the list of values if multiple values are allowed.
|
||||
//
|
||||
// If the key doesn't exist then this will add pair with specified key and value.
|
||||
APPEND_IF_EXISTS_OR_ADD = 0;
|
||||
|
||||
// This action will add the key/value pair if it doesn't already exist. If the
|
||||
// key already exists then this will be a no-op.
|
||||
ADD_IF_ABSENT = 1;
|
||||
|
||||
// This action will overwrite the specified value by discarding any existing
|
||||
// values if the key already exists. If the key doesn't exist then this will add
|
||||
// the pair with specified key and value.
|
||||
OVERWRITE_IF_EXISTS_OR_ADD = 2;
|
||||
|
||||
// This action will overwrite the specified value by discarding any existing
|
||||
// values if the key already exists. If the key doesn't exist then this will
|
||||
// be no-op.
|
||||
OVERWRITE_IF_EXISTS = 3;
|
||||
}
|
||||
|
||||
// Key/value pair entry that this option to append or overwrite.
|
||||
KeyValue entry = 1 [(validate.rules).message = {required: true}];
|
||||
|
||||
// Describes the action taken to append/overwrite the given value for an existing
|
||||
// key or to only add this key if it's absent.
|
||||
KeyValueAppendAction action = 2 [(validate.rules).enum = {defined_only: true}];
|
||||
}
|
||||
|
||||
// Key/value pair to append or remove.
|
||||
message KeyValueMutation {
|
||||
// Key/value pair to append or overwrite. Only one of ``append`` or ``remove`` can be set.
|
||||
KeyValueAppend append = 1;
|
||||
|
||||
// Key to remove. Only one of ``append`` or ``remove`` can be set.
|
||||
string remove = 2 [(validate.rules).string = {max_bytes: 16384}];
|
||||
}
|
||||
|
||||
// Query parameter name/value pair.
|
||||
message QueryParameter {
|
||||
// The key of the query parameter. Case sensitive.
|
||||
|
@ -324,8 +379,18 @@ message HeaderValue {
|
|||
// The same :ref:`format specifier <config_access_log_format>` as used for
|
||||
// :ref:`HTTP access logging <config_access_log>` applies here, however
|
||||
// unknown header values are replaced with the empty string instead of ``-``.
|
||||
// Header value is encoded as string. This does not work for non-utf8 characters.
|
||||
// Only one of ``value`` or ``raw_value`` can be set.
|
||||
string value = 2 [
|
||||
(validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}
|
||||
(validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false},
|
||||
(udpa.annotations.field_migrate).oneof_promotion = "value_type"
|
||||
];
|
||||
|
||||
// Header value is encoded as bytes which can support non-utf8 characters.
|
||||
// Only one of ``value`` or ``raw_value`` can be set.
|
||||
bytes raw_value = 3 [
|
||||
(validate.rules).bytes = {min_len: 0 max_len: 16384},
|
||||
(udpa.annotations.field_migrate).oneof_promotion = "value_type"
|
||||
];
|
||||
}
|
||||
|
||||
|
@ -334,11 +399,14 @@ message HeaderValueOption {
|
|||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.core.HeaderValueOption";
|
||||
|
||||
// [#not-implemented-hide:] Describes the supported actions types for header append action.
|
||||
// Describes the supported actions types for header append action.
|
||||
enum HeaderAppendAction {
|
||||
// This action will append the specified value to the existing values if the header
|
||||
// already exists. If the header doesn't exist then this will add the header with
|
||||
// specified key and value.
|
||||
// If the header already exists, this action will result in:
|
||||
//
|
||||
// - Comma-concatenated for predefined inline headers.
|
||||
// - Duplicate header added in the ``HeaderMap`` for other headers.
|
||||
//
|
||||
// If the header doesn't exist then this will add new header with specified key and value.
|
||||
APPEND_IF_EXISTS_OR_ADD = 0;
|
||||
|
||||
// This action will add the header if it doesn't already exist. If the header
|
||||
|
@ -349,6 +417,10 @@ message HeaderValueOption {
|
|||
// the header already exists. If the header doesn't exist then this will add the header
|
||||
// with specified key and value.
|
||||
OVERWRITE_IF_EXISTS_OR_ADD = 2;
|
||||
|
||||
// This action will overwrite the specified value by discarding any existing values if
|
||||
// the header already exists. If the header doesn't exist then this will be no-op.
|
||||
OVERWRITE_IF_EXISTS = 3;
|
||||
}
|
||||
|
||||
// Header name/value pair that this option applies to.
|
||||
|
@ -356,10 +428,20 @@ message HeaderValueOption {
|
|||
|
||||
// Should the value be appended? If true (default), the value is appended to
|
||||
// existing values. Otherwise it replaces any existing values.
|
||||
google.protobuf.BoolValue append = 2;
|
||||
// This field is deprecated and please use
|
||||
// :ref:`append_action <envoy_v3_api_field_config.core.v3.HeaderValueOption.append_action>` as replacement.
|
||||
//
|
||||
// .. note::
|
||||
// The :ref:`external authorization service <envoy_v3_api_msg_service.auth.v3.CheckResponse>` and
|
||||
// :ref:`external processor service <envoy_v3_api_msg_service.ext_proc.v3.ProcessingResponse>` have
|
||||
// default value (``false``) for this field.
|
||||
google.protobuf.BoolValue append = 2
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
// [#not-implemented-hide:] Describes the action taken to append/overwrite the given value for an existing header
|
||||
// or to only add this header if it's absent. Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD<envoy_v3_api_enum_value_config.core.v3.HeaderValueOption.HeaderAppendAction.APPEND_IF_EXISTS_OR_ADD>`.
|
||||
// Describes the action taken to append/overwrite the given value for an existing header
|
||||
// or to only add this header if it's absent.
|
||||
// Value defaults to :ref:`APPEND_IF_EXISTS_OR_ADD
|
||||
// <envoy_v3_api_enum_value_config.core.v3.HeaderValueOption.HeaderAppendAction.APPEND_IF_EXISTS_OR_ADD>`.
|
||||
HeaderAppendAction append_action = 3 [(validate.rules).enum = {defined_only: true}];
|
||||
|
||||
// Is the header value allowed to be empty? If false (default), custom headers with empty values are dropped,
|
||||
|
@ -382,6 +464,7 @@ message WatchedDirectory {
|
|||
}
|
||||
|
||||
// Data source consisting of a file, an inline value, or an environment variable.
|
||||
// [#next-free-field: 6]
|
||||
message DataSource {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.DataSource";
|
||||
|
||||
|
@ -400,12 +483,47 @@ message DataSource {
|
|||
// Environment variable data source.
|
||||
string environment_variable = 4 [(validate.rules).string = {min_len: 1}];
|
||||
}
|
||||
|
||||
// Watched directory that is watched for file changes. If this is set explicitly, the file
|
||||
// specified in the ``filename`` field will be reloaded when relevant file move events occur.
|
||||
//
|
||||
// .. note::
|
||||
// This field only makes sense when the ``filename`` field is set.
|
||||
//
|
||||
// .. note::
|
||||
// Envoy only updates when the file is replaced by a file move, and not when the file is
|
||||
// edited in place.
|
||||
//
|
||||
// .. note::
|
||||
// Not all use cases of ``DataSource`` support watching directories. It depends on the
|
||||
// specific usage of the ``DataSource``. See the documentation of the parent message for
|
||||
// details.
|
||||
WatchedDirectory watched_directory = 5;
|
||||
}
|
||||
|
||||
// The message specifies the retry policy of remote data source when fetching fails.
|
||||
// [#next-free-field: 7]
|
||||
message RetryPolicy {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RetryPolicy";
|
||||
|
||||
// See :ref:`RetryPriority <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_priority>`.
|
||||
message RetryPriority {
|
||||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
oneof config_type {
|
||||
google.protobuf.Any typed_config = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// See :ref:`RetryHostPredicate <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_host_predicate>`.
|
||||
message RetryHostPredicate {
|
||||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
oneof config_type {
|
||||
google.protobuf.Any typed_config = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Specifies parameters that control :ref:`retry backoff strategy <envoy_v3_api_msg_config.core.v3.BackoffStrategy>`.
|
||||
// This parameter is optional, in which case the default base interval is 1000 milliseconds. The
|
||||
// default maximum interval is 10 times the base interval.
|
||||
|
@ -415,6 +533,18 @@ message RetryPolicy {
|
|||
// defaults to 1.
|
||||
google.protobuf.UInt32Value num_retries = 2
|
||||
[(udpa.annotations.field_migrate).rename = "max_retries"];
|
||||
|
||||
// For details, see :ref:`retry_on <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_on>`.
|
||||
string retry_on = 3;
|
||||
|
||||
// For details, see :ref:`retry_priority <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_priority>`.
|
||||
RetryPriority retry_priority = 4;
|
||||
|
||||
// For details, see :ref:`RetryHostPredicate <envoy_v3_api_field_config.route.v3.RetryPolicy.retry_host_predicate>`.
|
||||
repeated RetryHostPredicate retry_host_predicate = 5;
|
||||
|
||||
// For details, see :ref:`host_selection_retry_max_attempts <envoy_v3_api_field_config.route.v3.RetryPolicy.host_selection_retry_max_attempts>`.
|
||||
int64 host_selection_retry_max_attempts = 6;
|
||||
}
|
||||
|
||||
// The message specifies how to fetch data from remote and how to verify it.
|
||||
|
|
|
@ -28,12 +28,10 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// xDS API and non-xDS services version. This is used to describe both resource and transport
|
||||
// protocol versions (in distinct configuration fields).
|
||||
enum ApiVersion {
|
||||
// When not specified, we assume v2, to ease migration to Envoy's stable API
|
||||
// versioning. If a client does not support v2 (e.g. due to deprecation), this
|
||||
// is an invalid value.
|
||||
AUTO = 0 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"];
|
||||
// When not specified, we assume v3; it is the only supported version.
|
||||
AUTO = 0;
|
||||
|
||||
// Use xDS v2 API.
|
||||
// Use xDS v2 API. This is no longer supported.
|
||||
V2 = 1 [deprecated = true, (envoy.annotations.deprecated_at_minor_version_enum) = "3.0"];
|
||||
|
||||
// Use xDS v3 API.
|
||||
|
@ -152,7 +150,8 @@ message RateLimitSettings {
|
|||
google.protobuf.UInt32Value max_tokens = 1;
|
||||
|
||||
// Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens
|
||||
// per second will be used.
|
||||
// per second will be used. The minimal fill rate is once per year. Lower
|
||||
// fill rates will be set to once per year.
|
||||
google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}];
|
||||
}
|
||||
|
||||
|
|
|
@ -25,10 +25,11 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// gRPC service configuration. This is used by :ref:`ApiConfigSource
|
||||
// <envoy_v3_api_msg_config.core.v3.ApiConfigSource>` and filter configurations.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 7]
|
||||
message GrpcService {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService";
|
||||
|
||||
// [#next-free-field: 6]
|
||||
message EnvoyGrpc {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.core.GrpcService.EnvoyGrpc";
|
||||
|
@ -43,6 +44,24 @@ message GrpcService {
|
|||
string authority = 2
|
||||
[(validate.rules).string =
|
||||
{min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];
|
||||
|
||||
// Indicates the retry policy for re-establishing the gRPC stream
|
||||
// This field is optional. If max interval is not provided, it will be set to ten times the provided base interval.
|
||||
// Currently only supported for xDS gRPC streams.
|
||||
// If not set, xDS gRPC streams default base interval:500ms, maximum interval:30s will be applied.
|
||||
RetryPolicy retry_policy = 3;
|
||||
|
||||
// Maximum gRPC message size that is allowed to be received.
|
||||
// If a message over this limit is received, the gRPC stream is terminated with the RESOURCE_EXHAUSTED error.
|
||||
// This limit is applied to individual messages in the streaming response and not the total size of streaming response.
|
||||
// Defaults to 0, which means unlimited.
|
||||
google.protobuf.UInt32Value max_receive_message_length = 4;
|
||||
|
||||
// This provides gRPC client level control over envoy generated headers.
|
||||
// If false, the header will be sent but it can be overridden by per stream option.
|
||||
// If true, the header will be removed and can not be overridden by per stream option.
|
||||
// Default to false.
|
||||
bool skip_envoy_headers = 5;
|
||||
}
|
||||
|
||||
// [#next-free-field: 9]
|
||||
|
@ -142,7 +161,7 @@ message GrpcService {
|
|||
|
||||
// URI of the token exchange service that handles token exchange requests.
|
||||
// [#comment:TODO(asraa): Add URI validation when implemented. Tracked by
|
||||
// https://github.com/envoyproxy/protoc-gen-validate/issues/303]
|
||||
// https://github.com/bufbuild/protoc-gen-validate/issues/303]
|
||||
string token_exchange_service_uri = 1;
|
||||
|
||||
// Location of the target service or resource where the client
|
||||
|
@ -294,4 +313,8 @@ message GrpcService {
|
|||
// documentation on :ref:`custom request headers
|
||||
// <config_http_conn_man_headers_custom_request_headers>`.
|
||||
repeated HeaderValue initial_metadata = 5;
|
||||
|
||||
// Optional default retry policy for streams toward the service.
|
||||
// If an async stream doesn't have retry policy configured in its stream options, this retry policy is used.
|
||||
RetryPolicy retry_policy = 6;
|
||||
}
|
||||
|
|
|
@ -4,6 +4,8 @@ package envoy.config.core.v3;
|
|||
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
import "envoy/config/core/v3/event_service_config.proto";
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
import "envoy/config/core/v3/proxy_protocol.proto";
|
||||
import "envoy/type/matcher/v3/string.proto";
|
||||
import "envoy/type/v3/http.proto";
|
||||
import "envoy/type/v3/range.proto";
|
||||
|
@ -13,6 +15,7 @@ import "google/protobuf/duration.proto";
|
|||
import "google/protobuf/struct.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "envoy/annotations/deprecation.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
@ -60,7 +63,7 @@ message HealthStatusSet {
|
|||
[(validate.rules).repeated = {items {enum {defined_only: true}}}];
|
||||
}
|
||||
|
||||
// [#next-free-field: 25]
|
||||
// [#next-free-field: 27]
|
||||
message HealthCheck {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck";
|
||||
|
||||
|
@ -75,12 +78,12 @@ message HealthCheck {
|
|||
// Hex encoded payload. E.g., "000000FF".
|
||||
string text = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// [#not-implemented-hide:] Binary payload.
|
||||
// Binary payload.
|
||||
bytes binary = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// [#next-free-field: 13]
|
||||
// [#next-free-field: 15]
|
||||
message HttpHealthCheck {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.core.HealthCheck.HttpHealthCheck";
|
||||
|
@ -93,18 +96,29 @@ message HealthCheck {
|
|||
// left empty (default value), the name of the cluster this health check is associated
|
||||
// with will be used. The host header can be customized for a specific endpoint by setting the
|
||||
// :ref:`hostname <envoy_v3_api_field_config.endpoint.v3.Endpoint.HealthCheckConfig.hostname>` field.
|
||||
string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
|
||||
string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE}];
|
||||
|
||||
// Specifies the HTTP path that will be requested during health checking. For example
|
||||
// ``/healthcheck``.
|
||||
string path = 2
|
||||
[(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];
|
||||
string path = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE}];
|
||||
|
||||
// [#not-implemented-hide:] HTTP specific payload.
|
||||
Payload send = 3;
|
||||
|
||||
// [#not-implemented-hide:] HTTP specific response.
|
||||
Payload receive = 4;
|
||||
// Specifies a list of HTTP expected responses to match in the first ``response_buffer_size`` bytes of the response body.
|
||||
// If it is set, both the expected response check and status code determine the health check.
|
||||
// When checking the response, “fuzzy” matching is performed such that each payload block must be found,
|
||||
// and in the order specified, but not necessarily contiguous.
|
||||
//
|
||||
// .. note::
|
||||
//
|
||||
// It is recommended to set ``response_buffer_size`` based on the total Payload size for efficiency.
|
||||
// The default buffer size is 1024 bytes when it is not set.
|
||||
repeated Payload receive = 4;
|
||||
|
||||
// Specifies the size of response buffer in bytes that is used to Payload match.
|
||||
// The default value is 1024. Setting to 0 implies that the Payload will be matched against the entire response.
|
||||
google.protobuf.UInt64Value response_buffer_size = 14 [(validate.rules).uint64 = {gte: 0}];
|
||||
|
||||
// Specifies a list of HTTP headers that should be added to each request that is sent to the
|
||||
// health checked cluster. For more information, including details on header value syntax, see
|
||||
|
@ -145,6 +159,12 @@ message HealthCheck {
|
|||
// <envoy_v3_api_msg_type.matcher.v3.StringMatcher>`. See the :ref:`architecture overview
|
||||
// <arch_overview_health_checking_identity>` for more information.
|
||||
type.matcher.v3.StringMatcher service_name_matcher = 11;
|
||||
|
||||
// HTTP Method that will be used for health checking, default is "GET".
|
||||
// GET, HEAD, POST, PUT, DELETE, OPTIONS, TRACE, PATCH methods are supported, but making request body is not supported.
|
||||
// CONNECT method is disallowed because it is not appropriate for health check request.
|
||||
// If a non-200 response is expected by the method, it needs to be set in :ref:`expected_statuses <envoy_v3_api_field_config.core.v3.HealthCheck.HttpHealthCheck.expected_statuses>`.
|
||||
RequestMethod method = 13 [(validate.rules).enum = {defined_only: true not_in: 6}];
|
||||
}
|
||||
|
||||
message TcpHealthCheck {
|
||||
|
@ -155,9 +175,16 @@ message HealthCheck {
|
|||
Payload send = 1;
|
||||
|
||||
// When checking the response, “fuzzy” matching is performed such that each
|
||||
// binary block must be found, and in the order specified, but not
|
||||
// payload block must be found, and in the order specified, but not
|
||||
// necessarily contiguous.
|
||||
repeated Payload receive = 2;
|
||||
|
||||
// When setting this value, it tries to attempt health check request with ProxyProtocol.
|
||||
// When ``send`` is presented, they are sent after preceding ProxyProtocol header.
|
||||
// Only ProxyProtocol header is sent when ``send`` is not presented.
|
||||
// It allows to use both ProxyProtocol V1 and V2. In V1, it presents L3/L4. In V2, it includes
|
||||
// LOCAL command and doesn't include L3/L4.
|
||||
ProxyProtocolConfig proxy_protocol_config = 3;
|
||||
}
|
||||
|
||||
message RedisHealthCheck {
|
||||
|
@ -348,9 +375,19 @@ message HealthCheck {
|
|||
// The default value for "healthy edge interval" is the same as the default interval.
|
||||
google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}];
|
||||
|
||||
// .. attention::
|
||||
// This field is deprecated in favor of the extension
|
||||
// :ref:`event_logger <envoy_v3_api_field_config.core.v3.HealthCheck.event_logger>` and
|
||||
// :ref:`event_log_path <envoy_v3_api_field_extensions.health_check.event_sinks.file.v3.HealthCheckEventFileSink.event_log_path>`
|
||||
// in the file sink extension.
|
||||
//
|
||||
// Specifies the path to the :ref:`health check event log <arch_overview_health_check_logging>`.
|
||||
// If empty, no event log will be written.
|
||||
string event_log_path = 17;
|
||||
string event_log_path = 17
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
// A list of event log sinks to process the health check event.
|
||||
// [#extension-category: envoy.health_check.event_sinks]
|
||||
repeated TypedExtensionConfig event_logger = 25;
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
// The gRPC service for the health check event service.
|
||||
|
@ -362,6 +399,11 @@ message HealthCheck {
|
|||
// The default value is false.
|
||||
bool always_log_health_check_failures = 19;
|
||||
|
||||
// If set to true, health check success events will always be logged. If set to false, only host addition event will be logged
|
||||
// if it is the first successful health check, or if the healthy threshold is reached.
|
||||
// The default value is false.
|
||||
bool always_log_health_check_success = 26;
|
||||
|
||||
// This allows overriding the cluster TLS settings, just for health check connections.
|
||||
TlsOptions tls_options = 21;
|
||||
|
||||
|
|
|
@ -0,0 +1,35 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package envoy.config.core.v3;
|
||||
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
import "envoy/config/core/v3/http_uri.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.config.core.v3";
|
||||
option java_outer_classname = "HttpServiceProto";
|
||||
option java_multiple_files = true;
|
||||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: HTTP services]
|
||||
|
||||
// HTTP service configuration.
|
||||
message HttpService {
|
||||
// The service's HTTP URI. For example:
|
||||
//
|
||||
// .. code-block:: yaml
|
||||
//
|
||||
// http_uri:
|
||||
// uri: https://www.myserviceapi.com/v1/data
|
||||
// cluster: www.myserviceapi.com|443
|
||||
//
|
||||
HttpUri http_uri = 1;
|
||||
|
||||
// Specifies a list of HTTP headers that should be added to each request
|
||||
// handled by this virtual host.
|
||||
repeated HeaderValueOption request_headers_to_add = 2
|
||||
[(validate.rules).repeated = {max_items: 1000}];
|
||||
}
|
|
@ -14,7 +14,7 @@ option java_multiple_files = true;
|
|||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: HTTP Service URI ]
|
||||
// [#protodoc-title: HTTP service URI ]
|
||||
|
||||
// Envoy external URI descriptor
|
||||
message HttpUri {
|
||||
|
@ -52,6 +52,7 @@ message HttpUri {
|
|||
// Sets the maximum duration in milliseconds that a response can take to arrive upon request.
|
||||
google.protobuf.Duration timeout = 3 [(validate.rules).duration = {
|
||||
required: true
|
||||
lt {seconds: 4294967296}
|
||||
gte {}
|
||||
}];
|
||||
}
|
||||
|
|
|
@ -56,7 +56,7 @@ message QuicKeepAliveSettings {
|
|||
}
|
||||
|
||||
// QUIC protocol options which apply to both downstream and upstream connections.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 9]
|
||||
message QuicProtocolOptions {
|
||||
// Maximum number of streams that the client can negotiate per connection. 100
|
||||
// if not specified.
|
||||
|
@ -64,7 +64,7 @@ message QuicProtocolOptions {
|
|||
|
||||
// `Initial stream-level flow-control receive window
|
||||
// <https://tools.ietf.org/html/draft-ietf-quic-transport-34#section-4.1>`_ size. Valid values range from
|
||||
// 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 65536 (2^16).
|
||||
// 1 to 16777216 (2^24, maximum supported by QUICHE) and defaults to 16777216 (16 * 1024 * 1024).
|
||||
//
|
||||
// NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. If configured smaller than it, we will use 16384 instead.
|
||||
// QUICHE IETF Quic implementation supports 1 bytes window. We only support increasing the default window size now, so it's also the minimum.
|
||||
|
@ -76,8 +76,8 @@ message QuicProtocolOptions {
|
|||
[(validate.rules).uint32 = {lte: 16777216 gte: 1}];
|
||||
|
||||
// Similar to ``initial_stream_window_size``, but for connection-level
|
||||
// flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults to 65536 (2^16).
|
||||
// window. Currently, this has the same minimum/default as ``initial_stream_window_size``.
|
||||
// flow-control. Valid values rage from 1 to 25165824 (24MB, maximum supported by QUICHE) and defaults
|
||||
// to 25165824 (24 * 1024 * 1024).
|
||||
//
|
||||
// NOTE: 16384 (2^14) is the minimum window size supported in Google QUIC. We only support increasing the default
|
||||
// window size now, so it's also the minimum.
|
||||
|
@ -85,7 +85,7 @@ message QuicProtocolOptions {
|
|||
[(validate.rules).uint32 = {lte: 25165824 gte: 1}];
|
||||
|
||||
// The number of timeouts that can occur before port migration is triggered for QUIC clients.
|
||||
// This defaults to 1. If set to 0, port migration will not occur on path degrading.
|
||||
// This defaults to 4. If set to 0, port migration will not occur on path degrading.
|
||||
// Timeout here refers to QUIC internal path degrading timeout mechanism, such as PTO.
|
||||
// This has no effect on server sessions.
|
||||
google.protobuf.UInt32Value num_timeouts_to_trigger_port_migration = 4
|
||||
|
@ -94,6 +94,23 @@ message QuicProtocolOptions {
|
|||
// Probes the peer at the configured interval to solicit traffic, i.e. ACK or PATH_RESPONSE, from the peer to push back connection idle timeout.
|
||||
// If absent, use the default keepalive behavior of which a client connection sends PINGs every 15s, and a server connection doesn't do anything.
|
||||
QuicKeepAliveSettings connection_keepalive = 5;
|
||||
|
||||
// A comma-separated list of strings representing QUIC connection options defined in
|
||||
// `QUICHE <https://github.com/google/quiche/blob/main/quiche/quic/core/crypto/crypto_protocol.h>`_ and to be sent by upstream connections.
|
||||
string connection_options = 6;
|
||||
|
||||
// A comma-separated list of strings representing QUIC client connection options defined in
|
||||
// `QUICHE <https://github.com/google/quiche/blob/main/quiche/quic/core/crypto/crypto_protocol.h>`_ and to be sent by upstream connections.
|
||||
string client_connection_options = 7;
|
||||
|
||||
// The duration that a QUIC connection stays idle before it closes itself. If this field is not present, QUICHE
|
||||
// default 600s will be applied.
|
||||
// For internal corporate network, a long timeout is often fine.
|
||||
// But for client facing network, 30s is usually a good choice.
|
||||
google.protobuf.Duration idle_network_timeout = 8 [(validate.rules).duration = {
|
||||
lte {seconds: 600}
|
||||
gte {seconds: 1}
|
||||
}];
|
||||
}
|
||||
|
||||
message UpstreamHttpProtocolOptions {
|
||||
|
@ -104,12 +121,14 @@ message UpstreamHttpProtocolOptions {
|
|||
// upstream connections based on the downstream HTTP host/authority header or any other arbitrary
|
||||
// header when :ref:`override_auto_sni_header <envoy_v3_api_field_config.core.v3.UpstreamHttpProtocolOptions.override_auto_sni_header>`
|
||||
// is set, as seen by the :ref:`router filter <config_http_filters_router>`.
|
||||
// Does nothing if a filter before the http router filter sets the corresponding metadata.
|
||||
bool auto_sni = 1;
|
||||
|
||||
// Automatic validate upstream presented certificate for new upstream connections based on the
|
||||
// downstream HTTP host/authority header or any other arbitrary header when :ref:`override_auto_sni_header <envoy_v3_api_field_config.core.v3.UpstreamHttpProtocolOptions.override_auto_sni_header>`
|
||||
// is set, as seen by the :ref:`router filter <config_http_filters_router>`.
|
||||
// This field is intended to be set with ``auto_sni`` field.
|
||||
// Does nothing if a filter before the http router filter sets the corresponding metadata.
|
||||
bool auto_san_validation = 2;
|
||||
|
||||
// An optional alternative to the host/authority header to be used for setting the SNI value.
|
||||
|
@ -119,6 +138,7 @@ message UpstreamHttpProtocolOptions {
|
|||
// is not found or the value is empty, host/authority header will be used instead.
|
||||
// This field is intended to be set with ``auto_sni`` and/or ``auto_san_validation`` fields.
|
||||
// If none of these fields are set then setting this would be a no-op.
|
||||
// Does nothing if a filter before the http router filter sets the corresponding metadata.
|
||||
string override_auto_sni_header = 3
|
||||
[(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME ignore_empty: true}];
|
||||
}
|
||||
|
@ -127,6 +147,7 @@ message UpstreamHttpProtocolOptions {
|
|||
// make an HTTP connection to an origin server. See https://tools.ietf.org/html/rfc7838 for
|
||||
// HTTP Alternative Services and https://datatracker.ietf.org/doc/html/draft-ietf-dnsop-svcb-https-04
|
||||
// for the "HTTPS" DNS resource record.
|
||||
// [#next-free-field: 6]
|
||||
message AlternateProtocolsCacheOptions {
|
||||
// Allows pre-populating the cache with HTTP/3 alternate protocols entries with a 7 day lifetime.
|
||||
// This will cause Envoy to attempt HTTP/3 to those upstreams, even if the upstreams have not
|
||||
|
@ -171,6 +192,17 @@ message AlternateProtocolsCacheOptions {
|
|||
|
||||
// Allows pre-populating the cache with entries, as described above.
|
||||
repeated AlternateProtocolsCacheEntry prepopulated_entries = 4;
|
||||
|
||||
// Optional list of hostnames suffixes for which Alt-Svc entries can be shared. For example, if
|
||||
// this list contained the value ``.c.example.com``, then an Alt-Svc entry for ``foo.c.example.com``
|
||||
// could be shared with ``bar.c.example.com`` but would not be shared with ``baz.example.com``. On
|
||||
// the other hand, if the list contained the value ``.example.com`` then all three hosts could share
|
||||
// Alt-Svc entries. Each entry must start with ``.``. If a hostname matches multiple suffixes, the
|
||||
// first listed suffix will be used.
|
||||
//
|
||||
// Since lookup in this list is O(n), it is recommended that the number of suffixes be limited.
|
||||
// [#not-implemented-hide:]
|
||||
repeated string canonical_suffixes = 5;
|
||||
}
|
||||
|
||||
// [#next-free-field: 7]
|
||||
|
@ -217,10 +249,9 @@ message HttpProtocolOptions {
|
|||
google.protobuf.Duration idle_timeout = 1;
|
||||
|
||||
// The maximum duration of a connection. The duration is defined as a period since a connection
|
||||
// was established. If not set, there is no max duration. When max_connection_duration is reached
|
||||
// and if there are no active streams, the connection will be closed. If the connection is a
|
||||
// downstream connection and there are any active streams, the drain sequence will kick-in,
|
||||
// and the connection will be force-closed after the drain period. See :ref:`drain_timeout
|
||||
// was established. If not set, there is no max duration. When max_connection_duration is reached,
|
||||
// the drain sequence will kick-in. The connection will be closed after the drain timeout period
|
||||
// if there are no active streams. See :ref:`drain_timeout
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.drain_timeout>`.
|
||||
google.protobuf.Duration max_connection_duration = 3;
|
||||
|
||||
|
@ -247,7 +278,7 @@ message HttpProtocolOptions {
|
|||
google.protobuf.UInt32Value max_requests_per_connection = 6;
|
||||
}
|
||||
|
||||
// [#next-free-field: 9]
|
||||
// [#next-free-field: 11]
|
||||
message Http1ProtocolOptions {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.core.Http1ProtocolOptions";
|
||||
|
@ -351,6 +382,27 @@ message Http1ProtocolOptions {
|
|||
// (inferred if not present), host (from the host/:authority header) and path
|
||||
// (from first line or :path header).
|
||||
bool send_fully_qualified_url = 8;
|
||||
|
||||
// [#not-implemented-hide:] Hiding so that field can be removed after BalsaParser is rolled out.
|
||||
// If set, force HTTP/1 parser: BalsaParser if true, http-parser if false.
|
||||
// If unset, HTTP/1 parser is selected based on
|
||||
// envoy.reloadable_features.http1_use_balsa_parser.
|
||||
// See issue #21245.
|
||||
google.protobuf.BoolValue use_balsa_parser = 9
|
||||
[(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// [#not-implemented-hide:] Hiding so that field can be removed.
|
||||
// If true, and BalsaParser is used (either `use_balsa_parser` above is true,
|
||||
// or `envoy.reloadable_features.http1_use_balsa_parser` is true and
|
||||
// `use_balsa_parser` is unset), then every non-empty method with only valid
|
||||
// characters is accepted. Otherwise, methods not on the hard-coded list are
|
||||
// rejected.
|
||||
// Once UHV is enabled, this field should be removed, and BalsaParser should
|
||||
// allow any method. UHV validates the method, rejecting empty string or
|
||||
// invalid characters, and provides :ref:`restrict_http_methods
|
||||
// <envoy_v3_api_field_extensions.http.header_validators.envoy_default.v3.HeaderValidatorConfig.restrict_http_methods>`
|
||||
// to reject custom methods.
|
||||
bool allow_custom_methods = 10 [(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
}
|
||||
|
||||
message KeepaliveSettings {
|
||||
|
@ -383,7 +435,7 @@ message KeepaliveSettings {
|
|||
[(validate.rules).duration = {gte {nanos: 1000000}}];
|
||||
}
|
||||
|
||||
// [#next-free-field: 16]
|
||||
// [#next-free-field: 17]
|
||||
message Http2ProtocolOptions {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.core.Http2ProtocolOptions";
|
||||
|
@ -446,10 +498,10 @@ message Http2ProtocolOptions {
|
|||
// Allows proxying Websocket and other upgrades over H2 connect.
|
||||
bool allow_connect = 5;
|
||||
|
||||
// [#not-implemented-hide:] Hiding until envoy has full metadata support.
|
||||
// [#not-implemented-hide:] Hiding until Envoy has full metadata support.
|
||||
// Still under implementation. DO NOT USE.
|
||||
//
|
||||
// Allows metadata. See [metadata
|
||||
// Allows sending and receiving HTTP/2 METADATA frames. See [metadata
|
||||
// docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more
|
||||
// information.
|
||||
bool allow_metadata = 6;
|
||||
|
@ -561,6 +613,12 @@ message Http2ProtocolOptions {
|
|||
// Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer
|
||||
// does not respond within the configured timeout, the connection will be aborted.
|
||||
KeepaliveSettings connection_keepalive = 15;
|
||||
|
||||
// [#not-implemented-hide:] Hiding so that the field can be removed after oghttp2 is rolled out.
|
||||
// If set, force use of a particular HTTP/2 codec: oghttp2 if true, nghttp2 if false.
|
||||
// If unset, HTTP/2 codec is selected based on envoy.reloadable_features.http2_use_oghttp2.
|
||||
google.protobuf.BoolValue use_oghttp2_codec = 16
|
||||
[(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
}
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
|
@ -572,7 +630,7 @@ message GrpcProtocolOptions {
|
|||
}
|
||||
|
||||
// A message which allows using HTTP/3.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 7]
|
||||
message Http3ProtocolOptions {
|
||||
QuicProtocolOptions quic_protocol_options = 1;
|
||||
|
||||
|
@ -591,12 +649,27 @@ message Http3ProtocolOptions {
|
|||
// <https://datatracker.ietf.org/doc/draft-ietf-httpbis-h3-websockets/>`_
|
||||
// Note that HTTP/3 CONNECT is not yet an RFC.
|
||||
bool allow_extended_connect = 5 [(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// [#not-implemented-hide:] Hiding until Envoy has full metadata support.
|
||||
// Still under implementation. DO NOT USE.
|
||||
//
|
||||
// Allows sending and receiving HTTP/3 METADATA frames. See [metadata
|
||||
// docs](https://github.com/envoyproxy/envoy/blob/main/source/docs/h2_metadata.md) for more
|
||||
// information.
|
||||
bool allow_metadata = 6;
|
||||
}
|
||||
|
||||
// A message to control transformations to the :scheme header
|
||||
message SchemeHeaderTransformation {
|
||||
oneof transformation {
|
||||
// Overwrite any Scheme header with the contents of this string.
|
||||
// If set, takes precedence over match_upstream.
|
||||
string scheme_to_overwrite = 1 [(validate.rules).string = {in: "http" in: "https"}];
|
||||
}
|
||||
|
||||
// Set the Scheme header to match the upstream transport protocol. For example, should a
|
||||
// request be sent to the upstream over TLS, the scheme header will be set to "https". Should the
|
||||
// request be sent over plaintext, the scheme header will be set to "http".
|
||||
// If scheme_to_overwrite is set, this field is not used.
|
||||
bool match_upstream = 2;
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ syntax = "proto3";
|
|||
package envoy.config.core.v3;
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "validate/validate.proto";
|
||||
|
||||
option java_package = "io.envoyproxy.envoy.config.core.v3";
|
||||
option java_outer_classname = "ProxyProtocolProto";
|
||||
|
@ -10,7 +11,26 @@ option java_multiple_files = true;
|
|||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Proxy Protocol]
|
||||
// [#protodoc-title: Proxy protocol]
|
||||
|
||||
message ProxyProtocolPassThroughTLVs {
|
||||
enum PassTLVsMatchType {
|
||||
// Pass all TLVs.
|
||||
INCLUDE_ALL = 0;
|
||||
|
||||
// Pass specific TLVs defined in tlv_type.
|
||||
INCLUDE = 1;
|
||||
}
|
||||
|
||||
// The strategy to pass through TLVs. Default is INCLUDE_ALL.
|
||||
// If INCLUDE_ALL is set, all TLVs will be passed through no matter the tlv_type field.
|
||||
PassTLVsMatchType match_type = 1;
|
||||
|
||||
// The TLV types that are applied based on match_type.
|
||||
// TLV type is defined as uint8_t in proxy protocol. See `the spec
|
||||
// <https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt>`_ for details.
|
||||
repeated uint32 tlv_type = 2 [(validate.rules).repeated = {items {uint32 {lt: 256}}}];
|
||||
}
|
||||
|
||||
message ProxyProtocolConfig {
|
||||
enum Version {
|
||||
|
@ -23,4 +43,8 @@ message ProxyProtocolConfig {
|
|||
|
||||
// The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details
|
||||
Version version = 1;
|
||||
|
||||
// This config controls which TLVs can be passed to upstream if it is Proxy Protocol
|
||||
// V2 header. If there is no setting for this field, no TLVs will be passed through.
|
||||
ProxyProtocolPassThroughTLVs pass_through_tlvs = 2;
|
||||
}
|
||||
|
|
|
@ -12,7 +12,7 @@ option java_multiple_files = true;
|
|||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/core/v3;corev3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Socket Option ]
|
||||
// [#protodoc-title: Socket option]
|
||||
|
||||
// Generic socket option message. This would be used to set socket options that
|
||||
// might not exist in upstream kernels or precompiled Envoy binaries.
|
||||
|
@ -75,3 +75,7 @@ message SocketOption {
|
|||
// STATE_PREBIND is currently the only valid value.
|
||||
SocketState state = 6 [(validate.rules).enum = {defined_only: true}];
|
||||
}
|
||||
|
||||
message SocketOptionsOverride {
|
||||
repeated SocketOption socket_options = 1;
|
||||
}
|
||||
|
|
|
@ -19,9 +19,15 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// [#protodoc-title: Substitution format string]
|
||||
|
||||
// Optional configuration options to be used with json_format.
|
||||
message JsonFormatOptions {
|
||||
// The output JSON string properties will be sorted.
|
||||
bool sort_properties = 1;
|
||||
}
|
||||
|
||||
// Configuration to use multiple :ref:`command operators <config_access_log_command_operators>`
|
||||
// to generate a new string in either plain text or JSON format.
|
||||
// [#next-free-field: 7]
|
||||
// [#next-free-field: 8]
|
||||
message SubstitutionFormatString {
|
||||
oneof format {
|
||||
option (validate.required) = true;
|
||||
|
@ -113,4 +119,7 @@ message SubstitutionFormatString {
|
|||
// See the formatters extensions documentation for details.
|
||||
// [#extension-category: envoy.formatter]
|
||||
repeated TypedExtensionConfig formatters = 6;
|
||||
|
||||
// If json_format is used, the options will be applied to the output JSON string.
|
||||
JsonFormatOptions json_format_options = 7;
|
||||
}
|
||||
|
|
|
@ -35,12 +35,11 @@ message ClusterLoadAssignment {
|
|||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ClusterLoadAssignment";
|
||||
|
||||
// Load balancing policy settings.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 7]
|
||||
message Policy {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.ClusterLoadAssignment.Policy";
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
message DropOverload {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload";
|
||||
|
@ -75,7 +74,15 @@ message ClusterLoadAssignment {
|
|||
// "throttle"_drop = 60%
|
||||
// "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%.
|
||||
// actual_outgoing_load = 20% // remaining after applying all categories.
|
||||
// [#not-implemented-hide:]
|
||||
//
|
||||
// Envoy supports only one element and will NACK if more than one element is present.
|
||||
// Other xDS-capable data planes will not necessarily have this limitation.
|
||||
//
|
||||
// In Envoy, this ``drop_overloads`` config can be overridden by a runtime key
|
||||
// "load_balancing_policy.drop_overload_limit" setting. This runtime key can be set to
|
||||
// any integer number between 0 and 100. 0 means drop 0%. 100 means drop 100%.
|
||||
// When both ``drop_overloads`` config and "load_balancing_policy.drop_overload_limit"
|
||||
// setting are in place, the min of these two wins.
|
||||
repeated DropOverload drop_overloads = 2;
|
||||
|
||||
// Priority levels and localities are considered overprovisioned with this
|
||||
|
@ -99,6 +106,16 @@ message ClusterLoadAssignment {
|
|||
// are considered stale and should be marked unhealthy.
|
||||
// Defaults to 0 which means endpoints never go stale.
|
||||
google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}];
|
||||
|
||||
// If true, use the :ref:`load balancing weight
|
||||
// <envoy_v3_api_field_config.endpoint.v3.LbEndpoint.load_balancing_weight>` of healthy and unhealthy
|
||||
// hosts to determine the health of the priority level. If false, use the number of healthy and unhealthy hosts
|
||||
// to determine the health of the priority level, or in other words assume each host has a weight of 1 for
|
||||
// this calculation.
|
||||
//
|
||||
// Note: this is not currently implemented for
|
||||
// :ref:`locality weighted load balancing <arch_overview_load_balancing_locality_weighted_lb>`.
|
||||
bool weighted_priority_health = 6;
|
||||
}
|
||||
|
||||
// Name of the cluster. This will be the :ref:`service_name
|
||||
|
|
|
@ -44,6 +44,22 @@ message Endpoint {
|
|||
// to a non-empty value allows overriding the cluster level configuration for a specific
|
||||
// endpoint.
|
||||
string hostname = 2;
|
||||
|
||||
// Optional alternative health check host address.
|
||||
//
|
||||
// .. attention::
|
||||
//
|
||||
// The form of the health check host address is expected to be a direct IP address.
|
||||
core.v3.Address address = 3;
|
||||
|
||||
// Optional flag to control if perform active health check for this endpoint.
|
||||
// Active health check is enabled by default if there is a health checker.
|
||||
bool disable_active_health_check = 4;
|
||||
}
|
||||
|
||||
message AdditionalAddress {
|
||||
// Additional address that is associated with the endpoint.
|
||||
core.v3.Address address = 1;
|
||||
}
|
||||
|
||||
// The upstream host address.
|
||||
|
@ -71,6 +87,13 @@ message Endpoint {
|
|||
// that require a hostname, like
|
||||
// :ref:`auto_host_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.auto_host_rewrite>`.
|
||||
string hostname = 3;
|
||||
|
||||
// An ordered list of addresses that together with ``address`` comprise the
|
||||
// list of addresses for an endpoint. The address given in the ``address`` is
|
||||
// prepended to this list. It is assumed that the list must already be
|
||||
// sorted by preference order of the addresses. This will only be supported
|
||||
// for STATIC and EDS clusters.
|
||||
repeated AdditionalAddress additional_addresses = 4;
|
||||
}
|
||||
|
||||
// An Endpoint that Envoy can route traffic to.
|
||||
|
@ -124,7 +147,7 @@ message LedsClusterLocalityConfig {
|
|||
// A group of endpoints belonging to a Locality.
|
||||
// One can have multiple LocalityLbEndpoints for a locality, but only if
|
||||
// they have different priorities.
|
||||
// [#next-free-field: 9]
|
||||
// [#next-free-field: 10]
|
||||
message LocalityLbEndpoints {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.endpoint.LocalityLbEndpoints";
|
||||
|
@ -138,6 +161,9 @@ message LocalityLbEndpoints {
|
|||
// Identifies location of where the upstream hosts run.
|
||||
core.v3.Locality locality = 1;
|
||||
|
||||
// Metadata to provide additional information about the locality endpoints in aggregate.
|
||||
core.v3.Metadata metadata = 9;
|
||||
|
||||
// The group of endpoints belonging to the locality specified.
|
||||
// [#comment:TODO(adisuissa): Once LEDS is implemented this field needs to be
|
||||
// deprecated and replaced by ``load_balancer_endpoints``.]
|
||||
|
@ -171,9 +197,9 @@ message LocalityLbEndpoints {
|
|||
// default to the highest priority (0).
|
||||
//
|
||||
// Under usual circumstances, Envoy will only select endpoints for the highest
|
||||
// priority (0). In the event all endpoints for a particular priority are
|
||||
// priority (0). In the event that enough endpoints for a particular priority are
|
||||
// unavailable/unhealthy, Envoy will fail over to selecting endpoints for the
|
||||
// next highest priority group.
|
||||
// next highest priority group. Read more at :ref:`priority levels <arch_overview_load_balancing_priority_levels>`.
|
||||
//
|
||||
// Priorities should range from 0 (highest) to N (lowest) without skipping.
|
||||
uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}];
|
||||
|
|
|
@ -8,6 +8,8 @@ import "envoy/config/core/v3/base.proto";
|
|||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/struct.proto";
|
||||
|
||||
import "xds/annotations/v3/status.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
@ -23,7 +25,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// These are stats Envoy reports to the management server at a frequency defined by
|
||||
// :ref:`LoadStatsResponse.load_reporting_interval<envoy_v3_api_field_service.load_stats.v3.LoadStatsResponse.load_reporting_interval>`.
|
||||
// Stats per upstream region/zone and optionally per subzone.
|
||||
// [#next-free-field: 9]
|
||||
// [#next-free-field: 15]
|
||||
message UpstreamLocalityStats {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.endpoint.UpstreamLocalityStats";
|
||||
|
@ -48,7 +50,45 @@ message UpstreamLocalityStats {
|
|||
// upstream endpoints in the locality.
|
||||
uint64 total_issued_requests = 8;
|
||||
|
||||
// Stats for multi-dimensional load balancing.
|
||||
// The total number of connections in an established state at the time of the
|
||||
// report. This field is aggregated over all the upstream endpoints in the
|
||||
// locality.
|
||||
// In Envoy, this information may be based on ``upstream_cx_active metric``.
|
||||
// [#not-implemented-hide:]
|
||||
uint64 total_active_connections = 9 [(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// The total number of connections opened since the last report.
|
||||
// This field is aggregated over all the upstream endpoints in the locality.
|
||||
// In Envoy, this information may be based on ``upstream_cx_total`` metric
|
||||
// compared to itself between start and end of an interval, i.e.
|
||||
// ``upstream_cx_total``(now) - ``upstream_cx_total``(now -
|
||||
// load_report_interval).
|
||||
// [#not-implemented-hide:]
|
||||
uint64 total_new_connections = 10 [(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// The total number of connection failures since the last report.
|
||||
// This field is aggregated over all the upstream endpoints in the locality.
|
||||
// In Envoy, this information may be based on ``upstream_cx_connect_fail``
|
||||
// metric compared to itself between start and end of an interval, i.e.
|
||||
// ``upstream_cx_connect_fail``(now) - ``upstream_cx_connect_fail``(now -
|
||||
// load_report_interval).
|
||||
// [#not-implemented-hide:]
|
||||
uint64 total_fail_connections = 11 [(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// CPU utilization stats for multi-dimensional load balancing.
|
||||
// This typically comes from endpoint metrics reported via ORCA.
|
||||
UnnamedEndpointLoadMetricStats cpu_utilization = 12;
|
||||
|
||||
// Memory utilization for multi-dimensional load balancing.
|
||||
// This typically comes from endpoint metrics reported via ORCA.
|
||||
UnnamedEndpointLoadMetricStats mem_utilization = 13;
|
||||
|
||||
// Blended application-defined utilization for multi-dimensional load balancing.
|
||||
// This typically comes from endpoint metrics reported via ORCA.
|
||||
UnnamedEndpointLoadMetricStats application_utilization = 14;
|
||||
|
||||
// Named stats for multi-dimensional load balancing.
|
||||
// These typically come from endpoint metrics reported via ORCA.
|
||||
repeated EndpointLoadMetricStats load_metric_stats = 5;
|
||||
|
||||
// Endpoint granularity stats information for this locality. This information
|
||||
|
@ -118,6 +158,16 @@ message EndpointLoadMetricStats {
|
|||
double total_metric_value = 3;
|
||||
}
|
||||
|
||||
// Same as EndpointLoadMetricStats, except without the metric_name field.
|
||||
message UnnamedEndpointLoadMetricStats {
|
||||
// Number of calls that finished and included this metric.
|
||||
uint64 num_requests_finished_with_metric = 1;
|
||||
|
||||
// Sum of metric values across all calls that finished with this metric for
|
||||
// load_reporting_interval.
|
||||
double total_metric_value = 2;
|
||||
}
|
||||
|
||||
// Per cluster load stats. Envoy reports these stats a management server in a
|
||||
// :ref:`LoadStatsRequest<envoy_v3_api_msg_service.load_stats.v3.LoadStatsRequest>`
|
||||
// Next ID: 7
|
||||
|
|
|
@ -194,7 +194,7 @@ message AuthorizationResponse {
|
|||
// Note that coexistent headers will be overridden.
|
||||
type.matcher.ListStringMatcher allowed_upstream_headers = 1;
|
||||
|
||||
// When this :ref:`list <envoy_api_msg_type.matcher.ListStringMatcher>`. is set, authorization
|
||||
// When this :ref:`list <envoy_api_msg_type.matcher.ListStringMatcher>` is set, authorization
|
||||
// response headers that have a correspondent match will be added to the client's response. Note
|
||||
// that when this list is *not* set, all the authorization response headers, except *Authority
|
||||
// (Host)* will be in the response to the client. When a header is included in this list, *Path*,
|
||||
|
|
|
@ -46,9 +46,7 @@ message FilterConfig {
|
|||
//
|
||||
// .. attention::
|
||||
// If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the
|
||||
// behavior will default to `stats_for_all_methods=false`. This default value is changed due
|
||||
// to the previous value being deprecated. This behavior can be changed with runtime override
|
||||
// `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`.
|
||||
// behavior will default to `stats_for_all_methods=false`.
|
||||
google.protobuf.BoolValue stats_for_all_methods = 3;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -127,7 +127,7 @@ message HttpConnectionManager {
|
|||
// Target percentage of requests managed by this HTTP connection manager that will be force
|
||||
// traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`
|
||||
// header is set. This field is a direct analog for the runtime variable
|
||||
// 'tracing.client_sampling' in the :ref:`HTTP Connection Manager
|
||||
// 'tracing.client_enabled' in the :ref:`HTTP Connection Manager
|
||||
// <config_http_conn_man_runtime>`.
|
||||
// Default: 100%
|
||||
type.Percent client_sampling = 3;
|
||||
|
|
|
@ -21,4 +21,42 @@ option (udpa.annotations.file_status).package_version_status = FROZEN;
|
|||
message KafkaBroker {
|
||||
// The prefix to use when emitting :ref:`statistics <config_network_filters_kafka_broker_stats>`.
|
||||
string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];
|
||||
|
||||
// Set to true if broker filter should attempt to serialize the received responses from the
|
||||
// upstream broker instead of passing received bytes as is.
|
||||
// Disabled by default.
|
||||
bool force_response_rewrite = 2;
|
||||
|
||||
// Optional broker address rewrite specification.
|
||||
// Allows the broker filter to rewrite Kafka responses so that all connections established by
|
||||
// the Kafka clients point to Envoy.
|
||||
// This allows Kafka cluster not to configure its 'advertised.listeners' property
|
||||
// (as the necessary re-pointing will be done by this filter).
|
||||
// This collection of rules should cover all brokers in the cluster that is being proxied,
|
||||
// otherwise some nodes' addresses might leak to the downstream clients.
|
||||
oneof broker_address_rewrite_spec {
|
||||
// Broker address rewrite rules that match by broker ID.
|
||||
IdBasedBrokerRewriteSpec id_based_broker_address_rewrite_spec = 3;
|
||||
}
|
||||
}
|
||||
|
||||
// Collection of rules matching by broker ID.
|
||||
message IdBasedBrokerRewriteSpec {
|
||||
repeated IdBasedBrokerRewriteRule rules = 1;
|
||||
}
|
||||
|
||||
// Defines a rule to rewrite broker address data.
|
||||
message IdBasedBrokerRewriteRule {
|
||||
// Broker ID to match.
|
||||
uint32 id = 1 [(validate.rules).uint32 = {gte: 0}];
|
||||
|
||||
// The host value to use (resembling the host part of Kafka's advertised.listeners).
|
||||
// The value should point to the Envoy (not Kafka) listener, so that all client traffic goes
|
||||
// through Envoy.
|
||||
string host = 2 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// The port value to use (resembling the port part of Kafka's advertised.listeners).
|
||||
// The value should point to the Envoy (not Kafka) listener, so that all client traffic goes
|
||||
// through Envoy.
|
||||
uint32 port = 3 [(validate.rules).uint32 = {lte: 65535}];
|
||||
}
|
||||
|
|
|
@ -34,9 +34,17 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// Listener :ref:`configuration overview <config_listeners>`
|
||||
|
||||
// The additional address the listener is listening on.
|
||||
// [#not-implemented-hide:]
|
||||
message AdditionalAddress {
|
||||
core.v3.Address address = 1;
|
||||
|
||||
// Additional socket options that may not be present in Envoy source code or
|
||||
// precompiled binaries. If specified, this will override the
|
||||
// :ref:`socket_options <envoy_v3_api_field_config.listener.v3.Listener.socket_options>`
|
||||
// in the listener. If specified with no
|
||||
// :ref:`socket_options <envoy_v3_api_field_config.core.v3.SocketOptionsOverride.socket_options>`
|
||||
// or an empty list of :ref:`socket_options <envoy_v3_api_field_config.core.v3.SocketOptionsOverride.socket_options>`,
|
||||
// it means no socket option will apply.
|
||||
core.v3.SocketOptionsOverride socket_options = 2;
|
||||
}
|
||||
|
||||
// Listener list collections. Entries are ``Listener`` resources or references.
|
||||
|
@ -45,7 +53,7 @@ message ListenerCollection {
|
|||
repeated xds.core.v3.CollectionEntry entries = 1;
|
||||
}
|
||||
|
||||
// [#next-free-field: 34]
|
||||
// [#next-free-field: 36]
|
||||
message Listener {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener";
|
||||
|
||||
|
@ -104,7 +112,6 @@ message Listener {
|
|||
}
|
||||
|
||||
// Configuration for envoy internal listener. All the future internal listener features should be added here.
|
||||
// [#not-implemented-hide:]
|
||||
message InternalListenerConfig {
|
||||
}
|
||||
|
||||
|
@ -124,7 +131,6 @@ message Listener {
|
|||
// The additional addresses the listener should listen on. The addresses must be unique across all
|
||||
// listeners. Multiple addresses with port 0 can be supplied. When using multiple addresses in a single listener,
|
||||
// all addresses use the same protocol, and multiple internal addresses are not supported.
|
||||
// [#not-implemented-hide:]
|
||||
repeated AdditionalAddress additional_addresses = 33;
|
||||
|
||||
// Optional prefix to use on listener stats. If empty, the stats will be rooted at
|
||||
|
@ -193,7 +199,12 @@ message Listener {
|
|||
// before a connection is created.
|
||||
// UDP Listener filters can be specified when the protocol in the listener socket address in
|
||||
// :ref:`protocol <envoy_v3_api_field_config.core.v3.SocketAddress.protocol>` is :ref:`UDP
|
||||
// <envoy_v3_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>`.
|
||||
// <envoy_v3_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>` and no
|
||||
// :ref:`quic_options <envoy_v3_api_field_config.listener.v3.UdpListenerConfig.quic_options>` is specified in :ref:`udp_listener_config <envoy_v3_api_field_config.listener.v3.Listener.udp_listener_config>`.
|
||||
// QUIC listener filters can be specified when :ref:`quic_options
|
||||
// <envoy_v3_api_field_config.listener.v3.UdpListenerConfig.quic_options>` is
|
||||
// specified in :ref:`udp_listener_config <envoy_v3_api_field_config.listener.v3.Listener.udp_listener_config>`.
|
||||
// They are processed sequentially right before connection creation. And like TCP Listener filters, they can be used to manipulate the connection metadata and socket. But the difference is that they can't be used to pause connection creation.
|
||||
repeated ListenerFilter listener_filters = 9;
|
||||
|
||||
// The timeout to wait for all listener filters to complete operation. If the timeout is reached,
|
||||
|
@ -236,7 +247,10 @@ message Listener {
|
|||
google.protobuf.BoolValue freebind = 11;
|
||||
|
||||
// Additional socket options that may not be present in Envoy source code or
|
||||
// precompiled binaries.
|
||||
// precompiled binaries. The socket options can be updated for a listener when
|
||||
// :ref:`enable_reuse_port <envoy_v3_api_field_config.listener.v3.Listener.enable_reuse_port>`
|
||||
// is ``true``. Otherwise, if socket options change during a listener update the update will be rejected
|
||||
// to make it clear that the options were not updated.
|
||||
repeated core.v3.SocketOption socket_options = 13;
|
||||
|
||||
// Whether the listener should accept TCP Fast Open (TFO) connections.
|
||||
|
@ -302,7 +316,9 @@ message Listener {
|
|||
// create one socket for each worker thread. This makes inbound connections
|
||||
// distribute among worker threads roughly evenly in cases where there are a high number
|
||||
// of connections. When this flag is set to false, all worker threads share one socket. This field
|
||||
// defaults to true.
|
||||
// defaults to true. The change of field will be rejected during an listener update when the
|
||||
// runtime flag ``envoy.reloadable_features.enable_update_listener_socket_options`` is enabled.
|
||||
// Otherwise, the update of this field will be ignored quietly.
|
||||
//
|
||||
// .. attention::
|
||||
//
|
||||
|
@ -328,6 +344,17 @@ message Listener {
|
|||
// provided net.core.somaxconn will be used on Linux and 128 otherwise.
|
||||
google.protobuf.UInt32Value tcp_backlog_size = 24;
|
||||
|
||||
// The maximum number of connections to accept from the kernel per socket
|
||||
// event. Envoy may decide to close these connections after accepting them
|
||||
// from the kernel e.g. due to load shedding, or other policies.
|
||||
// If there are more than max_connections_to_accept_per_socket_event
|
||||
// connections pending accept, connections over this threshold will be
|
||||
// accepted in later event loop iterations.
|
||||
// If no value is provided Envoy will accept all connections pending accept
|
||||
// from the kernel.
|
||||
google.protobuf.UInt32Value max_connections_to_accept_per_socket_event = 34
|
||||
[(validate.rules).uint32 = {gt: 0}];
|
||||
|
||||
// Whether the listener should bind to the port. A listener that doesn't
|
||||
// bind can only receive connections redirected from other listeners that set
|
||||
// :ref:`use_original_dst <envoy_v3_api_field_config.listener.v3.Listener.use_original_dst>`
|
||||
|
@ -335,24 +362,21 @@ message Listener {
|
|||
google.protobuf.BoolValue bind_to_port = 26;
|
||||
|
||||
// The exclusive listener type and the corresponding config.
|
||||
// TODO(lambdai): https://github.com/envoyproxy/envoy/issues/15372
|
||||
// Will create and add TcpListenerConfig. Will add UdpListenerConfig and ApiListener.
|
||||
// [#not-implemented-hide:]
|
||||
oneof listener_specifier {
|
||||
// Used to represent an internal listener which does not listen on OSI L4 address but can be used by the
|
||||
// :ref:`envoy cluster <envoy_v3_api_msg_config.cluster.v3.Cluster>` to create a user space connection to.
|
||||
// The internal listener acts as a tcp listener. It supports listener filters and network filter chains.
|
||||
// The internal listener require :ref:`address <envoy_v3_api_field_config.listener.v3.Listener.address>` has
|
||||
// field `envoy_internal_address`.
|
||||
// The internal listener acts as a TCP listener. It supports listener filters and network filter chains.
|
||||
// Upstream clusters refer to the internal listeners by their :ref:`name
|
||||
// <envoy_v3_api_field_config.listener.v3.Listener.name>`. :ref:`Address
|
||||
// <envoy_v3_api_field_config.listener.v3.Listener.address>` must not be set on the internal listeners.
|
||||
//
|
||||
// There are some limitations are derived from the implementation. The known limitations include
|
||||
// There are some limitations that are derived from the implementation. The known limitations include:
|
||||
//
|
||||
// * :ref:`ConnectionBalanceConfig <envoy_v3_api_msg_config.listener.v3.Listener.ConnectionBalanceConfig>` is not
|
||||
// allowed because both cluster connection and listener connection must be owned by the same dispatcher.
|
||||
// allowed because both the cluster connection and the listener connection must be owned by the same dispatcher.
|
||||
// * :ref:`tcp_backlog_size <envoy_v3_api_field_config.listener.v3.Listener.tcp_backlog_size>`
|
||||
// * :ref:`freebind <envoy_v3_api_field_config.listener.v3.Listener.freebind>`
|
||||
// * :ref:`transparent <envoy_v3_api_field_config.listener.v3.Listener.transparent>`
|
||||
// [#not-implemented-hide:]
|
||||
InternalListenerConfig internal_listener = 27;
|
||||
}
|
||||
|
||||
|
@ -363,4 +387,25 @@ message Listener {
|
|||
// Whether the listener should limit connections based upon the value of
|
||||
// :ref:`global_downstream_max_connections <config_overload_manager_limiting_connections>`.
|
||||
bool ignore_global_conn_limit = 31;
|
||||
|
||||
// Whether the listener bypasses configured overload manager actions.
|
||||
bool bypass_overload_manager = 35;
|
||||
}
|
||||
|
||||
// A placeholder proto so that users can explicitly configure the standard
|
||||
// Listener Manager via the bootstrap's :ref:`listener_manager <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.listener_manager>`.
|
||||
// [#not-implemented-hide:]
|
||||
message ListenerManager {
|
||||
}
|
||||
|
||||
// A placeholder proto so that users can explicitly configure the standard
|
||||
// Validation Listener Manager via the bootstrap's :ref:`listener_manager <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.listener_manager>`.
|
||||
// [#not-implemented-hide:]
|
||||
message ValidationListenerManager {
|
||||
}
|
||||
|
||||
// A placeholder proto so that users can explicitly configure the API
|
||||
// Listener Manager via the bootstrap's :ref:`listener_manager <envoy_v3_api_field_config.bootstrap.v3.Bootstrap.listener_manager>`.
|
||||
// [#not-implemented-hide:]
|
||||
message ApiListenerManager {
|
||||
}
|
||||
|
|
|
@ -45,7 +45,6 @@ message Filter {
|
|||
// Configuration source specifier for an extension configuration discovery
|
||||
// service. In case of a failure and without the default configuration, the
|
||||
// listener closes the connections.
|
||||
// [#not-implemented-hide:]
|
||||
core.v3.ExtensionConfigSource config_discovery = 5;
|
||||
}
|
||||
}
|
||||
|
@ -155,6 +154,7 @@ message FilterChainMatch {
|
|||
// will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``.
|
||||
//
|
||||
// Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.
|
||||
// The value ``*`` is also not supported, and ``server_names`` should be omitted instead.
|
||||
//
|
||||
// .. attention::
|
||||
//
|
||||
|
@ -227,6 +227,12 @@ message FilterChain {
|
|||
// connections established with the listener. Order matters as the filters are
|
||||
// processed sequentially as connection events happen. Note: If the filter
|
||||
// list is empty, the connection will close by default.
|
||||
//
|
||||
// For QUIC listeners, network filters other than HTTP Connection Manager (HCM)
|
||||
// can be created, but due to differences in the connection implementation compared
|
||||
// to TCP, the onData() method will never be called. Therefore, network filters
|
||||
// for QUIC listeners should only expect to do work at the start of a new connection
|
||||
// (i.e. in onNewConnection()). HCM must be the last (or only) filter in the chain.
|
||||
repeated Filter filters = 3;
|
||||
|
||||
// Whether the listener should expect a PROXY protocol V1 header on new
|
||||
|
|
|
@ -9,6 +9,8 @@ import "envoy/config/core/v3/protocol.proto";
|
|||
import "google/protobuf/duration.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "xds/annotations/v3/status.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
import "validate/validate.proto";
|
||||
|
@ -22,7 +24,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// [#protodoc-title: QUIC listener config]
|
||||
|
||||
// Configuration specific to the UDP QUIC listener.
|
||||
// [#next-free-field: 8]
|
||||
// [#next-free-field: 12]
|
||||
message QuicProtocolOptions {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.listener.QuicProtocolOptions";
|
||||
|
@ -63,4 +65,25 @@ message QuicProtocolOptions {
|
|||
// If not specified the :ref:`default one configured by <envoy_v3_api_msg_extensions.quic.proof_source.v3.ProofSourceConfig>` will be used.
|
||||
// [#extension-category: envoy.quic.proof_source]
|
||||
core.v3.TypedExtensionConfig proof_source_config = 7;
|
||||
|
||||
// Config which implementation of ``quic::ConnectionIdGeneratorInterface`` to be used for this listener.
|
||||
// If not specified the :ref:`default one configured by <envoy_v3_api_msg_extensions.quic.connection_id_generator.v3.DeterministicConnectionIdGeneratorConfig>` will be used.
|
||||
// [#extension-category: envoy.quic.connection_id_generator]
|
||||
core.v3.TypedExtensionConfig connection_id_generator_config = 8;
|
||||
|
||||
// Configure the server's preferred address to advertise so that client can migrate to it. See :ref:`example <envoy_v3_api_msg_extensions.quic.server_preferred_address.v3.FixedServerPreferredAddressConfig>` which configures a pair of v4 and v6 preferred addresses.
|
||||
// The current QUICHE implementation will advertise only one of the preferred IPv4 and IPv6 addresses based on the address family the client initially connects with.
|
||||
// If not specified, Envoy will not advertise any server's preferred address.
|
||||
// [#extension-category: envoy.quic.server_preferred_address]
|
||||
core.v3.TypedExtensionConfig server_preferred_address_config = 9
|
||||
[(xds.annotations.v3.field_status).work_in_progress = true];
|
||||
|
||||
// Configure the server to send transport parameter `disable_active_migration <https://www.rfc-editor.org/rfc/rfc9000#section-18.2-4.30.1>`_.
|
||||
// Defaults to false (do not send this transport parameter).
|
||||
google.protobuf.BoolValue send_disable_active_migration = 10;
|
||||
|
||||
// Configure which implementation of ``quic::QuicConnectionDebugVisitor`` to be used for this listener.
|
||||
// If not specified, no debug visitor will be attached to connections.
|
||||
// [#extension-category: envoy.quic.connection_debug_visitor]
|
||||
core.v3.TypedExtensionConfig connection_debug_visitor_config = 11;
|
||||
}
|
||||
|
|
|
@ -19,6 +19,18 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// [#protodoc-title: Metrics service]
|
||||
|
||||
// HistogramEmitMode is used to configure which metric types should be emitted for histograms.
|
||||
enum HistogramEmitMode {
|
||||
// Emit Histogram and Summary metric types.
|
||||
SUMMARY_AND_HISTOGRAM = 0;
|
||||
|
||||
// Emit only Summary metric types.
|
||||
SUMMARY = 1;
|
||||
|
||||
// Emit only Histogram metric types.
|
||||
HISTOGRAM = 2;
|
||||
}
|
||||
|
||||
// Metrics Service is configured as a built-in ``envoy.stat_sinks.metrics_service`` :ref:`StatsSink
|
||||
// <envoy_v3_api_msg_config.metrics.v3.StatsSink>`. This opaque configuration will be used to create
|
||||
// Metrics Service.
|
||||
|
@ -31,9 +43,9 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// - name: envoy.stat_sinks.metrics_service
|
||||
// typed_config:
|
||||
// "@type": type.googleapis.com/envoy.config.metrics.v3.MetricsServiceConfig
|
||||
// transport_api_version: V3
|
||||
//
|
||||
// [#extension: envoy.stat_sinks.metrics_service]
|
||||
// [#next-free-field: 6]
|
||||
message MetricsServiceConfig {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.metrics.v2.MetricsServiceConfig";
|
||||
|
@ -55,4 +67,7 @@ message MetricsServiceConfig {
|
|||
// and the tag extracted name will be used instead of the full name, which may contain values used by the tag
|
||||
// extractor or additional tags added during stats creation.
|
||||
bool emit_tags_as_labels = 4;
|
||||
|
||||
// Specify which metrics types to emit for histograms. Defaults to SUMMARY_AND_HISTOGRAM.
|
||||
HistogramEmitMode histogram_emit_mode = 5 [(validate.rules).enum = {defined_only: true}];
|
||||
}
|
||||
|
|
|
@ -48,10 +48,11 @@ message StatsConfig {
|
|||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.metrics.v2.StatsConfig";
|
||||
|
||||
// Each stat name is iteratively processed through these tag specifiers.
|
||||
// When a tag is matched, the first capture group is removed from the name so
|
||||
// later :ref:`TagSpecifiers <envoy_v3_api_msg_config.metrics.v3.TagSpecifier>` cannot match that
|
||||
// same portion of the match.
|
||||
// Each stat name is independently processed through these tag specifiers. When a tag is
|
||||
// matched, the first capture group is not immediately removed from the name, so later
|
||||
// :ref:`TagSpecifiers <envoy_v3_api_msg_config.metrics.v3.TagSpecifier>` can also match that
|
||||
// same portion of the match. After all tag matching is complete, a tag-extracted version of
|
||||
// the name is produced and is used in stats sinks that represent tags, such as Prometheus.
|
||||
repeated TagSpecifier stats_tags = 1;
|
||||
|
||||
// Use all default tag regexes specified in Envoy. These can be combined with
|
||||
|
@ -120,8 +121,8 @@ message StatsMatcher {
|
|||
// limited by either an exclusion or an inclusion list of :ref:`StringMatcher
|
||||
// <envoy_v3_api_msg_type.matcher.v3.StringMatcher>` protos:
|
||||
//
|
||||
// * If ``reject_all`` is set to `true`, no stats will be instantiated. If ``reject_all`` is set to
|
||||
// `false`, all stats will be instantiated.
|
||||
// * If ``reject_all`` is set to ``true``, no stats will be instantiated. If ``reject_all`` is set to
|
||||
// ``false``, all stats will be instantiated.
|
||||
//
|
||||
// * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the
|
||||
// list will not instantiate.
|
||||
|
@ -219,7 +220,11 @@ message TagSpecifier {
|
|||
//
|
||||
// .. note::
|
||||
//
|
||||
// It is invalid to specify the same tag name twice in a config.
|
||||
// A stat name may be spelled in such a way that it matches two different
|
||||
// tag extractors for the same tag name. In that case, all but one of the
|
||||
// tag values will be dropped. It is not specified which tag value will be
|
||||
// retained. The extraction will only occur for one of the extractors, and
|
||||
// only the matched extraction will be removed from the tag name.
|
||||
string tag_name = 1;
|
||||
|
||||
oneof tag_value {
|
||||
|
@ -265,18 +270,18 @@ message TagSpecifier {
|
|||
// }
|
||||
// ]
|
||||
//
|
||||
// The two regexes of the specifiers will be processed in the definition order.
|
||||
// The two regexes of the specifiers will be processed from the elaborated
|
||||
// stat name.
|
||||
//
|
||||
// The first regex will remove ``ios.``, leaving the tag extracted name
|
||||
// ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag
|
||||
// ``envoy.http_user_agent`` will be added with tag value ``ios``.
|
||||
// The first regex will save ``ios.`` as the tag value for ``envoy.http_user_agent``. It will
|
||||
// leave it in the name for potential matching with additional tag specifiers. After all tag
|
||||
// specifiers are processed the tags will be removed from the name.
|
||||
//
|
||||
// The second regex will remove ``connection_manager_1.`` from the tag
|
||||
// extracted name produced by the first regex
|
||||
// ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving
|
||||
// ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag
|
||||
// ``envoy.http_conn_manager_prefix`` will be added with the tag value
|
||||
// ``connection_manager_1``.
|
||||
// The second regex will populate tag ``envoy.http_conn_manager_prefix`` with value
|
||||
// ``connection_manager_1.``, based on the original stat name.
|
||||
//
|
||||
// As a final step, the matched tags are removed, leaving
|
||||
// ``http.user_agent.downstream_cx_total`` as the tag extracted name.
|
||||
string regex = 2 [(validate.rules).string = {max_bytes: 1024}];
|
||||
|
||||
// Specifies a fixed tag value for the ``tag_name``.
|
||||
|
|
|
@ -134,14 +134,37 @@ message OverloadAction {
|
|||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// A set of triggers for this action. The state of the action is the maximum
|
||||
// state of all triggers, which can be scaling between 0 and 1 or saturated. Listeners
|
||||
// are notified when the overload action changes state.
|
||||
// state of all triggers, which can be scalar values between 0 and 1 or
|
||||
// saturated. Listeners are notified when the overload action changes state.
|
||||
// An overload manager action can only have one trigger for a given resource
|
||||
// e.g. :ref:`Trigger.name
|
||||
// <envoy_v3_api_field_config.overload.v3.Trigger.name>` must be unique
|
||||
// in this list.
|
||||
repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}];
|
||||
|
||||
// Configuration for the action being instantiated.
|
||||
google.protobuf.Any typed_config = 3;
|
||||
}
|
||||
|
||||
// A point within the connection or request lifecycle that provides context on
|
||||
// whether to shed load at that given stage for the current entity at the
|
||||
// point.
|
||||
message LoadShedPoint {
|
||||
// This is just a well-known string for the LoadShedPoint.
|
||||
// Deployment specific LoadShedPoints e.g. within a custom extension should
|
||||
// be prefixed by the company / deployment name to avoid colliding with any
|
||||
// open source LoadShedPoints.
|
||||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// A set of triggers for this LoadShedPoint. The LoadShedPoint will use the
|
||||
// the maximum state of all triggers, which can be scalar values between 0 and
|
||||
// 1 or saturated. A LoadShedPoint can only have one trigger for a given
|
||||
// resource e.g. :ref:`Trigger.name
|
||||
// <envoy_v3_api_field_config.overload.v3.Trigger.name>` must be unique in
|
||||
// this list.
|
||||
repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}];
|
||||
}
|
||||
|
||||
// Configuration for which accounts the WatermarkBuffer Factories should
|
||||
// track.
|
||||
message BufferFactoryConfig {
|
||||
|
@ -162,6 +185,7 @@ message BufferFactoryConfig {
|
|||
uint32 minimum_account_to_track_power_of_two = 1 [(validate.rules).uint32 = {lte: 56 gte: 10}];
|
||||
}
|
||||
|
||||
// [#next-free-field: 6]
|
||||
message OverloadManager {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.overload.v2alpha.OverloadManager";
|
||||
|
@ -175,6 +199,9 @@ message OverloadManager {
|
|||
// The set of overload actions.
|
||||
repeated OverloadAction actions = 3;
|
||||
|
||||
// The set of load shed points.
|
||||
repeated LoadShedPoint loadshed_points = 5;
|
||||
|
||||
// Configuration for buffer factory.
|
||||
BufferFactoryConfig buffer_factory_config = 4;
|
||||
}
|
||||
|
|
|
@ -5,6 +5,7 @@ package envoy.config.rbac.v3;
|
|||
import "envoy/config/core/v3/address.proto";
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
import "envoy/config/route/v3/route_components.proto";
|
||||
import "envoy/type/matcher/v3/filter_state.proto";
|
||||
import "envoy/type/matcher/v3/metadata.proto";
|
||||
import "envoy/type/matcher/v3/path.proto";
|
||||
import "envoy/type/matcher/v3/string.proto";
|
||||
|
@ -94,6 +95,45 @@ message RBAC {
|
|||
LOG = 2;
|
||||
}
|
||||
|
||||
message AuditLoggingOptions {
|
||||
// Deny and allow here refer to RBAC decisions, not actions.
|
||||
enum AuditCondition {
|
||||
// Never audit.
|
||||
NONE = 0;
|
||||
|
||||
// Audit when RBAC denies the request.
|
||||
ON_DENY = 1;
|
||||
|
||||
// Audit when RBAC allows the request.
|
||||
ON_ALLOW = 2;
|
||||
|
||||
// Audit whether RBAC allows or denies the request.
|
||||
ON_DENY_AND_ALLOW = 3;
|
||||
}
|
||||
|
||||
// [#not-implemented-hide:]
|
||||
message AuditLoggerConfig {
|
||||
// Typed logger configuration.
|
||||
//
|
||||
// [#extension-category: envoy.rbac.audit_loggers]
|
||||
core.v3.TypedExtensionConfig audit_logger = 1;
|
||||
|
||||
// If true, when the logger is not supported, the data plane will not NACK but simply ignore it.
|
||||
bool is_optional = 2;
|
||||
}
|
||||
|
||||
// Condition for the audit logging to happen.
|
||||
// If this condition is met, all the audit loggers configured here will be invoked.
|
||||
//
|
||||
// [#not-implemented-hide:]
|
||||
AuditCondition audit_condition = 1 [(validate.rules).enum = {defined_only: true}];
|
||||
|
||||
// Configurations for RBAC-based authorization audit loggers.
|
||||
//
|
||||
// [#not-implemented-hide:]
|
||||
repeated AuditLoggerConfig logger_configs = 2;
|
||||
}
|
||||
|
||||
// The action to take if a policy matches. Every action either allows or denies a request,
|
||||
// and can also carry out action-specific operations.
|
||||
//
|
||||
|
@ -113,6 +153,12 @@ message RBAC {
|
|||
// Maps from policy name to policy. A match occurs when at least one policy matches the request.
|
||||
// The policies are evaluated in lexicographic order of the policy name.
|
||||
map<string, Policy> policies = 2;
|
||||
|
||||
// Audit logging options that include the condition for audit logging to happen
|
||||
// and audit logger configurations.
|
||||
//
|
||||
// [#not-implemented-hide:]
|
||||
AuditLoggingOptions audit_logging_options = 3;
|
||||
}
|
||||
|
||||
// Policy specifies a role and the principals that are assigned/denied the role.
|
||||
|
@ -148,7 +194,7 @@ message Policy {
|
|||
}
|
||||
|
||||
// Permission defines an action (or actions) that a principal can take.
|
||||
// [#next-free-field: 13]
|
||||
// [#next-free-field: 14]
|
||||
message Permission {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission";
|
||||
|
||||
|
@ -224,12 +270,16 @@ message Permission {
|
|||
// Extension for configuring custom matchers for RBAC.
|
||||
// [#extension-category: envoy.rbac.matchers]
|
||||
core.v3.TypedExtensionConfig matcher = 12;
|
||||
|
||||
// URI template path matching.
|
||||
// [#extension-category: envoy.path.match]
|
||||
core.v3.TypedExtensionConfig uri_template = 13;
|
||||
}
|
||||
}
|
||||
|
||||
// Principal defines an identity or a group of identities for a downstream
|
||||
// subject.
|
||||
// [#next-free-field: 12]
|
||||
// [#next-free-field: 13]
|
||||
message Principal {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal";
|
||||
|
||||
|
@ -274,6 +324,11 @@ message Principal {
|
|||
|
||||
// A CIDR block that describes the downstream IP.
|
||||
// This address will honor proxy protocol, but will not honor XFF.
|
||||
//
|
||||
// This field is deprecated; either use :ref:`remote_ip
|
||||
// <envoy_v3_api_field_config.rbac.v3.Principal.remote_ip>` for the same
|
||||
// behavior, or use
|
||||
// :ref:`direct_remote_ip <envoy_v3_api_field_config.rbac.v3.Principal.direct_remote_ip>`.
|
||||
core.v3.CidrRange source_ip = 5
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
|
@ -304,6 +359,9 @@ message Principal {
|
|||
// Metadata that describes additional information about the principal.
|
||||
type.matcher.v3.MetadataMatcher metadata = 7;
|
||||
|
||||
// Identifies the principal using a filter state object.
|
||||
type.matcher.v3.FilterStateMatcher filter_state = 12;
|
||||
|
||||
// Negates matching the provided principal. For instance, if the value of
|
||||
// ``not_id`` would match, this principal would not match. Conversely, if the
|
||||
// value of ``not_id`` would not match, this principal would match.
|
||||
|
|
|
@ -6,6 +6,7 @@ import "envoy/config/core/v3/base.proto";
|
|||
import "envoy/config/core/v3/config_source.proto";
|
||||
import "envoy/config/route/v3/route_components.proto";
|
||||
|
||||
import "google/protobuf/any.proto";
|
||||
import "google/protobuf/wrappers.proto";
|
||||
|
||||
import "udpa/annotations/status.proto";
|
||||
|
@ -22,7 +23,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// * Routing :ref:`architecture overview <arch_overview_http_routing>`
|
||||
// * HTTP :ref:`router filter <config_http_filters_router>`
|
||||
|
||||
// [#next-free-field: 16]
|
||||
// [#next-free-field: 18]
|
||||
message RouteConfiguration {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RouteConfiguration";
|
||||
|
||||
|
@ -81,14 +82,11 @@ message RouteConfiguration {
|
|||
(validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}
|
||||
];
|
||||
|
||||
// By default, headers that should be added/removed are evaluated from most to least specific:
|
||||
//
|
||||
// * route level
|
||||
// * virtual host level
|
||||
// * connection manager level
|
||||
//
|
||||
// To allow setting overrides at the route or virtual host level, this order can be reversed
|
||||
// by setting this option to true. Defaults to false.
|
||||
// Headers mutations at all levels are evaluated, if specified. By default, the order is from most
|
||||
// specific (i.e. route entry level) to least specific (i.e. route configuration level). Later header
|
||||
// mutations may override earlier mutations.
|
||||
// This order can be reversed by setting this field to true. In other words, most specific level mutation
|
||||
// is evaluated last.
|
||||
//
|
||||
bool most_specific_header_mutations_wins = 10;
|
||||
|
||||
|
@ -140,6 +138,23 @@ message RouteConfiguration {
|
|||
// Envoy by default takes ":path" as "<path>;<params>".
|
||||
// For users who want to only match path on the "<path>" portion, this option should be true.
|
||||
bool ignore_path_parameters_in_path_matching = 15;
|
||||
|
||||
// This field can be used to provide RouteConfiguration level per filter config. The key should match the
|
||||
// :ref:`filter config name
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`.
|
||||
// See :ref:`Http filter route specific config <arch_overview_http_filters_per_filter_config>`
|
||||
// for details.
|
||||
// [#comment: An entry's value may be wrapped in a
|
||||
// :ref:`FilterConfig<envoy_v3_api_msg_config.route.v3.FilterConfig>`
|
||||
// message to specify additional options.]
|
||||
map<string, google.protobuf.Any> typed_per_filter_config = 16;
|
||||
|
||||
// The metadata field can be used to provide additional information
|
||||
// about the route configuration. It can be used for configuration, stats, and logging.
|
||||
// The metadata should go under the filter namespace that will need it.
|
||||
// For instance, if the metadata is intended for the Router filter,
|
||||
// the filter name should be specified as ``envoy.filters.http.router``.
|
||||
core.v3.Metadata metadata = 17;
|
||||
}
|
||||
|
||||
message Vhds {
|
||||
|
|
|
@ -41,7 +41,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// host header. This allows a single listener to service multiple top level domain path trees. Once
|
||||
// a virtual host is selected based on the domain, the routes are processed in order to see which
|
||||
// upstream cluster to route to or whether to perform a redirect.
|
||||
// [#next-free-field: 23]
|
||||
// [#next-free-field: 25]
|
||||
message VirtualHost {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualHost";
|
||||
|
||||
|
@ -142,18 +142,22 @@ message VirtualHost {
|
|||
items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
|
||||
}];
|
||||
|
||||
// Indicates that the virtual host has a CORS policy.
|
||||
CorsPolicy cors = 8;
|
||||
|
||||
// The per_filter_config field can be used to provide virtual host-specific configurations for filters.
|
||||
// The key should match the :ref:`filter config name
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`.
|
||||
// The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also
|
||||
// be used for the backwards compatibility. If there is no entry referred by the filter config name, the
|
||||
// entry referred by the canonical filter name will be provided to the filters as fallback.
|
||||
// Indicates that the virtual host has a CORS policy. This field is ignored if related cors policy is
|
||||
// found in the
|
||||
// :ref:`VirtualHost.typed_per_filter_config<envoy_v3_api_field_config.route.v3.VirtualHost.typed_per_filter_config>`.
|
||||
//
|
||||
// Use of this field is filter specific;
|
||||
// see the :ref:`HTTP filter documentation <config_http_filters>` for if and how it is utilized.
|
||||
// .. attention::
|
||||
//
|
||||
// This option has been deprecated. Please use
|
||||
// :ref:`VirtualHost.typed_per_filter_config<envoy_v3_api_field_config.route.v3.VirtualHost.typed_per_filter_config>`
|
||||
// to configure the CORS HTTP filter.
|
||||
CorsPolicy cors = 8 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
// This field can be used to provide virtual host level per filter config. The key should match the
|
||||
// :ref:`filter config name
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`.
|
||||
// See :ref:`Http filter route specific config <arch_overview_http_filters_per_filter_config>`
|
||||
// for details.
|
||||
// [#comment: An entry's value may be wrapped in a
|
||||
// :ref:`FilterConfig<envoy_v3_api_msg_config.route.v3.FilterConfig>`
|
||||
// message to specify additional options.]
|
||||
|
@ -198,6 +202,10 @@ message VirtualHost {
|
|||
// independently (e.g.: values are not inherited).
|
||||
HedgePolicy hedge_policy = 17;
|
||||
|
||||
// Decides whether to include the :ref:`x-envoy-is-timeout-retry <config_http_filters_router_x-envoy-is-timeout-retry>`
|
||||
// request header in retries initiated by per try timeouts.
|
||||
bool include_is_timeout_retry_header = 23;
|
||||
|
||||
// The maximum bytes which will be buffered for retries and shadowing.
|
||||
// If set and a route-specific limit is not set, the bytes actually buffered will be the minimum
|
||||
// value of this and the listener per_connection_buffer_limit_bytes.
|
||||
|
@ -207,6 +215,13 @@ message VirtualHost {
|
|||
// It takes precedence over the route config mirror policy entirely.
|
||||
// That is, policies are not merged, the most specific non-empty one becomes the mirror policies.
|
||||
repeated RouteAction.RequestMirrorPolicy request_mirror_policies = 22;
|
||||
|
||||
// The metadata field can be used to provide additional information
|
||||
// about the virtual host. It can be used for configuration, stats, and logging.
|
||||
// The metadata should go under the filter namespace that will need it.
|
||||
// For instance, if the metadata is intended for the Router filter,
|
||||
// the filter name should be specified as ``envoy.filters.http.router``.
|
||||
core.v3.Metadata metadata = 24;
|
||||
}
|
||||
|
||||
// A filter-defined action type.
|
||||
|
@ -216,6 +231,13 @@ message FilterAction {
|
|||
google.protobuf.Any action = 1;
|
||||
}
|
||||
|
||||
// This can be used in route matcher :ref:`VirtualHost.matcher <envoy_v3_api_field_config.route.v3.VirtualHost.matcher>`.
|
||||
// When the matcher matches, routes will be matched and run.
|
||||
message RouteList {
|
||||
// The list of routes that will be matched and run, in order. The first route that matches will be used.
|
||||
repeated Route routes = 1;
|
||||
}
|
||||
|
||||
// A route is both a specification of how to match a request as well as an indication of what to do
|
||||
// next (e.g., redirect, forward, rewrite, etc.).
|
||||
//
|
||||
|
@ -273,15 +295,11 @@ message Route {
|
|||
// Decorator for the matched route.
|
||||
Decorator decorator = 5;
|
||||
|
||||
// The per_filter_config field can be used to provide route-specific configurations for filters.
|
||||
// The key should match the :ref:`filter config name
|
||||
// This field can be used to provide route specific per filter config. The key should match the
|
||||
// :ref:`filter config name
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`.
|
||||
// The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also
|
||||
// be used for the backwards compatibility. If there is no entry referred by the filter config name, the
|
||||
// entry referred by the canonical filter name will be provided to the filters as fallback.
|
||||
//
|
||||
// Use of this field is filter specific;
|
||||
// see the :ref:`HTTP filter documentation <config_http_filters>` for if and how it is utilized.
|
||||
// See :ref:`Http filter route specific config <arch_overview_http_filters_per_filter_config>`
|
||||
// for details.
|
||||
// [#comment: An entry's value may be wrapped in a
|
||||
// :ref:`FilterConfig<envoy_v3_api_msg_config.route.v3.FilterConfig>`
|
||||
// message to specify additional options.]
|
||||
|
@ -386,10 +404,11 @@ message WeightedCluster {
|
|||
(udpa.annotations.field_migrate).oneof_promotion = "cluster_specifier"
|
||||
];
|
||||
|
||||
// An integer between 0 and :ref:`total_weight
|
||||
// <envoy_v3_api_field_config.route.v3.WeightedCluster.total_weight>`. When a request matches the route,
|
||||
// the choice of an upstream cluster is determined by its weight. The sum of weights across all
|
||||
// entries in the clusters array must add up to the total_weight, if total_weight is greater than 0.
|
||||
// The weight of the cluster. This value is relative to the other clusters'
|
||||
// weights. When a request matches the route, the choice of an upstream cluster
|
||||
// is determined by its weight. The sum of weights across all
|
||||
// entries in the clusters array must be greater than 0, and must not exceed
|
||||
// uint32_t maximal value (4294967295).
|
||||
google.protobuf.UInt32Value weight = 2;
|
||||
|
||||
// Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in
|
||||
|
@ -431,16 +450,11 @@ message WeightedCluster {
|
|||
items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
|
||||
}];
|
||||
|
||||
// The per_filter_config field can be used to provide weighted cluster-specific configurations
|
||||
// for filters.
|
||||
// The key should match the :ref:`filter config name
|
||||
// This field can be used to provide weighted cluster specific per filter config. The key should match the
|
||||
// :ref:`filter config name
|
||||
// <envoy_v3_api_field_extensions.filters.network.http_connection_manager.v3.HttpFilter.name>`.
|
||||
// The canonical filter name (e.g., ``envoy.filters.http.buffer`` for the HTTP buffer filter) can also
|
||||
// be used for the backwards compatibility. If there is no entry referred by the filter config name, the
|
||||
// entry referred by the canonical filter name will be provided to the filters as fallback.
|
||||
//
|
||||
// Use of this field is filter specific;
|
||||
// see the :ref:`HTTP filter documentation <config_http_filters>` for if and how it is utilized.
|
||||
// See :ref:`Http filter route specific config <arch_overview_http_filters_per_filter_config>`
|
||||
// for details.
|
||||
// [#comment: An entry's value may be wrapped in a
|
||||
// :ref:`FilterConfig<envoy_v3_api_msg_config.route.v3.FilterConfig>`
|
||||
// message to specify additional options.]
|
||||
|
@ -459,7 +473,10 @@ message WeightedCluster {
|
|||
|
||||
// Specifies the total weight across all clusters. The sum of all cluster weights must equal this
|
||||
// value, if this is greater than 0.
|
||||
google.protobuf.UInt32Value total_weight = 3;
|
||||
// This field is now deprecated, and the client will use the sum of all
|
||||
// cluster weights. It is up to the management server to supply the correct weights.
|
||||
google.protobuf.UInt32Value total_weight = 3
|
||||
[deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
// Specifies the runtime key prefix that should be used to construct the
|
||||
// runtime keys associated with each cluster. When the ``runtime_key_prefix`` is
|
||||
|
@ -514,10 +531,20 @@ message RouteMatch {
|
|||
|
||||
// If specified, the route will match against whether or not a certificate is validated.
|
||||
// If not specified, certificate validation status (true or false) will not be considered when route matching.
|
||||
//
|
||||
// .. warning::
|
||||
//
|
||||
// Client certificate validation is not currently performed upon TLS session resumption. For
|
||||
// a resumed TLS session the route will match only when ``validated`` is false, regardless of
|
||||
// whether the client TLS certificate is valid.
|
||||
//
|
||||
// The only known workaround for this issue is to disable TLS session resumption entirely, by
|
||||
// setting both :ref:`disable_stateless_session_resumption <envoy_v3_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.disable_stateless_session_resumption>`
|
||||
// and :ref:`disable_stateful_session_resumption <envoy_v3_api_field_extensions.transport_sockets.tls.v3.DownstreamTlsContext.disable_stateful_session_resumption>` on the DownstreamTlsContext.
|
||||
google.protobuf.BoolValue validated = 2;
|
||||
}
|
||||
|
||||
// An extensible message for matching CONNECT requests.
|
||||
// An extensible message for matching CONNECT or CONNECT-UDP requests.
|
||||
message ConnectMatcher {
|
||||
}
|
||||
|
||||
|
@ -550,11 +577,10 @@ message RouteMatch {
|
|||
// stripping. This needs more thought.]
|
||||
type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}];
|
||||
|
||||
// If this is used as the matcher, the matcher will only match CONNECT requests.
|
||||
// Note that this will not match HTTP/2 upgrade-style CONNECT requests
|
||||
// (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style
|
||||
// upgrades.
|
||||
// This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2,
|
||||
// If this is used as the matcher, the matcher will only match CONNECT or CONNECT-UDP requests.
|
||||
// Note that this will not match other Extended CONNECT requests (WebSocket and the like) as
|
||||
// they are normalized in Envoy as HTTP/1.1 style upgrades.
|
||||
// This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2 and HTTP/3,
|
||||
// where Extended CONNECT requests may have a path, the path matchers will work if
|
||||
// there is a path present.
|
||||
// Note that CONNECT support is currently considered alpha in Envoy.
|
||||
|
@ -572,34 +598,8 @@ message RouteMatch {
|
|||
// Expect the value to not contain ``?`` or ``#`` and not to end in ``/``
|
||||
string path_separated_prefix = 14 [(validate.rules).string = {pattern: "^[^?#]+[^?#/]$"}];
|
||||
|
||||
// If specified, the route is a template match rule meaning that the
|
||||
// ``:path`` header (without the query string) must match the given
|
||||
// ``path_template`` pattern.
|
||||
//
|
||||
// Path template matching types:
|
||||
//
|
||||
// * ``*`` : Matches a single path component, up to the next path separator: /
|
||||
//
|
||||
// * ``**`` : Matches zero or more path segments. If present, must be the last operator.
|
||||
//
|
||||
// * ``{name} or {name=*}`` : A named variable matching one path segment up to the next path separator: /.
|
||||
//
|
||||
// * ``{name=videos/*}`` : A named variable matching more than one path segment.
|
||||
// The path component matching videos/* is captured as the named variable.
|
||||
//
|
||||
// * ``{name=**}`` : A named variable matching zero or more path segments.
|
||||
//
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// * ``/videos/*/*/*.m4s`` would match ``videos/123414/hls/1080p5000_00001.m4s``
|
||||
//
|
||||
// * ``/videos/{file}`` would match ``/videos/1080p5000_00001.m4s``
|
||||
//
|
||||
// * ``/**.mpd`` would match ``/content/123/india/dash/55/manifest.mpd``
|
||||
// [#not-implemented-hide:]
|
||||
string path_template = 15
|
||||
[(validate.rules).string = {min_len: 1 max_len: 256 ignore_empty: true}];
|
||||
// [#extension-category: envoy.path.match]
|
||||
core.v3.TypedExtensionConfig path_match_policy = 15;
|
||||
}
|
||||
|
||||
// Indicates that prefix/path matching should be case sensitive. The default
|
||||
|
@ -635,7 +635,8 @@ message RouteMatch {
|
|||
// match. The router will check the query string from the ``path`` header
|
||||
// against all the specified query parameters. If the number of specified
|
||||
// query parameters is nonzero, they all must match the ``path`` header's
|
||||
// query string for a match to occur.
|
||||
// query string for a match to occur. In the event query parameters are
|
||||
// repeated, only the first value for each key will be considered.
|
||||
//
|
||||
// .. note::
|
||||
//
|
||||
|
@ -664,7 +665,15 @@ message RouteMatch {
|
|||
repeated type.matcher.v3.MetadataMatcher dynamic_metadata = 13;
|
||||
}
|
||||
|
||||
// [#next-free-field: 12]
|
||||
// Cors policy configuration.
|
||||
//
|
||||
// .. attention::
|
||||
//
|
||||
// This message has been deprecated. Please use
|
||||
// :ref:`CorsPolicy in filter extension <envoy_v3_api_msg_extensions.filters.http.cors.v3.CorsPolicy>`
|
||||
// as as alternative.
|
||||
//
|
||||
// [#next-free-field: 14]
|
||||
message CorsPolicy {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.CorsPolicy";
|
||||
|
||||
|
@ -712,6 +721,16 @@ message CorsPolicy {
|
|||
// Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate
|
||||
// and track the request's ``Origin`` to determine if it's valid but will not enforce any policies.
|
||||
core.v3.RuntimeFractionalPercent shadow_enabled = 10;
|
||||
|
||||
// Specify whether allow requests whose target server's IP address is more private than that from
|
||||
// which the request initiator was fetched.
|
||||
//
|
||||
// More details refer to https://developer.chrome.com/blog/private-network-access-preflight.
|
||||
google.protobuf.BoolValue allow_private_network_access = 12;
|
||||
|
||||
// Specifies if preflight requests not matching the configured allowed origin should be forwarded
|
||||
// to the upstream. Default is true.
|
||||
google.protobuf.BoolValue forward_not_matching_preflights = 13;
|
||||
}
|
||||
|
||||
// [#next-free-field: 42]
|
||||
|
@ -744,12 +763,17 @@ message RouteAction {
|
|||
// collected for the shadow cluster making this feature useful for testing.
|
||||
//
|
||||
// During shadowing, the host/authority header is altered such that ``-shadow`` is appended. This is
|
||||
// useful for logging. For example, ``cluster1`` becomes ``cluster1-shadow``.
|
||||
// useful for logging. For example, ``cluster1`` becomes ``cluster1-shadow``. This behavior can be
|
||||
// disabled by setting ``disable_shadow_host_suffix_append`` to ``true``.
|
||||
//
|
||||
// .. note::
|
||||
//
|
||||
// Shadowing will not be triggered if the primary cluster does not exist.
|
||||
// [#next-free-field: 6]
|
||||
//
|
||||
// .. note::
|
||||
//
|
||||
// Shadowing doesn't support Http CONNECT and upgrades.
|
||||
// [#next-free-field: 7]
|
||||
message RequestMirrorPolicy {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.route.RouteAction.RequestMirrorPolicy";
|
||||
|
@ -795,6 +819,9 @@ message RouteAction {
|
|||
|
||||
// Determines if the trace span should be sampled. Defaults to true.
|
||||
google.protobuf.BoolValue trace_sampled = 4;
|
||||
|
||||
// Disables appending the ``-shadow`` suffix to the shadowed ``Host`` header. Defaults to ``false``.
|
||||
bool disable_shadow_host_suffix_append = 6;
|
||||
}
|
||||
|
||||
// Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer
|
||||
|
@ -818,6 +845,18 @@ message RouteAction {
|
|||
type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2;
|
||||
}
|
||||
|
||||
// CookieAttribute defines an API for adding additional attributes for a HTTP cookie.
|
||||
message CookieAttribute {
|
||||
// The name of the cookie attribute.
|
||||
string name = 1
|
||||
[(validate.rules).string =
|
||||
{min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];
|
||||
|
||||
// The optional value of the cookie attribute.
|
||||
string value = 2 [(validate.rules).string =
|
||||
{max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];
|
||||
}
|
||||
|
||||
// Envoy supports two types of cookie affinity:
|
||||
//
|
||||
// 1. Passive. Envoy takes a cookie that's present in the cookies header and
|
||||
|
@ -849,6 +888,9 @@ message RouteAction {
|
|||
// The name of the path for the cookie. If no path is specified here, no path
|
||||
// will be set for the cookie.
|
||||
string path = 3;
|
||||
|
||||
// Additional attributes for the cookie. They will be used when generating a new cookie.
|
||||
repeated CookieAttribute attributes = 4;
|
||||
}
|
||||
|
||||
message ConnectionProperties {
|
||||
|
@ -865,7 +907,8 @@ message RouteAction {
|
|||
|
||||
// The name of the URL query parameter that will be used to obtain the hash
|
||||
// key. If the parameter is not present, no hash will be produced. Query
|
||||
// parameter names are case-sensitive.
|
||||
// parameter names are case-sensitive. If query parameters are repeated, only
|
||||
// the first value will be considered.
|
||||
string name = 1 [(validate.rules).string = {min_len: 1}];
|
||||
}
|
||||
|
||||
|
@ -1046,8 +1089,8 @@ message RouteAction {
|
|||
// <config_http_filters_router_x-envoy-original-path>` header.
|
||||
//
|
||||
// Only one of :ref:`regex_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.regex_rewrite>`
|
||||
// [#comment:TODO(silverstar194) add the following once path_template_rewrite is implemented: :ref:`path_template_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.path_template_rewrite>`]
|
||||
// or ``prefix_rewrite`` may be specified.
|
||||
// :ref:`path_rewrite_policy <envoy_v3_api_field_config.route.v3.RouteAction.path_rewrite_policy>`,
|
||||
// or :ref:`prefix_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.prefix_rewrite>` may be specified.
|
||||
//
|
||||
// .. attention::
|
||||
//
|
||||
|
@ -1082,9 +1125,10 @@ message RouteAction {
|
|||
// before the rewrite into the :ref:`x-envoy-original-path
|
||||
// <config_http_filters_router_x-envoy-original-path>` header.
|
||||
//
|
||||
// Only one of :ref:`prefix_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.prefix_rewrite>`
|
||||
// [#comment:TODO(silverstar194) add the following once path_template_rewrite is implemented: :ref:`path_template_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.path_template_rewrite>`,]
|
||||
// or ``regex_rewrite`` may be specified.
|
||||
// Only one of :ref:`regex_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.regex_rewrite>`,
|
||||
// :ref:`prefix_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.prefix_rewrite>`, or
|
||||
// :ref:`path_rewrite_policy <envoy_v3_api_field_config.route.v3.RouteAction.path_rewrite_policy>`]
|
||||
// may be specified.
|
||||
//
|
||||
// Examples using Google's `RE2 <https://github.com/google/re2>`_ engine:
|
||||
//
|
||||
|
@ -1104,48 +1148,8 @@ message RouteAction {
|
|||
// ``/aaa/yyy/bbb``.
|
||||
type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32;
|
||||
|
||||
// Indicates that during forwarding, portions of the path that match the
|
||||
// pattern should be rewritten, even allowing the substitution of variables
|
||||
// from the match pattern into the new path as specified by the rewrite template.
|
||||
// This is useful to allow application paths to be
|
||||
// rewritten in a way that is aware of segments with variable content like
|
||||
// identifiers. The router filter will place the original path as it was
|
||||
// before the rewrite into the :ref:`x-envoy-original-path
|
||||
// <config_http_filters_router_x-envoy-original-path>` header.
|
||||
//
|
||||
// Only one of :ref:`prefix_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.prefix_rewrite>`,
|
||||
// :ref:`regex_rewrite <envoy_v3_api_field_config.route.v3.RouteAction.regex_rewrite>`,
|
||||
// or ``path_template_rewrite`` may be specified.
|
||||
//
|
||||
// Template pattern matching types:
|
||||
//
|
||||
// * ``*`` : Matches a single path component, up to the next path separator: /
|
||||
//
|
||||
// * ``**`` : Matches zero or more path segments. If present, must be the last operator.
|
||||
//
|
||||
// * ``{name} or {name=*}`` : A named variable matching one path segment up to the next path separator: /.
|
||||
//
|
||||
// * ``{name=videos/*}`` : A named variable matching more than one path segment.
|
||||
// The path component matching videos/* is captured as the named variable.
|
||||
//
|
||||
// * ``{name=**}`` : A named variable matching zero or more path segments.
|
||||
//
|
||||
// Only named matches can be used to perform rewrites.
|
||||
//
|
||||
// Examples using path_template_rewrite:
|
||||
//
|
||||
// * The pattern ``/{one}/{two}`` paired with a substitution string of ``/{two}/{one}`` would
|
||||
// transform ``/cat/dog`` into ``/dog/cat``.
|
||||
//
|
||||
// * The pattern ``/videos/{language=lang/*}/*`` paired with a substitution string of
|
||||
// ``/{language}`` would transform ``/videos/lang/en/video.m4s`` into ``lang/en``.
|
||||
//
|
||||
// * The path pattern ``/content/{format}/{lang}/{id}/{file}.vtt`` paired with a substitution
|
||||
// string of ``/{lang}/{format}/{file}.vtt`` would transform ``/content/hls/en-us/12345/en_193913.vtt``
|
||||
// into ``/en-us/hls/en_193913.vtt``.
|
||||
// [#not-implemented-hide:]
|
||||
string path_template_rewrite = 41
|
||||
[(validate.rules).string = {min_len: 1 max_len: 256 ignore_empty: true}];
|
||||
// [#extension-category: envoy.path.rewrite]
|
||||
core.v3.TypedExtensionConfig path_rewrite_policy = 41;
|
||||
|
||||
oneof host_rewrite_specifier {
|
||||
// Indicates that during forwarding, the host header will be swapped with
|
||||
|
@ -1159,7 +1163,9 @@ message RouteAction {
|
|||
// Indicates that during forwarding, the host header will be swapped with
|
||||
// the hostname of the upstream host chosen by the cluster manager. This
|
||||
// option is applicable only when the destination cluster for a route is of
|
||||
// type ``strict_dns`` or ``logical_dns``. Setting this to true with other cluster types
|
||||
// type ``strict_dns`` or ``logical_dns``,
|
||||
// or when :ref:`hostname <envoy_v3_api_field_config.endpoint.v3.Endpoint.hostname>`
|
||||
// field is not empty. Setting this to true with other cluster types
|
||||
// has no effect. Using this option will append the
|
||||
// :ref:`config_http_conn_man_headers_x-forwarded-host` header if
|
||||
// :ref:`append_x_forwarded_host <envoy_v3_api_field_config.route.v3.RouteAction.append_x_forwarded_host>`
|
||||
|
@ -1212,7 +1218,7 @@ message RouteAction {
|
|||
// :ref:`host_rewrite_header <envoy_v3_api_field_config.route.v3.RouteAction.host_rewrite_header>`, or
|
||||
// :ref:`host_rewrite_path_regex <envoy_v3_api_field_config.route.v3.RouteAction.host_rewrite_path_regex>`)
|
||||
// causes the original value of the host header, if any, to be appended to the
|
||||
// :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header.
|
||||
// :ref:`config_http_conn_man_headers_x-forwarded-host` HTTP header if it is different to the last value appended.
|
||||
bool append_x_forwarded_host = 38;
|
||||
|
||||
// Specifies the upstream timeout for the route. If not specified, the default is 15s. This
|
||||
|
@ -1304,8 +1310,17 @@ message RouteAction {
|
|||
// ignoring the rest of the hash policy list.
|
||||
repeated HashPolicy hash_policy = 15;
|
||||
|
||||
// Indicates that the route has a CORS policy.
|
||||
CorsPolicy cors = 17;
|
||||
// Indicates that the route has a CORS policy. This field is ignored if related cors policy is
|
||||
// found in the :ref:`Route.typed_per_filter_config<envoy_v3_api_field_config.route.v3.Route.typed_per_filter_config>` or
|
||||
// :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config<envoy_v3_api_field_config.route.v3.WeightedCluster.ClusterWeight.typed_per_filter_config>`.
|
||||
//
|
||||
// .. attention::
|
||||
//
|
||||
// This option has been deprecated. Please use
|
||||
// :ref:`Route.typed_per_filter_config<envoy_v3_api_field_config.route.v3.Route.typed_per_filter_config>` or
|
||||
// :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config<envoy_v3_api_field_config.route.v3.WeightedCluster.ClusterWeight.typed_per_filter_config>`
|
||||
// to configure the CORS HTTP filter.
|
||||
CorsPolicy cors = 17 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"];
|
||||
|
||||
// Deprecated by :ref:`grpc_timeout_header_max <envoy_v3_api_field_config.route.v3.RouteAction.MaxStreamDuration.grpc_timeout_header_max>`
|
||||
// If present, and the request is a gRPC request, use the
|
||||
|
@ -1787,7 +1802,7 @@ message Tracing {
|
|||
// Target percentage of requests managed by this HTTP connection manager that will be force
|
||||
// traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`
|
||||
// header is set. This field is a direct analog for the runtime variable
|
||||
// 'tracing.client_sampling' in the :ref:`HTTP Connection Manager
|
||||
// 'tracing.client_enabled' in the :ref:`HTTP Connection Manager
|
||||
// <config_http_conn_man_runtime>`.
|
||||
// Default: 100%
|
||||
type.v3.FractionalPercent client_sampling = 1;
|
||||
|
@ -1858,7 +1873,7 @@ message VirtualCluster {
|
|||
message RateLimit {
|
||||
option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit";
|
||||
|
||||
// [#next-free-field: 11]
|
||||
// [#next-free-field: 12]
|
||||
message Action {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.api.v2.route.RateLimit.Action";
|
||||
|
@ -2027,6 +2042,7 @@ message RateLimit {
|
|||
// .. code-block:: cpp
|
||||
//
|
||||
// ("<descriptor_key>", "<value_queried_from_metadata>")
|
||||
// [#next-free-field: 6]
|
||||
message MetaData {
|
||||
enum Source {
|
||||
// Query :ref:`dynamic metadata <well_known_dynamic_metadata>`
|
||||
|
@ -2044,11 +2060,44 @@ message RateLimit {
|
|||
type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];
|
||||
|
||||
// An optional value to use if ``metadata_key`` is empty. If not set and
|
||||
// no value is present under the metadata_key then no descriptor is generated.
|
||||
// no value is present under the metadata_key then ``skip_if_absent`` is followed to
|
||||
// skip calling the rate limiting service or skip the descriptor.
|
||||
string default_value = 3;
|
||||
|
||||
// Source of metadata
|
||||
Source source = 4 [(validate.rules).enum = {defined_only: true}];
|
||||
|
||||
// If set to true, Envoy skips the descriptor while calling rate limiting service
|
||||
// when ``metadata_key`` is empty and ``default_value`` is not set. By default it skips calling the
|
||||
// rate limiting service in that case.
|
||||
bool skip_if_absent = 5;
|
||||
}
|
||||
|
||||
// The following descriptor entry is appended to the descriptor:
|
||||
//
|
||||
// .. code-block:: cpp
|
||||
//
|
||||
// ("query_match", "<descriptor_value>")
|
||||
message QueryParameterValueMatch {
|
||||
// The key to use in the descriptor entry. Defaults to ``query_match``.
|
||||
string descriptor_key = 4;
|
||||
|
||||
// The value to use in the descriptor entry.
|
||||
string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// If set to true, the action will append a descriptor entry when the
|
||||
// request matches the headers. If set to false, the action will append a
|
||||
// descriptor entry when the request does not match the headers. The
|
||||
// default value is true.
|
||||
google.protobuf.BoolValue expect_match = 2;
|
||||
|
||||
// Specifies a set of query parameters that the rate limit action should match
|
||||
// on. The action will check the request’s query parameters against all the
|
||||
// specified query parameters in the config. A match will happen if all the
|
||||
// query parameters in the config are present in the request with the same values
|
||||
// (or based on presence if the value field is not in the config).
|
||||
repeated QueryParameterMatcher query_parameters = 3
|
||||
[(validate.rules).repeated = {min_items: 1}];
|
||||
}
|
||||
|
||||
oneof action_specifier {
|
||||
|
@ -2097,6 +2146,9 @@ message RateLimit {
|
|||
|
||||
// Rate limit on masked remote address.
|
||||
MaskedRemoteAddress masked_remote_address = 10;
|
||||
|
||||
// Rate limit on the existence of query parameters.
|
||||
QueryParameterValueMatch query_parameter_value_match = 11;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2159,7 +2211,9 @@ message RateLimit {
|
|||
//
|
||||
// {
|
||||
// "name": ":method",
|
||||
// "exact_match": "POST"
|
||||
// "string_match": {
|
||||
// "exact": "POST"
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// .. attention::
|
||||
|
@ -2318,6 +2372,7 @@ message QueryParameterMatcher {
|
|||
}
|
||||
|
||||
// HTTP Internal Redirect :ref:`architecture overview <arch_overview_internal_redirects>`.
|
||||
// [#next-free-field: 6]
|
||||
message InternalRedirectPolicy {
|
||||
// An internal redirect is not handled, unless the number of previous internal redirects that a
|
||||
// downstream request has encountered is lower than this value.
|
||||
|
@ -2343,6 +2398,14 @@ message InternalRedirectPolicy {
|
|||
// Allow internal redirect to follow a target URI with a different scheme than the value of
|
||||
// x-forwarded-proto. The default is false.
|
||||
bool allow_cross_scheme_redirect = 4;
|
||||
|
||||
// Specifies a list of headers, by name, to copy from the internal redirect into the subsequent
|
||||
// request. If a header is specified here but not present in the redirect, it will be cleared in
|
||||
// the subsequent request.
|
||||
repeated string response_headers_to_copy = 5 [(validate.rules).repeated = {
|
||||
unique: true
|
||||
items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
|
||||
}];
|
||||
}
|
||||
|
||||
// A simple wrapper for an HTTP filter config. This is intended to be used as a wrapper for the
|
||||
|
@ -2351,7 +2414,6 @@ message InternalRedirectPolicy {
|
|||
// :ref:`Route.typed_per_filter_config<envoy_v3_api_field_config.route.v3.Route.typed_per_filter_config>`,
|
||||
// or :ref:`WeightedCluster.ClusterWeight.typed_per_filter_config<envoy_v3_api_field_config.route.v3.WeightedCluster.ClusterWeight.typed_per_filter_config>`
|
||||
// to add additional flags to the filter.
|
||||
// [#not-implemented-hide:]
|
||||
message FilterConfig {
|
||||
// The filter config.
|
||||
google.protobuf.Any config = 1;
|
||||
|
@ -2360,4 +2422,20 @@ message FilterConfig {
|
|||
// not support the specified filter, it may ignore the map entry rather
|
||||
// than rejecting the config.
|
||||
bool is_optional = 2;
|
||||
|
||||
// If true, the filter is disabled in the route or virtual host and the ``config`` field is ignored.
|
||||
// See :ref:`route based filter chain <arch_overview_http_filters_route_based_filter_chain>`
|
||||
// for more details.
|
||||
//
|
||||
// .. note::
|
||||
//
|
||||
// This field will take effect when the request arrive and filter chain is created for the request.
|
||||
// If initial route is selected for the request and a filter is disabled in the initial route, then
|
||||
// the filter will not be added to the filter chain.
|
||||
// And if the request is mutated later and re-match to another route, the disabled filter by the
|
||||
// initial route will not be added back to the filter chain because the filter chain is already
|
||||
// created and it is too late to change the chain.
|
||||
//
|
||||
// This field only make sense for the downstream HTTP filters for now.
|
||||
bool disabled = 3;
|
||||
}
|
||||
|
|
|
@ -44,7 +44,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
// fragments:
|
||||
// - header_value_extractor:
|
||||
// name: X-Route-Selector
|
||||
// element_separator: ,
|
||||
// element_separator: ","
|
||||
// element:
|
||||
// separator: =
|
||||
// key: vip
|
||||
|
|
|
@ -4,6 +4,7 @@ package envoy.config.tap.v3;
|
|||
|
||||
import "envoy/config/common/matcher/v3/matcher.proto";
|
||||
import "envoy/config/core/v3/base.proto";
|
||||
import "envoy/config/core/v3/extension.proto";
|
||||
import "envoy/config/core/v3/grpc_service.proto";
|
||||
import "envoy/config/route/v3/route_components.proto";
|
||||
|
||||
|
@ -21,7 +22,7 @@ option java_multiple_files = true;
|
|||
option go_package = "github.com/envoyproxy/go-control-plane/envoy/config/tap/v3;tapv3";
|
||||
option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
||||
|
||||
// [#protodoc-title: Common tap configuration]
|
||||
// [#protodoc-title: Tap common configuration]
|
||||
|
||||
// Tap configuration.
|
||||
message TapConfig {
|
||||
|
@ -183,7 +184,7 @@ message OutputConfig {
|
|||
}
|
||||
|
||||
// Tap output sink configuration.
|
||||
// [#next-free-field: 6]
|
||||
// [#next-free-field: 7]
|
||||
message OutputSink {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.service.tap.v2alpha.OutputSink";
|
||||
|
@ -259,6 +260,9 @@ message OutputSink {
|
|||
// been configured to receive tap configuration from some other source (e.g., static
|
||||
// file, XDS, etc.) configuring the buffered admin output type will fail.
|
||||
BufferedAdminSink buffered_admin = 5;
|
||||
|
||||
// Tap output filter will be defined by an extension type
|
||||
core.v3.TypedExtensionConfig custom_sink = 6;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -2,6 +2,8 @@ syntax = "proto3";
|
|||
|
||||
package envoy.config.trace.v3;
|
||||
|
||||
import "google/protobuf/duration.proto";
|
||||
|
||||
import "udpa/annotations/migrate.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
|
@ -16,6 +18,13 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// [#protodoc-title: Datadog tracer]
|
||||
|
||||
// Configuration for the Remote Configuration feature.
|
||||
message DatadogRemoteConfig {
|
||||
// Frequency at which new configuration updates are queried.
|
||||
// If no value is provided, the default value is delegated to the Datadog tracing library.
|
||||
google.protobuf.Duration polling_interval = 1;
|
||||
}
|
||||
|
||||
// Configuration for the Datadog tracer.
|
||||
// [#extension: envoy.tracers.datadog]
|
||||
message DatadogConfig {
|
||||
|
@ -27,4 +36,15 @@ message DatadogConfig {
|
|||
|
||||
// The name used for the service when traces are generated by envoy.
|
||||
string service_name = 2 [(validate.rules).string = {min_len: 1}];
|
||||
|
||||
// Optional hostname to use when sending spans to the collector_cluster. Useful for collectors
|
||||
// that require a specific hostname. Defaults to :ref:`collector_cluster <envoy_v3_api_field_config.trace.v3.DatadogConfig.collector_cluster>` above.
|
||||
string collector_hostname = 3;
|
||||
|
||||
// Enables and configures remote configuration.
|
||||
// Remote Configuration allows to configure the tracer from Datadog's user interface.
|
||||
// This feature can drastically increase the number of connections to the Datadog Agent.
|
||||
// Each tracer regularly polls for configuration updates, and the number of tracers is the product
|
||||
// of the number of listeners and worker threads.
|
||||
DatadogRemoteConfig remote_config = 4;
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@ package envoy.config.trace.v3;
|
|||
|
||||
import "google/protobuf/struct.proto";
|
||||
|
||||
import "envoy/annotations/deprecation.proto";
|
||||
import "udpa/annotations/migrate.proto";
|
||||
import "udpa/annotations/status.proto";
|
||||
import "udpa/annotations/versioning.proto";
|
||||
|
@ -29,9 +30,18 @@ message DynamicOtConfig {
|
|||
|
||||
// Dynamic library implementing the `OpenTracing API
|
||||
// <https://github.com/opentracing/opentracing-cpp>`_.
|
||||
string library = 1 [(validate.rules).string = {min_len: 1}];
|
||||
string library = 1 [
|
||||
deprecated = true,
|
||||
(validate.rules).string = {min_len: 1},
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
|
||||
// The configuration to use when creating a tracer from the given dynamic
|
||||
// library.
|
||||
google.protobuf.Struct config = 2;
|
||||
google.protobuf.Struct config = 2 [
|
||||
deprecated = true,
|
||||
(envoy.annotations.deprecated_at_minor_version) = "3.0",
|
||||
(envoy.annotations.disallowed_by_default) = true
|
||||
];
|
||||
}
|
||||
|
|
|
@ -22,6 +22,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
|
|||
|
||||
// Configuration for the LightStep tracer.
|
||||
// [#extension: envoy.tracers.lightstep]
|
||||
// [#not-implemented-hide:]
|
||||
message LightstepConfig {
|
||||
option (udpa.annotations.versioning).previous_message_type =
|
||||
"envoy.config.trace.v2.LightstepConfig";
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue