Compare commits
276 Commits
Author | SHA1 | Date |
---|---|---|
|
4eed00df58 | |
|
a8268d17fa | |
|
d4fe97995f | |
|
ef0ea1562a | |
|
5defc46ad7 | |
|
93c35805c8 | |
|
70d98c7d85 | |
|
40c6533df0 | |
|
4afd518035 | |
|
02002c6385 | |
|
07ceb652e4 | |
|
ae35913d9e | |
|
c65c2e5307 | |
|
64ee0aca3b | |
|
4170ebee42 | |
|
4743971a7b | |
|
5443169a57 | |
|
59e80c12f8 | |
|
8e9fbd1a66 | |
|
115d4a269a | |
|
b56a4ef20a | |
|
eb240c0032 | |
|
ae61c8748f | |
|
153cf463ad | |
|
56cdd22f8b | |
|
70a9e98ac9 | |
|
45f4855d28 | |
|
2181049316 | |
|
d510ef3ba5 | |
|
c463257976 | |
|
c4d3c072b8 | |
|
fd2b3f0ace | |
|
c8323c1a4a | |
|
229ee6b774 | |
|
b0018dac43 | |
|
56aefe0f89 | |
|
b763a21337 | |
|
d8bd8bfc68 | |
|
05e983628b | |
|
d1a05a362e | |
|
26b6e8af13 | |
|
2921284522 | |
|
f82d24b3c3 | |
|
41fa4dae5d | |
|
de54d9a4f6 | |
|
e9c1b4adbc | |
|
d20c551ac7 | |
|
424e3b2aef | |
|
cf443454ed | |
|
9d1d6dd78a | |
|
a4e27628f7 | |
|
425649e585 | |
|
f9bb82709a | |
|
c2d2a63433 | |
|
851f403549 | |
|
8f8b758abb | |
|
8660f92cea | |
|
5ac77e48ad | |
|
155f437679 | |
|
1003b1360c | |
|
6bb927837c | |
|
1ca6270620 | |
|
f41d86f64e | |
|
fb674b90c8 | |
|
cdd7484e9c | |
|
f93292b939 | |
|
659c4a3eda | |
|
6fe01012cd | |
|
9a31988600 | |
|
e9c6c92dc1 | |
|
74d68750d8 | |
|
148a656ddf | |
|
13ace9f76b | |
|
c621c1de44 | |
|
c0a31023f5 | |
|
918947d2a0 | |
|
cc69e5dbed | |
|
72d724f521 | |
|
c47f41ebaa | |
|
c3e216b1bf | |
|
a4ded2021b | |
|
41764e626a | |
|
e8abd473c9 | |
|
b30e532d45 | |
|
c102726595 | |
|
3eef05f1d0 | |
|
29da1cb1ad | |
|
2c719ce2dc | |
|
0911a041ea | |
|
f3c3b99141 | |
|
3fcc2edce6 | |
|
82d885c213 | |
|
2aff3ed56a | |
|
7ad7cb026a | |
|
2c62309f54 | |
|
731dfc54a0 | |
|
fc5730294d | |
|
fda9fecbae | |
|
af3459e03f | |
|
7344aa2d12 | |
|
eaecd4e7d6 | |
|
43f2fad7a1 | |
|
282c381290 | |
|
565f0c2d82 | |
|
06a2b64409 | |
|
e6b395a57d | |
|
8204df8bb8 | |
|
ea84edde6b | |
|
bf0d11a94a | |
|
85a8f9fbaf | |
|
501ff2c4de | |
|
454b19164c | |
|
0d4aee0587 | |
|
a269dd6c86 | |
|
635aa64911 | |
|
ffd50e865c | |
|
54badb9d35 | |
|
03eeee3dc5 | |
|
73ee0ec794 | |
|
3d73c5ac1a | |
|
fd0ee74b86 | |
|
9dcd5a22f8 | |
|
c44c092a9a | |
|
dc045e4d36 | |
|
5e2daa4425 | |
|
bb9e8cfc2e | |
|
070c7f517c | |
|
1b36cfc207 | |
|
3ae068dea5 | |
|
6d7dcbe736 | |
|
609f3570d9 | |
|
d7bbffe0c8 | |
|
1d8391e99d | |
|
b7da08812c | |
|
cd85b036c3 | |
|
3adbdd8408 | |
|
2cae96665d | |
|
b97ad6abc6 | |
|
38e61d9837 | |
|
d9139d3ef3 | |
|
0fffd11b29 | |
|
c3f2672708 | |
|
ed99eac6af | |
|
0ec48001ad | |
|
951ec99c39 | |
|
42b2f9a916 | |
|
cd686d649c | |
|
2e13e7a87f | |
|
1ca875da9f | |
|
7d65ef441d | |
|
7cec32aacc | |
|
3eff7f3f34 | |
|
641dc2f324 | |
|
8b60fb9d58 | |
|
339e40094c | |
|
661f8cc273 | |
|
1969448406 | |
|
e2ab3597f0 | |
|
dbdd9b9b25 | |
|
375e380091 | |
|
4551cbe7a8 | |
|
006385cc7f | |
|
8265598001 | |
|
5f7ed55aa1 | |
|
ea928b6ca7 | |
|
b943a5d012 | |
|
5652b0e4fe | |
|
a7b592562b | |
|
99ba64dc3a | |
|
720d6b21aa | |
|
59b41a2bfe | |
|
291b134f64 | |
|
271f7494cc | |
|
8f214b53f6 | |
|
3c02ecc0cc | |
|
02d4858fc9 | |
|
0fbd538798 | |
|
de1f666dd2 | |
|
c03983be81 | |
|
8ec160fa55 | |
|
802ba7ec0f | |
|
2884e47de4 | |
|
e332e81a58 | |
|
8408d0501a | |
|
0e5d56f4a6 | |
|
bfcdf48c00 | |
|
211038d26e | |
|
238c15bffc | |
|
f27ae2a2f2 | |
|
4619ed1597 | |
|
b1c63e6884 | |
|
23a4e1c56d | |
|
21faff977f | |
|
c5583e8ab1 | |
|
4dda5990a6 | |
|
f56e2a71a9 | |
|
16b7f8b9c2 | |
|
d7ac9e1f74 | |
|
2976fbb8fa | |
|
2a065bf08c | |
|
ae236810c5 | |
|
08d88617d4 | |
|
76e6a6e88e | |
|
8b8a56616c | |
|
291c6d87ed | |
|
4904e0ea65 | |
|
7024ba05dc | |
|
922fef7c0c | |
|
cc86774745 | |
|
e3cee1fee6 | |
|
1ccbb1c651 | |
|
619f45bb02 | |
|
079c728c92 | |
|
44d6f887bc | |
|
a13052a91b | |
|
2e7b8af233 | |
|
708eb93bdd | |
|
985b8ba468 | |
|
c09ac92ae4 | |
|
71345fc4cf | |
|
f0e3dd377b | |
|
1695f763c7 | |
|
882258394f | |
|
5c70b3550f | |
|
3680ce1a11 | |
|
6ea1446f5c | |
|
8dbfe6d907 | |
|
a53f6e6ba0 | |
|
1aa7c66bed | |
|
c65f93c714 | |
|
efe2a77092 | |
|
c4404b42f9 | |
|
eac5c9230f | |
|
6df3d12d94 | |
|
6b10a97c94 | |
|
59e0fac72e | |
|
31d08aa80d | |
|
296a69db84 | |
|
843d3f2d0c | |
|
e20b14aaf4 | |
|
4ffa1a4f7a | |
|
5b92dce6f2 | |
|
786184287e | |
|
51467682c6 | |
|
8d1d604a30 | |
|
ea37e2c569 | |
|
42e47893a2 | |
|
d3131dea6c | |
|
56df18c371 | |
|
2b2e817927 | |
|
ff9b2ded7f | |
|
32c4d7d0d4 | |
|
559e2324f2 | |
|
b62bf34006 | |
|
9f89def18e | |
|
4080f8167d | |
|
be9cf1f8e1 | |
|
ebe413439e | |
|
9e33bb9639 | |
|
ca81365d88 | |
|
78df02d414 | |
|
7d16e9f402 | |
|
a1a9507fec | |
|
4cec217724 | |
|
81e35ad5e7 | |
|
4d941b1eef | |
|
926c480fc4 | |
|
17a9fc7dae | |
|
f71f59d826 | |
|
11c1ecde59 | |
|
f03a356f6f | |
|
1adb7e67c0 | |
|
10cd92a84c | |
|
c80d76f593 | |
|
ac541f931b | |
|
55110e498d |
|
@ -0,0 +1,41 @@
|
|||
name: Report an issue with registry.k8s.io
|
||||
description: Report a bug encountered while using registry.k8s.io
|
||||
labels: ["sig/k8s-infra", "kind/support", "needs/triage"]
|
||||
body:
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Is there an existing issue for this?
|
||||
description: Please search to see if an issue already exists for the bug you encountered.
|
||||
options:
|
||||
- label: I have searched the existing issues
|
||||
required: true
|
||||
- type: textarea
|
||||
id: what-expected
|
||||
attributes:
|
||||
label: What did you expect to happen?
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: Debugging Information
|
||||
description: >-
|
||||
Bugs **MUST** include debugging logs from general steps in https://github.com/kubernetes/registry.k8s.io/blob/main/docs/debugging.md.
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Anything else?
|
||||
description: |
|
||||
Links? References? Anything that will provide more context about the issue you are encountering!
|
||||
|
||||
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
|
||||
validations:
|
||||
required: false
|
||||
- type: checkboxes
|
||||
id: terms
|
||||
attributes:
|
||||
label: Code of Conduct
|
||||
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/kubernetes/community/blob/master/code-of-conduct.md)
|
||||
options:
|
||||
- label: I agree to follow this project's Code of Conduct
|
||||
required: true
|
|
@ -1 +1 @@
|
|||
1.19.2
|
||||
1.22.5
|
||||
|
|
23
Makefile
23
Makefile
|
@ -45,10 +45,13 @@ SHELL:=env PATH=$(subst $(SPACE),\$(SPACE),$(PATH)) $(SHELL)
|
|||
# ============================== OPTIONS =======================================
|
||||
# the output binary name, overridden when cross compiling
|
||||
ARCHEIO_BINARY_NAME?=archeio
|
||||
GERANOS_BINARY_NAME?=geranos
|
||||
# build flags for the archeio binary
|
||||
# - reproducible builds: -trimpath
|
||||
# - smaller binaries: -w (trim debugger data, but not panics)
|
||||
ARCHEIO_BUILD_FLAGS?=-trimpath -ldflags="-w"
|
||||
GO_BUILD_FLAGS?=-trimpath -ldflags="-w"
|
||||
ARCHEIO_BUILD_FLAGS?=$(GO_BUILD_FLAGS)
|
||||
GERANOS_BUILD_FLAGS?=$(GO_BUILD_FLAGS)
|
||||
################################################################################
|
||||
# ================================= Building ===================================
|
||||
# standard "make" target -> builds
|
||||
|
@ -56,8 +59,11 @@ all: build
|
|||
# builds archeio, outputs to $(OUT_DIR)
|
||||
archeio:
|
||||
go build -v -o "$(OUT_DIR)/$(ARCHEIO_BINARY_NAME)" $(ARCHEIO_BUILD_FLAGS) ./cmd/archeio
|
||||
# alias for building archeio
|
||||
build: archeio
|
||||
# builds geranos, outputs to $(OUT_DIR)
|
||||
geranos:
|
||||
go build -v -o "$(OUT_DIR)/$(GERANOS_BINARY_NAME)" $(GERANOS_BUILD_FLAGS) ./cmd/geranos
|
||||
# alias for building binaries
|
||||
build: archeio geranos
|
||||
# build images to local tarball
|
||||
images:
|
||||
hack/make-rules/images.sh
|
||||
|
@ -79,9 +85,16 @@ unit:
|
|||
# integration tests
|
||||
integration:
|
||||
MODE=integration hack/make-rules/test.sh
|
||||
# all tests
|
||||
# unit + integration tests
|
||||
test:
|
||||
hack/make-rules/test.sh
|
||||
# e2e tests
|
||||
e2e-test:
|
||||
hack/make-rules/e2e-test.sh
|
||||
# e2e tests, but against a local instance instead of staging
|
||||
# useful for developing the tests if staging is broken, otherwise use e2e-test
|
||||
e2e-test-local:
|
||||
hack/make-rules/e2e-test-local.sh
|
||||
################################################################################
|
||||
# ================================= Cleanup ====================================
|
||||
# standard cleanup target
|
||||
|
@ -112,4 +125,4 @@ lint:
|
|||
shellcheck:
|
||||
hack/make-rules/shellcheck.sh
|
||||
#################################################################################
|
||||
.PHONY: all archeio build unit integration clean update gofmt verify verify-generated lint shellcheck
|
||||
.PHONY: all archeio geranos build unit integration test e2e-test clean update gofmt verify verify-generated lint shellcheck
|
||||
|
|
55
README.md
55
README.md
|
@ -3,11 +3,46 @@
|
|||
This project implements the backend for registry.k8s.io, Kubernetes's container
|
||||
image registry.
|
||||
|
||||
For more details on the implementation see [cmd/archeio](./cmd/archeio/README.md)
|
||||
Known user-facing issues will be pinned at the top of [our issue tracker][issues].
|
||||
|
||||
For details on the implementation see [cmd/archeio](./cmd/archeio/README.md)
|
||||
|
||||
The community deployment configs are documented at in the k8s.io repo with
|
||||
the rest of the community infra deployments:
|
||||
https://github.com/kubernetes/k8s.io/tree/main/registry.k8s.io
|
||||
the rest of the community infra deployments, but primarily
|
||||
[here][infra-configs].
|
||||
|
||||
For publishing to registry.k8s.io, refer to [the docs][publishing] at in k8s.io
|
||||
under `registry.k8s.io/`.
|
||||
|
||||
## Stability
|
||||
|
||||
registry.k8s.io is GA and we ask that all users migrate from k8s.gcr.io as
|
||||
soon as possible.
|
||||
|
||||
However, unequivocally: **DO NOT depend on the implementation details of this registry.**
|
||||
|
||||
**Please note that there is NO uptime SLA as this is a free, volunteer managed
|
||||
service**. We will however do our best to respond to issues and the system is
|
||||
designed to be reliable and low-maintenance. If you need higher uptime guarantees
|
||||
please consider [mirroring] images to a location you control.
|
||||
|
||||
**Other than `registry.k8s.io` serving an [OCI][distribution-spec] compliant registry:
|
||||
API endpoints, IP addresses, and backing services used
|
||||
are subject to change at _anytime_ as new resources become available or as otherwise
|
||||
necessary.**
|
||||
|
||||
**If you need to allow-list domains or IPs in your environment, we highly recommend
|
||||
[mirroring] images to a location you control instead.**
|
||||
|
||||
The Kubernetes project is currently sending traffic to GCP and AWS
|
||||
thanks to their donations but we hope to redirect traffic to more
|
||||
sponsors and their respective API endpoints in the future to keep the project
|
||||
sustainable.
|
||||
|
||||
See Also:
|
||||
- Pinned issues in our [our issue tracker][issues]
|
||||
- Our [debugging guide][debugging] for identifying and resolving or reporting issues
|
||||
- Our [mirroring guide][mirroring] for how to mirror and use mirrored Kubernetes images
|
||||
|
||||
## Privacy
|
||||
|
||||
|
@ -19,8 +54,8 @@ https://registry.k8s.io/privacy
|
|||
Previously all of Kubernetes' image hosting has been out of gcr.io ("Google Container Registry").
|
||||
|
||||
We've incurred significant egress traffic costs from users on other cloud providers
|
||||
in particular in doing so, severely limiting our ability to use the infra budget
|
||||
for purposes other than hosting end-user downloads.
|
||||
in particular in doing so, severely limiting our ability to use the
|
||||
GCP credits from Google for purposes other than hosting end-user downloads.
|
||||
|
||||
We're now moving to shift all traffic behind a community controlled domain, so
|
||||
we can quickly implement cost-cutting measures like serving the bulk of the traffic
|
||||
|
@ -28,10 +63,12 @@ for AWS-users from AWS-local storage funded by Amazon, or potentially leveraging
|
|||
other providers in the future.
|
||||
|
||||
For additional context on why we did this and what we're changing about kubernetes images
|
||||
see https://github.com/kubernetes/k8s.io/wiki/New-Registry-url-for-Kubernetes-(registry.k8s.io)
|
||||
see: https://kubernetes.io/blog/2022/11/28/registry-k8s-io-faster-cheaper-ga
|
||||
|
||||
Essentially, this repo implements the backend sources for the steps outlined there.
|
||||
|
||||
For a talk with more details see: ["Why We Moved the Kubernetes Image Registry"](https://www.youtube.com/watch?v=9CdzisDQkjE)
|
||||
|
||||
## Community, discussion, contribution, and support
|
||||
|
||||
Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
|
||||
|
@ -47,3 +84,9 @@ Participation in the Kubernetes community is governed by the [Kubernetes Code of
|
|||
|
||||
[owners]: https://git.k8s.io/community/contributors/guide/owners.md
|
||||
[Creative Commons 4.0]: https://git.k8s.io/website/LICENSE
|
||||
[distribution-spec]: https://github.com/opencontainers/distribution-spec
|
||||
[publishing]: https://git.k8s.io/k8s.io/registry.k8s.io#managing-kubernetes-container-registries
|
||||
[infra-configs]: https://github.com/kubernetes/k8s.io/tree/main/infra/gcp/terraform
|
||||
[mirroring]: ./docs/mirroring/README.md
|
||||
[debugging]: ./docs/debugging.md
|
||||
[issues]: https://github.com/kubernetes/registry.k8s.io/issues
|
||||
|
|
|
@ -15,3 +15,4 @@ bentheelder
|
|||
dims
|
||||
spiffxp
|
||||
thockin
|
||||
hh
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
# See https://cloud.google.com/cloud-build/docs/build-config
|
||||
options:
|
||||
machineType: E2_HIGHCPU_8
|
||||
steps:
|
||||
- id: build-image
|
||||
name: gcr.io/cloud-builders/docker
|
||||
|
@ -6,14 +8,43 @@ steps:
|
|||
args:
|
||||
- "push-images"
|
||||
- "TAG=$_GIT_TAG"
|
||||
- id: clone-k8s.io
|
||||
name: gcr.io/cloud-builders/git
|
||||
entrypoint: git
|
||||
args:
|
||||
- clone
|
||||
- --filter=tree:0
|
||||
- https://github.com/kubernetes/k8s.io
|
||||
- /k8s.io
|
||||
volumes:
|
||||
# make deploy assumes k8s.io will be at ./../k8s.io
|
||||
# default working dir in cloudbuild is /workspace
|
||||
- name: 'k8sio'
|
||||
path: '/k8s.io'
|
||||
# run immediately
|
||||
waitFor: ['-']
|
||||
- id: deploy-staging
|
||||
name: "gcr.io/google.com/cloudsdktool/cloud-sdk"
|
||||
name: "gcr.io/k8s-staging-infra-tools/k8s-infra:v20220912-7d7ed3258@sha256:48fb967be4c36da551584c3004330c7ce37568e4226ea7233eeb08c979374bc6"
|
||||
entrypoint: "/usr/bin/make"
|
||||
volumes:
|
||||
- name: 'k8sio'
|
||||
path: '/k8s.io'
|
||||
args:
|
||||
- "deploy"
|
||||
- "TAG=$_GIT_TAG"
|
||||
- "CLOUDBUILD_SET_PROJECT=k8s-infra-oci-proxy"
|
||||
waitFor:
|
||||
- build-image
|
||||
- clone-k8s.io
|
||||
# run quick e2e-tests immediately following deployment, aside from the assorted
|
||||
# testgrid reported periodic results
|
||||
- id: test-staging
|
||||
name: "gcr.io/k8s-staging-infra-tools/k8s-infra:v20220912-7d7ed3258@sha256:48fb967be4c36da551584c3004330c7ce37568e4226ea7233eeb08c979374bc6"
|
||||
entrypoint: "/usr/bin/make"
|
||||
args:
|
||||
- "e2e-test"
|
||||
waitFor:
|
||||
- deploy-staging
|
||||
substitutions:
|
||||
# variables set by kubernetes/test-infra/images/builder
|
||||
# set by image-builder to vYYYYMMDD-hash
|
||||
|
|
|
@ -7,14 +7,6 @@ OCI artifact ("docker image") hosting.
|
|||
|
||||
Current design details will be detailed here as they mature.
|
||||
|
||||
The original design doc is shared with members of
|
||||
[dev@kubernetes.io](https://groups.google.com/a/kubernetes.io/g/dev),
|
||||
anyone can join this list and gain access to read
|
||||
[the document](https://docs.google.com/document/d/1yNQ7DaDE5LbDJf9ku82YtlKZK0tcg5Wpk9L72-x2S2k/).
|
||||
It is not accessible to accounts that are not members of the Kubernetes mailinglist
|
||||
due to organization constraints and joining the list is the most reliable way to gain
|
||||
access. See https://git.k8s.io/community/community-membership.md
|
||||
|
||||
For more current details see also: https://github.com/kubernetes/k8s.io/wiki/New-Registry-url-for-Kubernetes-(registry.k8s.io)
|
||||
|
||||
**NOTE**: The code in this binary is **not** intended to be fully reusable,
|
||||
|
@ -23,28 +15,81 @@ Kubernetes SIG K8s-Infra's needs. However, some of the packages under
|
|||
[`pkg/`](./../../pkg/) may be useful if you have a similar problem,
|
||||
and they should be pretty generalized and re-usable.
|
||||
|
||||
Please also see the main repo README and in particular [the "stability" note](../../README.md#stability).
|
||||
|
||||
-----
|
||||
|
||||
For a rough TLDR of the current design:
|
||||
|
||||
- Images are hosted primarily in the existing Kubernetes [GCR](https://gcr.io/) registry
|
||||
- Mirrors *of content* blobs are hosted in S3 buckets in AWS
|
||||
- AWS clients are detected by client IP address and redirect to a local S3 bucket copy
|
||||
*only* when requesting content blobs, *not* manifests, manifest lists, tags etc.
|
||||
- All other requests are redirected to the original upstream registry
|
||||
- Images are hosted primarily in [Artifact Registry][artifact-registry] instances as the source of truth
|
||||
- Why AR?
|
||||
- Kubernetes has non-trivial tooling for managing, securing, monitoring etc. of our registries using GCP APIs that fill gaps in the OCI distribution spec, and otherwise allow synchronization and discovery of content, notably https://github.com/opencontainers/distribution-spec/issues/222
|
||||
- We have directly migrated all of this infrastructure from GCR (previously k8s.gcr.io) to AR with ~no code changes
|
||||
- Until recently our infrastructure funding has primarily been GCP (now matched by AWS) and GCP remains a significant source of funding
|
||||
- Mirrors *of content-addressed* layers are hosted in S3 buckets in AWS
|
||||
- Why mirror only [Content-Addresed][content-addressed] Image APIs?
|
||||
- Image Layers (which are content-addressed) in particular are the vast majority of bandwidth == costs. Serving them from one cloud to another is expensive.
|
||||
- Content Addressed APIs are relatively safe to serve from untrusted or less-secured hosts, since all major clients confirming the result matches the requested digest
|
||||
- We detect client IP address and match it to Cloud Providers we use in order to serve content-addressed API calls from the most local and cost-effective copy
|
||||
- Other API calls (getting and listing tags etc) are redirected to the regional upstream source-of-truth Artifact Registries
|
||||
|
||||
This allows us to offload substantial bandwidth securely, while not having to fully
|
||||
implement a registry from scratch and maintaining the project's existing security
|
||||
controls around the GCP source registries (implemented elsewhere in the Kubernetes project).
|
||||
We only re-route some content-addressed storage requests to additional hosts.
|
||||
|
||||
Clients do still need to either pull by digest (`registry.k8s.io/foo@sha256:...`),
|
||||
verify sigstore signatures, or else trust that the redirector instance is secure,
|
||||
but not the S3 buckets or additional future content-addressed storage hosts.
|
||||
|
||||
We maintain relatively tight control over the production redirector instance and
|
||||
the source registries. Very few contributors have access to this infrastructure.
|
||||
|
||||
We have a development instance at https://registry-sandbox.k8s.io which is
|
||||
*not* supported for any usage outside of the development of this project and
|
||||
may or may not be working at any given time.
|
||||
Changes will be deployed there before we deploy to production, and be exercised
|
||||
by a subset of Kubernetes' own CI.
|
||||
|
||||
Mirroring content-addressed content to object storage is currently handled by [`cmd/geranos`](./../geranos).
|
||||
|
||||
For more detail see:
|
||||
- [docs/request-handling.md](./docs/request-handling.md)
|
||||
- [docs/testing.md](./docs/testing.md)
|
||||
- How requests are handled: [docs/request-handling.md](./docs/request-handling.md)
|
||||
- How we test registry.k8s.io changes: [docs/testing.md](./docs/testing.md)
|
||||
- For IP matching info for both AWS and GCP ranges: [`pkg/net/cloudcidrs`](./../../pkg/net/cloudcidrs)
|
||||
|
||||
In addition, in order to get the registry.k8s.io domain in place, initially this
|
||||
binary is *only* serving the trivial redirect to the existing registry
|
||||
(https://k8s.gcr.io), so we can safely move users / clients to the new domain
|
||||
that will eventually serve the more complex version.
|
||||
----
|
||||
|
||||
Development is at https://registry-sandbox.k8s.io which is *not* supported for
|
||||
any usage outside of the development of this project and may or may not be
|
||||
working at any given time. Changes will be deployed there before we deploy
|
||||
to production, and be exercised by a subset of Kubernetes' own CI.
|
||||
Historical Context:
|
||||
|
||||
For AWS client-IP matching, see [`pkg/net/cidrs/aws`](./../../pkg/net/cidrs/aws)
|
||||
**You must join one of the open community mailinglists below to access the original design doc.**
|
||||
|
||||
The original design doc is shared with members of
|
||||
[dev@kubernetes.io](https://groups.google.com/a/kubernetes.io/g/dev),
|
||||
anyone can join this list and gain access to read
|
||||
[the document](https://docs.google.com/document/d/1yNQ7DaDE5LbDJf9ku82YtlKZK0tcg5Wpk9L72-x2S2k/).
|
||||
It is not accessible to accounts that are not members of the Kubernetes mailinglist
|
||||
due to organization constraints and joining the list is the **only** way to gain
|
||||
access. See https://git.k8s.io/community/community-membership.md
|
||||
|
||||
It is not fully reflective of the current design anyhow, but some may find it
|
||||
interesting.
|
||||
|
||||
Originally the project primarily needed to take advantage of an offer from Amazon
|
||||
to begin paying for AWS user traffic, which was the majority of our traffic and
|
||||
cost a lot due to high amounts of egress traffic between GCP<>AWS.
|
||||
|
||||
In addition, in order to get the registry.k8s.io domain in place, initially we
|
||||
only served a trivial redirect to the existing registry
|
||||
(https://k8s.gcr.io), so we could safely start to move users / clients to the new domain
|
||||
that would eventually serve the more complex version.
|
||||
|
||||
Since then we've redesigned a bit to make populating content into AWS async and
|
||||
not blocked on the image promoter, as well as extending our Geo-routing approach
|
||||
to detect and route users on dimensions other than "is a known AWS IP in a known AWS region".
|
||||
|
||||
More changes will come in the future, and these implementation details while documented
|
||||
**CANNOT** be depended on.
|
||||
|
||||
[artifact-registry]: https://cloud.google.com/artifact-registry
|
||||
[content-addressed]: https://en.wikipedia.org/wiki/Content-addressable_storage
|
||||
|
|
|
@ -1,82 +0,0 @@
|
|||
//go:build !nointegration
|
||||
// +build !nointegration
|
||||
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package app
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCachedBlobChecker(t *testing.T) {
|
||||
bucket := awsRegionToS3URL("us-east-1")
|
||||
blobs := newCachedBlobChecker()
|
||||
testCases := []struct {
|
||||
Name string
|
||||
BlobURL string
|
||||
Bucket string
|
||||
HashKey string
|
||||
ExpectExists bool
|
||||
}{
|
||||
{
|
||||
Name: "known bucket entry",
|
||||
BlobURL: bucket + "/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
Bucket: bucket,
|
||||
HashKey: "3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
ExpectExists: true,
|
||||
},
|
||||
{
|
||||
Name: "known bucket, bad entry",
|
||||
Bucket: bucket,
|
||||
BlobURL: bucket + "/c0ntainers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
ExpectExists: false,
|
||||
},
|
||||
{
|
||||
Name: "bogus bucket on domain without webserver",
|
||||
Bucket: "http://bogus.k8s.io/",
|
||||
BlobURL: "http://bogus.k8s.io/foo",
|
||||
HashKey: "b0guS",
|
||||
ExpectExists: false,
|
||||
},
|
||||
}
|
||||
// run test cases in parallel and then serial
|
||||
// this populates the cache on the first run while doing parallel testing
|
||||
// and allows us to check cached behavior on the second run
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
url := tc.BlobURL
|
||||
exists := blobs.BlobExists(url, tc.Bucket, tc.HashKey)
|
||||
if exists != tc.ExpectExists {
|
||||
t.Fatalf("expected: %v but got: %v", tc.ExpectExists, exists)
|
||||
}
|
||||
})
|
||||
}
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
url := tc.BlobURL
|
||||
exists := blobs.BlobExists(url, tc.Bucket, tc.HashKey)
|
||||
if exists != tc.ExpectExists {
|
||||
t.Fatalf("expected: %v but got: %v", tc.ExpectExists, exists)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
|
@ -1,38 +0,0 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package app
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/registry.k8s.io/pkg/net/cidrs/aws"
|
||||
)
|
||||
|
||||
func TestRegionToAWSRegionToS3URL(t *testing.T) {
|
||||
// ensure all known regions return a configured bucket
|
||||
regions := aws.Regions()
|
||||
for region := range regions {
|
||||
url := awsRegionToS3URL(region)
|
||||
if url == "" {
|
||||
t.Fatalf("received empty string for known region %q url", region)
|
||||
}
|
||||
}
|
||||
// ensure bogus region would return "" so we know above test is valid
|
||||
if url := awsRegionToS3URL("nonsensical-region"); url != "" {
|
||||
t.Fatalf("received non-empty URL string for made up region \"nonsensical-region\": %q", url)
|
||||
}
|
||||
}
|
|
@ -2,17 +2,19 @@
|
|||
|
||||
Requests to archeio follows the following flow:
|
||||
|
||||
1. If it's a request for `/`: redirect to our wiki page about the project
|
||||
1. If it's a request for `/privacy`: redirect to linux foundation privacy policy page
|
||||
1. If it's a request for `/`: Redirect to our wiki page about the project
|
||||
1. If it's a request for `/privacy`: Redirect to Linux Foundation privacy policy page
|
||||
1. If it's not a request for `/` or `/privacy` and does not start with `/v2/`: 404 error
|
||||
1. For registry API requests, all of which start with `/v2/`:
|
||||
- If it's a manifest request: redirect to Upstream Registry
|
||||
- If it's not a known AWS IP: redirect to Upstream Registry
|
||||
- If it's a known AWS IP AND HEAD request for the layer succeeeds in S3: redirect to S3
|
||||
- If it's a known AWS IP AND HEAD fails: redirect to Upstream Registry
|
||||
1. OCI Distribution [Specification](https://github.com/opencontainers/distribution-spec/blob/main/spec.md)
|
||||
- If it's a non-standard API call (`/v2/_catalog`): 404 error
|
||||
- If it's a manifest request: Redirect to Upstream Registry
|
||||
- If it's from a known GCP IP: Redirect to Upstream Registry
|
||||
- If it's a known AWS IP AND HEAD request for the layer succeeeds in S3: Redirect to S3
|
||||
- If it's a known AWS IP AND HEAD fails: Redirect to Upstream Registry
|
||||
|
||||
Currently the `Upstream Registry` is https://k8s.gcr.io.
|
||||
See also: OCI Distribution [Specification](https://github.com/opencontainers/distribution-spec/blob/main/spec.md)
|
||||
|
||||
Currently the `Upstream Registry` is a region specific Artifact Registry backend.
|
||||
|
||||
Or in chart form:
|
||||
```mermaid
|
||||
|
@ -21,13 +23,18 @@ flowchart TD
|
|||
A(Does the request path start with /v2/?) -->|No, it is not a registry API call| B(Is the request for /?)
|
||||
B -->|No| D[Is the request for /privacy?]
|
||||
D -->|No, it is an unknown path| C[Serve 404 error]
|
||||
D -->|Yes| K[Serve redirect to linux foundation privacy policy page]
|
||||
D -->|Yes| K[Serve redirect to Linux Foundation privacy policy page]
|
||||
B -->|Yes| E[Serve redirect to registry wiki page]
|
||||
A -->|Yes, it is a registry API call| F(Is it a blob request?)
|
||||
F -->|No| G[Serve redirect to Upstream Registry https://k8s.gcr.io]
|
||||
F -->|Yes, it matches known blob request format| H(Is the client IP known to be from AWS?)
|
||||
H -->|No| G
|
||||
H -->|Yes| I(Does the blob exist in S3?<br/>Check by way of cached HEAD on the bucket we've selected based on client IP.)
|
||||
A -->|Yes, it is a registry API call| L(Is it an OCI Distribution Standard API Call?)
|
||||
L -->|No, it is a non-standard API call.<br>Currently: `/v2/_catalog`.| M[Serve 404 error]
|
||||
L -->|Yes, it is a standard API call| F(Is it a blob request?)
|
||||
F -->|No| G[Serve redirect to Source Registry on GCP]
|
||||
F -->|Yes, it matches known blob request format| H(Is the client IP known to be from GCP?)
|
||||
H -->|Yes| G
|
||||
H -->|No| I(Does the blob exist in S3?<br/>Check by way of cached HEAD on the bucket we've selected based on client IP.)
|
||||
I -->|No| G
|
||||
I -->|Yes| J[Redirect to blob copy in S3]
|
||||
```
|
||||
|
||||
This allows us to efficiently serve traffic in the most local copy available
|
||||
based on the cloud resource funding the Kubernetes project receives.
|
||||
|
|
|
@ -9,12 +9,13 @@ These are standard Go unit tests. In addition to typical unit tests with granula
|
|||
methods, we also have unit tests covering the HTTP Handlers and full
|
||||
[request handling flow](./request-handling.md).
|
||||
|
||||
These tests run on every pull request and must pass before merge.
|
||||
|
||||
**This level of coverage must be maintained**, it is imperative that we have robust
|
||||
testing in this project that may soon serve all Kubernetes project image downloads.
|
||||
TODO: this should be enforced by CI. Currently it is enforced by reviewers.
|
||||
We automatically enforce 100% code coverage for archeio sources.
|
||||
|
||||
Coverage results are visible by clicking the `pull-oci-proxy-test` context link
|
||||
Coverage results are visible by clicking the `pull-registry-test` context link
|
||||
at the bottom of the pull request.
|
||||
|
||||
Coverage results can be viewed locally by `make test` + open `bin/all-filtered.html`.
|
||||
|
@ -31,12 +32,35 @@ through a running instance using [crane].
|
|||
|
||||
`make integration` runs only integration tests.
|
||||
|
||||
Because our CI runs primarily in GCP, this currently covers pulling from outside AWS.
|
||||
The integration tests are able to exploit running against a local instance without
|
||||
a loadbalancer etc in front and fake the client IP address to test provider-IP
|
||||
codepaths.
|
||||
|
||||
These tests run on every pull request in `pull-registry-test` and must pass before merge.
|
||||
|
||||
## E2E Testing
|
||||
|
||||
Changes to archeio are auto-deployed to registry-sandbox.k8s.io and NOT to
|
||||
registry.k8s.io. registry.k8s.io serves stable releases.
|
||||
Changes to archeio are auto-deployed to the registry-sandbox.k8s.io staging instance
|
||||
and NOT to registry.k8s.io. registry.k8s.io serves stable releases.
|
||||
|
||||
### e2e tests
|
||||
|
||||
We have quick and cheap e2e results using real clients in `make e2e-test`
|
||||
and `make e2e-test-local`. We run `make e2e-test` against the staging instance.
|
||||
|
||||
These are limited to clients we can run locally and in containerized CI
|
||||
without privilege escalation (e.g. [crane] again).
|
||||
|
||||
These run immediately in the staging deploy jobs and
|
||||
continuously against the staging instance here:
|
||||
|
||||
https://testgrid.k8s.io/sig-k8s-infra-registry#registry-sandbox-e2e-gcp
|
||||
https://testgrid.k8s.io/sig-k8s-infra-registry#registry-sandbox-e2e-aws
|
||||
|
||||
`make e2e-test-local` runs against PRs to ensure the e2e tests themselves work
|
||||
and must pass before merge.
|
||||
|
||||
### Cluster e2e Testing
|
||||
|
||||
The instance at registry-sandbox.k8s.io has [kops] cluster CI running in AWS
|
||||
pointed at it to validate pulling from AWS. The kops clusters run in random
|
||||
|
@ -44,7 +68,11 @@ regions.
|
|||
|
||||
This E2E CI should be consulted before promoting code to stable release + registry.k8s.io.
|
||||
|
||||
Results are visible in [testgrid] at: https://testgrid.k8s.io/sig-k8s-infra-oci-proxy#Summary
|
||||
Results are visible in [testgrid] at: https://testgrid.k8s.io/sig-k8s-infra-registry#Summary
|
||||
|
||||
The Kubernetes project itself has substantial assorted CI usage of the production instance
|
||||
and many CI jobs that primarily exist for other purposes will alert us if pulling from it fails.
|
||||
This includes many variations on "real" clusters and image build clients.
|
||||
|
||||
[crane]: https://github.com/google/go-containerregistry/blob/main/cmd/crane/README.md
|
||||
[kops]: https://github.com/kubernetes/kops
|
||||
|
|
|
@ -19,14 +19,15 @@ package app
|
|||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// awsRegionToS3URL returns the base S3 bucket URL for an OCI layer blob given the AWS region
|
||||
// awsRegionToHostURL returns the base S3 bucket URL for an OCI layer blob given the AWS region
|
||||
//
|
||||
// blobs in the buckets should be stored at /containers/images/sha256:$hash
|
||||
func awsRegionToS3URL(region string) string {
|
||||
func awsRegionToHostURL(region, defaultURL string) string {
|
||||
switch region {
|
||||
// each of these has the region in which we have a bucket listed first
|
||||
// and then additional regions we're mapping to that bucket
|
||||
|
@ -36,45 +37,34 @@ func awsRegionToS3URL(region string) string {
|
|||
// shifting other regions that do not have their own bucket
|
||||
|
||||
// US East (N. Virginia)
|
||||
case "us-east-1", "sa-east-1", "us-gov-east-1", "GLOBAL":
|
||||
case "us-east-1", "sa-east-1", "mx-central-1":
|
||||
return "https://prod-registry-k8s-io-us-east-1.s3.dualstack.us-east-1.amazonaws.com"
|
||||
// US East (Ohio)
|
||||
case "us-east-2", "ca-central-1":
|
||||
return "https://prod-registry-k8s-io-us-east-2.s3.dualstack.us-east-2.amazonaws.com"
|
||||
// US West (N. California)
|
||||
case "us-west-1", "us-gov-west-1":
|
||||
case "us-west-1":
|
||||
return "https://prod-registry-k8s-io-us-west-1.s3.dualstack.us-west-1.amazonaws.com"
|
||||
// US West (Oregon)
|
||||
case "us-west-2", "ca-west-1":
|
||||
return "https://prod-registry-k8s-io-us-west-2.s3.dualstack.us-west-2.amazonaws.com"
|
||||
// Asia Pacific (Mumbai)
|
||||
case "ap-south-1", "ap-south-2", "me-south-1", "me-central-1":
|
||||
case "ap-south-1", "ap-south-2", "me-south-1", "me-central-1", "me-west-1":
|
||||
return "https://prod-registry-k8s-io-ap-south-1.s3.dualstack.ap-south-1.amazonaws.com"
|
||||
// Asia Pacific (Tokyo)
|
||||
case "ap-northeast-1", "ap-northeast-2", "ap-northeast-3":
|
||||
return "https://prod-registry-k8s-io-ap-northeast-1.s3.dualstack.ap-northeast-1.amazonaws.com"
|
||||
// Asia Pacific (Singapore)
|
||||
case "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", "ap-southeast-4", "ap-southeast-6", "ap-east-1", "cn-northwest-1", "cn-north-1":
|
||||
case "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", "ap-southeast-4", "ap-southeast-5", "ap-southeast-6", "ap-southeast-7", "ap-east-1", "ap-east-2", "cn-northwest-1", "cn-north-1":
|
||||
return "https://prod-registry-k8s-io-ap-southeast-1.s3.dualstack.ap-southeast-1.amazonaws.com"
|
||||
// Europe (Frankfurt)
|
||||
case "eu-central-1", "eu-central-2", "eu-south-1", "eu-south-2", "il-central-1":
|
||||
return "https://prod-registry-k8s-io-eu-central-1.s3.dualstack.eu-central-1.amazonaws.com"
|
||||
// Europe (Ireland)
|
||||
case "eu-west-1", "af-south-1":
|
||||
case "eu-west-1", "af-south-1", "eu-west-2", "eu-west-3", "eu-north-1":
|
||||
return "https://prod-registry-k8s-io-eu-west-1.s3.dualstack.eu-west-1.amazonaws.com"
|
||||
// Europe (London)
|
||||
case "eu-west-2", "eu-west-3", "eu-north-1":
|
||||
return "https://prod-registry-k8s-io-eu-west-2.s3.dualstack.eu-west-2.amazonaws.com"
|
||||
default:
|
||||
// TestRegionToAWSRegionToS3URL checks we return a non-empty result for all regions
|
||||
// that this app knows about
|
||||
//
|
||||
// we will not attempt to route to a region we do not know about
|
||||
//
|
||||
// if we see empty string returned, then we've failed to account for all regions
|
||||
//
|
||||
// we want to precompute the mapping for all regions
|
||||
return ""
|
||||
return defaultURL
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -82,7 +72,7 @@ func awsRegionToS3URL(region string) string {
|
|||
type blobChecker interface {
|
||||
// BlobExists should check that blobURL exists
|
||||
// bucket and layerHash may be used for caching purposes
|
||||
BlobExists(blobURL, bucket, layerHash string) bool
|
||||
BlobExists(blobURL string) bool
|
||||
}
|
||||
|
||||
// cachedBlobChecker just performs an HTTP HEAD check against the blob
|
||||
|
@ -90,52 +80,39 @@ type blobChecker interface {
|
|||
// TODO: potentially replace with a caching implementation
|
||||
// should be plenty fast for now, HTTP HEAD on s3 is cheap
|
||||
type cachedBlobChecker struct {
|
||||
http.Client
|
||||
blobCache
|
||||
}
|
||||
|
||||
func newCachedBlobChecker() *cachedBlobChecker {
|
||||
return &cachedBlobChecker{
|
||||
blobCache: blobCache{
|
||||
cache: make(map[string]map[string]struct{}),
|
||||
},
|
||||
}
|
||||
return &cachedBlobChecker{}
|
||||
}
|
||||
|
||||
type blobCache struct {
|
||||
// cache contains bucket:key for observed keys
|
||||
// it is not bounded, we can afford to store all keys if need be
|
||||
// and the cloud run container will spin down after an idle period
|
||||
cache map[string]map[string]struct{}
|
||||
lock sync.RWMutex
|
||||
m sync.Map
|
||||
}
|
||||
|
||||
func (b *blobCache) Get(bucket, layerHash string) bool {
|
||||
b.lock.RLock()
|
||||
defer b.lock.RUnlock()
|
||||
if m, exists := b.cache[bucket]; exists {
|
||||
_, exists = m[layerHash]
|
||||
return exists
|
||||
}
|
||||
return false
|
||||
func (b *blobCache) Get(blobURL string) bool {
|
||||
_, exists := b.m.Load(blobURL)
|
||||
return exists
|
||||
}
|
||||
|
||||
func (b *blobCache) Put(bucket, layerHash string) {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
if _, exists := b.cache[bucket]; !exists {
|
||||
b.cache[bucket] = make(map[string]struct{})
|
||||
}
|
||||
b.cache[bucket][layerHash] = struct{}{}
|
||||
func (b *blobCache) Put(blobURL string) {
|
||||
b.m.Store(blobURL, struct{}{})
|
||||
}
|
||||
|
||||
func (c *cachedBlobChecker) BlobExists(blobURL, bucket, layerHash string) bool {
|
||||
if c.blobCache.Get(bucket, layerHash) {
|
||||
func (c *cachedBlobChecker) BlobExists(blobURL string) bool {
|
||||
if c.blobCache.Get(blobURL) {
|
||||
klog.V(3).InfoS("blob existence cache hit", "url", blobURL)
|
||||
return true
|
||||
}
|
||||
klog.V(3).InfoS("blob existence cache miss", "url", blobURL)
|
||||
r, err := c.Client.Head(blobURL)
|
||||
// NOTE: this client will still share http.DefaultTransport
|
||||
// We do not wish to share the rest of the client state currently
|
||||
client := &http.Client{
|
||||
// ensure sensible timeouts
|
||||
Timeout: time.Second * 5,
|
||||
}
|
||||
r, err := client.Head(blobURL)
|
||||
// fallback to assuming blob is unavailable on errors
|
||||
if err != nil {
|
||||
return false
|
||||
|
@ -144,7 +121,7 @@ func (c *cachedBlobChecker) BlobExists(blobURL, bucket, layerHash string) bool {
|
|||
// if the blob exists it HEAD should return 200 OK
|
||||
// this is true for S3 and for OCI registries
|
||||
if r.StatusCode == http.StatusOK {
|
||||
c.blobCache.Put(bucket, layerHash)
|
||||
c.blobCache.Put(blobURL)
|
||||
return true
|
||||
}
|
||||
return false
|
|
@ -0,0 +1,137 @@
|
|||
//go:build !nointegration
|
||||
// +build !nointegration
|
||||
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package app
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"k8s.io/registry.k8s.io/pkg/net/cloudcidrs"
|
||||
)
|
||||
|
||||
func TestIntegrationCachedBlobChecker(t *testing.T) {
|
||||
t.Parallel()
|
||||
bucket := awsRegionToHostURL("us-east-1", "")
|
||||
blobs := newCachedBlobChecker()
|
||||
testCases := []struct {
|
||||
Name string
|
||||
BlobURL string
|
||||
Bucket string
|
||||
HashKey string
|
||||
ExpectExists bool
|
||||
}{
|
||||
{
|
||||
Name: "known bucket entry",
|
||||
BlobURL: bucket + "/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
ExpectExists: true,
|
||||
},
|
||||
// to cover the case that we get a cache hit
|
||||
{
|
||||
Name: "same-known bucket entry",
|
||||
BlobURL: bucket + "/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
ExpectExists: true,
|
||||
},
|
||||
{
|
||||
Name: "known bucket, bad entry",
|
||||
BlobURL: bucket + "/c0ntainers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
ExpectExists: false,
|
||||
},
|
||||
{
|
||||
Name: "bogus bucket on domain without webserver",
|
||||
BlobURL: "http://bogus.k8s.io/foo",
|
||||
ExpectExists: false,
|
||||
},
|
||||
}
|
||||
// run test cases in parallel and then serial
|
||||
// this populates the cache on the first run while doing parallel testing
|
||||
// and allows us to check cached behavior on the second run
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
url := tc.BlobURL
|
||||
exists := blobs.BlobExists(url)
|
||||
if exists != tc.ExpectExists {
|
||||
t.Fatalf("expected: %v but got: %v", tc.ExpectExists, exists)
|
||||
}
|
||||
})
|
||||
}
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
url := tc.BlobURL
|
||||
exists := blobs.BlobExists(url)
|
||||
if exists != tc.ExpectExists {
|
||||
t.Fatalf("expected: %v but got: %v", tc.ExpectExists, exists)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegrationAllBucketsValid(t *testing.T) {
|
||||
t.Parallel()
|
||||
// a known pause image blob
|
||||
const testBlob = "da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e"
|
||||
expectedDigest, err := hex.DecodeString(testBlob)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to decode test blob digest: %v", err)
|
||||
}
|
||||
// iterate all AWS regions and their mapped buckets
|
||||
ipInfos := cloudcidrs.AllIPInfos()
|
||||
for i := range ipInfos {
|
||||
ipInfo := ipInfos[i]
|
||||
// we only have bucket mappings for AWS currently
|
||||
// otherwise these are the deployed terraform defaults,
|
||||
// which are a subset of the buckets for AWS-external traffic
|
||||
// see also: https://github.com/kubernetes/registry.k8s.io/issues/194
|
||||
if ipInfo.Cloud != cloudcidrs.AWS {
|
||||
continue
|
||||
}
|
||||
// skip regions that aren't mapped and would've used the default
|
||||
baseURL := awsRegionToHostURL(ipInfo.Region, "")
|
||||
if baseURL == "" {
|
||||
continue
|
||||
}
|
||||
// for all remaining regions, fetch a real blob to make sure this
|
||||
// bucket will work
|
||||
t.Run(ipInfo.Region, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
url := baseURL + "/containers/images/sha256:" + testBlob
|
||||
// this is test code, the URL is not user supplied
|
||||
// nolint:gosec
|
||||
r, err := http.Get(url)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get %q: %v", url, err)
|
||||
}
|
||||
defer r.Body.Close()
|
||||
b, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read body for %q: %v", url, err)
|
||||
}
|
||||
digest := sha256.Sum256(b)
|
||||
if !bytes.Equal(digest[:], expectedDigest) {
|
||||
t.Fatalf("Digest for %q was %q but expected %q", url, hex.EncodeToString(digest[:]), hex.EncodeToString(expectedDigest))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,57 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package app
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/registry.k8s.io/pkg/net/cloudcidrs"
|
||||
)
|
||||
|
||||
func TestRegionToAWSRegionToHostURL(t *testing.T) {
|
||||
// ensure known regions return a configured bucket
|
||||
regions := []string{}
|
||||
for _, ipInfo := range cloudcidrs.AllIPInfos() {
|
||||
// AWS regions, excluding "GLOBAL" meta region, AWS US Gov Cloud and European Soveign Cloud
|
||||
if ipInfo.Cloud == cloudcidrs.AWS &&
|
||||
ipInfo.Region != "GLOBAL" && !strings.HasPrefix(ipInfo.Region, "us-gov-") && !strings.HasPrefix(ipInfo.Region, "eusc-") {
|
||||
regions = append(regions, ipInfo.Region)
|
||||
}
|
||||
}
|
||||
for _, region := range regions {
|
||||
url := awsRegionToHostURL(region, "")
|
||||
if url == "" {
|
||||
t.Fatalf("received empty string for known region %q", region)
|
||||
}
|
||||
}
|
||||
// test default region
|
||||
if url := awsRegionToHostURL("nonsensical-region", "____default____"); url != "____default____" {
|
||||
t.Fatalf("received non-empty URL string for made up region \"nonsensical-region\": %q", url)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlobCache(t *testing.T) {
|
||||
bc := &blobCache{}
|
||||
bc.Put("foo")
|
||||
if !bc.Get("foo") {
|
||||
t.Fatal("Cache did not contain key we just put")
|
||||
}
|
||||
if bc.Get("bar") {
|
||||
t.Fatal("Cache contained key we did not put")
|
||||
}
|
||||
}
|
|
@ -18,12 +18,14 @@ package app
|
|||
|
||||
import (
|
||||
"net/http"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/registry.k8s.io/pkg/net/cidrs/aws"
|
||||
"k8s.io/registry.k8s.io/pkg/net/clientip"
|
||||
"k8s.io/registry.k8s.io/pkg/net/cloudcidrs"
|
||||
)
|
||||
|
||||
type RegistryConfig struct {
|
||||
|
@ -31,7 +33,7 @@ type RegistryConfig struct {
|
|||
UpstreamRegistryPath string
|
||||
InfoURL string
|
||||
PrivacyURL string
|
||||
ServeImagesfromAWS bool
|
||||
DefaultAWSBaseURL string
|
||||
}
|
||||
|
||||
// MakeHandler returns the root archeio HTTP handler
|
||||
|
@ -70,12 +72,17 @@ func MakeHandler(rc RegistryConfig) http.Handler {
|
|||
|
||||
func makeV2Handler(rc RegistryConfig, blobs blobChecker) func(w http.ResponseWriter, r *http.Request) {
|
||||
// matches blob requests, captures the requested blob hash
|
||||
reBlob := regexp.MustCompile("^/v2/.*/blobs/sha256:([0-9a-f]{64})$")
|
||||
// https://github.com/opencontainers/distribution-spec/blob/main/spec.md#pull
|
||||
// Blobs are at `/v2/<name>/blobs/<digest>`
|
||||
// Note that ':' cannot be contained in <name> but *must* be contained in <digest>
|
||||
// <digest> also cannot contain `/` so we can use a relatively simple and cheap regex
|
||||
// to match blob requests and capture the digest
|
||||
reBlob := regexp.MustCompile("^/v2/.*/blobs/([^/]+:[a-zA-Z0-9=_-]+)$")
|
||||
// initialize map of clientIP to AWS region
|
||||
regionMapper := aws.NewAWSRegionMapper()
|
||||
regionMapper := cloudcidrs.NewIPMapper()
|
||||
// capture these in a http handler lambda
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
path := r.URL.Path
|
||||
rPath := r.URL.Path
|
||||
|
||||
// we only care about publicly readable GCR as the backing registry
|
||||
// or publicly readable blob storage
|
||||
|
@ -89,72 +96,73 @@ func makeV2Handler(rc RegistryConfig, blobs blobChecker) func(w http.ResponseWri
|
|||
// it turns out publicly readable GCR repos do not actually care about
|
||||
// the presence of a token for any API calls, despite the /v2/ API call
|
||||
// returning 401, prompting token auth
|
||||
if path == "/v2/" || path == "/v2" {
|
||||
klog.V(2).InfoS("serving 200 OK for /v2/ check", "path", path)
|
||||
if rPath == "/v2/" || rPath == "/v2" {
|
||||
klog.V(2).InfoS("serving 200 OK for /v2/ check", "path", rPath)
|
||||
// NOTE: OCI does not require this, but the docker v2 spec include it, and GCR sets this
|
||||
// Docker distribution v2 clients may fallback to an older version if this is not set.
|
||||
w.Header().Set("Docker-Distribution-Api-Version", "registry/2.0")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return
|
||||
}
|
||||
|
||||
if rc.ServeImagesfromAWS {
|
||||
// check if blob request
|
||||
matches := reBlob.FindStringSubmatch(path)
|
||||
if len(matches) != 2 {
|
||||
// not a blob request so forward it to the main upstream registry
|
||||
redirectPath := calculateRedirectPath(rc, path)
|
||||
klog.V(2).InfoS("redirecting manifest request to upstream registry", "path", path, "redirect", rc.UpstreamRegistryEndpoint+redirectPath)
|
||||
http.Redirect(w, r, rc.UpstreamRegistryEndpoint+redirectPath, http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
|
||||
// for blob requests, check the client IP and determine the best backend
|
||||
clientIP, err := getClientIP(r)
|
||||
if err != nil {
|
||||
// this should not happen
|
||||
klog.ErrorS(err, "failed to get client IP")
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// check if client is known to be coming from an AWS region
|
||||
awsRegion, ipIsKnown := regionMapper.GetIP(clientIP)
|
||||
if !ipIsKnown {
|
||||
// no region match, redirect to main upstream registry
|
||||
redirectPath := calculateRedirectPath(rc, path)
|
||||
klog.V(2).InfoS("redirecting blob request to upstream registry", "path", path, "redirect", rc.UpstreamRegistryEndpoint+redirectPath)
|
||||
http.Redirect(w, r, rc.UpstreamRegistryEndpoint+redirectPath, http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
|
||||
// check if blob is available in our S3 bucket for the region
|
||||
bucketURL := awsRegionToS3URL(awsRegion)
|
||||
hash := matches[1]
|
||||
// this matches GCR's GCS layout, which we will use for other buckets
|
||||
blobURL := bucketURL + "/containers/images/sha256%3A" + hash
|
||||
if blobs.BlobExists(blobURL, bucketURL, hash) {
|
||||
// blob known to be available in S3, redirect client there
|
||||
klog.V(2).InfoS("redirecting blob request to S3", "path", path)
|
||||
http.Redirect(w, r, blobURL, http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
|
||||
// we don't support the non-standard _catalog API
|
||||
// https://github.com/kubernetes/registry.k8s.io/issues/162
|
||||
if rPath == "/v2/_catalog" {
|
||||
http.Error(w, "_catalog is not supported", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
// check if blob request
|
||||
matches := reBlob.FindStringSubmatch(rPath)
|
||||
if len(matches) != 2 {
|
||||
// not a blob request so forward it to the main upstream registry
|
||||
redirectURL := upstreamRedirectURL(rc, rPath)
|
||||
klog.V(2).InfoS("redirecting manifest request to upstream registry", "path", rPath, "redirect", redirectURL)
|
||||
http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
// it is a blob request, grab the hash for later
|
||||
digest := matches[1]
|
||||
|
||||
// for blob requests, check the client IP and determine the best backend
|
||||
clientIP, err := clientip.Get(r)
|
||||
if err != nil {
|
||||
// this should not happen
|
||||
klog.ErrorS(err, "failed to get client IP")
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// if client is coming from GCP, stay in GCP
|
||||
ipInfo, ipIsKnown := regionMapper.GetIP(clientIP)
|
||||
if ipIsKnown && ipInfo.Cloud == cloudcidrs.GCP {
|
||||
redirectURL := upstreamRedirectURL(rc, rPath)
|
||||
klog.V(2).InfoS("redirecting GCP blob request to upstream registry", "path", rPath, "redirect", redirectURL)
|
||||
http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
|
||||
// check if blob is available in our AWS layer storage for the region
|
||||
region := ""
|
||||
if ipIsKnown {
|
||||
region = ipInfo.Region
|
||||
}
|
||||
bucketURL := awsRegionToHostURL(region, rc.DefaultAWSBaseURL)
|
||||
// this matches GCR's GCS layout, which we will use for other buckets
|
||||
blobURL := bucketURL + "/containers/images/" + digest
|
||||
if blobs.BlobExists(blobURL) {
|
||||
// blob known to be available in AWS, redirect client there
|
||||
klog.V(2).InfoS("redirecting blob request to AWS", "path", rPath)
|
||||
http.Redirect(w, r, blobURL, http.StatusTemporaryRedirect)
|
||||
return
|
||||
}
|
||||
|
||||
// fall back to redirect to upstream
|
||||
redirectPath := calculateRedirectPath(rc, path)
|
||||
klog.V(2).InfoS("redirecting blob request to upstream registry", "path", path, "redirect", rc.UpstreamRegistryEndpoint+redirectPath)
|
||||
http.Redirect(w, r, rc.UpstreamRegistryEndpoint+redirectPath, http.StatusTemporaryRedirect)
|
||||
redirectURL := upstreamRedirectURL(rc, rPath)
|
||||
klog.V(2).InfoS("redirecting blob request to upstream registry", "path", rPath, "redirect", redirectURL)
|
||||
http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
|
||||
}
|
||||
}
|
||||
|
||||
func calculateRedirectPath(rc RegistryConfig, path string) string {
|
||||
redirectPath := path
|
||||
// if path is not just /v2/, which is a special endpoint
|
||||
if len(path) > 5 && rc.UpstreamRegistryPath != "" {
|
||||
redirectPath = "/v2/" + rc.UpstreamRegistryPath + strings.TrimPrefix(path, "/v2")
|
||||
} else if len(path) > 5 && rc.UpstreamRegistryPath == "" {
|
||||
redirectPath = "/v2" + strings.TrimPrefix(path, "/v2")
|
||||
}
|
||||
return redirectPath
|
||||
func upstreamRedirectURL(rc RegistryConfig, originalPath string) string {
|
||||
return rc.UpstreamRegistryEndpoint + path.Join("/v2/", rc.UpstreamRegistryPath, strings.TrimPrefix(originalPath, "/v2"))
|
||||
}
|
|
@ -30,7 +30,6 @@ func TestMakeHandler(t *testing.T) {
|
|||
UpstreamRegistryPath: "k8s-artifacts-prod",
|
||||
InfoURL: "https://github.com/kubernetes/k8s.io/tree/main/registry.k8s.io",
|
||||
PrivacyURL: "https://www.linuxfoundation.org/privacy-policy/",
|
||||
ServeImagesfromAWS: false,
|
||||
}
|
||||
handler := MakeHandler(registryConfig)
|
||||
testCases := []struct {
|
||||
|
@ -96,6 +95,16 @@ func TestMakeHandler(t *testing.T) {
|
|||
return r
|
||||
}(),
|
||||
ExpectedStatus: http.StatusTemporaryRedirect,
|
||||
ExpectedURL: "https://prod-registry-k8s-io-eu-west-1.s3.dualstack.eu-west-1.amazonaws.com/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
},
|
||||
{
|
||||
Name: "GCP IP, /v2/pause/blobs/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
Request: func() *http.Request {
|
||||
r := httptest.NewRequest("GET", "http://localhost:8080/v2/pause/blobs/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e", nil)
|
||||
r.RemoteAddr = "35.220.26.1:888"
|
||||
return r
|
||||
}(),
|
||||
ExpectedStatus: http.StatusTemporaryRedirect,
|
||||
ExpectedURL: "https://us.gcr.io/v2/k8s-artifacts-prod/pause/blobs/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
},
|
||||
}
|
||||
|
@ -138,7 +147,7 @@ type fakeBlobsChecker struct {
|
|||
knownURLs map[string]bool
|
||||
}
|
||||
|
||||
func (f *fakeBlobsChecker) BlobExists(blobURL, bucket, hashKey string) bool {
|
||||
func (f *fakeBlobsChecker) BlobExists(blobURL string) bool {
|
||||
return f.knownURLs[blobURL]
|
||||
}
|
||||
|
||||
|
@ -148,19 +157,16 @@ func TestMakeV2Handler(t *testing.T) {
|
|||
UpstreamRegistryPath: "",
|
||||
InfoURL: "https://github.com/kubernetes/k8s.io/tree/main/registry.k8s.io",
|
||||
PrivacyURL: "https://www.linuxfoundation.org/privacy-policy/",
|
||||
ServeImagesfromAWS: true,
|
||||
}
|
||||
blobs := fakeBlobsChecker{
|
||||
knownURLs: map[string]bool{
|
||||
"https://prod-registry-k8s-io-ap-south-1.s3.dualstack.ap-south-1.amazonaws.com/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-ap-southeast-1.s3.dualstack.ap-southeast-1.amazonaws.com/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-eu-central-1.s3.dualstack.eu-central-1.amazonaws.com/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-eu-west-1.s3.dualstack.eu-west-1.amazonaws.com/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-us-east-1.s3.dualstack.us-east-2.amazonaws.com/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-us-east-2.s3.dualstack.us-east-2.amazonaws.com/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-us-west-1.s3.dualstack.us-west-1.amazonaws.com/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-us-west-2.s3.dualstack.us-west-2.amazonaws.com/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-eu-west-2.s3.dualstack.eu-west-2.amazonaws.com/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-ap-south-1.s3.dualstack.ap-south-1.amazonaws.com/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-ap-southeast-1.s3.dualstack.ap-southeast-1.amazonaws.com/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-eu-central-1.s3.dualstack.eu-central-1.amazonaws.com/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-eu-west-1.s3.dualstack.eu-west-1.amazonaws.com/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-us-east-1.s3.dualstack.us-east-2.amazonaws.com/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-us-east-2.s3.dualstack.us-east-2.amazonaws.com/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
"https://prod-registry-k8s-io-us-west-1.s3.dualstack.us-west-1.amazonaws.com/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e": true,
|
||||
},
|
||||
}
|
||||
handler := makeV2Handler(registryConfig, &blobs)
|
||||
|
@ -176,6 +182,13 @@ func TestMakeV2Handler(t *testing.T) {
|
|||
ExpectedStatus: http.StatusTemporaryRedirect,
|
||||
ExpectedURL: "https://k8s.gcr.io/v2/pause/blobs/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
},
|
||||
{
|
||||
// future-proofing tests for other digest algorithms, even though we only have sha256 content as of March 2023
|
||||
Name: "/v2/pause/blobs/sha512:3b0998121425143be7164ea1555efbdf5b8a02ceedaa26e01910e7d017ff78ddbba27877bd42510a06cc14ac1bc6c451128ca3f0d0afba28b695e29b2702c9c7",
|
||||
Request: httptest.NewRequest("GET", "http://localhost:8080/v2/pause/blobs/sha256:3b0998121425143be7164ea1555efbdf5b8a02ceedaa26e01910e7d017ff78ddbba27877bd42510a06cc14ac1bc6c451128ca3f0d0afba28b695e29b2702c9c7", nil),
|
||||
ExpectedStatus: http.StatusTemporaryRedirect,
|
||||
ExpectedURL: "https://k8s.gcr.io/v2/pause/blobs/sha256:3b0998121425143be7164ea1555efbdf5b8a02ceedaa26e01910e7d017ff78ddbba27877bd42510a06cc14ac1bc6c451128ca3f0d0afba28b695e29b2702c9c7",
|
||||
},
|
||||
{
|
||||
Name: "Somehow bogus remote addr, /v2/pause/blobs/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
Request: func() *http.Request {
|
||||
|
@ -187,6 +200,15 @@ func TestMakeV2Handler(t *testing.T) {
|
|||
// This should only happen with a bug in the stdlib http server ...
|
||||
ExpectedStatus: http.StatusBadRequest,
|
||||
},
|
||||
{
|
||||
Name: "/v2/_catalog",
|
||||
Request: func() *http.Request {
|
||||
r := httptest.NewRequest("GET", "http://localhost:8080/v2/_catalog", nil)
|
||||
r.RemoteAddr = "35.180.1.1:888"
|
||||
return r
|
||||
}(),
|
||||
ExpectedStatus: http.StatusNotFound,
|
||||
},
|
||||
{
|
||||
Name: "AWS IP, /v2/pause/blobs/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
Request: func() *http.Request {
|
||||
|
@ -195,7 +217,7 @@ func TestMakeV2Handler(t *testing.T) {
|
|||
return r
|
||||
}(),
|
||||
ExpectedStatus: http.StatusTemporaryRedirect,
|
||||
ExpectedURL: "https://prod-registry-k8s-io-eu-west-2.s3.dualstack.eu-west-2.amazonaws.com/containers/images/sha256%3Ada86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
ExpectedURL: "https://prod-registry-k8s-io-eu-west-1.s3.dualstack.eu-west-1.amazonaws.com/containers/images/sha256:da86e6ba6ca197bf6bc5e9d900febd906b133eaa4750e6bed647b0fbe50ed43e",
|
||||
},
|
||||
{
|
||||
Name: "AWS IP, /v2/pause/manifests/latest",
|
|
@ -0,0 +1,121 @@
|
|||
//go:build linux && !noe2e
|
||||
// +build linux,!noe2e
|
||||
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestE2EContainerdPull(t *testing.T) {
|
||||
t.Parallel()
|
||||
containerdVersions := []string{"1.6.20", "1.7.0"}
|
||||
for i := range containerdVersions {
|
||||
containerdVersion := containerdVersions[i]
|
||||
t.Run("v"+containerdVersion, func(t *testing.T) {
|
||||
testE2EContainerdPull(t, containerdVersion)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testE2EContainerdPull(t *testing.T, containerdVersion string) {
|
||||
t.Parallel()
|
||||
// install containerd and image puller tool
|
||||
installDir := filepath.Join(binDir, "containerd-"+containerdVersion)
|
||||
// nolint:gosec
|
||||
installCmd := exec.Command(filepath.Join(repoRoot, "hack", "tools", "e2e-setup-containerd.sh"))
|
||||
installCmd.Env = append(installCmd.Env,
|
||||
"CONTAINERD_VERSION="+containerdVersion,
|
||||
"CONTAINERD_INSTALL_DIR="+installDir,
|
||||
)
|
||||
installCmd.Stderr = os.Stderr
|
||||
if err := installCmd.Run(); err != nil {
|
||||
t.Fatalf("Failed to install containerd: %v", err)
|
||||
}
|
||||
|
||||
// start rootless containerd, which only needs to be able to pull images
|
||||
tmpDir, err := os.MkdirTemp("", "containerd")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup tmpdir: %v", err)
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
os.RemoveAll(tmpDir)
|
||||
})
|
||||
socketAddress := filepath.Join(tmpDir, "containerd.sock")
|
||||
// nolint:gosec
|
||||
containerdCmd := exec.Command(
|
||||
filepath.Join(installDir, "containerd"),
|
||||
// config generated by e2e-setup-containerd.sh
|
||||
"--config="+filepath.Join(installDir, "containerd-config.toml"),
|
||||
"--root="+filepath.Join(tmpDir, "root"),
|
||||
"--state="+filepath.Join(tmpDir, "state"),
|
||||
"--address="+socketAddress,
|
||||
"--log-level=trace",
|
||||
)
|
||||
containerdCmd.Stderr = os.Stderr
|
||||
if err := containerdCmd.Start(); err != nil {
|
||||
t.Fatalf("Failed to start containerd: %v", err)
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
if err := containerdCmd.Process.Signal(os.Interrupt); err != nil {
|
||||
t.Fatalf("failed to signal containerd: %v", err)
|
||||
}
|
||||
// kill if it doesn't exit gracefully after 1s
|
||||
done := make(chan error)
|
||||
go func() { done <- containerdCmd.Wait() }()
|
||||
select {
|
||||
case <-done:
|
||||
// exited
|
||||
case <-time.After(time.Second):
|
||||
// timed out
|
||||
if err := containerdCmd.Process.Kill(); err != nil {
|
||||
t.Fatalf("Failed to kill containerd: %v", err)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// wait for containerd to be ready
|
||||
containerdReady := false
|
||||
for i := 0; i < 5; i++ {
|
||||
// nolint:gosec
|
||||
if err := exec.Command(filepath.Join(installDir, "ctr"), "--address="+socketAddress, "version").Run(); err == nil {
|
||||
containerdReady = true
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Duration(i) * time.Second)
|
||||
}
|
||||
if !containerdReady {
|
||||
t.Fatalf("Failed to wait for containerd to be ready")
|
||||
}
|
||||
|
||||
// pull test images
|
||||
for i := range testCases {
|
||||
tc := &testCases[i]
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// nolint:gosec
|
||||
pullCmd := exec.Command(filepath.Join(installDir, "ctr"), "--address="+socketAddress, "content", "fetch", tc.Ref())
|
||||
testPull(t, tc, pullCmd)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,135 @@
|
|||
//go:build !noe2e
|
||||
// +build !noe2e
|
||||
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// e2e contains end-to-end tests for registry.k8s.io
|
||||
package e2e
|
||||
|
||||
/*
|
||||
This exists to test against the staging instance of the registry.
|
||||
|
||||
Compare to cmd/archeio/main_test.go which exists to integration test
|
||||
a local instance. There is much overlap but they serve different purposes
|
||||
and cover different aspects.
|
||||
|
||||
The integration tests can run quickly in presubmit and leverage faking
|
||||
locations to cover more codepaths. They do not however cover all interactions
|
||||
with actually deployed infrastructure including e.g. the loadbalancer and
|
||||
WAF rules in front of the deployed instances.
|
||||
|
||||
These tests instead will run from multiple locations and cover the actual
|
||||
production-like infrastructure but cannot fake IP addr there by design as
|
||||
we accurately determine IP there and wouldn't want clients (ab)using this.
|
||||
|
||||
These tests are still expected to be quick and cheap and only cover clients
|
||||
we can run in a containerized, non-privileged environment.
|
||||
|
||||
We have other coverage, see cmd/archeio/docs/testing.md
|
||||
*/
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/registry.k8s.io/internal/integration"
|
||||
)
|
||||
|
||||
var endpoint = "registry-sandbox.k8s.io"
|
||||
|
||||
type testCase struct {
|
||||
Name string
|
||||
Digest string
|
||||
}
|
||||
|
||||
func (tc *testCase) Ref() string {
|
||||
return endpoint + "/" + tc.Name
|
||||
}
|
||||
|
||||
var testCases = []testCase{
|
||||
{Name: "pause:3.1", Digest: "sha256:f78411e19d84a252e53bff71a4407a5686c46983a2c2eeed83929b888179acea"},
|
||||
{Name: "pause:3.2", Digest: "sha256:927d98197ec1141a368550822d18fa1c60bdae27b78b0c004f705f548c07814f"},
|
||||
{Name: "pause:3.5", Digest: "sha256:1ff6c18fbef2045af6b9c16bf034cc421a29027b800e4f9b68ae9b1cb3e9ae07"},
|
||||
{Name: "pause:3.9", Digest: "sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097"},
|
||||
}
|
||||
|
||||
var repoRoot = ""
|
||||
var binDir = ""
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if e := os.Getenv("REGISTRY_ENDPOINT"); e != "" {
|
||||
endpoint = e
|
||||
}
|
||||
rr, err := integration.ModuleRootDir()
|
||||
if err != nil {
|
||||
panic("failed to get root dir: " + err.Error())
|
||||
}
|
||||
repoRoot = rr
|
||||
binDir = filepath.Join(repoRoot, "bin")
|
||||
if err := os.Chdir(repoRoot); err != nil {
|
||||
panic("failed to chdir to repo root: " + err.Error())
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
// installs tool to binDir using go install
|
||||
func goInstall(t *testing.T, tool string) {
|
||||
buildCmd := exec.Command("go", "install", tool)
|
||||
buildCmd.Env = append(os.Environ(), "GOBIN="+binDir, "GOTOOLCHAIN=auto")
|
||||
if out, err := buildCmd.CombinedOutput(); err != nil {
|
||||
t.Errorf("Failed to get %q: %v", tool, err)
|
||||
t.Error("Output:")
|
||||
t.Fatal(string(out))
|
||||
}
|
||||
}
|
||||
|
||||
func binPath(name string) string {
|
||||
return filepath.Join(binDir, name)
|
||||
}
|
||||
|
||||
// common helper for executing test pull and checking output
|
||||
func testPull(t *testing.T, tc *testCase, pullCmd *exec.Cmd) {
|
||||
out, err := pullCmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to pull image: %q with err %v", tc.Name, err)
|
||||
t.Error("Output from command:")
|
||||
t.Fatal(string(out))
|
||||
} else if tc.Digest != "" && !strings.Contains(string(out), tc.Digest) {
|
||||
t.Error("pull output does not contain expected digest")
|
||||
t.Error("Output from command:")
|
||||
t.Fatal(string(out))
|
||||
}
|
||||
}
|
||||
|
||||
func TestE2ECranePull(t *testing.T) {
|
||||
t.Parallel()
|
||||
// install crane
|
||||
goInstall(t, "github.com/google/go-containerregistry/cmd/crane@latest")
|
||||
// pull test images
|
||||
for i := range testCases {
|
||||
tc := &testCases[i]
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
// nolint:gosec
|
||||
pullCmd := exec.Command(binPath("crane"), "pull", "--verbose", tc.Ref(), "/dev/null")
|
||||
testPull(t, tc, pullCmd)
|
||||
})
|
||||
}
|
||||
}
|
|
@ -22,12 +22,12 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/registry.k8s.io/cmd/archeio/app"
|
||||
|
||||
"k8s.io/registry.k8s.io/cmd/archeio/internal/app"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@ -46,13 +46,7 @@ func main() {
|
|||
UpstreamRegistryPath: getEnv("UPSTREAM_REGISTRY_PATH", "k8s-artifacts-prod/images"),
|
||||
InfoURL: "https://github.com/kubernetes/registry.k8s.io",
|
||||
PrivacyURL: "https://www.linuxfoundation.org/privacy-policy/",
|
||||
}
|
||||
|
||||
var err error
|
||||
// feature gate AWS S3 serving feature
|
||||
registryConfig.ServeImagesfromAWS, err = strconv.ParseBool(getEnv("SERVE_IMAGES_FROM_AWS", "true"))
|
||||
if err != nil {
|
||||
klog.Fatal("SERVE_IMAGES_FROM_AWS environment variable is not set to a boolean value %v", err)
|
||||
DefaultAWSBaseURL: getEnv("DEFAULT_AWS_BASE_URL", "https://prod-registry-k8s-io-us-east-1.s3.dualstack.us-east-1.amazonaws.com"),
|
||||
}
|
||||
|
||||
// configure server with reasonable timeout
|
||||
|
@ -70,7 +64,7 @@ func main() {
|
|||
|
||||
// start serving
|
||||
go func() {
|
||||
if err = server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
}()
|
||||
|
|
|
@ -20,16 +20,29 @@ limitations under the License.
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/crane"
|
||||
"github.com/google/go-containerregistry/pkg/v1/validate"
|
||||
|
||||
"k8s.io/registry.k8s.io/internal/integration"
|
||||
"k8s.io/registry.k8s.io/pkg/net/cloudcidrs"
|
||||
)
|
||||
|
||||
type integrationTestCase struct {
|
||||
Name string
|
||||
FakeIP string
|
||||
Image string
|
||||
Digest string
|
||||
}
|
||||
|
||||
// TestIntegrationMain tests the entire, built binary with an integration
|
||||
// test, pulling images with crane
|
||||
func TestIntegrationMain(t *testing.T) {
|
||||
|
@ -38,10 +51,6 @@ func TestIntegrationMain(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("Failed to detect module root dir: %v", err)
|
||||
}
|
||||
// NOTE: also ensures rootDir/bin is in front of $PATH
|
||||
if err := integration.EnsureCrane(rootDir); err != nil {
|
||||
t.Fatalf("Failed to ensure crane: %v", err)
|
||||
}
|
||||
|
||||
// build binary
|
||||
buildCmd := exec.Command("make", "archeio")
|
||||
|
@ -54,15 +63,22 @@ func TestIntegrationMain(t *testing.T) {
|
|||
testPort := "61337"
|
||||
testAddr := "localhost:" + testPort
|
||||
serverErrChan := make(chan error)
|
||||
cmdContext, serverCancel := context.WithCancel(context.TODO())
|
||||
serverCmd := exec.CommandContext(cmdContext, "archeio")
|
||||
serverCmd := exec.Command("./archeio", "-v=9")
|
||||
serverCmd.Dir = filepath.Join(rootDir, "bin")
|
||||
serverCmd.Env = append(serverCmd.Env, "PORT="+testPort)
|
||||
// serverCmd.Stderr = os.Stderr
|
||||
defer serverCancel()
|
||||
serverCmd.Stderr = os.Stderr
|
||||
go func() {
|
||||
serverErrChan <- serverCmd.Start()
|
||||
serverErrChan <- serverCmd.Wait()
|
||||
}()
|
||||
t.Cleanup(func() {
|
||||
if err := serverCmd.Process.Signal(os.Interrupt); err != nil {
|
||||
t.Fatalf("failed to signal archeio: %v", err)
|
||||
}
|
||||
if err := <-serverErrChan; err != nil {
|
||||
t.Fatalf("archeio did not exit cleanly: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// wait for server to be up and running
|
||||
startErr := <-serverErrChan
|
||||
|
@ -76,30 +92,118 @@ func TestIntegrationMain(t *testing.T) {
|
|||
t.Fatal("timed out waiting for archeio to be ready")
|
||||
}
|
||||
|
||||
// TODO: fake being on AWS
|
||||
testPull := func(image string) {
|
||||
// nolint:gosec // this is not user suplied input ...
|
||||
cmd := exec.Command("crane", "pull", testAddr+"/"+image, os.DevNull)
|
||||
//cmd.Stderr = os.Stderr
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Errorf("pull for %q failed: %v", image, err)
|
||||
t.Error("output: ")
|
||||
t.Error(string(out))
|
||||
t.Fail()
|
||||
// perform many test pulls ...
|
||||
testCases := makeTestCases(t)
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
ref := testAddr + "/" + tc.Image
|
||||
// ensure we supply fake IP info from test case
|
||||
craneOpts := []crane.Option{crane.WithTransport(newFakeIPTransport(tc.FakeIP))}
|
||||
// test fetching digest first
|
||||
digest, err := crane.Digest(ref, craneOpts...)
|
||||
if err != nil {
|
||||
t.Errorf("Fetch digest for %q failed: %v", ref, err)
|
||||
}
|
||||
if digest != tc.Digest {
|
||||
t.Errorf("Wrong digest for %q", ref)
|
||||
t.Errorf("Received: %q", digest)
|
||||
t.Errorf("Expected: %q", tc.Digest)
|
||||
}
|
||||
err = pull(ref, craneOpts...)
|
||||
if err != nil {
|
||||
t.Errorf("Pull for %q failed: %v", ref, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func makeTestCases(t *testing.T) []integrationTestCase {
|
||||
// a few small images that we really should be able to pull
|
||||
wellKnownImages := []struct {
|
||||
Name string
|
||||
Digest string
|
||||
}{
|
||||
{
|
||||
Name: "pause:3.1",
|
||||
Digest: "sha256:f78411e19d84a252e53bff71a4407a5686c46983a2c2eeed83929b888179acea",
|
||||
},
|
||||
{
|
||||
Name: "pause:3.9",
|
||||
Digest: "sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097",
|
||||
},
|
||||
}
|
||||
|
||||
// collect interesting IPs after checking that they meet expectations
|
||||
type interestingIP struct {
|
||||
Name string
|
||||
IP string
|
||||
}
|
||||
interestingIPs := []interestingIP{}
|
||||
cidrs := cloudcidrs.NewIPMapper()
|
||||
|
||||
// One for GCP because we host there and have code paths for this
|
||||
const gcpIP = "35.220.26.1"
|
||||
if info, matches := cidrs.GetIP(netip.MustParseAddr(gcpIP)); !matches || info.Cloud != cloudcidrs.GCP {
|
||||
t.Fatalf("Expected %q to be a GCP IP but is not detected as one with current data", gcpIP)
|
||||
}
|
||||
interestingIPs = append(interestingIPs, interestingIP{Name: "GCP", IP: gcpIP})
|
||||
|
||||
// One for AWS because we host there and have code paths for this
|
||||
const awsIP = "35.180.1.1"
|
||||
if info, matches := cidrs.GetIP(netip.MustParseAddr(awsIP)); !matches || info.Cloud != cloudcidrs.AWS {
|
||||
t.Fatalf("Expected %q to be an AWS IP but is not detected as one with current data", awsIP)
|
||||
}
|
||||
interestingIPs = append(interestingIPs, interestingIP{Name: "AWS", IP: awsIP})
|
||||
|
||||
// we obviously won't see this in the wild, but we also know
|
||||
// it should not match GCP, AWS or any future providers
|
||||
const externalIP = "192.168.0.1"
|
||||
if _, matches := cidrs.GetIP(netip.MustParseAddr(externalIP)); matches {
|
||||
t.Fatalf("Expected %q to not match any provider IP range but it dies", externalIP)
|
||||
}
|
||||
interestingIPs = append(interestingIPs, interestingIP{Name: "External", IP: externalIP})
|
||||
|
||||
// generate testcases from test data, for every interesting IP pull each image
|
||||
testCases := []integrationTestCase{}
|
||||
for _, image := range wellKnownImages {
|
||||
for _, ip := range interestingIPs {
|
||||
testCases = append(testCases, integrationTestCase{
|
||||
Name: fmt.Sprintf("IP:%s (%q),Image:%q", ip.Name, ip.IP, image.Name),
|
||||
FakeIP: ip.IP,
|
||||
Image: image.Name,
|
||||
Digest: image.Digest,
|
||||
})
|
||||
}
|
||||
}
|
||||
return testCases
|
||||
}
|
||||
|
||||
// test pulling pause image
|
||||
// TODO: test pulling more things
|
||||
testPull("pause:3.1")
|
||||
|
||||
// we're done, cleanup
|
||||
if err := serverCmd.Process.Signal(os.Interrupt); err != nil {
|
||||
t.Fatalf("failed to signal archeio: %v", err)
|
||||
func pull(image string, options ...crane.Option) error {
|
||||
img, err := crane.Pull(image, options...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := <-serverErrChan; err != nil {
|
||||
t.Fatalf("archeio did not exit cleanly: %v", err)
|
||||
return validate.Image(img)
|
||||
}
|
||||
|
||||
type fakeIPTransport struct {
|
||||
fakeXForwardFor string
|
||||
h http.RoundTripper
|
||||
}
|
||||
|
||||
var _ http.RoundTripper = &fakeIPTransport{}
|
||||
|
||||
func (f *fakeIPTransport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
r.Header.Add("X-Forwarded-For", f.fakeXForwardFor)
|
||||
return f.h.RoundTrip(r)
|
||||
}
|
||||
|
||||
func newFakeIPTransport(fakeIP string) *fakeIPTransport {
|
||||
return &fakeIPTransport{
|
||||
fakeXForwardFor: fakeIP + ",0.0.0.0",
|
||||
h: http.DefaultTransport,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,13 @@
|
|||
# Geranos
|
||||
|
||||
γερανός (geranós) is Greek for "crane"
|
||||
|
||||
This binary is a tool based on [crane] which is used to copy image layers
|
||||
from registries to object storage for backing [archeio](./../archeio)
|
||||
|
||||
Currently it only supports Google Container Registry / Artifact Registry to S3.
|
||||
|
||||
Other object stores can be easily added, but container registry portability is blocked
|
||||
on https://github.com/opencontainers/distribution-spec/issues/222
|
||||
|
||||
[crane]: https://github.com/google/go-containerregistry/tree/main/cmd/crane
|
|
@ -0,0 +1,82 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/crane"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
func main() {
|
||||
Main()
|
||||
}
|
||||
|
||||
// Main is the application entrypoint, which injects globals to Run
|
||||
func Main() {
|
||||
klog.InitFlags(flag.CommandLine)
|
||||
flag.Parse()
|
||||
if err := Run(os.Args); err != nil {
|
||||
klog.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Run implements the actual application logic, accepting global inputs
|
||||
func Run(_ []string) error {
|
||||
// one of the backing registries for registry.k8s.io
|
||||
// TODO: make configurable later
|
||||
const sourceRegistry = "us-central1-docker.pkg.dev/k8s-artifacts-prod/images"
|
||||
|
||||
// TODO: make configurable later
|
||||
const s3Bucket = "prod-registry-k8s-io-us-east-2"
|
||||
|
||||
// 80*60s = 4800 RPM, below our current 5000 RPM per-user limit on the registry
|
||||
// Even with the host node making other registry API calls
|
||||
registryRateLimit := NewRateLimitRoundTripper(80)
|
||||
|
||||
repo, err := name.NewRepository(sourceRegistry)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s3Uploader, err := newS3Uploader(os.Getenv("REALLY_UPLOAD") == "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// copy layers from all images in the repo
|
||||
// TODO: print some progress logs at lower frequency instead of logging each image
|
||||
// We will punt this temporarily, as we're about to refactor how this works anyhow
|
||||
// to avoid fetching manifests for images we've already uploaded
|
||||
err = WalkImageLayersGCP(registryRateLimit, repo,
|
||||
func(ref name.Reference, layers []v1.Layer) error {
|
||||
klog.Infof("Processing image: %s", ref.String())
|
||||
return s3Uploader.UploadImage(s3Bucket, ref, layers, crane.WithTransport(registryRateLimit))
|
||||
},
|
||||
func(imageHash string) bool {
|
||||
s, _ := s3Uploader.ImageAlreadyUploaded(s3Bucket, imageHash)
|
||||
return s
|
||||
})
|
||||
if err == nil {
|
||||
klog.Info("Done!")
|
||||
}
|
||||
return err
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
// RateLimitRoundTripper wraps an http.RoundTripper with rate limiting
|
||||
type RateLimitRoundTripper struct {
|
||||
rateLimiter *rate.Limiter
|
||||
roundTripper http.RoundTripper
|
||||
}
|
||||
|
||||
var _ http.RoundTripper = &RateLimitRoundTripper{}
|
||||
|
||||
func NewRateLimitRoundTripper(limit rate.Limit) *RateLimitRoundTripper {
|
||||
return &RateLimitRoundTripper{
|
||||
rateLimiter: rate.NewLimiter(limit, 1),
|
||||
roundTripper: http.DefaultTransport,
|
||||
}
|
||||
}
|
||||
|
||||
func (rt *RateLimitRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
err := rt.rateLimiter.Wait(context.Background())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rt.roundTripper.RoundTrip(r)
|
||||
}
|
|
@ -0,0 +1,219 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/crane"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
"github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3/types"
|
||||
"github.com/aws/smithy-go"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// see cmd/archeio, this matches the layout of GCR's GCS bucket
|
||||
// containers/images/sha256:$layer_digest
|
||||
const blobKeyPrefix = "containers/images/"
|
||||
|
||||
// this is where geranos *internally* records manifests
|
||||
// these are not for user consumption
|
||||
const manifestKeyPrefix = "geranos/uploaded-images/"
|
||||
|
||||
type s3Uploader struct {
|
||||
svc *s3.Client
|
||||
uploader *manager.Uploader
|
||||
reuploadLayers bool
|
||||
dryRun bool
|
||||
}
|
||||
|
||||
func newS3Uploader(dryRun bool) (*s3Uploader, error) {
|
||||
cfg, err := config.LoadDefaultConfig(context.TODO())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dryRun {
|
||||
// Use anonymous credentials for dry run
|
||||
cfg.Credentials = aws.AnonymousCredentials{}
|
||||
}
|
||||
// Create S3 client
|
||||
client := s3.NewFromConfig(cfg)
|
||||
r := &s3Uploader{
|
||||
dryRun: dryRun,
|
||||
svc: client,
|
||||
}
|
||||
// Create uploader
|
||||
r.uploader = manager.NewUploader(client)
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (s *s3Uploader) UploadImage(bucket string, ref name.Reference, layers []v1.Layer, opts ...crane.Option) error {
|
||||
for _, layer := range layers {
|
||||
if err := s.copyLayerToS3(bucket, layer); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
m, err := manifestBlobFromRef(ref, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.copyManifestToS3(bucket, m)
|
||||
}
|
||||
|
||||
func (s *s3Uploader) ImageAlreadyUploaded(bucket string, imageDigest string) (bool, error) {
|
||||
return s.blobExists(bucket, keyForImageRecord(imageDigest))
|
||||
}
|
||||
|
||||
// imageBlob requires the subset of v1.Layer methods
|
||||
// required for uploading a blob
|
||||
type imageBlob interface {
|
||||
Digest() (v1.Hash, error)
|
||||
Compressed() (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
type manifestBlob struct {
|
||||
raw []byte
|
||||
digest v1.Hash
|
||||
}
|
||||
|
||||
func manifestBlobFromRef(ref name.Reference, opts ...crane.Option) (*manifestBlob, error) {
|
||||
p := strings.Split(ref.Name(), "@")
|
||||
if len(p) != 2 {
|
||||
return nil, errors.New("invalid reference")
|
||||
}
|
||||
digest, err := v1.NewHash(p[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
manifest, err := crane.Manifest(ref.Name(), opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &manifestBlob{
|
||||
raw: manifest,
|
||||
digest: digest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *manifestBlob) Digest() (v1.Hash, error) {
|
||||
return m.digest, nil
|
||||
}
|
||||
|
||||
func (m *manifestBlob) Compressed() (io.ReadCloser, error) {
|
||||
return io.NopCloser(bytes.NewReader(m.raw)), nil
|
||||
}
|
||||
|
||||
func (s *s3Uploader) copyManifestToS3(bucket string, layer imageBlob) error {
|
||||
digest, err := layer.Digest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := keyForImageRecord(digest.String())
|
||||
return s.copyToS3(bucket, key, layer)
|
||||
}
|
||||
|
||||
func (s *s3Uploader) copyLayerToS3(bucket string, layer imageBlob) error {
|
||||
digest, err := layer.Digest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := keyForLayer(digest.String())
|
||||
return s.copyToS3(bucket, key, layer)
|
||||
}
|
||||
|
||||
func (s *s3Uploader) copyToS3(bucket, key string, layer imageBlob) error {
|
||||
digest, err := layer.Digest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !s.reuploadLayers {
|
||||
exists, err := s.blobExists(bucket, key)
|
||||
if err != nil {
|
||||
klog.Errorf("failed to check if blob exists: %v", err)
|
||||
} else if exists {
|
||||
klog.V(4).Infof("Layer already exists: %s", key)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
r, err := layer.Compressed()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
uploadInput := &s3.PutObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(key),
|
||||
Body: r,
|
||||
}
|
||||
// TODO: what if it isn't sha256?
|
||||
if digest.Algorithm == "SHA256" {
|
||||
b, err := hex.DecodeString(digest.Hex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uploadInput.ChecksumSHA256 = aws.String(base64.StdEncoding.EncodeToString(b))
|
||||
}
|
||||
// skip actually uploading if this is a dry-run, otherwise finally upload
|
||||
klog.Infof("Uploading: %s", key)
|
||||
if s.dryRun {
|
||||
return nil
|
||||
}
|
||||
_, err = s.uploader.Upload(context.TODO(), uploadInput)
|
||||
return err
|
||||
}
|
||||
|
||||
func keyForLayer(digest string) string {
|
||||
return blobKeyPrefix + digest
|
||||
}
|
||||
|
||||
func keyForImageRecord(imageDigest string) string {
|
||||
return manifestKeyPrefix + imageDigest
|
||||
}
|
||||
|
||||
func (s *s3Uploader) blobExists(bucket, key string) (bool, error) {
|
||||
_, err := s.svc.HeadObject(context.TODO(), &s3.HeadObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if err != nil {
|
||||
var notFound *types.NotFound
|
||||
var apiErr smithy.APIError
|
||||
if errors.As(err, ¬Found) {
|
||||
return false, nil
|
||||
} else if errors.As(err, &apiErr) {
|
||||
if apiErr.ErrorCode() == "NotFound" {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
|
@ -0,0 +1,57 @@
|
|||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
)
|
||||
|
||||
// layersForV1 gets the layers for a v1 schema image
|
||||
func layersForV1(transport http.RoundTripper, ref name.Reference, desc *remote.Descriptor) ([]v1.Layer, error) {
|
||||
m := &schema1{}
|
||||
if err := json.NewDecoder(bytes.NewReader(desc.Manifest)).Decode(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layers := make([]v1.Layer, len(m.FSLayers))
|
||||
for i, fsLayer := range m.FSLayers {
|
||||
layerDigest, err := name.NewDigest(fmt.Sprintf("%s@%s", ref.Context(), fsLayer.BlobSum))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layer, err := remote.Layer(layerDigest, remote.WithTransport(transport))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
layers[i] = layer
|
||||
}
|
||||
return layers, nil
|
||||
}
|
||||
|
||||
type fslayer struct {
|
||||
BlobSum string `json:"blobSum"`
|
||||
}
|
||||
|
||||
type schema1 struct {
|
||||
FSLayers []fslayer `json:"fsLayers"`
|
||||
}
|
|
@ -0,0 +1,133 @@
|
|||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/google"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
)
|
||||
|
||||
// WalkImageLAyersFunc is used to visit an image
|
||||
type WalkImageLayersFunc func(ref name.Reference, layers []v1.Layer) error
|
||||
|
||||
// Unfortunately this is only doable on GCP currently.
|
||||
//
|
||||
// TODO: To support other registries in the meantime, we could require a list of
|
||||
// image names as an input and plumb that through, then list tags and get something
|
||||
// close to this. The _catalog endpoint + tag listing could also work in some cases.
|
||||
//
|
||||
// However, even then, this is more complete because it lists all manifests, not just tags.
|
||||
// It's also simpler and more efficient.
|
||||
//
|
||||
// See: https://github.com/opencontainers/distribution-spec/issues/222
|
||||
func WalkImageLayersGCP(transport http.RoundTripper, repo name.Repository, walkImageLayers WalkImageLayersFunc, skipImage func(string) bool) error {
|
||||
g := new(errgroup.Group)
|
||||
// TODO: This is really just an approximation to avoid exceeding typical socket limits
|
||||
// See also quota limits:
|
||||
// https://cloud.google.com/artifact-registry/quotas
|
||||
g.SetLimit(1000)
|
||||
g.Go(func() error {
|
||||
return google.Walk(repo, func(r name.Repository, tags *google.Tags, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for digest, metadata := range tags.Manifests {
|
||||
digest := digest
|
||||
// google.Walk already walks the child manifests
|
||||
if metadata.MediaType == string(types.DockerManifestList) || metadata.MediaType == string(types.OCIImageIndex) {
|
||||
continue
|
||||
}
|
||||
ref, err := name.ParseReference(fmt.Sprintf("%s@%s", r, digest))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
g.Go(func() error {
|
||||
if skipImage(digest) {
|
||||
klog.V(4).Infof("Skipping already-uploaded: %s", ref)
|
||||
return nil
|
||||
}
|
||||
return walkManifestLayers(transport, ref, walkImageLayers)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}, google.WithTransport(transport))
|
||||
})
|
||||
return g.Wait()
|
||||
}
|
||||
|
||||
func walkManifestLayers(transport http.RoundTripper, ref name.Reference, walkImageLayers WalkImageLayersFunc) error {
|
||||
desc, err := remote.Get(ref, remote.WithTransport(transport))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// google.Walk already resolves these to individual manifests
|
||||
if desc.MediaType.IsIndex() {
|
||||
klog.Warningf("Skipping Index: %s", ref.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
// Specially handle schema 1
|
||||
// https://github.com/google/go-containerregistry/issues/377
|
||||
if desc.MediaType == types.DockerManifestSchema1 || desc.MediaType == types.DockerManifestSchema1Signed {
|
||||
layers, err := layersForV1(transport, ref, desc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return walkImageLayers(ref, layers)
|
||||
}
|
||||
|
||||
// we don't expect anything other than index, or image ...
|
||||
if !desc.MediaType.IsImage() {
|
||||
klog.Warningf("Un-handled type: %s for %s", desc.MediaType, ref.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle normal images
|
||||
image, err := desc.Image()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
layers, err := imageToLayers(image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return walkImageLayers(ref, layers)
|
||||
}
|
||||
|
||||
func imageToLayers(image v1.Image) ([]v1.Layer, error) {
|
||||
layers, err := image.Layers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configLayer, err := partial.ConfigLayer(image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return append(layers, configLayer), nil
|
||||
}
|
|
@ -0,0 +1,131 @@
|
|||
# Debugging issues with registry.k8s.io
|
||||
|
||||
registry.k8s.io is a Kubernetes container image registry that generally behaves like an [OCI](https://github.com/opencontainers/distribution-spec)-compliant registry. Since registry.k8s.io is a proxy that routes traffic to the closest available source, you will need connectivity to several domains to download images. For the best performance, consider creating your own registry mirror.
|
||||
|
||||
When you are debugging issues, make sure you run these commands on the node that is attempting to run images. Things may be working fine on your laptop, but not on the Kubernetes node.
|
||||
|
||||
<!--TODO: identify what this looks like on s3 etc.-->
|
||||
> **Note**
|
||||
>
|
||||
> If you see a [403 error][http-403] like `Your client does not have permission to get URL`,
|
||||
> this error is not specific to the Kubernetes project / registry.k8s.io and
|
||||
> you need to work with your cloud vendor / service provider to get unblocked
|
||||
> by GCP.
|
||||
>
|
||||
> Please file an issue with your provider, the Kubernetes project does not
|
||||
> control this and it is not specific to us.
|
||||
|
||||
## Verify DNS resolution
|
||||
|
||||
You may use the `dig` or `nslookup` command to validate DNS resolution of the registry.k8s.io domain or any domain it references. For example, running `dig registry.k8s.io` should return an answer that contains:
|
||||
|
||||
```log
|
||||
;; ANSWER SECTION:
|
||||
registry.k8s.io. 3600 IN A 34.107.244.51
|
||||
```
|
||||
|
||||
If you cannot successfully resolve a domain, check your DNS configuration, often configured in your resolv.conf file.
|
||||
|
||||
## Verify HTTP connectivity
|
||||
|
||||
You may use `curl` or `wget` to validate HTTP connectivity. For example, running `curl -v https://registry.k8s.io/v2/` should return an answer that contains:
|
||||
|
||||
```log
|
||||
< HTTP/2 200
|
||||
< docker-distribution-api-version: registry/2.0
|
||||
< x-cloud-trace-context: ca200d1c5a504b919e999b0cf80e3b71
|
||||
< date: Fri, 17 Mar 2023 09:13:18 GMT
|
||||
< content-type: text/html
|
||||
< server: Google Frontend
|
||||
< content-length: 0
|
||||
< via: 1.1 google
|
||||
< alt-svc: h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
|
||||
<
|
||||
```
|
||||
|
||||
If you do not have HTTP connectivity, check your firewall or HTTP proxy settings.
|
||||
|
||||
## Verify image repositories and tags
|
||||
|
||||
You may use `crane` or `oras` to validate the available tags in the registry. You may also use [https://explore.ggcr.dev/?repo=registry.k8s.io](https://explore.ggcr.dev/?repo=registry.k8s.io) to verify the existence of an image repository and tag, but these commands will verify your node can access them. For example, the `crane ls registry.k8s.io/pause` or `oras repo tags registry.k8s.io/pause` will return:
|
||||
|
||||
```log
|
||||
0.8.0
|
||||
1.0
|
||||
2.0
|
||||
3.0
|
||||
3.1
|
||||
3.2
|
||||
3.3
|
||||
3.4.1
|
||||
3.5
|
||||
3.6
|
||||
3.7
|
||||
3.8
|
||||
3.9
|
||||
go
|
||||
latest
|
||||
sha256-7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097.sig
|
||||
sha256-9001185023633d17a2f98ff69b6ff2615b8ea02a825adffa40422f51dfdcde9d.sig
|
||||
test
|
||||
test2
|
||||
```
|
||||
|
||||
## Verify image pulls
|
||||
|
||||
Since registry.k8s.io proxies image components to the nearest source, you should validate the ability to pull images. The ability to pull images should be tested on the machine running the image which will often be a node in your Kubernetes cluster. The location where you pull image components from depends on the source IP address of the node.
|
||||
|
||||
You may use commands such as `crane`, `oras`, `crictl` or `docker` to verify the ability to pull an image. If you run the command `crane pull --verbose registry.k8s.io/pause:3.9 pause.tgz` for example, you will see it query registry.k8s.io first and then at least two other domains to download the image. If things are working correctly and you ran `crane pull --verbose registry.k8s.io/pause:3.9 pause.tgz 2>&1 | grep 'GET https'` (from Colorado):
|
||||
|
||||
```log
|
||||
2023/03/17 04:45:48 --> GET https://registry.k8s.io/v2/
|
||||
2023/03/17 04:45:48 --> GET https://registry.k8s.io/v2/pause/manifests/3.9
|
||||
2023/03/17 04:45:48 --> GET https://us-west1-docker.pkg.dev/v2/k8s-artifacts-prod/images/pause/manifests/3.9
|
||||
2023/03/17 04:45:48 --> GET https://registry.k8s.io/v2/pause/manifests/sha256:8d4106c88ec0bd28001e34c975d65175d994072d65341f62a8ab0754b0fafe10
|
||||
2023/03/17 04:45:48 --> GET https://us-west1-docker.pkg.dev/v2/k8s-artifacts-prod/images/pause/manifests/sha256:8d4106c88ec0bd28001e34c975d65175d994072d65341f62a8ab0754b0fafe10
|
||||
2023/03/17 04:45:49 --> GET https://registry.k8s.io/v2/pause/blobs/sha256:e6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c
|
||||
2023/03/17 04:45:49 --> GET https://prod-registry-k8s-io-us-west-2.s3.dualstack.us-west-2.amazonaws.com/containers/images/sha256%3Ae6f1816883972d4be47bd48879a08919b96afcd344132622e4d444987919323c
|
||||
2023/03/17 04:45:49 --> GET https://registry.k8s.io/v2/pause/blobs/sha256:61fec91190a0bab34406027bbec43d562218df6e80d22d4735029756f23c7007 [body redacted: omitting binary blobs from logs]
|
||||
2023/03/17 04:45:49 --> GET https://prod-registry-k8s-io-us-west-2.s3.dualstack.us-west-2.amazonaws.com/containers/images/sha256%3A61fec91190a0bab34406027bbec43d562218df6e80d22d4735029756f23c7007 [body redacted: omitting binary blobs from logs]
|
||||
```
|
||||
|
||||
From my location, the pull command accesses registry.k8s.io, us-west1-docker.pkg.dev and prod-registry-k8s-io-us-west-2.s3.dualstack.us-west-2.amazonaws.com. You will need to have DNS and HTTP access to these domains on your node to pull images.
|
||||
|
||||
It's also possible to run these commands on your node if you don't have SSH access by using `kubectl run`:
|
||||
|
||||
```sh
|
||||
kubectl run --rm -it crane --restart=Never --image=gcr.io/go-containerregistry/crane --overrides='{"spec": {"hostNetwork":true}}' -- pull --verbose registry.k8s.io/pause:3.9 /dev/null
|
||||
```
|
||||
|
||||
## Example Logs
|
||||
|
||||
If there are problems accessing registry.k8s.io, you are likely to see failures starting pods with an `ErrImagePull` status. The `kubectl describe pod` command may give you more details:
|
||||
|
||||
```log
|
||||
Warning Failed 2s (x2 over 16s) kubelet Failed to pull image "registry.k8s.io/pause:3.10": rpc error: code = NotFound desc = failed to pull and unpack image "registry.k8s.io/pause:3.10": failed to resolve reference "registry.k8s.io/pause:3.10": registry.k8s.io/pause:3.10: not found
|
||||
Warning Failed 2s (x2 over 16s) kubelet Error: ErrImagePull
|
||||
```
|
||||
|
||||
If you were to check your kubelet log for example, you might see (with something like `journalctl -xeu kubelet`):
|
||||
|
||||
```log
|
||||
Mar 17 11:33:05 kind-control-plane kubelet[804]: E0317 11:33:05.192844 804 kuberuntime_manager.go:862] container &Container{Name:my-puase-container,Image:registry.k8s.io/pause:3.10,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4bv66,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,} start failed in pod my-pause_default(4b642716-1dba-44d4-833b-1eccd6b6ca7a): ErrImagePull: rpc error: code = NotFound desc = failed to pull and unpack image "registry.k8s.io/pause:3.10": failed to resolve reference "registry.k8s.io/pause:3.10": registry.k8s.io/pause:3.10: not found
|
||||
```
|
||||
|
||||
You may see similar errors in the containerd log (with something like `journalctl -xeu containerd`):
|
||||
|
||||
```log
|
||||
Mar 17 11:33:04 kind-control-plane containerd[224]: time="2023-03-17T11:33:04.658642300Z" level=info msg="PullImage \"registry.k8s.io/pause:3.10\""
|
||||
Mar 17 11:33:05 kind-control-plane containerd[224]: time="2023-03-17T11:33:05.189169600Z" level=info msg="trying next host - response was http.StatusNotFound" host=registry.k8s.io
|
||||
Mar 17 11:33:05 kind-control-plane containerd[224]: time="2023-03-17T11:33:05.191777300Z" level=error msg="PullImage \"registry.k8s.io/pause:3.10\" failed" error="rpc error: code = NotFound desc = failed to pull and unpack image \"registry.k8s.io/pause:3.10\": failed to resolve reference \"registry.k8s.io/pause:3.10\": registry.k8s.io/pause:3.10: not found"
|
||||
```
|
||||
|
||||
## Example issues
|
||||
|
||||
- https://github.com/kubernetes/registry.k8s.io/issues/137#issuecomment-1376574499
|
||||
- https://github.com/kubernetes/registry.k8s.io/issues/174#issuecomment-1467646821
|
||||
- https://github.com/kubernetes-sigs/kind/issues/1895#issuecomment-1468991168
|
||||
- https://github.com/kubernetes/registry.k8s.io/issues/174#issuecomment-1467646821
|
||||
- https://github.com/kubernetes/registry.k8s.io/issues/154#issuecomment-1435028502
|
||||
|
||||
[http-403]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/403
|
|
@ -0,0 +1,147 @@
|
|||
# Mirroring
|
||||
|
||||
This guide covers mirroring images you use on registry.k8s.io
|
||||
to a host under your own control and using those images.
|
||||
|
||||
The specific sub-steps will depend on the tools you use, but in general you will need to:
|
||||
|
||||
1. Identify the images you need: [Identifying Images To Mirror][identifying-images]
|
||||
2. Mirror those images to your own registry: [Mirroring Images][mirroring-images]
|
||||
3. Configure your tools to use the mirrored images: [Using Mirrored Images][using-mirrored-images]
|
||||
|
||||
We have guides here for each of these steps.
|
||||
|
||||
## Identifying Images To Mirror
|
||||
<!--
|
||||
NOTE: Wherever possible do not duplicate external content.
|
||||
|
||||
Instead, link to existing official guides and merely provide a lightweight pointer here.
|
||||
|
||||
See: https://kubernetes.io/docs/contribute/style/content-guide/#dual-sourced-content
|
||||
-->
|
||||
|
||||
<!--TODO: Generically identifying registry.k8s.io images in manifests / charts / addons.-->
|
||||
|
||||
If you have a running cluster then our [community-images] krew plugin can
|
||||
help you identify Kubernetes Project image references to mirror like this:
|
||||
|
||||
```console
|
||||
kubectl community-images --mirror
|
||||
```
|
||||
|
||||
**NOTE**: This will only find images specified in your currently running pods,
|
||||
and not for example the "pause" image used to implement pods in containerd / cri-o / dockershim.
|
||||
|
||||
For specific tools we have these guides:
|
||||
|
||||
- For containerd see: [containerd.md](./containerd.md)
|
||||
- For cri-o see: [cri-o.md](./cri-o.md)
|
||||
- For cri-dockerd see: [cri-dockerd.md](./cri-dockerd.md)
|
||||
- For kubeadm see: [kubeadm.md](./kubeadm.md)
|
||||
- For kOps see: [kOps.md](./kOps.md)
|
||||
- For Cluster API see: [cluster-api.md](./cluster-api.md)
|
||||
|
||||
|
||||
## Mirroring Images
|
||||
<!--
|
||||
NOTE: Wherever possible do not duplicate external content.
|
||||
|
||||
Instead, link to existing official guides and merely provide a lightweight pointer here.
|
||||
|
||||
See: https://kubernetes.io/docs/contribute/style/content-guide/#dual-sourced-content
|
||||
-->
|
||||
|
||||
Here are some options for copying images you wish to mirror to your own registry.
|
||||
|
||||
<!-- FOSS Mirroring Tools Go First Below Here! -->
|
||||
<!-- Commercial / Non-FOSS Mirroring Options Go Further Below -->
|
||||
|
||||
### Mirroring With `crane` Or `gcrane`
|
||||
|
||||
`crane` is an open-source tool for interacting with remote images and registries.
|
||||
`gcrane` is a superset of crane with GCP specific additional features.
|
||||
|
||||
For `crane` use `crane copy registry.k8s.io/pause:3.9 my-registry.com/pause:3.9`.
|
||||
Docs: https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_copy.md
|
||||
|
||||
For `gcrane` see: https://cloud.google.com/container-registry/docs/migrate-external-containers
|
||||
|
||||
To mirror all images surfaced by [community-images], you can use this shell snippet:
|
||||
```shell
|
||||
# set MIRROR to your own host
|
||||
export MIRROR=my-registry.com
|
||||
# copy all Kubernetes project images in your current cluster to MIRROR
|
||||
kubectl community-images --mirror --plain |\
|
||||
xargs -i bash -c 'set -x; crane copy "$1" "${1/registry.k8s.io/'"${MIRROR}"'}"' - '{}'
|
||||
```
|
||||
|
||||
Once you're done, see [Using Mirrored Images][using-mirrored-images].
|
||||
|
||||
### Mirroring With `oras`
|
||||
|
||||
`oras` is an open-source tool for managing images and other artifacts in OCI registries.
|
||||
|
||||
For `oras` use `oras copy registry.k8s.io/pause:3.9 my-registry.com/pause:3.9`.
|
||||
Docs: https://oras.land/cli_reference/4_oras_copy/
|
||||
|
||||
To mirror all images surfaced by [community-images], you can use this shell snippet:
|
||||
```shell
|
||||
# set MIRROR to your own host
|
||||
export MIRROR=my-registry.com
|
||||
# copy all Kubernetes project images in your current cluster to MIRROR
|
||||
kubectl community-images --mirror --plain |\
|
||||
xargs -i bash -c 'set -x; oras copy "$1" "${1/registry.k8s.io/'"${MIRROR}"'}"' - '{}'
|
||||
```
|
||||
|
||||
Once you're done, see [Using Mirrored Images][using-mirrored-images].
|
||||
|
||||
### Mirroring With Harbor
|
||||
|
||||
You can use Harbor to set up a proxy cache for Kubernetes images.
|
||||
|
||||
From the Harbor web interface, go to "Registries" and click "New Endpoint".
|
||||
Create an endpoint `registry.k8s.io` with the endpoint URL https://registry.k8s.io.
|
||||
Go to "Projects" and click "New Project".
|
||||
Create a project named something like 'k8s', click "Proxy Cache" and select your `registry.k8s.io` endpoint.
|
||||
Docs: https://goharbor.io/docs/2.1.0/administration/configure-proxy-cache/
|
||||
|
||||
Once you're done, see [Using Mirrored Images][using-mirrored-images].
|
||||
|
||||
<!-- NON-FOSS Mirroring Tools Go Below Here! -->
|
||||
|
||||
### Mirroring With ECR
|
||||
|
||||
AWS ECR wrote a guide for configuring a `registry.k8s.io` pull-through cache here:
|
||||
|
||||
https://aws.amazon.com/blogs/containers/announcing-pull-through-cache-for-registry-k8s-io-in-amazon-elastic-container-registry/
|
||||
|
||||
After following this guide, you may additionally want to see our [Using Mirrored Images][using-mirrored-images] reference below.
|
||||
|
||||
|
||||
## Using Mirrored Images
|
||||
<!--
|
||||
NOTE: Wherever possible do not duplicate external content.
|
||||
|
||||
Instead, link to existing official guides and merely provide a lightweight pointer here.
|
||||
|
||||
See: https://kubernetes.io/docs/contribute/style/content-guide/#dual-sourced-content
|
||||
-->
|
||||
|
||||
In many cases it is sufficient to update the `image` fields in your
|
||||
Kubernetes manifests (deployments, pods, replicasets, etc) to reference
|
||||
your mirrored images instead.
|
||||
|
||||
For specific tools we have these guides:
|
||||
|
||||
- For containerd see: [containerd.md](./containerd.md)
|
||||
- For cri-o see: [cri-o.md](./cri-o.md)
|
||||
- For cri-dockerd see: [cri-dockerd.md](./cri-dockerd.md)
|
||||
- For kubeadm see: [kubeadm.md](./kubeadm.md)
|
||||
- For kOps see: [kOps.md](./kOps.md)
|
||||
- For Cluster API see: [cluster-api.md](./cluster-api.md)
|
||||
|
||||
[identifying-images]: #Identifying-Images-To-Mirror
|
||||
[mirroring-images]: #Mirroring-Images
|
||||
[using-mirrored-images]: #Using-Mirrored-Images
|
||||
[community-images]: https://github.com/kubernetes-sigs/community-images
|
||||
|
|
@ -0,0 +1,21 @@
|
|||
# Mirroring with Cluster API
|
||||
|
||||
## Identifying Images To Mirror
|
||||
|
||||
You can use [`clusterctl`](https://cluster-api.sigs.k8s.io/clusterctl/overview.html) to list the images used by Cluster API and the Cluster API provider in use:
|
||||
```
|
||||
clusterctl init list-images --infrastructure <infrastructure-provider>
|
||||
```
|
||||
For more details see:
|
||||
https://cluster-api.sigs.k8s.io/clusterctl/commands/additional-commands.html#clusterctl-init-list-images
|
||||
|
||||
## Mirroring Images
|
||||
|
||||
See our general list of [mirroring options](./README.md#Mirroring-Images)
|
||||
|
||||
## Using Mirrored Images
|
||||
|
||||
To use Cluster API with mirrored images, you can configure clusterctl to use image overrides.
|
||||
|
||||
For more details see:
|
||||
https://cluster-api.sigs.k8s.io/clusterctl/configuration.html#image-overrides
|
|
@ -0,0 +1,35 @@
|
|||
# Mirroring With Containerd
|
||||
|
||||
# Identifying Images to Mirror
|
||||
|
||||
If you're using containerd as a Kubernetes [CRI] implementation, containerd
|
||||
uses the ["pause" image][pause] from Kubernetes in every pod.
|
||||
You may want to mirror this critical image to your own host.
|
||||
|
||||
The version used by default can be found by `containerd config default | grep sandbox_image`.
|
||||
|
||||
Containerd config is generally at `/etc/containerd/config.toml` and may contain
|
||||
a customized "sandbox image" rather than the default, for more details see:
|
||||
https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
|
||||
|
||||
## Mirroring Images
|
||||
|
||||
See our general list of [mirroring options](./README.md#Mirroring-Images)
|
||||
|
||||
# Using Mirrored Images
|
||||
|
||||
|
||||
You may want to configure `sandbox_image` under `[plugins."io.containerd.grpc.v1.cri"]`
|
||||
to point to your own mirrored image for "pause".
|
||||
|
||||
`containerd` also supports configuring mirrors for registry hosts.
|
||||
|
||||
If you're using containerd with Kubernetes, see:
|
||||
https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
|
||||
|
||||
If you're using containerd directly, see:
|
||||
https://github.com/containerd/containerd/blob/main/docs/hosts.md
|
||||
|
||||
[containerd]: https://containerd.io/
|
||||
[pause]: https://www.ianlewis.org/en/almighty-pause-container
|
||||
[CRI]: https://kubernetes.io/docs/concepts/architecture/cri/
|
|
@ -0,0 +1,28 @@
|
|||
# Mirroring With cri-dockerd
|
||||
|
||||
# Identifying Images to Mirror
|
||||
|
||||
If you're using [cri-dockerd] as a Kubernetes [CRI] implementation, cri-dockerd
|
||||
uses the ["pause" image][pause] from Kubernetes to implement pods.
|
||||
You may want to mirror this critical image to your own host.
|
||||
|
||||
To find the default pause image you can run:
|
||||
```
|
||||
cri-dockerd --help | grep pod-infra-container-image
|
||||
```
|
||||
|
||||
## Mirroring Images
|
||||
|
||||
See our general list of [mirroring options](./README.md#Mirroring-Images)
|
||||
|
||||
# Using Mirrored Images
|
||||
|
||||
For pause you can set the `--pod-infra-container-image` flag.
|
||||
https://github.com/Mirantis/cri-dockerd/blob/47abdab2c31ffc8b54c826063760662590ef3801/config/options.go#L107
|
||||
|
||||
cri-dockerd does not appear to support configuring mirrors more generally.
|
||||
|
||||
|
||||
[cri-dockerd]: https://github.com/Mirantis/cri-dockerd/
|
||||
[pause]: https://www.ianlewis.org/en/almighty-pause-container
|
||||
[CRI]: https://kubernetes.io/docs/concepts/architecture/cri/
|
|
@ -0,0 +1,31 @@
|
|||
# Mirroring With cri-o
|
||||
|
||||
# Identifying Images to Mirror
|
||||
|
||||
If you're using [cri-o] as a Kubernetes [CRI] implementation, cri-o
|
||||
uses the ["pause" image][pause] from Kubernetes to implement pods.
|
||||
You may want to mirror this critical image to your own host.
|
||||
|
||||
The configured pause image can be found by running:
|
||||
```shell
|
||||
cri-o config | grep pause_image
|
||||
```
|
||||
|
||||
## Mirroring Images
|
||||
|
||||
See our general list of [mirroring options](./README.md#Mirroring-Images)
|
||||
|
||||
# Using Mirrored Images
|
||||
|
||||
For pause see `pause_image` in the `cri.image` config docs:
|
||||
https://github.com/cri-o/cri-o/blob/main/docs/crio.conf.5.md#crioimage-table
|
||||
|
||||
cri-o also supports configuring mirrors for registry hosts, which is documented at:
|
||||
https://github.com/containers/image/blob/main/docs/containers-registries.conf.5.md
|
||||
|
||||
You can use containers-registries.conf to configure a mirror for registry.k8s.io
|
||||
|
||||
|
||||
[cri-o]: https://cri-o.io/
|
||||
[pause]: https://www.ianlewis.org/en/almighty-pause-container
|
||||
[CRI]: https://kubernetes.io/docs/concepts/architecture/cri/
|
|
@ -0,0 +1,27 @@
|
|||
# Mirroring with kOps
|
||||
|
||||
## Identifying Images To Mirror
|
||||
|
||||
`kops get assets` can list images and files needed by kOps.
|
||||
|
||||
Docs: https://kops.sigs.k8s.io/cli/kops_get_assets/
|
||||
|
||||
## Mirroring Images
|
||||
|
||||
`kops get assets --copy` can be used to mirror.
|
||||
|
||||
See:
|
||||
- https://kops.sigs.k8s.io/cli/kops_get_assets/
|
||||
- https://kops.sigs.k8s.io/operations/asset-repository/
|
||||
|
||||
See also our general list of [mirroring options](./README.md#Mirroring-Images)
|
||||
|
||||
## Using Mirrored Images
|
||||
|
||||
kOps has documentation for using local assets to create a cluster at:
|
||||
https://kops.sigs.k8s.io/operations/asset-repository/
|
||||
|
||||
You can also configure containerd to use registry mirrors in the kOps cluster spec.
|
||||
You'll need to add an entry for `"registry.k8s.io"` with your mirror.
|
||||
|
||||
Docs: https://kops.sigs.k8s.io/cluster_spec/#registry-mirrors
|
|
@ -0,0 +1,20 @@
|
|||
# Mirroring with Kubeadm
|
||||
|
||||
## Identifying Images To Mirror
|
||||
|
||||
You can use `kubeadm config images list` to get a list of images kubeadm requires.
|
||||
|
||||
For more see:
|
||||
https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-config/#cmd-config-images-list
|
||||
|
||||
## Mirroring Images
|
||||
|
||||
See our general list of [mirroring options](./README.md#Mirroring-Images)
|
||||
|
||||
## Using Mirrored Images
|
||||
|
||||
To use kubeadm with mirrored images, you can pass the `--image-repository` flag
|
||||
to [`kubeadm init`][kubeadm init] or the `imageRepository` field of [kubeadm config].
|
||||
|
||||
[kubeadm init]: https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/
|
||||
[kubeadm config]: https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/#config-file
|
47
go.mod
47
go.mod
|
@ -1,7 +1,48 @@
|
|||
module k8s.io/registry.k8s.io
|
||||
|
||||
go 1.19
|
||||
go 1.22
|
||||
|
||||
require k8s.io/klog/v2 v2.80.1
|
||||
require (
|
||||
github.com/aws/aws-sdk-go-v2 v1.30.3
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.27
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.9
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2
|
||||
github.com/aws/smithy-go v1.20.3
|
||||
github.com/google/go-containerregistry v0.20.1
|
||||
golang.org/x/sync v0.7.0
|
||||
golang.org/x/time v0.5.0
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
)
|
||||
|
||||
require github.com/go-logr/logr v1.2.0 // indirect
|
||||
require (
|
||||
cloud.google.com/go/compute/metadata v0.5.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
|
||||
github.com/docker/cli v27.0.3+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/vbatts/tar-split v0.11.5 // indirect
|
||||
golang.org/x/oauth2 v0.21.0 // indirect
|
||||
golang.org/x/sys v0.22.0 // indirect
|
||||
)
|
||||
|
|
97
go.sum
97
go.sum
|
@ -1,4 +1,93 @@
|
|||
github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
|
||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
|
||||
cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.9 h1:TC2vjvaAv1VNl9A0rm+SeuBjrzXnrlwk6Yop+gKRi38=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.9/go.mod h1:WPv2FRnkIOoDv/8j2gSUsI4qDc7392w5anFB/I89GZ8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALwfMWpd64tONS/NE=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ=
|
||||
github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
|
||||
github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/cli v27.0.3+incompatible h1:usGs0/BoBW8MWxGeEtqPMkzOY56jZ6kYlSN5BLDioCQ=
|
||||
github.com/docker/cli v27.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-containerregistry v0.20.1 h1:eTgx9QNYugV4DN5mz4U8hiAGTi1ybXn0TPi4Smd8du0=
|
||||
github.com/google/go-containerregistry v0.20.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
|
||||
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
|
||||
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
|
||||
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
|
||||
golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
|
|
|
@ -19,43 +19,36 @@ set -o errexit -o nounset -o pipefail
|
|||
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
# if we're in cloudbuild then we might want to change the project to point
|
||||
# at where we're deploying instead of deploying from
|
||||
if [[ -n "${CLOUDBUILD_SET_PROJECT:-}" ]]; then
|
||||
gcloud config set project "${CLOUDBUILD_SET_PROJECT:?}"
|
||||
fi
|
||||
|
||||
# make sure we have a k8s.io clone for the prod terraform
|
||||
k8sio_dir="$(cd "${REPO_ROOT}"/../k8s.io && pwd -P)"
|
||||
if [[ ! -d "${k8sio_dir}" ]]; then
|
||||
>&2 echo "Deploying requires a github.com/kubernetes/k8s.io clone at ./../k8s.io"
|
||||
>&2 echo "FAIL"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# install crane and get current image digest
|
||||
TAG="${TAG:-"$(date +v%Y%m%d)-$(git describe --always --dirty)"}"
|
||||
SERVICE_BASENAME="${SERVICE_BASENAME:-k8s-infra-oci-proxy}"
|
||||
IMAGE_REPO="${IMAGE_REPO:-gcr.io/k8s-staging-infra-tools/archeio}"
|
||||
PROJECT="${PROJECT:-k8s-infra-oci-proxy}"
|
||||
# TODO: this can't actually be overridden currently
|
||||
# the terraform always uses the default here
|
||||
IMAGE_REPO="${IMAGE_REPO:-us-central1-docker.pkg.dev/k8s-staging-images/infra-tools/archeio}"
|
||||
GOBIN="${REPO_ROOT}/bin" go install github.com/google/go-containerregistry/cmd/crane@latest
|
||||
IMAGE_DIGEST="${IMAGE_DIGEST:-$(bin/crane digest "${IMAGE_REPO}:${TAG}")}"
|
||||
export IMAGE_DIGEST
|
||||
|
||||
REGIONS=(
|
||||
asia-east1
|
||||
asia-northeast1
|
||||
asia-northeast2
|
||||
asia-south1
|
||||
australia-southeast1
|
||||
europe-north1
|
||||
europe-southwest1
|
||||
europe-west1
|
||||
europe-west2
|
||||
europe-west4
|
||||
europe-west8
|
||||
europe-west9
|
||||
southamerica-west1
|
||||
us-central1
|
||||
us-east1
|
||||
us-east4
|
||||
us-east5
|
||||
us-south1
|
||||
us-west1
|
||||
us-west2
|
||||
)
|
||||
|
||||
for REGION in "${REGIONS[@]}"; do
|
||||
gcloud --project="${PROJECT}" \
|
||||
run services update "${SERVICE_BASENAME}-${REGION}" \
|
||||
--image "${IMAGE_REPO}:${TAG}" \
|
||||
--region "${REGION}" \
|
||||
--concurrency 1000 \
|
||||
--max-instances 10 \
|
||||
`# NOTE: should match number of cores configured` \
|
||||
--update-env-vars GOMAXPROCS=1,UPSTREAM_REGISTRY_PATH=k8s-artifacts-prod/images,"UPSTREAM_REGISTRY_ENDPOINT=https://$REGION-docker.pkg.dev" \
|
||||
`# TODO: if we use this to deploy prod, we need to handle this differently` \
|
||||
--args=-v=3
|
||||
done
|
||||
# cd to staging terraform and apply
|
||||
cd "${k8sio_dir}"/infra/gcp/terraform/k8s-infra-oci-proxy
|
||||
# use tfswitch to control terraform version based on sources, if available
|
||||
if command -v tfswitch >/dev/null; then
|
||||
tfswitch
|
||||
fi
|
||||
terraform -v
|
||||
terraform init
|
||||
# NOTE: this must use :? expansion to ensure we will not run with unset variables
|
||||
(set -x; terraform apply -auto-approve -var digest="${IMAGE_DIGEST:?}")
|
||||
|
|
|
@ -0,0 +1,28 @@
|
|||
#!/bin/bash
|
||||
# Copyright 2023 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit -o nounset -o pipefail
|
||||
|
||||
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
# normally the e2e tests would run against the staging endpoint
|
||||
# this runs them against a local instance so we can test the e2e tests themselves
|
||||
# and merge them even if staging is currently broken
|
||||
set -x;
|
||||
make archeio
|
||||
bin/archeio &>"${ARTIFACTS:-./bin}"/archeio-log.txt &
|
||||
trap 'kill $(jobs -p)' EXIT
|
||||
make e2e-test "REGISTRY_ENDPOINT=localhost:8080"
|
|
@ -0,0 +1,45 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2022 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# script to run unit / integration tests, with coverage enabled and junit xml output
|
||||
set -o errexit -o nounset -o pipefail
|
||||
|
||||
# cd to the repo root and setup go
|
||||
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
|
||||
cd "${REPO_ROOT}"
|
||||
source hack/tools/setup-go.sh
|
||||
|
||||
# build gotestsum
|
||||
cd 'hack/tools'
|
||||
go build -o "${REPO_ROOT}/bin/gotestsum" gotest.tools/gotestsum
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
# run e2e tests with junit output
|
||||
# TODO: because we expect relatively few packages to have e2e we only
|
||||
# test those packages to limit CI noise, but this approach would work with ./...
|
||||
# at the cost of reporting lots of no-test packages
|
||||
# (versus the combined integration and unit testing results)
|
||||
# this is also slightly faster
|
||||
(
|
||||
set -x;
|
||||
"${REPO_ROOT}/bin/gotestsum" --junitfile="${REPO_ROOT}/bin/e2e-junit.xml" \
|
||||
-- '-run' '^TestE2E' './cmd/archeio/internal/e2e'
|
||||
)
|
||||
|
||||
# if we are in CI, copy to the artifact upload location
|
||||
if [[ -n "${ARTIFACTS:-}" ]]; then
|
||||
cp "bin/e2e-junit.xml" "${ARTIFACTS:?}/junit.xml"
|
||||
fi
|
|
@ -17,6 +17,8 @@
|
|||
# script to build container images with go
|
||||
set -o errexit -o nounset -o pipefail
|
||||
|
||||
export KO_DEFAULTBASEIMAGE='gcr.io/distroless/static-debian12@sha256:3f2b64ef97bd285e36132c684e6b2ae8f2723293d09aae046196cca64251acac'
|
||||
|
||||
# cd to the repo root and setup go
|
||||
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)"
|
||||
cd "${REPO_ROOT}"
|
||||
|
@ -26,7 +28,7 @@ source hack/tools/setup-go.sh
|
|||
IMAGES="${IMAGES:-cmd/archeio}"
|
||||
IFS=" " read -r -a images <<< "$IMAGES"
|
||||
# overridable registry to use
|
||||
KO_DOCKER_REPO="${KO_DOCKER_REPO:-gcr.io/k8s-staging-infra-tools}"
|
||||
KO_DOCKER_REPO="${KO_DOCKER_REPO:-us-central1-docker.pkg.dev/k8s-staging-images/infra-tools}"
|
||||
export KO_DOCKER_REPO
|
||||
# push or local tar?
|
||||
PUSH="${PUSH:-false}"
|
||||
|
|
|
@ -28,4 +28,5 @@ go build -o "${REPO_ROOT}"/bin/golangci-lint github.com/golangci/golangci-lint/c
|
|||
cd "${REPO_ROOT}"
|
||||
|
||||
# lint the main module
|
||||
go mod download # fetch deps first to avoid including it in timeout
|
||||
"${REPO_ROOT}"/bin/golangci-lint --config "${REPO_ROOT}/hack/tools/.golangci.yml" run ./...
|
||||
|
|
|
@ -23,14 +23,13 @@ set -o pipefail
|
|||
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." &> /dev/null && pwd -P)"
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
# allow overriding docker cli, which should work fine for this script
|
||||
DOCKER="${DOCKER:-docker}"
|
||||
# we will be installing under bin_dir if necessary, and re-using if possible
|
||||
bin_dir="${REPO_ROOT}/bin"
|
||||
export PATH="${bin_dir}:${PATH}"
|
||||
|
||||
# required version for this script, if not installed on the host we will
|
||||
# use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE
|
||||
# required version for this script, if not installed on the host already we will
|
||||
# install it under bin/
|
||||
SHELLCHECK_VERSION="0.8.0"
|
||||
# upstream shellcheck latest stable image as of October 23rd, 2019
|
||||
SHELLCHECK_IMAGE="docker.io/koalaman/shellcheck-alpine@sha256:f42fde76d2d14a645a848826e54a4d650150e151d9c81057c898da89a82c8a56"
|
||||
|
||||
# Find all shell scripts excluding:
|
||||
# - Anything git-ignored - No need to lint untracked files.
|
||||
|
@ -61,29 +60,47 @@ SHELLCHECK_OPTIONS=(
|
|||
# detect if the host machine has the required shellcheck version installed
|
||||
# if so, we will use that instead.
|
||||
HAVE_SHELLCHECK=false
|
||||
if which shellcheck &>/dev/null; then
|
||||
if command -v shellcheck &>/dev/null; then
|
||||
detected_version="$(shellcheck --version | grep 'version: .*')"
|
||||
if [[ "${detected_version}" = "version: ${SHELLCHECK_VERSION}" ]]; then
|
||||
HAVE_SHELLCHECK=true
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# tell the user which we've selected and lint all scripts
|
||||
# The shellcheck errors are printed to stdout by default, hence they need to be redirected
|
||||
# to stderr in order to be well parsed for Junit representation by juLog function
|
||||
res=0
|
||||
if ${HAVE_SHELLCHECK}; then
|
||||
echo "Using host shellcheck ${SHELLCHECK_VERSION} binary."
|
||||
shellcheck "${SHELLCHECK_OPTIONS[@]}" "${all_shell_scripts[@]}" >&2 || res=$?
|
||||
else
|
||||
echo "Using shellcheck ${SHELLCHECK_VERSION} docker image."
|
||||
"${DOCKER}" run \
|
||||
--rm -v "${KUBE_ROOT}:${KUBE_ROOT}" -w "${KUBE_ROOT}" \
|
||||
"${SHELLCHECK_IMAGE}" \
|
||||
shellcheck "${SHELLCHECK_OPTIONS[@]}" "${all_shell_scripts[@]}" >&2 || res=$?
|
||||
# install shellcheck to bin/ if missing or the wrong version
|
||||
if ! ${HAVE_SHELLCHECK}; then
|
||||
echo "Installing shellcheck v${SHELLCHECK_VERSION} under bin/ ..." >&2
|
||||
# in CI we can install xz so we can untar the upstream release
|
||||
# otherwise tell the user they must install xz or shellcheck
|
||||
if ! command -v xz &>/dev/null; then
|
||||
if [[ -n "${PROW_JOB_ID}" ]]; then
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get -qq update
|
||||
DEBCONF_NOWARNINGS="yes" apt-get -qq install --no-install-recommends xz-utils >/dev/null
|
||||
else
|
||||
echo "xz is required to install shellcheck in bin/!" >&2
|
||||
echo "either install xz or install shellcheck v${SHELLCHECK_VERSION}" >&2
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
os=$(uname | tr '[:upper:]' '[:lower:]')
|
||||
arch=$(uname -m)
|
||||
# TODO: shellcheck currently only has x86_64 binaries on macOS, but those will work on M1
|
||||
if [[ "${os}" == 'darwin' ]]; then
|
||||
arch='x86_64'
|
||||
fi
|
||||
mkdir -p "${bin_dir}"
|
||||
# download and untar shellcheck into bin_dir
|
||||
curl -sSL "https://github.com/koalaman/shellcheck/releases/download/v${SHELLCHECK_VERSION?}/shellcheck-v${SHELLCHECK_VERSION?}.${os}.${arch}.tar.xz" \
|
||||
| tar -C "${bin_dir}" --strip-components=1 -xJ -f - "shellcheck-v${SHELLCHECK_VERSION}/shellcheck"
|
||||
# debug newly setup version
|
||||
shellcheck --version >&2
|
||||
fi
|
||||
|
||||
|
||||
# lint all scripts
|
||||
res=0
|
||||
shellcheck "${SHELLCHECK_OPTIONS[@]}" "${all_shell_scripts[@]}" >&2 || res=$?
|
||||
# print a message based on the result
|
||||
if [ $res -eq 0 ]; then
|
||||
echo 'Congratulations! All shell files are passing lint :-)'
|
||||
|
|
|
@ -36,9 +36,12 @@ go_test_opts=(
|
|||
'-coverpkg' 'k8s.io/registry.k8s.io/...'
|
||||
)
|
||||
if [[ "${MODE}" = 'unit' ]]; then
|
||||
go_test_opts+=('-tags=nointegration')
|
||||
go_test_opts+=('-tags=nointegration,noe2e')
|
||||
elif [[ "${MODE}" = 'integration' ]]; then
|
||||
go_test_opts+=('-run' '^TestIntegration')
|
||||
go_test_opts+=('-run' '^TestIntegration' '-tags=noe2e')
|
||||
else
|
||||
go_test_opts+=('-tags=noe2e')
|
||||
MODE="all"
|
||||
fi
|
||||
|
||||
# run unit tests with coverage enabled and junit output
|
||||
|
@ -48,15 +51,19 @@ fi
|
|||
-- "${go_test_opts[@]}" './...'
|
||||
)
|
||||
|
||||
# filter out generated files
|
||||
sed '/zz_generated/d' "${REPO_ROOT}/bin/${MODE}.cov" > "${REPO_ROOT}/bin/${MODE}-filtered.cov"
|
||||
|
||||
# generate cover html
|
||||
go tool cover -html="${REPO_ROOT}/bin/${MODE}-filtered.cov" -o "${REPO_ROOT}/bin/${MODE}-filtered.html"
|
||||
go tool cover -html="${REPO_ROOT}/bin/${MODE}.cov" -o "${REPO_ROOT}/bin/${MODE}.html"
|
||||
|
||||
# if we are in CI, copy to the artifact upload location
|
||||
if [[ -n "${ARTIFACTS:-}" ]]; then
|
||||
cp "bin/${MODE}-junit.xml" "${ARTIFACTS:?}/junit.xml"
|
||||
cp "${REPO_ROOT}/bin/${MODE}-filtered.cov" "${ARTIFACTS:?}/filtered.cov"
|
||||
cp "${REPO_ROOT}/bin/${MODE}-filtered.html" "${ARTIFACTS:?}/filtered.html"
|
||||
# TODO: currently these names are required in $ARTIFACTS in order to render
|
||||
# in the spyglass view. however we're not filtering anymore
|
||||
cp "${REPO_ROOT}/bin/${MODE}.cov" "${ARTIFACTS:?}/filtered.cov"
|
||||
cp "${REPO_ROOT}/bin/${MODE}.html" "${ARTIFACTS:?}/filtered.html"
|
||||
fi
|
||||
|
||||
# enforce coverage levels if we're running all tests
|
||||
if [[ "${MODE}" = 'all' ]]; then
|
||||
(set -x; cd ./hack/tools && go run ./require-coverage)
|
||||
fi
|
||||
|
|
|
@ -28,9 +28,11 @@ trap 'rm -rf ${tmpdir?}' EXIT
|
|||
# generate and compare
|
||||
OUT_FILE="${tmpdir}"/zz_generated_range_data.go
|
||||
export OUT_FILE
|
||||
./pkg/net/cidrs/aws/internal/ranges2go/run.sh
|
||||
DATA_DIR="${REPO_ROOT}"/pkg/net/cloudcidrs/internal/ranges2go/data
|
||||
export DATA_DIR
|
||||
./pkg/net/cloudcidrs/internal/ranges2go/run.sh
|
||||
|
||||
if ! diff "${OUT_FILE}" ./pkg/net/cidrs/aws/zz_generated_range_data.go; then
|
||||
if ! diff "${OUT_FILE}" ./pkg/net/cloudcidrs/zz_generated_range_data.go; then
|
||||
>&2 echo ""
|
||||
>&2 echo "generated file is out of date, please run 'go generate ./...' to regenerate"
|
||||
exit 1
|
||||
|
|
|
@ -12,9 +12,7 @@ linters:
|
|||
- ineffassign
|
||||
- staticcheck
|
||||
- typecheck
|
||||
# TODO: reenable, seems to be broken in go 1.18
|
||||
# keep an eye on https://github.com/golangci/golangci-lint/issues/2649
|
||||
#- unused
|
||||
- unused
|
||||
|
||||
# additional lints
|
||||
- gochecknoinits
|
||||
|
|
|
@ -1,45 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2022 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# script to install shellcheck in CI
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
# cd to repo root
|
||||
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." &> /dev/null && pwd -P)"
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
# get version from shellcheck script
|
||||
scversion="v$(sed -nr 's/SHELLCHECK_VERSION="(.*)"/\1/p' hack/make-rules/shellcheck.sh)"
|
||||
echo "Installing shellcheck ${scversion} from upstream to ensure CI version ..."
|
||||
echo ""
|
||||
|
||||
# install xz so we can untar the upstream release
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
apt-get -qq update
|
||||
DEBCONF_NOWARNINGS="yes" apt-get -qq install --no-install-recommends xz-utils >/dev/null
|
||||
|
||||
# download and untar shellcheck into /usr/bin
|
||||
wget -qO- "https://github.com/koalaman/shellcheck/releases/download/${scversion?}/shellcheck-${scversion?}.linux.x86_64.tar.xz" \
|
||||
| tar -C /usr/bin --strip-components=1 -xJ -f - "shellcheck-${scversion}/shellcheck"
|
||||
|
||||
# debug installed version
|
||||
shellcheck --version
|
||||
|
||||
echo ""
|
||||
echo "Done installing shellcheck ..."
|
||||
echo ""
|
|
@ -0,0 +1,45 @@
|
|||
#!/usr/bin/env bash
|
||||
# Copyright 2023 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# script to ensure containerd binaries for e2e testing
|
||||
set -o errexit -o nounset -o pipefail
|
||||
|
||||
# cd to repo root
|
||||
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." &> /dev/null && pwd -P)"
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
# script inputs, install dir should be versioned
|
||||
readonly CONTAINERD_VERSION="${CONTAINERD_VERSION:?}"
|
||||
readonly CONTAINERD_INSTALL_DIR="${CONTAINERD_INSTALL_DIR:?}"
|
||||
|
||||
containerd_path="${CONTAINERD_INSTALL_DIR}/containerd"
|
||||
if [[ -f "${containerd_path}" ]] && "${containerd_path}" --version | grep -q "${CONTAINERD_VERSION}"; then
|
||||
echo "Already have ${containerd_path} ${CONTAINERD_VERSION}"
|
||||
else
|
||||
# downlod containerd to bindir
|
||||
mkdir -p "${CONTAINERD_INSTALL_DIR}"
|
||||
curl -sSL \
|
||||
"https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz" \
|
||||
| tar -C "${CONTAINERD_INSTALL_DIR}/" -zxvf - --strip-components=1
|
||||
fi
|
||||
|
||||
# generate config for current user
|
||||
cat <<EOF >"${CONTAINERD_INSTALL_DIR}"/containerd-config.toml
|
||||
# own socket as as current user.
|
||||
# we will be running as this user and only fetching
|
||||
[grpc]
|
||||
uid = $(id -u)
|
||||
gid = $(id -g)
|
||||
EOF
|
|
@ -1,54 +1,60 @@
|
|||
module k8s.io/registry.k8s.io/hack/tools
|
||||
|
||||
go 1.19
|
||||
go 1.22.0
|
||||
|
||||
toolchain go1.22.1
|
||||
|
||||
require (
|
||||
github.com/golangci/golangci-lint v1.49.0
|
||||
github.com/google/go-containerregistry v0.11.0
|
||||
github.com/google/ko v0.12.0
|
||||
gotest.tools/gotestsum v1.8.2
|
||||
github.com/golangci/golangci-lint v1.51.1
|
||||
github.com/google/ko v0.13.0
|
||||
golang.org/x/tools v0.22.0
|
||||
gotest.tools/gotestsum v1.9.0
|
||||
k8s.io/apimachinery v0.30.2
|
||||
)
|
||||
|
||||
require (
|
||||
4d63.com/gochecknoglobals v0.1.0 // indirect
|
||||
cloud.google.com/go/compute v1.7.0 // indirect
|
||||
4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
|
||||
4d63.com/gochecknoglobals v0.2.1 // indirect
|
||||
cloud.google.com/go/compute v1.18.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
github.com/Abirdcfly/dupword v0.0.9 // indirect
|
||||
github.com/Antonboom/errname v0.1.7 // indirect
|
||||
github.com/Antonboom/nilnil v0.1.1 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.28 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/BurntSushi/toml v1.2.0 // indirect
|
||||
github.com/BurntSushi/toml v1.2.1 // indirect
|
||||
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
|
||||
github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect
|
||||
github.com/Masterminds/semver v1.5.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/OpenPeeDeeP/depguard v1.1.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/OpenPeeDeeP/depguard v1.1.1 // indirect
|
||||
github.com/alessio/shellescape v1.4.1 // indirect
|
||||
github.com/alexkohler/prealloc v1.0.0 // indirect
|
||||
github.com/alingse/asasalint v0.0.11 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
|
||||
github.com/ashanbrown/forbidigo v1.3.0 // indirect
|
||||
github.com/ashanbrown/makezero v1.1.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.14 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.17.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.12.18 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.22 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecr v1.17.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.13.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.11.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.17 // indirect
|
||||
github.com/aws/smithy-go v1.13.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.0 // indirect
|
||||
github.com/aws/smithy-go v1.13.5 // indirect
|
||||
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20220517224237-e6f29200ae04 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bkielbasa/cyclop v1.2.0 // indirect
|
||||
|
@ -57,69 +63,71 @@ require (
|
|||
github.com/breml/bidichk v0.2.3 // indirect
|
||||
github.com/breml/errchkjson v0.3.0 // indirect
|
||||
github.com/butuzov/ireturn v0.1.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/charithe/durationcheck v0.0.9 // indirect
|
||||
github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect
|
||||
github.com/chavacava/garif v0.0.0-20221024190013-b3ef35877348 // indirect
|
||||
github.com/chrismellard/docker-credential-acr-env v0.0.0-20220327082430-c57b701bfc08 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
|
||||
github.com/curioswitch/go-reassign v0.1.2 // indirect
|
||||
github.com/daixiang0/gci v0.6.3 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
|
||||
github.com/curioswitch/go-reassign v0.2.0 // indirect
|
||||
github.com/daixiang0/gci v0.9.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/denis-tingaikin/go-header v0.4.3 // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/dnephin/pflag v1.0.7 // indirect
|
||||
github.com/docker/cli v20.10.17+incompatible // indirect
|
||||
github.com/docker/cli v23.0.1+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||
github.com/docker/docker v20.10.18+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.6.4 // indirect
|
||||
github.com/docker/docker v23.0.1+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dprotaso/go-yit v0.0.0-20220510233725-9ba8df137936 // indirect
|
||||
github.com/esimonov/ifshort v1.0.4 // indirect
|
||||
github.com/ettle/strcase v0.1.1 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/fatih/color v1.14.1 // indirect
|
||||
github.com/fatih/structtag v1.2.0 // indirect
|
||||
github.com/firefart/nonamedreturns v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/fzipp/gocyclo v0.6.0 // indirect
|
||||
github.com/go-critic/go-critic v0.6.4 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-critic/go-critic v0.6.5 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-openapi/analysis v0.21.4 // indirect
|
||||
github.com/go-openapi/errors v0.20.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/loads v0.21.2 // indirect
|
||||
github.com/go-openapi/runtime v0.24.1 // indirect
|
||||
github.com/go-openapi/runtime v0.25.0 // indirect
|
||||
github.com/go-openapi/spec v0.20.7 // indirect
|
||||
github.com/go-openapi/strfmt v0.21.3 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/validate v0.22.0 // indirect
|
||||
github.com/go-toolsmith/astcast v1.0.0 // indirect
|
||||
github.com/go-toolsmith/astcopy v1.0.1 // indirect
|
||||
github.com/go-toolsmith/astequal v1.0.2 // indirect
|
||||
github.com/go-toolsmith/astcopy v1.0.3 // indirect
|
||||
github.com/go-toolsmith/astequal v1.0.3 // indirect
|
||||
github.com/go-toolsmith/astfmt v1.0.0 // indirect
|
||||
github.com/go-toolsmith/astp v1.0.0 // indirect
|
||||
github.com/go-toolsmith/strparse v1.0.0 // indirect
|
||||
github.com/go-toolsmith/typep v1.0.2 // indirect
|
||||
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
|
||||
github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gofrs/flock v0.8.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
|
||||
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
|
||||
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
|
||||
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect
|
||||
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect
|
||||
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
|
||||
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
|
||||
github.com/golangci/misspell v0.3.5 // indirect
|
||||
github.com/golangci/misspell v0.4.0 // indirect
|
||||
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect
|
||||
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/go-containerregistry v0.13.1-0.20230310164735-e94d40893b2d // indirect
|
||||
github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect
|
||||
github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28 // indirect
|
||||
github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
|
||||
github.com/gostaticanalysis/comment v1.4.2 // indirect
|
||||
github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect
|
||||
|
@ -129,96 +137,99 @@ require (
|
|||
github.com/hashicorp/go-version v1.6.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hexops/gotextdiff v1.0.3 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jgautheron/goconst v1.5.1 // indirect
|
||||
github.com/jingyugao/rowserrcheck v1.1.1 // indirect
|
||||
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/julz/importas v0.1.0 // indirect
|
||||
github.com/kisielk/errcheck v1.6.2 // indirect
|
||||
github.com/junk1tm/musttag v0.4.4 // indirect
|
||||
github.com/kisielk/errcheck v1.6.3 // indirect
|
||||
github.com/kisielk/gotool v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.15.8 // indirect
|
||||
github.com/kkHAIKE/contextcheck v1.1.3 // indirect
|
||||
github.com/klauspost/compress v1.16.0 // indirect
|
||||
github.com/kulti/thelper v0.6.3 // indirect
|
||||
github.com/kunwardeep/paralleltest v1.0.6 // indirect
|
||||
github.com/kyoh86/exportloopref v0.1.8 // indirect
|
||||
github.com/kyoh86/exportloopref v0.1.11 // indirect
|
||||
github.com/ldez/gomoddirectives v0.2.3 // indirect
|
||||
github.com/ldez/tagliatelle v0.3.1 // indirect
|
||||
github.com/leonklingele/grouper v1.1.0 // indirect
|
||||
github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e // indirect
|
||||
github.com/ldez/tagliatelle v0.4.0 // indirect
|
||||
github.com/leonklingele/grouper v1.1.1 // indirect
|
||||
github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
|
||||
github.com/lufeee/execinquery v1.2.1 // indirect
|
||||
github.com/magiconair/properties v1.8.6 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/maratori/testableexamples v1.0.0 // indirect
|
||||
github.com/maratori/testpackage v1.1.0 // indirect
|
||||
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mbilski/exhaustivestruct v1.2.0 // indirect
|
||||
github.com/mgechev/revive v1.2.3 // indirect
|
||||
github.com/mgechev/revive v1.2.5 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moricho/tparallel v0.2.1 // indirect
|
||||
github.com/nakabonne/nestif v0.3.1 // indirect
|
||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
|
||||
github.com/nishanths/exhaustive v0.8.1 // indirect
|
||||
github.com/nishanths/exhaustive v0.9.5 // indirect
|
||||
github.com/nishanths/predeclared v0.2.2 // indirect
|
||||
github.com/nunnatsa/ginkgolinter v0.8.1 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
|
||||
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/polyfloyd/go-errorlint v1.0.2 // indirect
|
||||
github.com/prometheus/client_golang v1.13.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/quasilyte/go-ruleguard v0.3.17 // indirect
|
||||
github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect
|
||||
github.com/polyfloyd/go-errorlint v1.0.6 // indirect
|
||||
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.39.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/quasilyte/go-ruleguard v0.3.18 // indirect
|
||||
github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f // indirect
|
||||
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect
|
||||
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/ryancurrah/gomodguard v1.2.4 // indirect
|
||||
github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect
|
||||
github.com/ryancurrah/gomodguard v1.3.0 // indirect
|
||||
github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect
|
||||
github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect
|
||||
github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
|
||||
github.com/sashamelentyev/usestdlibvars v1.13.0 // indirect
|
||||
github.com/securego/gosec/v2 v2.13.1 // indirect
|
||||
github.com/sashamelentyev/usestdlibvars v1.21.1 // indirect
|
||||
github.com/securego/gosec/v2 v2.14.0 // indirect
|
||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
|
||||
github.com/sigstore/cosign v1.12.1 // indirect
|
||||
github.com/sigstore/rekor v0.12.1-0.20220915152154-4bb6f441c1b2 // indirect
|
||||
github.com/sigstore/sigstore v1.4.1 // indirect
|
||||
github.com/sigstore/cosign/v2 v2.0.0 // indirect
|
||||
github.com/sigstore/rekor v1.0.1 // indirect
|
||||
github.com/sigstore/sigstore v1.5.1 // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/sivchari/containedctx v1.0.2 // indirect
|
||||
github.com/sivchari/nosnakecase v1.7.0 // indirect
|
||||
github.com/sivchari/tenv v1.7.0 // indirect
|
||||
github.com/sivchari/tenv v1.7.1 // indirect
|
||||
github.com/sonatard/noctx v0.0.1 // indirect
|
||||
github.com/sourcegraph/go-diff v0.6.1 // indirect
|
||||
github.com/spf13/afero v1.8.2 // indirect
|
||||
github.com/sourcegraph/go-diff v0.7.0 // indirect
|
||||
github.com/spf13/afero v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/cobra v1.5.0 // indirect
|
||||
github.com/spf13/cobra v1.6.1 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.13.0 // indirect
|
||||
github.com/spf13/viper v1.15.0 // indirect
|
||||
github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect
|
||||
github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect
|
||||
github.com/stretchr/objx v0.4.0 // indirect
|
||||
github.com/stretchr/testify v1.8.0 // indirect
|
||||
github.com/subosito/gotenv v1.4.1 // indirect
|
||||
github.com/sylvia7788/contextcheck v1.0.6 // indirect
|
||||
github.com/stretchr/objx v0.5.0 // indirect
|
||||
github.com/stretchr/testify v1.8.4 // indirect
|
||||
github.com/subosito/gotenv v1.4.2 // indirect
|
||||
github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect
|
||||
github.com/tdakkota/asciicheck v0.1.1 // indirect
|
||||
github.com/tetafro/godot v1.4.11 // indirect
|
||||
github.com/theupdateframework/go-tuf v0.5.0 // indirect
|
||||
github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect
|
||||
github.com/timonwong/logrlint v0.1.0 // indirect
|
||||
github.com/theupdateframework/go-tuf v0.5.2 // indirect
|
||||
github.com/timakin/bodyclose v0.0.0-20221125081123-e39cf3fc478e // indirect
|
||||
github.com/timonwong/loggercheck v0.9.3 // indirect
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
|
||||
github.com/tomarrell/wrapcheck/v2 v2.6.2 // indirect
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect
|
||||
github.com/tomarrell/wrapcheck/v2 v2.8.0 // indirect
|
||||
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
|
||||
github.com/ultraware/funlen v0.0.3 // indirect
|
||||
github.com/ultraware/whitespace v0.0.5 // indirect
|
||||
github.com/uudashr/gocognit v1.0.6 // indirect
|
||||
|
@ -229,35 +240,33 @@ require (
|
|||
go.mongodb.org/mongo-driver v1.10.2 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/automaxprocs v1.5.1 // indirect
|
||||
go.uber.org/multierr v1.8.0 // indirect
|
||||
go.uber.org/zap v1.23.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
go.uber.org/zap v1.24.0 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20220823124025-807a23277127 // indirect
|
||||
golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
|
||||
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 // indirect
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
|
||||
golang.org/x/sys v0.0.0-20220907062415-87db552b00fd // indirect
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
|
||||
golang.org/x/text v0.3.8-0.20211004125949-5bd84dd9b33b // indirect
|
||||
golang.org/x/tools v0.1.12 // indirect
|
||||
golang.org/x/exp/typeparams v0.0.0-20221208152030-732eee02a75a // indirect
|
||||
golang.org/x/mod v0.18.0 // indirect
|
||||
golang.org/x/net v0.26.0 // indirect
|
||||
golang.org/x/oauth2 v0.6.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/term v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220805133916-01dd62135a58 // indirect
|
||||
google.golang.org/grpc v1.49.0 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc // indirect
|
||||
google.golang.org/grpc v1.53.0 // indirect
|
||||
google.golang.org/protobuf v1.33.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
honnef.co/go/tools v0.3.3 // indirect
|
||||
k8s.io/apimachinery v0.25.2 // indirect
|
||||
k8s.io/klog/v2 v2.70.1 // indirect
|
||||
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
|
||||
mvdan.cc/gofumpt v0.3.1 // indirect
|
||||
honnef.co/go/tools v0.4.0 // indirect
|
||||
k8s.io/klog/v2 v2.120.1 // indirect
|
||||
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
|
||||
mvdan.cc/gofumpt v0.4.0 // indirect
|
||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
|
||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
|
||||
mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect
|
||||
sigs.k8s.io/kind v0.16.0 // indirect
|
||||
mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect
|
||||
sigs.k8s.io/kind v0.17.0 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
|
2764
hack/tools/go.sum
2764
hack/tools/go.sum
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,106 @@
|
|||
/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// A small utility to enforce code coverage levels
// hack/make-rules/test.sh && (cd ./hack/tools && go run ./require-coverage)

package main

import (
	"fmt"
	"os"

	"golang.org/x/tools/cover"

	"k8s.io/apimachinery/pkg/util/sets"
)

// TODO: instead of fully excluding files, maybe we should have a more
// flexible pattern of minimum coverage?
//
// For now the goal is to require 100% coverage for production serving code.
// See also: cmd/archeio/docs/testing.md
//
// Reviewers should be wary of approving additions to this list.
var knownFailingFiles = sets.NewString(
	// this code is used only at development time and integration testing it
	// is probably excessive
	"k8s.io/registry.k8s.io/pkg/net/cloudcidrs/internal/ranges2go/main.go",
	// TODO: this is reasonable to test but shy of 100% coverage, mostly error handling ...
	"k8s.io/registry.k8s.io/pkg/net/cloudcidrs/internal/ranges2go/gen.go",
	// geranos is not easily tested and is not in the blocking path in production
	// we should still test it better
	"k8s.io/registry.k8s.io/cmd/geranos/main.go",
	"k8s.io/registry.k8s.io/cmd/geranos/ratelimitroundtrip.go",
	"k8s.io/registry.k8s.io/cmd/geranos/s3uploader.go",
	"k8s.io/registry.k8s.io/cmd/geranos/schemav1.go",
	"k8s.io/registry.k8s.io/cmd/geranos/walkimages.go",
	// We cover this with integration tests and including integration coverage
	// here would mask a lack of unit test coverage.
	"k8s.io/registry.k8s.io/cmd/archeio/main.go",
)

// main parses the merged coverage profile produced by hack/make-rules/test.sh
// and exits non-zero when either:
//   - any file not in knownFailingFiles is below 100% coverage, or
//   - any file in knownFailingFiles has reached 100% and must therefore be
//     removed from the ignore list (so the list cannot go stale).
func main() {
	fmt.Println("Checking coverage ...")
	// the coverage file is written by the test make-rule relative to the
	// hack/tools module this utility runs from
	profiles, err := cover.ParseProfiles("./../../bin/all.cov")
	if err != nil {
		// this is a development-time tool; a missing/corrupt profile is fatal
		panic(err)
	}
	failedAny := false
	needToRemove := []string{}
	for _, profile := range profiles {
		coverage := coverPercent(profile)
		file := profile.FileName
		if coverage < 100.0 {
			if !knownFailingFiles.Has(file) {
				failedAny = true
				fmt.Printf("FAILED: %s %v%%\n", file, coverage)
			} else {
				fmt.Printf("IGNORE: %s %v%%\n", file, coverage)
			}
		} else {
			// a fully-covered file must not linger on the ignore list
			if knownFailingFiles.Has(file) {
				needToRemove = append(needToRemove, file)
			}
			fmt.Printf("PASSED: %s %v%%\n", file, coverage)
		}
	}
	if failedAny {
		fmt.Println("Failed required coverage levels for one or more go files")
		// NOTE: exit status 1, not -1: negative values are truncated to an
		// unsigned byte by the OS (-1 becomes 255) and 1 is the conventional
		// "failure" status.
		os.Exit(1)
	}
	fmt.Println("All code coverage either acceptable or ignored")
	if len(needToRemove) > 0 {
		fmt.Println("FAILED: The following files are now passing and must be removed from the ignored list:")
		for _, file := range needToRemove {
			fmt.Println(file)
		}
		os.Exit(1)
	}
}

// coverPercent returns the statement coverage of profile as a percentage
// in [0, 100].
func coverPercent(profile *cover.Profile) float64 {
	totalStatements := 0
	coveredStatements := 0
	for _, block := range profile.Blocks {
		totalStatements += block.NumStmt
		if block.Count > 0 {
			coveredStatements += block.NumStmt
		}
	}
	// A profile with zero statements has nothing left uncovered: report 100
	// rather than computing 0/0 == NaN, which would print as "NaN%" and
	// compare as "not < 100" only by accident of NaN semantics.
	if totalStatements == 0 {
		return 100.0
	}
	return float64(coveredStatements) / float64(totalStatements) * 100
}
|
|
@ -0,0 +1,62 @@
|
|||
#!/bin/bash

# Copyright 2023 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Dependencies
# - rclone
# - awscli

# Usage
# ./hack/tools/sync-to-s3.sh
#
# Syncs the GCS production artifacts bucket to the S3 production bucket,
# retrying the rclone sync until it succeeds. AWS credentials are refreshed
# on each attempt (assumed-role sessions expire) unless an IAM token file
# from the pod's service account is mounted.

SOURCE=gcs:artifacts.k8s-artifacts-prod.appspot.com
DESTINATION=s3:prod-registry-k8s-io-us-east-2

CALLER_ID="$(aws sts get-caller-identity --output json | jq -r .UserId)"

# The rclone config is static, so write it once up front and remove it when
# the script exits. (Previously a fresh temp file was created on every retry
# iteration and never cleaned up, leaking one file per attempt.)
RCLONE_CONFIG="$(mktemp)"
trap 'rm -f "${RCLONE_CONFIG:-}"' EXIT
echo "Wrote rclone config to '${RCLONE_CONFIG:-}'"

cat << EOF > "${RCLONE_CONFIG:-}"
[gcs]
type = google cloud storage
bucket_acl = private

[s3]
type = s3
provider = AWS
env_auth = true
region = us-east-2
EOF

while true; do
    # When no IAM web-identity token is mounted, refresh credentials each
    # attempt by assuming the s3writer role (sessions are limited to 12h).
    if [ ! -f /var/run/secrets/aws-iam-token/serviceaccount/token ]; then
        unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN
        JSON=$(aws sts assume-role \
            --role-arn "arn:aws:iam::513428760722:role/registry.k8s.io_s3writer" \
            --role-session-name "${CALLER_ID:-}-registry.k8s.io_s3writer" \
            --duration-seconds 43200 \
            --output json || exit 1)

        AWS_ACCESS_KEY_ID=$(echo "${JSON}" | jq --raw-output ".Credentials[\"AccessKeyId\"]")
        AWS_SECRET_ACCESS_KEY=$(echo "${JSON}" | jq --raw-output ".Credentials[\"SecretAccessKey\"]")
        AWS_SESSION_TOKEN=$(echo "${JSON}" | jq --raw-output ".Credentials[\"SessionToken\"]")
        export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN
    fi

    echo "Running sync between '${SOURCE:-}' and '${DESTINATION:-}'"
    if rclone sync --config "${RCLONE_CONFIG:-}" -P "${SOURCE:-}" "${DESTINATION:-}"; then
        exit 0;
    fi
done
|
|
@ -32,7 +32,4 @@ import (
|
|||
|
||||
// image builder
|
||||
_ "github.com/google/ko"
|
||||
|
||||
// for testing
|
||||
_ "github.com/google/go-containerregistry/cmd/crane"
|
||||
)
|
||||
|
|
|
@ -1,63 +0,0 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func EnsureBinsInPath(binDir string) error {
|
||||
path := os.Getenv("PATH")
|
||||
// if bins are already at front of PATH, do nothing
|
||||
if strings.HasPrefix(path, binDir+string(os.PathSeparator)) {
|
||||
return nil
|
||||
}
|
||||
// otherwise prepend and set
|
||||
newPath := binDir + string(os.PathListSeparator) + path
|
||||
return os.Setenv("PATH", newPath)
|
||||
}
|
||||
|
||||
// EnsureCrane ensures crane is available in PATH for testing
|
||||
// under rootPath/bin
|
||||
// See also: EnsureBinsInPath
|
||||
func EnsureCrane(rootPath string) error {
|
||||
// ensure $REPO_ROOT/bin is in the front of $PATH
|
||||
root, err := ModuleRootDir()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to detect path to project root: %w", err)
|
||||
}
|
||||
binDir := rootToBinDir(root)
|
||||
if err := EnsureBinsInPath(binDir); err != nil {
|
||||
return fmt.Errorf("failed to ensure PATH: %w", err)
|
||||
}
|
||||
// install crane
|
||||
// nolint:gosec // we *want* user supplied command arguments ...
|
||||
cmd := exec.Command(
|
||||
"go", "build",
|
||||
"-o", filepath.Join(binDir, "crane"),
|
||||
"github.com/google/go-containerregistry/cmd/crane",
|
||||
)
|
||||
cmd.Dir = rootToToolsDir(root)
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to install crane: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -23,8 +23,12 @@ import (
|
|||
)
|
||||
|
||||
func ModuleRootDir() (string, error) {
|
||||
return moduleRootDir(os.Getwd)
|
||||
}
|
||||
|
||||
func moduleRootDir(getWD func() (string, error)) (string, error) {
|
||||
// in a test, the working directory will be the test package source dir
|
||||
wd, err := os.Getwd()
|
||||
wd, err := getWD()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -35,8 +39,6 @@ func ModuleRootDir() (string, error) {
|
|||
_, err := os.Stat(filepath.Join(currDir, "go.mod"))
|
||||
if err == nil {
|
||||
return currDir, nil
|
||||
} else if !os.IsNotExist(err) {
|
||||
return "", err
|
||||
}
|
||||
// if we get back the same path, we've hit the disk / volume root
|
||||
nextDir := filepath.Dir(currDir)
|
||||
|
@ -46,11 +48,3 @@ func ModuleRootDir() (string, error) {
|
|||
currDir = nextDir
|
||||
}
|
||||
}
|
||||
|
||||
func rootToBinDir(root string) string {
|
||||
return filepath.Join(root, "bin")
|
||||
}
|
||||
|
||||
func rootToToolsDir(root string) string {
|
||||
return filepath.Join(root, "hack", "tools")
|
||||
}
|
||||
|
|
|
@ -16,14 +16,35 @@ limitations under the License.
|
|||
|
||||
package integration
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestModuleRootDir(t *testing.T) {
|
||||
root, err := ModuleRootDir()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting root dir: %v", err)
|
||||
} else if root == "" {
|
||||
t.Fatal("expected root dir to be non-empty string")
|
||||
}
|
||||
if root == "" {
|
||||
t.Fatalf("expected root dir to be non-empty string")
|
||||
|
||||
// we reasonably assume the filesystem root is not a module
|
||||
wdAlwaysRoot := func() (string, error) { return "/", nil }
|
||||
root, err = moduleRootDir(wdAlwaysRoot)
|
||||
if err == nil {
|
||||
t.Fatal("expected error getting moduleRootDir for /")
|
||||
} else if root != "" {
|
||||
t.Fatal("did not expect non-empty string getting moduleRootDir for /")
|
||||
}
|
||||
|
||||
// test error handling for os.Getwd
|
||||
expectErr := errors.New("err")
|
||||
wdAlwaysError := func() (string, error) { return "", expectErr }
|
||||
root, err = moduleRootDir(wdAlwaysError)
|
||||
if err == nil {
|
||||
t.Fatal("expected error getting moduleRootDir with erroring getWD")
|
||||
} else if root != "" {
|
||||
t.Fatal("did not expect non-empty string getting moduleRootDir for erroring getWD")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,54 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2022 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit -o nounset -o pipefail
|
||||
|
||||
# cd to self
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
DATA_URL='https://ip-ranges.amazonaws.com/ip-ranges.json'
|
||||
|
||||
# emit ip ranges data into a go source file with the string contents
|
||||
# this data changes infrequently and this simplifies generating the runtime
|
||||
# data
|
||||
{
|
||||
cat <<EOF
|
||||
/*
|
||||
Copyright $(date +%Y) The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// File generated by genrawdata.sh DO NOT EDIT
|
||||
|
||||
package main
|
||||
|
||||
// ipRangesRaw contains the contents of ${DATA_URL}
|
||||
var ipRangesRaw = \`
|
||||
EOF
|
||||
curl "${DATA_URL}"
|
||||
echo '`'
|
||||
}>zz_generated_rawdata.go
|
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package app
|
||||
package clientip
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@ -24,14 +24,18 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
// getClientIP gets the client IP for an http.Request
|
||||
// Get gets the client IP for an http.Request
|
||||
//
|
||||
// NOTE: currently only two scenarios are supported:
|
||||
// 1. no loadbalancer, local testing
|
||||
// 2. behind Google Cloud LoadBalancer
|
||||
// 2. behind Google Cloud LoadBalancer (as in cloudrun)
|
||||
//
|
||||
// At this time we have no need to complicate it further
|
||||
func getClientIP(r *http.Request) (netip.Addr, error) {
|
||||
// Note that in particular we do not support hitting the CloudRun endpoint
|
||||
// directly (though we could easily do so here). Cloud Armor is on the GCLB,
|
||||
// so directly accessing the CloudRun endpoint would bypass that.
|
||||
//
|
||||
// At this time we have no need to complicate it further.
|
||||
func Get(r *http.Request) (netip.Addr, error) {
|
||||
// Upstream docs:
|
||||
// https://cloud.google.com/load-balancing/docs/https#x-forwarded-for_header
|
||||
//
|
||||
|
@ -70,23 +74,6 @@ func getClientIP(r *http.Request) (netip.Addr, error) {
|
|||
if len(keys) < 2 {
|
||||
return netip.Addr{}, fmt.Errorf("invalid X-Forwarded-For value: %s", rawXFwdFor)
|
||||
}
|
||||
// detect cloud run bug where the header is actually like
|
||||
// <client-ip>, <load-balancer-ip>,<client-ip>
|
||||
// (last ,<client-ip> should not be there)
|
||||
// for googlers this is b/209919936
|
||||
// TODO: Remove this once cloud run bug is fixed
|
||||
//
|
||||
// NOTE: Once this bug is fixed, a client could set the header:
|
||||
// X-Forwarded-For: <load-balancer-ip>
|
||||
// and confuse us into thinking this bug is still active, causing us to
|
||||
// server their traffic from the upstream registry instead of redirecting.
|
||||
// ... however, it is extremely unclear why anyone would do this,
|
||||
// or why we would care for our use case ...
|
||||
// Otherwise this implementation will use the normal path below
|
||||
// automatically when the bug is fixed.
|
||||
if len(keys) > 2 && keys[len(keys)-1] == keys[len(keys)-3] {
|
||||
return netip.ParseAddr(keys[len(keys)-3])
|
||||
}
|
||||
// normal case, we expect the client-ip to be 2 from the end
|
||||
return netip.ParseAddr(keys[len(keys)-2])
|
||||
}
|
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package app
|
||||
package clientip
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
@ -22,7 +22,7 @@ import (
|
|||
"testing"
|
||||
)
|
||||
|
||||
func TestGetClientIP(t *testing.T) {
|
||||
func TestGet(t *testing.T) {
|
||||
testCases := []struct {
|
||||
Name string
|
||||
Request http.Request
|
||||
|
@ -53,16 +53,6 @@ func TestGetClientIP(t *testing.T) {
|
|||
},
|
||||
ExpectedIP: netip.MustParseAddr("8.8.8.8"),
|
||||
},
|
||||
{
|
||||
Name: "X-Forwarded-For without client-supplied + cloud-run bug",
|
||||
Request: http.Request{
|
||||
Header: http.Header{
|
||||
"X-Forwarded-For": []string{"8.8.8.8,8.8.8.9,8.8.8.8"},
|
||||
},
|
||||
RemoteAddr: "127.0.0.1:8888",
|
||||
},
|
||||
ExpectedIP: netip.MustParseAddr("8.8.8.8"),
|
||||
},
|
||||
{
|
||||
Name: "X-Forwarded-For with clean client-supplied",
|
||||
Request: http.Request{
|
||||
|
@ -73,16 +63,6 @@ func TestGetClientIP(t *testing.T) {
|
|||
},
|
||||
ExpectedIP: netip.MustParseAddr("8.8.8.8"),
|
||||
},
|
||||
{
|
||||
Name: "X-Forwarded-For with clean client-supplied + cloud-run bug",
|
||||
Request: http.Request{
|
||||
Header: http.Header{
|
||||
"X-Forwarded-For": []string{"127.0.0.1, 8.8.8.8, 8.8.8.9,8.8.8.8"},
|
||||
},
|
||||
RemoteAddr: "127.0.0.1:8888",
|
||||
},
|
||||
ExpectedIP: netip.MustParseAddr("8.8.8.8"),
|
||||
},
|
||||
{
|
||||
Name: "X-Forwarded-For with garbage client-supplied",
|
||||
Request: http.Request{
|
||||
|
@ -103,15 +83,36 @@ func TestGetClientIP(t *testing.T) {
|
|||
},
|
||||
ExpectError: true,
|
||||
},
|
||||
{
|
||||
Name: "X-Forwarded-For for IPv6 with load balancer",
|
||||
Request: http.Request{
|
||||
Header: http.Header{
|
||||
"X-Forwarded-For": []string{"2001:0db8:1234:5678:abcd:1234:5678:abcd, 2001:0db8:0:abcd::"},
|
||||
},
|
||||
RemoteAddr: "127.0.0.1:8888",
|
||||
},
|
||||
ExpectedIP: netip.MustParseAddr("2001:0db8:1234:5678:abcd:1234:5678:abcd"),
|
||||
},
|
||||
{
|
||||
Name: "X-Forwarded-For for IPv6 without load balancer",
|
||||
Request: http.Request{
|
||||
Header: http.Header{
|
||||
"X-Forwarded-For": []string{"2001:0db8:1234:5678:abcd:1234:5678:abcd"},
|
||||
},
|
||||
RemoteAddr: "127.0.0.1:8888",
|
||||
},
|
||||
// We could accept this, we choose to require the load balancer
|
||||
ExpectError: true,
|
||||
},
|
||||
}
|
||||
for i := range testCases {
|
||||
tc := testCases[i]
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
//t.Parallel()
|
||||
ip, err := getClientIP(&tc.Request)
|
||||
ip, err := Get(&tc.Request)
|
||||
if err != nil {
|
||||
if !tc.ExpectError {
|
||||
t.Fatalf("unexpted error: %v", err)
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
} else if tc.ExpectError {
|
||||
t.Fatal("expected error but err was nil")
|
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// aws contains AWS CIDR matching
|
||||
package aws
|
||||
// cloudcidrs contains Cloud CIDR matching
|
||||
package cloudcidrs
|
||||
|
||||
//go:generate ./internal/ranges2go/run.sh
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -40,23 +40,56 @@ limitations under the License.
|
|||
|
||||
// File generated by ranges2go DO NOT EDIT
|
||||
|
||||
package aws
|
||||
package cloudcidrs
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
)
|
||||
|
||||
// regionToRanges contains a preparsed map of AWS regions to netip.Prefix
|
||||
var regionToRanges = map[string][]netip.Prefix{
|
||||
`
|
||||
|
||||
func generateRangesGo(w io.Writer, rtp regionsToPrefixes) error {
|
||||
// generate source file
|
||||
func generateRangesGo(w io.Writer, cloudToRTP map[string]regionsToPrefixes) error {
|
||||
// generate source file header
|
||||
if _, err := io.WriteString(w, fileHeader); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ensure iteration order is predictable
|
||||
// ensure iteration order is predictable for reproducible codegen
|
||||
clouds := make([]string, 0, len(cloudToRTP))
|
||||
for cloud := range cloudToRTP {
|
||||
clouds = append(clouds, cloud)
|
||||
}
|
||||
sort.Strings(clouds)
|
||||
|
||||
// generate constants for each cloud
|
||||
for _, cloud := range clouds {
|
||||
if _, err := fmt.Fprintf(w, "// %s cloud\nconst %s = %q\n\n", cloud, cloud, cloud); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// generate main data variable
|
||||
if _, err := io.WriteString(w, `// regionToRanges contains a preparsed map of cloud IPInfo to netip.Prefix
|
||||
var regionToRanges = map[IPInfo][]netip.Prefix{
|
||||
`,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, cloud := range clouds {
|
||||
rtp := cloudToRTP[cloud]
|
||||
if err := genCloud(w, cloud, rtp); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := io.WriteString(w, "}\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func genCloud(w io.Writer, cloud string, rtp regionsToPrefixes) error {
|
||||
// ensure iteration order is predictable for reproducible codegen
|
||||
regions := make([]string, 0, len(rtp))
|
||||
for region := range rtp {
|
||||
regions = append(regions, region)
|
||||
|
@ -64,7 +97,7 @@ func generateRangesGo(w io.Writer, rtp regionsToPrefixes) error {
|
|||
sort.Strings(regions)
|
||||
for _, region := range regions {
|
||||
prefixes := rtp[region]
|
||||
if _, err := fmt.Fprintf(w, "\t%q: {\n", region); err != nil {
|
||||
if _, err := fmt.Fprintf(w, "\t{Cloud: %s, Region: %q}: {\n", cloud, region); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, prefix := range prefixes {
|
||||
|
@ -97,9 +130,5 @@ func generateRangesGo(w io.Writer, rtp regionsToPrefixes) error {
|
|||
return err
|
||||
}
|
||||
}
|
||||
if _, err := io.WriteString(w, "}\n"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -23,7 +23,7 @@ import (
|
|||
|
||||
func TestGenerateRangesGo(t *testing.T) {
|
||||
// raw data to generate from
|
||||
const rawData = `{
|
||||
const rawAWSData = `{
|
||||
"syncToken": "1649878400",
|
||||
"createDate": "2022-04-13-19-33-20",
|
||||
"prefixes": [
|
||||
|
@ -80,7 +80,41 @@ func TestGenerateRangesGo(t *testing.T) {
|
|||
]
|
||||
}
|
||||
`
|
||||
rtp, err := regionsToPrefixesFromRaw(rawData)
|
||||
awsRTP, err := parseAWS(rawAWSData)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error parsing test data: %v", err)
|
||||
}
|
||||
const rawGCPData = `{
|
||||
"syncToken": "1678334702365",
|
||||
"creationTime": "2023-03-08T20:05:02.365608",
|
||||
"prefixes": [{
|
||||
"ipv4Prefix": "34.80.0.0/15",
|
||||
"service": "Google Cloud",
|
||||
"scope": "asia-east1"
|
||||
}, {
|
||||
"ipv4Prefix": "34.137.0.0/16",
|
||||
"service": "Google Cloud",
|
||||
"scope": "asia-east1"
|
||||
}, {
|
||||
"ipv4Prefix": "35.185.128.0/19",
|
||||
"service": "Google Cloud",
|
||||
"scope": "asia-east1"
|
||||
}, {
|
||||
"ipv4Prefix": "130.211.240.0/20",
|
||||
"service": "Google Cloud",
|
||||
"scope": "asia-east1"
|
||||
}, {
|
||||
"ipv6Prefix": "2600:1900:4030::/44",
|
||||
"service": "Google Cloud",
|
||||
"scope": "asia-east1"
|
||||
}, {
|
||||
"ipv6Prefix": "2600:1900:4180::/44",
|
||||
"service": "Google Cloud",
|
||||
"scope": "us-west4"
|
||||
}]
|
||||
}
|
||||
`
|
||||
gcpRTP, err := parseGCP(rawGCPData)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error parsing test data: %v", err)
|
||||
}
|
||||
|
@ -104,31 +138,51 @@ limitations under the License.
|
|||
|
||||
// File generated by ranges2go DO NOT EDIT
|
||||
|
||||
package aws
|
||||
package cloudcidrs
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
)
|
||||
|
||||
// regionToRanges contains a preparsed map of AWS regions to netip.Prefix
|
||||
var regionToRanges = map[string][]netip.Prefix{
|
||||
"ap-northeast-2": {
|
||||
// AWS cloud
|
||||
const AWS = "AWS"
|
||||
|
||||
// GCP cloud
|
||||
const GCP = "GCP"
|
||||
|
||||
// regionToRanges contains a preparsed map of cloud IPInfo to netip.Prefix
|
||||
var regionToRanges = map[IPInfo][]netip.Prefix{
|
||||
{Cloud: AWS, Region: "ap-northeast-2"}: {
|
||||
netip.PrefixFrom(netip.AddrFrom4([4]byte{3, 5, 140, 0}), 22),
|
||||
},
|
||||
"eu-south-1": {
|
||||
{Cloud: AWS, Region: "eu-south-1"}: {
|
||||
netip.PrefixFrom(netip.AddrFrom16([16]byte{42, 5, 208, 58, 160, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0}), 56),
|
||||
netip.PrefixFrom(netip.AddrFrom16([16]byte{42, 5, 208, 58, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), 56),
|
||||
netip.PrefixFrom(netip.AddrFrom16([16]byte{42, 5, 208, 122, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), 40),
|
||||
},
|
||||
"me-south-1": {
|
||||
{Cloud: AWS, Region: "me-south-1"}: {
|
||||
netip.PrefixFrom(netip.AddrFrom4([4]byte{52, 95, 174, 0}), 24),
|
||||
netip.PrefixFrom(netip.AddrFrom4([4]byte{69, 107, 7, 136}), 29),
|
||||
},
|
||||
{Cloud: GCP, Region: "asia-east1"}: {
|
||||
netip.PrefixFrom(netip.AddrFrom16([16]byte{38, 0, 25, 0, 64, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), 44),
|
||||
netip.PrefixFrom(netip.AddrFrom4([4]byte{34, 137, 0, 0}), 16),
|
||||
netip.PrefixFrom(netip.AddrFrom4([4]byte{34, 80, 0, 0}), 15),
|
||||
netip.PrefixFrom(netip.AddrFrom4([4]byte{35, 185, 128, 0}), 19),
|
||||
},
|
||||
{Cloud: GCP, Region: "us-west4"}: {
|
||||
netip.PrefixFrom(netip.AddrFrom16([16]byte{38, 0, 25, 0, 65, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), 44),
|
||||
},
|
||||
}
|
||||
`
|
||||
|
||||
cloudToRTP := map[string]regionsToPrefixes{
|
||||
"AWS": awsRTP,
|
||||
"GCP": gcpRTP,
|
||||
}
|
||||
// generate and compare
|
||||
w := &bytes.Buffer{}
|
||||
if err := generateRangesGo(w, rtp); err != nil {
|
||||
if err := generateRangesGo(w, cloudToRTP); err != nil {
|
||||
t.Fatalf("unexpected error generating: %v", err)
|
||||
}
|
||||
result := w.String()
|
|
@ -18,16 +18,31 @@ limitations under the License.
|
|||
// See also genrawdata.sh for downloading the raw data to this binary.
|
||||
package main
|
||||
|
||||
import "os"
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// overridable for make verify
|
||||
outputPath := os.Getenv("OUT_FILE")
|
||||
dataDir := os.Getenv("DATA_DIR")
|
||||
if outputPath == "" {
|
||||
outputPath = "./zz_generated_range_data.go"
|
||||
}
|
||||
if dataDir == "" {
|
||||
dataDir = "./internal/ranges2go/data"
|
||||
}
|
||||
// read in data
|
||||
awsRaw := mustReadFile(filepath.Join(dataDir, "aws-ip-ranges.json"))
|
||||
gcpRaw := mustReadFile(filepath.Join(dataDir, "gcp-cloud.json"))
|
||||
// parse raw AWS IP range data
|
||||
rtp, err := regionsToPrefixesFromRaw(ipRangesRaw)
|
||||
awsRTP, err := parseAWS(awsRaw)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// parse GCP IP range data
|
||||
gcpRTP, err := parseGCP(gcpRaw)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -36,7 +51,19 @@ func main() {
|
|||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := generateRangesGo(f, rtp); err != nil {
|
||||
cloudToRTP := map[string]regionsToPrefixes{
|
||||
"AWS": awsRTP,
|
||||
"GCP": gcpRTP,
|
||||
}
|
||||
if err := generateRangesGo(f, cloudToRTP); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func mustReadFile(filePath string) string {
|
||||
contents, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return string(contents)
|
||||
}
|
|
@ -22,14 +22,14 @@ import (
|
|||
"sort"
|
||||
)
|
||||
|
||||
// regionsToPrefixesFromRaw parses raw AWS IP ranges JSON data
|
||||
// parseAWS parses raw AWS IP ranges JSON data
|
||||
// and processes it to a regionsToPrefixes map
|
||||
func regionsToPrefixesFromRaw(raw string) (regionsToPrefixes, error) {
|
||||
parsed, err := parseIPRangesJSON([]byte(raw))
|
||||
func parseAWS(raw string) (regionsToPrefixes, error) {
|
||||
parsed, err := parseAWSIPRangesJSON([]byte(raw))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return regionsToPrefixesFromData(parsed)
|
||||
return awsRegionsToPrefixesFromData(parsed)
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -37,20 +37,20 @@ func regionsToPrefixesFromRaw(raw string) (regionsToPrefixes, error) {
|
|||
https://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html
|
||||
*/
|
||||
|
||||
type IPRangesJSON struct {
|
||||
Prefixes []Prefix `json:"prefixes"`
|
||||
IPv6Prefixes []IPv6Prefix `json:"ipv6_prefixes"`
|
||||
type AWSIPRangesJSON struct {
|
||||
Prefixes []AWSPrefix `json:"prefixes"`
|
||||
IPv6Prefixes []AWSIPv6Prefix `json:"ipv6_prefixes"`
|
||||
// syncToken and createDate omitted
|
||||
}
|
||||
|
||||
type Prefix struct {
|
||||
type AWSPrefix struct {
|
||||
IPPrefix string `json:"ip_prefix"`
|
||||
Region string `json:"region"`
|
||||
Service string `json:"service"`
|
||||
// network_border_group omitted
|
||||
}
|
||||
|
||||
type IPv6Prefix struct {
|
||||
type AWSIPv6Prefix struct {
|
||||
IPv6Prefix string `json:"ipv6_prefix"`
|
||||
Region string `json:"region"`
|
||||
Service string `json:"service"`
|
||||
|
@ -59,19 +59,16 @@ type IPv6Prefix struct {
|
|||
|
||||
// parseIPRangesJSON parse AWS IP ranges JSON data
|
||||
// https://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html
|
||||
func parseIPRangesJSON(rawJSON []byte) (*IPRangesJSON, error) {
|
||||
r := &IPRangesJSON{}
|
||||
func parseAWSIPRangesJSON(rawJSON []byte) (*AWSIPRangesJSON, error) {
|
||||
r := &AWSIPRangesJSON{}
|
||||
if err := json.Unmarshal(rawJSON, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// regionsToPrefixes is the structure we process the JSON into
|
||||
type regionsToPrefixes map[string][]netip.Prefix
|
||||
|
||||
// regionsToPrefixesFromData processes the raw unmarshalled JSON into regionsToPrefixes map
|
||||
func regionsToPrefixesFromData(data *IPRangesJSON) (regionsToPrefixes, error) {
|
||||
// awsRegionsToPrefixesFromData processes the raw unmarshalled JSON into regionsToPrefixes map
|
||||
func awsRegionsToPrefixesFromData(data *AWSIPRangesJSON) (regionsToPrefixes, error) {
|
||||
// convert from AWS published structure to a map by region, parse Prefixes
|
||||
rtp := regionsToPrefixes{}
|
||||
for _, prefix := range data.Prefixes {
|
||||
|
@ -105,20 +102,3 @@ func regionsToPrefixesFromData(data *IPRangesJSON) (regionsToPrefixes, error) {
|
|||
|
||||
return rtp, nil
|
||||
}
|
||||
|
||||
func dedupeSortedPrefixes(s []netip.Prefix) []netip.Prefix {
|
||||
l := len(s)
|
||||
// nothing to do for <= 1
|
||||
if l <= 1 {
|
||||
return s
|
||||
}
|
||||
// for 1..len(s) if previous entry does not match, keep current
|
||||
j := 0
|
||||
for i := 1; i < l; i++ {
|
||||
if s[i].String() != s[i-1].String() {
|
||||
s[j] = s[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return s[0:j]
|
||||
}
|
|
@ -21,7 +21,7 @@ import (
|
|||
"testing"
|
||||
)
|
||||
|
||||
func TestParseIPRangesJSON(t *testing.T) {
|
||||
func TestAWSParseIPRangesJSON(t *testing.T) {
|
||||
// parse a snapshot of a valid subsest of data
|
||||
const testData = `{
|
||||
"syncToken": "1649878400",
|
||||
|
@ -43,15 +43,15 @@ func TestParseIPRangesJSON(t *testing.T) {
|
|||
}
|
||||
]
|
||||
}`
|
||||
expectedParsed := &IPRangesJSON{
|
||||
Prefixes: []Prefix{
|
||||
expectedParsed := &AWSIPRangesJSON{
|
||||
Prefixes: []AWSPrefix{
|
||||
{
|
||||
IPPrefix: "3.5.140.0/22",
|
||||
Region: "ap-northeast-2",
|
||||
Service: "AMAZON",
|
||||
},
|
||||
},
|
||||
IPv6Prefixes: []IPv6Prefix{
|
||||
IPv6Prefixes: []AWSIPv6Prefix{
|
||||
{
|
||||
IPv6Prefix: "2a05:d07a:a000::/40",
|
||||
Region: "eu-south-1",
|
||||
|
@ -59,7 +59,7 @@ func TestParseIPRangesJSON(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
parsed, err := parseIPRangesJSON([]byte(testData))
|
||||
parsed, err := parseAWSIPRangesJSON([]byte(testData))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error parsing testdata: %v", err)
|
||||
}
|
||||
|
@ -72,17 +72,17 @@ func TestParseIPRangesJSON(t *testing.T) {
|
|||
}
|
||||
|
||||
// parse some bogus data
|
||||
_, err = parseIPRangesJSON([]byte(`{"prefixes": false}`))
|
||||
_, err = parseAWSIPRangesJSON([]byte(`{"prefixes": false}`))
|
||||
if err == nil {
|
||||
t.Fatal("expected error parsing garbage data but got none")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRegionsToPrefixesFromData(t *testing.T) {
|
||||
func TestAWSRegionsToPrefixesFromData(t *testing.T) {
|
||||
t.Run("bad IPv4 prefixes", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
badV4Prefixes := &IPRangesJSON{
|
||||
Prefixes: []Prefix{
|
||||
badV4Prefixes := &AWSIPRangesJSON{
|
||||
Prefixes: []AWSPrefix{
|
||||
{
|
||||
IPPrefix: "asdf;asdf,",
|
||||
Service: "AMAZON",
|
||||
|
@ -90,22 +90,22 @@ func TestRegionsToPrefixesFromData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
_, err := regionsToPrefixesFromData(badV4Prefixes)
|
||||
_, err := awsRegionsToPrefixesFromData(badV4Prefixes)
|
||||
if err == nil {
|
||||
t.Fatal("expected error parsing bogus prefix but got none")
|
||||
}
|
||||
})
|
||||
t.Run("bad IPv6 prefixes", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
badV6Prefixes := &IPRangesJSON{
|
||||
Prefixes: []Prefix{
|
||||
badV6Prefixes := &AWSIPRangesJSON{
|
||||
Prefixes: []AWSPrefix{
|
||||
{
|
||||
IPPrefix: "127.0.0.1/32",
|
||||
Service: "AMAZON",
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
IPv6Prefixes: []IPv6Prefix{
|
||||
IPv6Prefixes: []AWSIPv6Prefix{
|
||||
{
|
||||
IPv6Prefix: "asdfasdf----....",
|
||||
Service: "AMAZON",
|
||||
|
@ -113,18 +113,18 @@ func TestRegionsToPrefixesFromData(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
_, err := regionsToPrefixesFromData(badV6Prefixes)
|
||||
_, err := awsRegionsToPrefixesFromData(badV6Prefixes)
|
||||
if err == nil {
|
||||
t.Fatal("expected error parsing bogus prefix but got none")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestRegionsToPrefixesFromRaw(t *testing.T) {
|
||||
func TestParseAWS(t *testing.T) {
|
||||
t.Run("unparsable data", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
badJSON := `{"prefixes":false}`
|
||||
_, err := regionsToPrefixesFromRaw(badJSON)
|
||||
_, err := parseAWS(badJSON)
|
||||
if err == nil {
|
||||
t.Fatal("expected error parsing bogus raw JSON but got none")
|
||||
}
|
|
@ -0,0 +1,93 @@
|
|||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/netip"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// parseGCP parses raw GCP cloud.json data
|
||||
// and processes it to a regionsToPrefixes map
|
||||
func parseGCP(raw string) (regionsToPrefixes, error) {
|
||||
parsed, err := parseGCPCloudJSON([]byte(raw))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return gcpRegionsToPrefixesFromData(parsed)
|
||||
}
|
||||
|
||||
type GCPCloudJSON struct {
|
||||
Prefixes []GCPPrefix `json:"prefixes"`
|
||||
// syncToken and createDate omitted
|
||||
}
|
||||
|
||||
type GCPPrefix struct {
|
||||
IPv4Prefix string `json:"ipv4Prefix"`
|
||||
IPv6Prefix string `json:"ipv6Prefix"`
|
||||
Scope string `json:"scope"`
|
||||
// service omitted
|
||||
}
|
||||
|
||||
// parseGCPCloudJSON parses GCP cloud.json IP ranges JSON data
|
||||
func parseGCPCloudJSON(rawJSON []byte) (*GCPCloudJSON, error) {
|
||||
r := &GCPCloudJSON{}
|
||||
if err := json.Unmarshal(rawJSON, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// gcpRegionsToPrefixesFromData processes the raw unmarshalled JSON into regionsToPrefixes map
|
||||
func gcpRegionsToPrefixesFromData(data *GCPCloudJSON) (regionsToPrefixes, error) {
|
||||
// convert from AWS published structure to a map by region, parse Prefixes
|
||||
rtp := regionsToPrefixes{}
|
||||
for _, prefix := range data.Prefixes {
|
||||
region := prefix.Scope
|
||||
if prefix.IPv4Prefix != "" {
|
||||
ipPrefix, err := netip.ParsePrefix(prefix.IPv4Prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rtp[region] = append(rtp[region], ipPrefix)
|
||||
} else if prefix.IPv6Prefix != "" {
|
||||
ipPrefix, err := netip.ParsePrefix(prefix.IPv6Prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rtp[region] = append(rtp[region], ipPrefix)
|
||||
} else {
|
||||
return nil, errors.New("unexpected entry with no ipv4Prefix or ipv6Prefix")
|
||||
}
|
||||
}
|
||||
|
||||
// flatten
|
||||
numPrefixes := 0
|
||||
for region := range rtp {
|
||||
// this approach allows us to produce consistent generated results
|
||||
// since the ip ranges will be ordered
|
||||
sort.Slice(rtp[region], func(i, j int) bool {
|
||||
return rtp[region][i].String() < rtp[region][j].String()
|
||||
})
|
||||
rtp[region] = dedupeSortedPrefixes(rtp[region])
|
||||
numPrefixes += len(rtp[region])
|
||||
}
|
||||
|
||||
return rtp, nil
|
||||
}
|
|
@ -0,0 +1,131 @@
|
|||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGCPParseIPRangesJSON(t *testing.T) {
|
||||
// parse a snapshot of a valid subsest of data
|
||||
const testData = `{
|
||||
"syncToken": "1675807451971",
|
||||
"creationTime": "2023-02-07T14:04:11.9716",
|
||||
"prefixes": [{
|
||||
"ipv4Prefix": "34.80.0.0/15",
|
||||
"service": "Google Cloud",
|
||||
"scope": "asia-east1"
|
||||
}, {
|
||||
"ipv6Prefix": "2600:1900:4180::/44",
|
||||
"service": "Google Cloud",
|
||||
"scope": "us-west4"
|
||||
}]
|
||||
}
|
||||
`
|
||||
expectedParsed := &GCPCloudJSON{
|
||||
Prefixes: []GCPPrefix{
|
||||
{
|
||||
IPv4Prefix: "34.80.0.0/15",
|
||||
Scope: "asia-east1",
|
||||
},
|
||||
{
|
||||
IPv6Prefix: "2600:1900:4180::/44",
|
||||
Scope: "us-west4",
|
||||
},
|
||||
},
|
||||
}
|
||||
parsed, err := parseGCPCloudJSON([]byte(testData))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error parsing testdata: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(expectedParsed, parsed) {
|
||||
t.Error("parsed did not match expected:")
|
||||
t.Errorf("%#v", expectedParsed)
|
||||
t.Error("parsed: ")
|
||||
t.Errorf("%#v", parsed)
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
// parse some bogus data
|
||||
_, err = parseGCPCloudJSON([]byte(`{"prefixes": false}`))
|
||||
if err == nil {
|
||||
t.Fatal("expected error parsing garbage data but got none")
|
||||
}
|
||||
}
|
||||
|
||||
func TestGCPRegionsToPrefixesFromData(t *testing.T) {
|
||||
t.Run("bad IPv4 prefixes", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
badV4Prefixes := &GCPCloudJSON{
|
||||
Prefixes: []GCPPrefix{
|
||||
{
|
||||
IPv4Prefix: "asdf;asdf,",
|
||||
Scope: "us-east-1",
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := gcpRegionsToPrefixesFromData(badV4Prefixes)
|
||||
if err == nil {
|
||||
t.Fatal("expected error parsing bogus prefix but got none")
|
||||
}
|
||||
})
|
||||
t.Run("bad IPv6 prefixes", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
badV6Prefixes := &GCPCloudJSON{
|
||||
Prefixes: []GCPPrefix{
|
||||
{
|
||||
IPv4Prefix: "127.0.0.1/32",
|
||||
Scope: "us-east-1",
|
||||
},
|
||||
{
|
||||
IPv6Prefix: "asdfasdf----....",
|
||||
Scope: "us-east-1",
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := gcpRegionsToPrefixesFromData(badV6Prefixes)
|
||||
if err == nil {
|
||||
t.Fatal("expected error parsing bogus prefix but got none")
|
||||
}
|
||||
})
|
||||
t.Run("bad no prefixes", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
badNoPrefixes := &GCPCloudJSON{
|
||||
Prefixes: []GCPPrefix{
|
||||
{
|
||||
Scope: "us-east-1",
|
||||
},
|
||||
},
|
||||
}
|
||||
_, err := gcpRegionsToPrefixesFromData(badNoPrefixes)
|
||||
if err == nil {
|
||||
t.Fatal("expected error parsing bogus prefix but got none")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestParseGCP(t *testing.T) {
|
||||
t.Run("unparsable data", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
badJSON := `{"prefixes":false}`
|
||||
_, err := parseGCP(badJSON)
|
||||
if err == nil {
|
||||
t.Fatal("expected error parsing bogus raw JSON but got none")
|
||||
}
|
||||
})
|
||||
}
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
@ -14,18 +14,23 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
package main
|
||||
|
||||
import "k8s.io/registry.k8s.io/pkg/net/cidrs"
|
||||
import "net/netip"
|
||||
|
||||
// NewAWSRegionMapper returns a new cidrs.IPMapper[string] mapping from
|
||||
// IP to AWS regions
|
||||
func NewAWSRegionMapper() cidrs.IPMapper[string] {
|
||||
t := cidrs.NewTrieMap[string]()
|
||||
for prefix, cidrs := range regionToRanges {
|
||||
for _, cidr := range cidrs {
|
||||
t.Insert(cidr, prefix)
|
||||
func dedupeSortedPrefixes(s []netip.Prefix) []netip.Prefix {
|
||||
l := len(s)
|
||||
// nothing to do for <= 1
|
||||
if l <= 1 {
|
||||
return s
|
||||
}
|
||||
// for 1..len(s) if previous entry does not match, keep current
|
||||
j := 0
|
||||
for i := 1; i < l; i++ {
|
||||
if s[i].String() != s[i-1].String() {
|
||||
s[j] = s[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return t
|
||||
return s[0:j]
|
||||
}
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
@ -14,18 +14,11 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
package main
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"net/netip"
|
||||
)
|
||||
|
||||
func TestRegions(t *testing.T) {
|
||||
someWellKnownRegions := []string{
|
||||
"us-east-1",
|
||||
}
|
||||
regions := Regions()
|
||||
for _, region := range someWellKnownRegions {
|
||||
if !regions[region] {
|
||||
t.Fatalf("expected well-known region %q to be in regions but it was not", region)
|
||||
}
|
||||
}
|
||||
}
|
||||
// regionToPrefixes is the structure we process the JSON into
|
||||
type regionsToPrefixes map[string][]netip.Prefix
|
|
@ -0,0 +1,24 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright 2022 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit -o nounset -o pipefail
|
||||
|
||||
# cd to self
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
# fetch data for each supported cloud
|
||||
curl -Lo 'data/aws-ip-ranges.json' 'https://ip-ranges.amazonaws.com/ip-ranges.json'
|
||||
curl -Lo 'data/gcp-cloud.json' 'https://www.gstatic.com/ipranges/cloud.json'
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
@ -14,14 +14,9 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
package cloudcidrs
|
||||
|
||||
// Regions returns a set-like map of all known AWS regions
|
||||
// based on the same underlying data as the rest of this package
|
||||
func Regions() map[string]bool {
|
||||
regions := map[string]bool{}
|
||||
for region := range regionToRanges {
|
||||
regions[region] = true
|
||||
}
|
||||
return regions
|
||||
type IPInfo struct {
|
||||
Cloud string
|
||||
Region string
|
||||
}
|
|
@ -0,0 +1,41 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cloudcidrs
|
||||
|
||||
import "k8s.io/registry.k8s.io/pkg/net/cidrs"
|
||||
|
||||
// NewIPMapper returns cidrs.IPMapper populated with cloud region info
|
||||
// for the clouds we have resources for, currently GCP and AWS
|
||||
func NewIPMapper() cidrs.IPMapper[IPInfo] {
|
||||
t := cidrs.NewTrieMap[IPInfo]()
|
||||
for info, cidrs := range regionToRanges {
|
||||
for _, cidr := range cidrs {
|
||||
t.Insert(cidr, info)
|
||||
}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// AllIPInfos returns a slice of all known results that a NewIPMapper could
|
||||
// return
|
||||
func AllIPInfos() []IPInfo {
|
||||
r := make([]IPInfo, 0, len(regionToRanges))
|
||||
for v := range regionToRanges {
|
||||
r = append(r, v)
|
||||
}
|
||||
return r
|
||||
}
|
|
@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package aws
|
||||
package cloudcidrs
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
|
@ -50,17 +50,17 @@ var testCasesIPv6 = []testCase{
|
|||
// append may re-use the first existing slice ...
|
||||
var allTestCases = append(append([]testCase{}, testCasesIPv4...), testCasesIPv6...)
|
||||
|
||||
func TestNewAWSRegionMapper(t *testing.T) {
|
||||
mapper := NewAWSRegionMapper()
|
||||
func TestNewIPMapper(t *testing.T) {
|
||||
mapper := NewIPMapper()
|
||||
for i := range allTestCases {
|
||||
tc := allTestCases[i]
|
||||
t.Run(tc.Addr.String(), func(t *testing.T) {
|
||||
region, matched := mapper.GetIP(tc.Addr)
|
||||
r, matched := mapper.GetIP(tc.Addr)
|
||||
expectMatched := tc.ExpectedRegion != ""
|
||||
if matched != expectMatched || region != tc.ExpectedRegion {
|
||||
if matched != expectMatched || r.Region != tc.ExpectedRegion {
|
||||
t.Fatalf(
|
||||
"result does not match for %v, got: (%q, %t) expected: (%q, %t)",
|
||||
tc.Addr, region, matched, tc.ExpectedRegion, expectMatched,
|
||||
tc.Addr, r.Region, matched, tc.ExpectedRegion, expectMatched,
|
||||
)
|
||||
}
|
||||
})
|
||||
|
@ -69,15 +69,15 @@ func TestNewAWSRegionMapper(t *testing.T) {
|
|||
|
||||
/* for benchmarking memory / init time */
|
||||
|
||||
func BenchmarkNewAWSRegionMapper(b *testing.B) {
|
||||
func BenchmarkNewIPMapper(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
mapper := NewAWSRegionMapper()
|
||||
mapper := NewIPMapper()
|
||||
// get any address just to prevent mapper being optimized out
|
||||
mapper.GetIP(allTestCases[0].Addr)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkNewAWSegionBruteForce(b *testing.B) {
|
||||
func BenchmarkNewegionBruteForce(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
mapper := cidrs.NewBruteForceMapper(regionToRanges)
|
||||
// get any address just to prevent mapper being optimized out
|
||||
|
@ -87,61 +87,61 @@ func BenchmarkNewAWSegionBruteForce(b *testing.B) {
|
|||
|
||||
/* for benchmarking matching time */
|
||||
|
||||
func BenchmarkAWSRegionTrieMapIPv4(b *testing.B) {
|
||||
mapper := NewAWSRegionMapper()
|
||||
func BenchmarkRegionTrieMapIPv4(b *testing.B) {
|
||||
mapper := NewIPMapper()
|
||||
for n := 0; n < b.N; n++ {
|
||||
tc := testCasesIPv4[n%len(testCasesIPv4)]
|
||||
region, matched := mapper.GetIP(tc.Addr)
|
||||
r, matched := mapper.GetIP(tc.Addr)
|
||||
expectMatched := tc.ExpectedRegion != ""
|
||||
if matched != expectMatched || region != tc.ExpectedRegion {
|
||||
if matched != expectMatched || r.Region != tc.ExpectedRegion {
|
||||
b.Fatalf(
|
||||
"result does not match for %v, got: (%q, %t) expected: (%q, %t)",
|
||||
tc.Addr, region, matched, tc.ExpectedRegion, expectMatched,
|
||||
tc.Addr, r.Region, matched, tc.ExpectedRegion, expectMatched,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAWSRegionTrieMapIPv6(b *testing.B) {
|
||||
mapper := NewAWSRegionMapper()
|
||||
func BenchmarkRegionTrieMapIPv6(b *testing.B) {
|
||||
mapper := NewIPMapper()
|
||||
for n := 0; n < b.N; n++ {
|
||||
tc := testCasesIPv6[n%len(testCasesIPv6)]
|
||||
region, matched := mapper.GetIP(tc.Addr)
|
||||
r, matched := mapper.GetIP(tc.Addr)
|
||||
expectMatched := tc.ExpectedRegion != ""
|
||||
if matched != expectMatched || region != tc.ExpectedRegion {
|
||||
if matched != expectMatched || r.Region != tc.ExpectedRegion {
|
||||
b.Fatalf(
|
||||
"result does not match for %v, got: (%q, %t) expected: (%q, %t)",
|
||||
tc.Addr, region, matched, tc.ExpectedRegion, expectMatched,
|
||||
tc.Addr, r.Region, matched, tc.ExpectedRegion, expectMatched,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAWSRegionBruteForceIPv4(b *testing.B) {
|
||||
func BenchmarkRegionBruteForceIPv4(b *testing.B) {
|
||||
mapper := cidrs.NewBruteForceMapper(regionToRanges)
|
||||
for n := 0; n < b.N; n++ {
|
||||
tc := testCasesIPv4[n%len(testCasesIPv4)]
|
||||
region, matched := mapper.GetIP(tc.Addr)
|
||||
r, matched := mapper.GetIP(tc.Addr)
|
||||
expectMatched := tc.ExpectedRegion != ""
|
||||
if matched != expectMatched || region != tc.ExpectedRegion {
|
||||
if matched != expectMatched || r.Region != tc.ExpectedRegion {
|
||||
b.Fatalf(
|
||||
"result does not match for %v, got: (%q, %t) expected: (%q, %t)",
|
||||
tc.Addr, region, matched, tc.ExpectedRegion, expectMatched,
|
||||
tc.Addr, r.Region, matched, tc.ExpectedRegion, expectMatched,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAWSRegionBruteForceIPv6(b *testing.B) {
|
||||
func BenchmarkRegionBruteForceIPv6(b *testing.B) {
|
||||
mapper := cidrs.NewBruteForceMapper(regionToRanges)
|
||||
for n := 0; n < b.N; n++ {
|
||||
tc := testCasesIPv6[n%len(testCasesIPv6)]
|
||||
region, matched := mapper.GetIP(tc.Addr)
|
||||
r, matched := mapper.GetIP(tc.Addr)
|
||||
expectMatched := tc.ExpectedRegion != ""
|
||||
if matched != expectMatched || region != tc.ExpectedRegion {
|
||||
if matched != expectMatched || r.Region != tc.ExpectedRegion {
|
||||
b.Fatalf(
|
||||
"result does not match for %v, got: (%q, %t) expected: (%q, %t)",
|
||||
tc.Addr, region, matched, tc.ExpectedRegion, expectMatched,
|
||||
tc.Addr, r.Region, matched, tc.ExpectedRegion, expectMatched,
|
||||
)
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue