Compare commits

14 Commits (SHA1):

2649caa6a8
2a7e1c84da
c63c94da82
7e1bac5f7d
d8075cc9c7
f043ccde59
ba9032d024
cff3512a3b
aae1a04d86
3a2dbaa5c4
8010f6cb23
d23527ab6b
ffda742203
ff33c5a606
@@ -1,10 +1,13 @@
 name: ci
+# This file requires QUAY_USERNAME, REGISTRY, ORGANISATION variables and a QUAY_TOKEN secret
 
 on:
   push:
 
 env:
-  GO_VERSION: 1.23.6
+  GO_VERSION: 1.24.3
+  REGISTRY: quay.io
+  ORGANISATION: tigeradev
 
 jobs:
   test:

@@ -20,36 +23,40 @@ jobs:
         run: go mod download
       - name: Build
         run: go build -v ./...
-      - name: Test
-        run: go test -v ./...
       - name: golangci-lint
         uses: golangci/golangci-lint-action@v6
         with:
-          version: v1.60
+          version: v1.64.8
+      - name: Test
+        run: go test -v ./...
+      - name: Run e2e test
+        run: make e2e-test
   deploy:
     needs: test
-    if: ${{ success() && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release-') || startsWith(github.ref, 'refs/tags/v')) }}
+    if: ${{ success() }}
     runs-on: ubuntu-latest
     strategy:
       fail-fast: false
       matrix:
        include:
          - dockerfile: ./images/perf/Dockerfile
-           image: quay.io/tigeradev/tiger-bench-perf
+           image: ${{ vars.REGISTRY }}/${{ vars.ORGANISATION }}/tiger-bench-perf
          - dockerfile: ./images/nginx/Dockerfile
-           image: quay.io/tigeradev/tiger-bench-nginx
+           image: ${{ vars.REGISTRY }}/${{ vars.ORGANISATION }}/tiger-bench-nginx
+         - dockerfile: ./images/ttfr/Dockerfile
+           image: ${{ vars.REGISTRY }}/${{ vars.ORGANISATION }}/tiger-bench-ttfr
          - dockerfile: ./Dockerfile
-           image: quay.io/tigeradev/tiger-bench
+           image: ${{ vars.REGISTRY }}/${{ vars.ORGANISATION }}/tiger-bench
     permissions:
       contents: read
       packages: write
     steps:
       - name: Checkout
         uses: actions/checkout@v4
-      - name: Login to quay.io
+      - name: Login to Docker registry
         uses: docker/login-action@v3
         with:
-          registry: quay.io
+          registry: ${{ vars.REGISTRY }}
           username: ${{ vars.QUAY_USERNAME }}
           password: ${{ secrets.QUAY_TOKEN }}
       - name: Extract metadata (tags, labels) for Docker
@@ -2,3 +2,4 @@ setenv.sh
+kubeconfig*
 results.json
 TODO.md
 venv
Dockerfile (27 changes)

@@ -1,4 +1,4 @@
-ARG GO_VERSION=1.23.6
+ARG GO_VERSION=1.24.3
 
 FROM golang:${GO_VERSION} AS builder
 

@@ -8,22 +8,31 @@ COPY pkg pkg
 COPY *.go go.* ./
 
 RUN ls -ltr /benchmark
-RUN go mod download
-RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build cmd/benchmark.go
+RUN --mount=type=cache,target=/go/pkg/mod/ \
+    --mount=type=bind,source=go.sum,target=go.sum \
+    --mount=type=bind,source=go.mod,target=go.mod \
+    go mod download -x
+ENV GOCACHE=/root/.cache/go-build
+RUN --mount=type=cache,target=/go/pkg/mod/ \
+    --mount=type=cache,target="/root/.cache/go-build" \
+    GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -o /benchmark/benchmark cmd/benchmark.go
 
 RUN mkdir /results
 
 FROM alpine:3.21
 ARG AWS_IAM_AUTHENTICATOR_URL=https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.6.30/aws-iam-authenticator_0.6.30_linux_amd64
 
-ADD ${AWS_IAM_AUTHENTICATOR_URL} /usr/local/bin/aws-iam-authenticator
-RUN apk add --update ca-certificates gettext && \
-    chmod +x /usr/local/bin/aws-iam-authenticator
-RUN apk add --no-cache aws-cli
-
-RUN apk add --no-cache iperf3 curl
+RUN apk add --update ca-certificates gettext
+RUN apk add --no-cache aws-cli iperf3 curl
 RUN apk add --no-cache --repository http://dl-3.alpinelinux.org/alpine/edge/testing/ qperf
 COPY --from=builder /results /results
 COPY --from=builder /benchmark/benchmark /benchmark
 
+RUN curl -L --retry 5 --retry-delay 10 \
+    ${AWS_IAM_AUTHENTICATOR_URL} \
+    -o /usr/local/bin/aws-iam-authenticator && \
+    chmod +x /usr/local/bin/aws-iam-authenticator
+
 ENV KUBECONFIG="/kubeconfig"
 ENV TESTCONFIGFILE="/testconfig.yaml"
 CMD ["/benchmark"]
Makefile (new file)

@@ -0,0 +1,61 @@
+# Makefile for building and testing images in tiger-bench
+
+IMAGE_NAMES = perf nginx ttfr tool
+IMAGES_PATH = images
+REGISTRY?=quay.io
+ORGANISATION?=tigeradev
+E2E_CLUSTER_NAME?=tb-e2e
+
+.PHONY: all build test clean tool test-tool e2e-test clean-ttfr clean-e2e
+
+all: build
+
+build: $(IMAGE_NAMES)
+
+perf:
+	docker build -t $(REGISTRY)/$(ORGANISATION)/tiger-bench-perf -f images/perf/Dockerfile .
+
+nginx:
+	docker build -t $(REGISTRY)/$(ORGANISATION)/tiger-bench-nginx -f images/nginx/Dockerfile .
+
+ttfr:
+	docker build -t $(REGISTRY)/$(ORGANISATION)/tiger-bench-ttfr -f images/ttfr/Dockerfile .
+
+tool:
+	docker build -t $(REGISTRY)/$(ORGANISATION)/tiger-bench -f Dockerfile .
+
+test: $(addprefix test-,$(IMAGE_NAMES))
+
+test-tool:
+	go test ./pkg/... ./cmd/...
+
+test-perf:
+	@echo "No tests defined for perf image."
+
+test-nginx:
+	@echo "No tests defined for nginx image."
+
+test-ttfr:
+	cd images/ttfr && go test -v ./pingo_test.go
+
+clean: clean-perf clean-nginx clean-ttfr clean-tool clean-e2e
+
+clean-perf:
+	docker rmi $(REGISTRY)/$(ORGANISATION)/tiger-bench-perf || true
+
+clean-nginx:
+	docker rmi $(REGISTRY)/$(ORGANISATION)/tiger-bench-nginx || true
+
+clean-ttfr:
+	docker rmi $(REGISTRY)/$(ORGANISATION)/tiger-bench-ttfr || true
+
+clean-tool:
+	docker rmi $(REGISTRY)/$(ORGANISATION)/tiger-bench || true
+
+clean-e2e:
+	kind delete cluster --name $(E2E_CLUSTER_NAME) || true
+	@rm -f kubeconfig
+
+e2e-test: build clean-e2e
+	KIND_CLUSTER_NAME=$(E2E_CLUSTER_NAME) REGISTRY=$(REGISTRY) ORGANISATION=$(ORGANISATION) bash ./e2e-test.sh
+	$(MAKE) clean-e2e
README.md (118 changes)

@@ -108,11 +108,23 @@ A list of test run definitions are provided as [`testconfig.yaml`](testconfig.yaml)
   dataplane: bpf
   iterations: 1
   leaveStandingConfig: true
+- testKind: ttfr
+  numPolicies: 100
+  numServices: 10
+  numPods: 7
+  duration: 60
+  hostNetwork: false
+  iterations: 1
+  leaveStandingConfig: false
+  TestNamespace: testns2
+  TTFRConfig:
+    TestPodsPerNode: 53
+    Rate: 2.5
 ```
 
 There are 2 tests requested in this example config.
 
-`testKind` is required - at present you can only ask for `"thruput-latency"`.
+`testKind` is required - at present you can only ask for `"thruput-latency"` or `ttfr`.
 
 `numPolicies`, `numServices`, `numPods` specify the standing config desired for this test. Standing config exists simply to "load" the cluster up with config; it does not take any active part in the tests themselves. The number that you can create is limited by your cluster - you cannot create more standing pods than will fit on your cluster!

@@ -143,6 +155,19 @@ Note that the tool will NOT expose the services for you, because there are too m
 
 For `thruput-latency` tests, you will need to expose 2 ports from those pods: a TCP `TestPort` and a `ControlPort`. You must not map the port numbers between the pod and the external service, but they do NOT need to be consecutive. i.e. if you specify TestPort=32221, the pod will listen on port 32221 and whatever method you use to expose that service to the outside world must also use that port number.
 
+A `ttfr` test may have the following additional config:
+
+```
+TTFRConfig:
+  TestPodsPerNode: 80
+  Rate: 2.5
+```
+The `TestPodsPerNode` setting controls the number of pods it will try to set up on each test node.
+
+The `Rate` is the rate at which it will send requests to set up pods, in pods per second. Note that the achievable rate depends on a number of things, including the TestPodsPerNode setting (since it cannot set up more than TestPodsPerNode multiplied by the number of nodes with the test label, the tool will stall if all the permitted pods are in the process of starting or terminating). And that will depend on the speed of the kubernetes control plane, kubelet, etc.
+
+In the event that you ask for a rate higher than the tool can achieve, it will run at the maximum rate it can, while logging warnings that it is "unable to keep up with rate". If the problem is running out of pod slots, it will log that also, and you can fix it by either increasing the pods per node or giving more nodes the test label.
 
 ### Settings which can reconfigure your cluster
 
 The following test settings will _reconfigure your cluster_. This could cause disruption to other things running on the cluster, so be careful specifying these in tests.
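The stall behaviour described in the added text above can be pictured with a minimal Go sketch. This is an illustration only, not the tool's implementation; the constants and the in-flight counter are hypothetical stand-ins for the real pod bookkeeping:

```go
// Illustrative sketch of a fixed request rate with a cap on in-flight test
// pods, and how exhausting the pod slots produces the "unable to keep up
// with rate" warnings described above.
package main

import (
	"log"
	"time"
)

func main() {
	const (
		rate            = 2.5 // requested pod creations per second (hypothetical)
		testPodsPerNode = 53  // per-node cap, as in TTFRConfig (hypothetical)
		numTestNodes    = 2   // nodes carrying the test label (hypothetical)
	)
	maxInFlight := testPodsPerNode * numTestNodes

	inFlight := 0 // pods currently starting or terminating
	tick := time.NewTicker(time.Duration(float64(time.Second) / rate))
	defer tick.Stop()

	for i := 0; i < 10; i++ {
		<-tick.C
		if inFlight >= maxInFlight {
			// All permitted pod slots are busy: the loop stalls here, so the
			// achieved rate drops below the requested one.
			log.Println("unable to keep up with rate: no free pod slots")
			continue
		}
		inFlight++ // stand-in for sending a pod-create request
	}
}
```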
@@ -306,3 +331,94 @@ An example result from a "thruput-latency" test might look like:
 `config` contains the configuration requested in the test definition.
 `ClusterDetails` contains information collected about the cluster at the time of the test.
 `thruput-latency` contains a statistical summary of the raw qperf results - latency and throughput for a direct pod-pod test and via a service. Units are given in the result.
+
+
+### The "Time To First Response" test
+
+This "time to first response" (TTFR) test spins up a server pod on each node in the cluster, and then spins up client pods on each node in the cluster. The client pods start and send requests to the server pod, and record the amount of time it takes before they get a response. This is sometimes[1] a useful proxy for how long it's taking Calico to program the rules for that pod (since pods start with a deny-all rule and calico-node must program the correct rules before the pod can talk to anything). A better measure of the time it takes Calico to program rules for pods is to look in the [Felix Prometheus metrics](https://docs.tigera.io/calico/latest/reference/felix/prometheus#common-data-plane-metrics) at the `felix_int_dataplane_apply_time_seconds` statistic.
+
+[1] If `linuxPolicySetupTimeoutSeconds` is set in the CalicoNetworkSpec in the Installation resource, then pod startup will be delayed until policy is applied. This can be handy if your application pod wants its first request to always succeed. This is a Calico-specific feature that is not part of the CNI spec. See the [Calico documentation](https://docs.tigera.io/calico/latest/reference/configure-cni-plugins#enabling-policy-setup-timeout) for more information on this feature and how to enable it.
+
+For a "ttfr" test, the tool will:
+
+- Create a test namespace
+- Create a deployment of `numPods` pods that are unrelated to the test and apply `numPolicies` policies to them (standing pods and policies).
+- Create another deployment of 10 pods, and create `numServices` services that point to those 10 pods.
+- Wait for those to come up.
+- Create a server pod on each node with the `tigera.io/test-nodepool=default-pool` label
+- Loop round:
+  - creating test pods on those nodes, at the rate defined by Rate in the test config
+  - test pods are then checked until they produce a ttfr result in their log, which is read by the tool
+  - and a delete is sent for the test pod.
+  - ttfr results are recorded
+- Collate results and compute min/max/average/50/75/90/99th percentiles
+- Output that summary into a JSON format results file.
+- Optionally delete the test namespace (which will cause all test resources within it to be deleted)
+- Wait for everything to finish being cleaned up.
+
+This test measures Time to First Response in seconds, i.e. the time between a pod starting up and it getting a response from a server pod on the same node.
+
+An example result from a "ttfr" test might look like:
+```
+[
+  {
+    "config": {
+      "TestKind": "ttfr",
+      "Encap": "",
+      "Dataplane": "",
+      "NumPolicies": 100,
+      "NumServices": 10,
+      "NumPods": 7,
+      "HostNetwork": false,
+      "TestNamespace": "testns2",
+      "Iterations": 1,
+      "Duration": 60,
+      "DNSPerf": null,
+      "Perf": null,
+      "TTFRConfig": {
+        "TestPodsPerNode": 80,
+        "Rate": 10
+      },
+      "CalicoNodeCPULimit": "",
+      "LeaveStandingConfig": false
+    },
+    "ClusterDetails": {
+      "Cloud": "unknown",
+      "Provisioner": "kubeadm",
+      "NodeType": "linux",
+      "NodeOS": "Ubuntu 20.04.6 LTS",
+      "NodeKernel": "5.15.0-1081-gcp",
+      "NodeArch": "amd64",
+      "NumNodes": 3,
+      "Dataplane": "bpf",
+      "IPFamily": "ipv4",
+      "Encapsulation": "VXLANCrossSubnet",
+      "WireguardEnabled": false,
+      "Product": "calico",
+      "CalicoVersion": "v3.30.0-0.dev-852-g389eae30ae5d",
+      "K8SVersion": "v1.32.4",
+      "CRIVersion": "containerd://1.7.27",
+      "CNIOption": "Calico"
+    },
+    "ttfr": [
+      {
+        "ttfrSummary": {
+          "min": 0.001196166,
+          "max": 0.01283499,
+          "avg": 0.0033952200330330333,
+          "P50": 0.002893934,
+          "P75": 0.003768213,
+          "P90": 0.005621623,
+          "P99": 0.011158944,
+          "unit": "seconds",
+          "datapoints": 333
+        }
+      }
+    ]
+  }
+]
+```
+
+`config` contains the configuration requested in the test definition.
+`ClusterDetails` contains information collected about the cluster at the time of the test.
+`ttfr` contains a statistical summary of the raw results. Units are given in the result.
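The `ttfrSummary` block reports min/max/avg plus P50/P75/P90/P99 over the collected samples. A minimal Go sketch of that kind of summary follows; it is not the tool's `SummarizeResults` implementation, and it uses a simple nearest-rank percentile, which may round differently from the real code:

```go
// Compute min/max/avg and a few percentiles over ttfr samples (in seconds),
// mirroring the fields reported in the ttfrSummary result above.
package main

import (
	"fmt"
	"sort"
)

// percentile returns the nearest-rank p-th percentile of an ascending,
// non-empty slice.
func percentile(sorted []float64, p float64) float64 {
	idx := int(p/100*float64(len(sorted))+0.5) - 1
	if idx < 0 {
		idx = 0
	}
	if idx >= len(sorted) {
		idx = len(sorted) - 1
	}
	return sorted[idx]
}

func main() {
	// Hypothetical ttfr datapoints, not taken from a real run.
	samples := []float64{0.0012, 0.0029, 0.0031, 0.0038, 0.0056, 0.0128}
	sort.Float64s(samples)

	sum := 0.0
	for _, s := range samples {
		sum += s
	}
	fmt.Printf("min=%g max=%g avg=%g\n",
		samples[0], samples[len(samples)-1], sum/float64(len(samples)))
	for _, p := range []float64{50, 75, 90, 99} {
		fmt.Printf("P%g=%g\n", p, percentile(samples, p))
	}
}
```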
cmd/benchmark.go (140 changes)

@@ -33,18 +33,23 @@ import (
 	"github.com/projectcalico/tiger-bench/pkg/policy"
 	"github.com/projectcalico/tiger-bench/pkg/qperf"
 	"github.com/projectcalico/tiger-bench/pkg/results"
+	"github.com/projectcalico/tiger-bench/pkg/ttfr"
 	"github.com/projectcalico/tiger-bench/pkg/utils"
 )
 
 func main() {
 	log.SetReportCaller(true)
 	log.SetLevel(log.InfoLevel)
-	log.SetFormatter(&log.TextFormatter{
+	customFormatter := &log.TextFormatter{
 		CallerPrettyfier: func(frame *runtime.Frame) (function string, file string) {
 			fileName := path.Base(frame.File) + ":" + strconv.Itoa(frame.Line)
 			return "", fileName
 		},
-	})
+	}
+	customFormatter.TimestampFormat = "2006-01-02 15:04:05.000"
+	customFormatter.FullTimestamp = true
+	log.SetFormatter(customFormatter)
 
 	// get environment variables
 	ctx := context.Background()
 	cfg, clients, err := config.New(ctx)

@@ -69,6 +74,8 @@ func main() {
 	if err != nil {
 		log.WithError(err).Fatal("failed to configure cluster")
 	}
+	defer cleanupNamespace(ctx, clients, testConfig)
+
 	err = cluster.SetupStandingConfig(ctx, clients, *testConfig, testConfig.TestNamespace, cfg.WebServerImage)
 	if err != nil {
 		log.WithError(err).Fatal("failed to setup standing config on cluster")

@@ -126,49 +133,42 @@ func main() {
 			}
 		}
 	case config.TestKindDNSPerf:
-		_, err = policy.GetOrCreateDNSPolicy(ctx, clients, dnsperf.MakeDNSPolicy(testConfig.TestNamespace, testPolicyName, testConfig.DNSPerf.NumDomains))
-		if err != nil {
-			log.WithError(err).Fatal("failed to create dnsperf policy")
+		if testConfig.DNSPerf.TestDNSPolicy {
+			_, err = policy.GetOrCreateDNSPolicy(ctx, clients, dnsperf.MakeDNSPolicy(testConfig.TestNamespace, testPolicyName, testConfig.DNSPerf.NumDomains))
+			if err != nil {
+				log.WithError(err).Fatal("failed to create dnsperf policy")
+			}
 		}
-		thisResult.DNSPerf, err = dnsperf.RunDNSPerfTests(ctx, clients, testConfig.Duration, testConfig.TestNamespace, cfg.WebServerImage, cfg.PerfImage)
+		thisResult.DNSPerf, err = dnsperf.RunDNSPerfTests(ctx, clients, testConfig, cfg.WebServerImage, cfg.PerfImage)
 		if err != nil {
 			log.WithError(err).Error("failed to run dnsperf tests")
 		}
 		log.Infof("dnsperf results: %v", thisResult.DNSPerf)
+	case config.TestKindTTFR:
+		var ttfrResultsList []*ttfr.Results
+		// Apply standing policy (that applies to both server and test pods)
+		err := policy.CreateTestPolicy(ctx, clients, testPolicyName, testConfig.TestNamespace, []int{8080})
+		if err != nil {
+			log.WithError(err).Fatal("failed to create ttfr test policy")
+		}
+		log.Info("Running ttfr tests, Iterations=", testConfig.Iterations)
+		for j := 0; j < testConfig.Iterations; j++ {
+			ttfrResult, err := ttfr.RunTTFRTest(ctx, clients, testConfig, cfg)
+			if err != nil {
+				log.WithError(err).Error("failed to get ttfr results")
+				continue
+			}
+			ttfrResultsList = append(ttfrResultsList, &ttfrResult)
+		}
+		if len(ttfrResultsList) > 0 {
+			thisResult.TTFR, err = ttfr.SummarizeResults(ttfrResultsList)
+			if err != nil {
+				log.WithError(err).Error("failed to summarize ttfr results")
+			}
+		}
 	default:
 		log.Fatal("test type unknown")
 	}
-	if !testConfig.LeaveStandingConfig {
-		// Clean up all the resources we might have created, apart from the namespace, which might have external service config in it
-		err = utils.DeleteDeployment(ctx, clients, testConfig.TestNamespace, "standing-deployment")
-		if err != nil {
-			log.WithError(err).Fatal("failed to delete standing-deployment")
-		}
-		err = utils.DeleteDeployment(ctx, clients, testConfig.TestNamespace, "standing-svc")
-		if err != nil {
-			log.WithError(err).Fatal("failed to delete standing-svc")
-		}
-		err = utils.DeleteServicesWithPrefix(ctx, clients, testConfig.TestNamespace, "standing-svc")
-		if err != nil {
-			log.WithError(err).Fatal("failed to delete standing-svc")
-		}
-		err = utils.DeleteServicesWithPrefix(ctx, clients, testConfig.TestNamespace, "iperf-srv")
-		if err != nil {
-			log.WithError(err).Fatal("failed to delete iperf-srv")
-		}
-		err = utils.DeleteServicesWithPrefix(ctx, clients, testConfig.TestNamespace, "qperf-srv")
-		if err != nil {
-			log.WithError(err).Fatal("failed to delete qperf-srv")
-		}
-		err = utils.DeletePodsWithLabel(ctx, clients, testConfig.TestNamespace, "app=iperf")
-		if err != nil {
-			log.WithError(err).Fatal("failed to delete iperf pods")
-		}
-		err = utils.DeletePodsWithLabel(ctx, clients, testConfig.TestNamespace, "app=qperf")
-		if err != nil {
-			log.WithError(err).Fatal("failed to delete qperf pods")
-		}
-	}
 	// If we set the CPU limit, unset it again.
 	if testConfig.CalicoNodeCPULimit != "" {
 		err = cluster.SetCalicoNodeCPULimit(ctx, clients, "0")

@@ -194,11 +194,69 @@ func main() {
 	}
 }
 
+func cleanupNamespace(ctx context.Context, clients config.Clients, testConfig *config.TestConfig) {
+	log.Debug("entering cleanupNamespace function")
+	if !testConfig.LeaveStandingConfig {
+		// Clean up all the resources we might have created, apart from the namespace, which might have external service config in it
+		log.Info("Cleaning up namespace: ", testConfig.TestNamespace)
+		err := utils.DeleteDeploymentsWithPrefix(ctx, clients, testConfig.TestNamespace, "standing-deployment")
+		if err != nil {
+			log.WithError(err).Error("failed to delete standing-deployment")
+		}
+		err = utils.DeleteDeploymentsWithPrefix(ctx, clients, testConfig.TestNamespace, "standing-svc")
+		if err != nil {
+			log.WithError(err).Error("failed to delete standing-svc")
+		}
+		err = utils.DeleteServicesWithPrefix(ctx, clients, testConfig.TestNamespace, "standing-svc")
+		if err != nil {
+			log.WithError(err).Error("failed to delete standing-svc")
+		}
+		err = utils.DeleteDeploymentsWithPrefix(ctx, clients, testConfig.TestNamespace, "ttfr-test-")
+		if err != nil {
+			log.WithError(err).Error("failed to delete ttfr deployments")
+		}
+		err = utils.DeleteDeploymentsWithPrefix(ctx, clients, testConfig.TestNamespace, "headless")
+		if err != nil {
+			log.WithError(err).Error("failed to delete headless deployments")
+		}
+		err = utils.DeleteServicesWithPrefix(ctx, clients, testConfig.TestNamespace, "iperf-srv")
+		if err != nil {
+			log.WithError(err).Error("failed to delete iperf-srv")
+		}
+		err = utils.DeleteServicesWithPrefix(ctx, clients, testConfig.TestNamespace, "qperf-srv")
+		if err != nil {
+			log.WithError(err).Error("failed to delete qperf-srv")
+		}
+		err = utils.DeletePodsWithLabel(ctx, clients, testConfig.TestNamespace, "app=iperf")
+		if err != nil {
+			log.WithError(err).Error("failed to delete iperf pods")
+		}
+		err = utils.DeletePodsWithLabel(ctx, clients, testConfig.TestNamespace, "app=qperf")
+		if err != nil {
+			log.WithError(err).Error("failed to delete qperf pods")
+		}
+		err = utils.DeletePodsWithLabel(ctx, clients, testConfig.TestNamespace, "app=ttfr")
+		if err != nil {
+			log.WithError(err).Error("failed to delete ttfr pods")
+		}
+		err = utils.DeletePodsWithLabel(ctx, clients, testConfig.TestNamespace, "app=dnsperf")
+		if err != nil {
+			log.WithError(err).Error("failed to delete dnsperf pods")
+		}
+		err = utils.DeleteNetPolsInNamespace(ctx, clients, testConfig.TestNamespace)
+		if err != nil {
+			log.WithError(err).Error("failed to delete netpols")
+		}
+		log.Info("Cleanup complete")
+	}
+}
+
 func writeResultToFile(filename string, results []results.Result) (err error) {
 	log.Debug("entering writeResultToFile function")
 	file, err := os.Create(filename)
 	if err != nil {
-		return fmt.Errorf("failed to open output file: %s", filename)
+		log.WithError(err).Errorf("failed to open output file: %s", filename)
+		return err
 	}
 	defer func() {
 		closeErr := file.Close()

@@ -208,11 +266,13 @@ func writeResultToFile(filename string, results []results.Result) (err error) {
 	}()
 	output, err := json.MarshalIndent(results, "", " ")
 	if err != nil {
-		return fmt.Errorf("failed to marshal results: %s", err)
+		log.WithError(err).Errorf("failed to marshal results: %s", err)
+		return err
 	}
 	_, err = file.Write(output)
 	if err != nil {
-		return fmt.Errorf("failed to write results to file: %s", err)
+		log.WithError(err).Errorf("failed to write results to file: %s", err)
+		return err
 	}
 	return nil
 }
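The refactor above moves the inline cleanup block (which only ran on one path through `main`) into a `cleanupNamespace` function registered with `defer` as soon as the cluster is configured, and downgrades cleanup failures from `Fatal` to `Error` so one failed delete no longer aborts the rest. A stripped-down sketch of the pattern, with hypothetical names:

```go
// Sketch only, not the tool's code: register cleanup once with defer so it
// runs on any normal return from main.
package main

import "log"

// cleanup mirrors the role of cleanupNamespace: best-effort deletion that
// logs errors instead of aborting, so one failed delete does not stop the rest.
func cleanup(namespace string, leaveStandingConfig bool) {
	if leaveStandingConfig {
		return // user asked to keep the standing config for the next run
	}
	log.Println("Cleaning up namespace:", namespace)
	// ... delete deployments, services, pods, and network policies here ...
	log.Println("Cleanup complete")
}

func main() {
	namespace := "testns2" // hypothetical test namespace
	// Note: logrus's Fatal calls os.Exit, which skips deferred functions,
	// so Fatal error paths still bypass this cleanup.
	defer cleanup(namespace, false)

	// ... configure the cluster and run the selected tests ...
}
```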
e2e-test.sh (new file)

@@ -0,0 +1,50 @@
+#!/bin/bash
+set -euox pipefail
+
+# E2E test for tiger-bench tool using KinD
+
+# Variables
+KIND_CLUSTER_NAME="${KIND_CLUSTER_NAME:-tb-e2e}"
+KUBECONFIG_PATH="${KUBECONFIG_PATH:-$(pwd)/kubeconfig}"
+TEST_YAML="${TEST_YAML:-e2e-testconfig.yaml}"
+TOOL_IMAGE="${TOOL_IMAGE:-tiger-bench:latest}"
+CALICO_VERSION="${CALICO_VERSION:-v3.30.2}"
+REGISTRY="${REGISTRY:-quay.io}"
+ORGANISATION="${ORGANISATION:-tigeradev}"
+
+kind create cluster --kubeconfig "$KUBECONFIG_PATH" --config kind-config.yaml || true
+
+# Install Calico
+curl --retry 10 --retry-all-errors -sSL https://raw.githubusercontent.com/projectcalico/calico/$CALICO_VERSION/manifests/operator-crds.yaml | kubectl --kubeconfig "$KUBECONFIG_PATH" apply --server-side --force-conflicts -f -
+curl --retry 10 --retry-all-errors -sSL https://raw.githubusercontent.com/projectcalico/calico/$CALICO_VERSION/manifests/tigera-operator.yaml | kubectl --kubeconfig "$KUBECONFIG_PATH" apply --server-side --force-conflicts -f -
+curl --retry 10 --retry-all-errors -sSL https://raw.githubusercontent.com/projectcalico/calico/$CALICO_VERSION/manifests/custom-resources.yaml | kubectl --kubeconfig "$KUBECONFIG_PATH" apply --server-side --force-conflicts -f -
+
+# Load test images into KinD nodes
+for img in tiger-bench-perf tiger-bench-nginx tiger-bench-ttfr; do
+  docker image inspect "$REGISTRY/$ORGANISATION/$img:latest" >/dev/null 2>&1 || { echo "Image $img not found"; exit 1; }
+  kind load docker-image "$REGISTRY/$ORGANISATION/$img:latest" --name "$KIND_CLUSTER_NAME"
+done
+
+# Wait for nodes to be ready
+kubectl --kubeconfig "$KUBECONFIG_PATH" wait --for=condition=Ready nodes --all --timeout=600s
+
+# Label nodes as described in README
+kubectl --kubeconfig "$KUBECONFIG_PATH" label node $KIND_CLUSTER_NAME-worker tigera.io/test-nodepool=default-pool
+kubectl --kubeconfig "$KUBECONFIG_PATH" label node $KIND_CLUSTER_NAME-control-plane tigera.io/test-nodepool=default-pool
+
+# Wait for Calico to be ready
+kubectl --kubeconfig "$KUBECONFIG_PATH" wait --for=condition=Available tigerastatus --all --timeout=600s
+
+# Run tiger-bench container with kubeconfig and test yaml
+# Assumes testconfig.yaml is present in the repo root
+docker run --rm --net=host \
+  -v "${PWD}":/results \
+  -v "$KUBECONFIG_PATH:/kubeconfig:ro" \
+  -v "$(pwd)/$TEST_YAML:/testconfig.yaml:ro" \
+  -e WEBSERVER_IMAGE="$REGISTRY/$ORGANISATION/tiger-bench-nginx:latest" \
+  -e PERF_IMAGE="$REGISTRY/$ORGANISATION/tiger-bench-perf:latest" \
+  -e TTFR_IMAGE="$REGISTRY/$ORGANISATION/tiger-bench-ttfr:latest" \
+  "$REGISTRY/$ORGANISATION/$TOOL_IMAGE"
+
+# Validate the results file
+go run validate_results.go
e2e-testconfig.yaml (new file)

@@ -0,0 +1,28 @@
+- testKind: thruput-latency
+  numPolicies: 5
+  numServices: 10
+  numPods: 7
+  duration: 10
+  hostNetwork: false
+  iterations: 1
+  LeaveStandingConfig: true
+
+- testKind: iperf
+  numPolicies: 6
+  numServices: 11
+  numPods: 6
+  duration: 10
+  hostNetwork: false
+  iterations: 1
+  LeaveStandingConfig: true
+
+- testKind: ttfr
+  numPolicies: 7
+  numServices: 12
+  numPods: 8
+  duration: 60
+  hostNetwork: false
+  iterations: 1
+  TTFRConfig:
+    TestPodsPerNode: 53
+    Rate: 2.5
go.mod (83 changes)

@@ -1,65 +1,66 @@
 module github.com/projectcalico/tiger-bench
 
-go 1.23.4
+go 1.24.0
 
-toolchain go1.23.6
+toolchain go1.24.3
 
 require (
 	github.com/go-playground/validator/v10 v10.24.0
 	github.com/kelseyhightower/envconfig v1.4.0
-	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.62.0
-	github.com/prometheus/client_golang v1.20.5
+	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.82.0
+	github.com/prometheus/client_golang v1.22.0
 	github.com/rung/go-safecast v1.0.1
 	github.com/stretchr/testify v1.10.0
-	github.com/tigera/api v0.0.0-20240320170621-278e89a8c5fb
-	golang.org/x/mod v0.22.0
-	k8s.io/client-go v0.32.0
-	sigs.k8s.io/controller-runtime v0.15.3
+	github.com/tigera/api v0.0.0-20250516135852-26769a70a63a
+	golang.org/x/mod v0.23.0
+	gopkg.in/yaml.v2 v2.4.0
+	k8s.io/client-go v0.32.4
+	k8s.io/kubernetes v1.33.0
+	sigs.k8s.io/controller-runtime v0.20.4
 )
 
 require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
-	github.com/fsnotify/fsnotify v1.8.0 // indirect
-	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/evanphx/json-patch/v5 v5.9.11 // indirect
+	github.com/fsnotify/fsnotify v1.9.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.8 // indirect
 	github.com/go-logr/zapr v1.3.0 // indirect
 	github.com/go-playground/locales v0.14.1 // indirect
 	github.com/go-playground/universal-translator v0.18.1 // indirect
-	github.com/google/go-cmp v0.6.0 // indirect
-	github.com/gorilla/websocket v1.5.0 // indirect
+	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/google/go-cmp v0.7.0 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
 	github.com/jinzhu/copier v0.4.0 // indirect
 	github.com/klauspost/compress v1.17.11 // indirect
 	github.com/leodido/go-urn v1.4.0 // indirect
 	github.com/moby/spdystream v0.5.0 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/nxadm/tail v1.4.11 // indirect
 	github.com/onsi/ginkgo/v2 v2.23.3 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/projectcalico/api v0.0.0-20220722155641-439a754a988b // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.61.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/rogpeppe/go-internal v1.13.1 // indirect
+	github.com/projectcalico/api v0.0.0-20250326193936-759a4c3213d1 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.63.0 // indirect
+	github.com/prometheus/procfs v0.16.1 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
-	golang.org/x/crypto v0.35.0 // indirect
-	golang.org/x/tools v0.29.0 // indirect
+	golang.org/x/crypto v0.37.0 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
-	k8s.io/apiserver v0.30.0 // indirect
+	k8s.io/apiserver v0.32.4 // indirect
+	sigs.k8s.io/randfill v1.0.0 // indirect
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/elastic/go-elasticsearch/v7 v7.17.10
-	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
-	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonpointer v0.21.1 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
-	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-openapi/swag v0.23.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
-	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/gnostic-models v0.6.9 // indirect
-	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect

@@ -70,22 +71,22 @@ require (
 	github.com/sethvargo/go-retry v0.2.4
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/pflag v1.0.5 // indirect
-	github.com/tigera/operator v1.34.0
-	golang.org/x/net v0.36.0
-	golang.org/x/oauth2 v0.25.0 // indirect
-	golang.org/x/sys v0.30.0 // indirect
-	golang.org/x/term v0.29.0 // indirect
-	golang.org/x/text v0.22.0 // indirect
+	github.com/tigera/operator v1.36.0
+	golang.org/x/net v0.39.0
+	golang.org/x/oauth2 v0.27.0 // indirect
+	golang.org/x/sys v0.32.0 // indirect
+	golang.org/x/term v0.31.0 // indirect
+	golang.org/x/text v0.24.0 // indirect
 	golang.org/x/time v0.9.0 // indirect
-	google.golang.org/protobuf v1.36.1 // indirect
+	google.golang.org/protobuf v1.36.6 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/api v0.32.0
-	k8s.io/apimachinery v0.32.0
+	k8s.io/api v0.32.4
+	k8s.io/apimachinery v0.32.4
 	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
-	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
-	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
+	k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e // indirect
+	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
711
go.sum
711
go.sum
|
@ -1,124 +1,36 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
|
||||
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
|
||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/elastic/go-elasticsearch/v7 v7.17.10 h1:TCQ8i4PmIJuBunvBS6bwT2ybzVFxxUhhltAs3Gyu1yo=
|
||||
github.com/elastic/go-elasticsearch/v7 v7.17.10/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.11.2-0.20200112161605-a7c079c43d51+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
|
||||
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
|
||||
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
|
||||
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
|
||||
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
|
||||
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/jsonreference v0.19.4-0.20191224164422-1f9748e5f45e/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
|
||||
github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
|
||||
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
|
||||
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
|
||||
github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
|
@ -127,637 +39,194 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
|
|||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.24.0 h1:KHQckvo8G6hlWnrPX4NJJ+aBfWNAE/HH+qdL2cBpCmg=
|
||||
github.com/go-playground/validator/v10 v10.24.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y=
github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/projectcalico/api v0.0.0-20220722155641-439a754a988b h1:dW+UhJMzusDO6hqVGuCYeDxXWAzc7HnA9CsPN+uHPnA=
github.com/projectcalico/api v0.0.0-20220722155641-439a754a988b/go.mod h1:Avoy1rTN1GfeisnHGf3WhQNqR+BuGOcwfNFsdWX6OHE=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.62.0 h1:55138zTXw/yRYizPxZ672I/aDD7Yte3uYRAfUjWUu2M=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.62.0/go.mod h1:j51242bf6LQwvJ1JPKWApzTnifmCwcQq0i1p29ylWiM=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/projectcalico/api v0.0.0-20250326193936-759a4c3213d1 h1:n2lqKIGOvmham6sSmY7zQ1YCbD9E70vUkpdxPDDH9/w=
github.com/projectcalico/api v0.0.0-20250326193936-759a4c3213d1/go.mod h1:OORX6y/uicCv0g5dYqoaP3Lqntw/ACBEzrBWbR+jX74=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.82.0 h1:Ee6zu4IR/WKYEcYHL4+gbC1A3GAzlHWxSjjMyRVBHYw=
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.82.0/go.mod h1:hY5yoQsoIalncoxYqXXCDL5y7f+GGYYlW9Bi2IdU5KY=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/rung/go-safecast v1.0.1 h1:7rkt2qO4JGdOkWKdPEBFLaEwQy20y0IhhWJNFxmH0p0=
github.com/rung/go-safecast v1.0.1/go.mod h1:dzUcUS2UMtbfVc7w6mx/Ur3UYcpXEZC+WilISksJ4P8=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tigera/api v0.0.0-20240320170621-278e89a8c5fb h1:eMA9CSNJ/BlL78ks28Ikxjip8UqXWMbAr3rAU0N6JvI=
github.com/tigera/api v0.0.0-20240320170621-278e89a8c5fb/go.mod h1:QQw4zkSGOo1yLpji99IINB0f/ORvyngD2awv/LZKWSo=
github.com/tigera/operator v1.34.0 h1:iBtEi2+sGAa8vfycNok38iU5JdPupsauB3nmVcubSyg=
github.com/tigera/operator v1.34.0/go.mod h1:gqCRZUAJpvbMJYNkvtNMhqR8I7Xi2lgPFI7MzrRJqTU=
github.com/tigera/api v0.0.0-20250516135852-26769a70a63a h1:eMGO0ULnwF2s5y3u7sU6Xp1Y+DcC0aMd1CCU+IiIExA=
github.com/tigera/api v0.0.0-20250516135852-26769a70a63a/go.mod h1:LsLEYXzVwk5FRNyvUI5jWc4eIqyXJh0ZfNckk9ZrXqM=
github.com/tigera/operator v1.36.0 h1:d0+1pbZsGXfp3zSmR/SQxIagEjm4wszo5/mUo0/tGto=
github.com/tigera/operator v1.36.0/go.mod h1:oiZ7qwlvN9BZLZfVagxjJ3a5lRf5LgYQnoH0f5skU1c=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA=
golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.3.0 h1:8NFhfS6gzxNqjLIYnZxg319wZ5Qjnx4m/CcX+Klzazc=
gomodules.xyz/jsonpatch/v2 v2.3.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.23.2/go.mod h1:sYuDb3flCtRPI8ghn6qFrcK5ZBu2mhbElxRE95qpwlI=
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
k8s.io/apiextensions-apiserver v0.28.9 h1:yzPHp+4IASHeu7XIPkAKJrY4UjWdjiAjOcQMd6oNKj0=
k8s.io/apiextensions-apiserver v0.28.9/go.mod h1:Rjhvq5y3JESdZgV2UOByldyefCfRrUguVpBLYOAIbVs=
k8s.io/apimachinery v0.23.2/go.mod h1:zDqeV0AK62LbCI0CI7KbWCAYdLg+E+8UXJ0rIz5gmS8=
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M=
k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY=
k8s.io/client-go v0.23.2/go.mod h1:k3YbsWg6GWdHF1THHTQP88X9RhB1DWPo3Dq7KfU/D1c=
k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/api v0.32.4 h1:kw8Y/G8E7EpNy7gjB8gJZl3KJkNz8HM2YHrZPtAZsF4=
k8s.io/api v0.32.4/go.mod h1:5MYFvLvweRhyKylM3Es/6uh/5hGp0dg82vP34KifX4g=
k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY=
|
||||
k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss=
|
||||
k8s.io/apimachinery v0.32.4 h1:8EEksaxA7nd7xWJkkwLDN4SvWS5ot9g6Z/VZb3ju25I=
|
||||
k8s.io/apimachinery v0.32.4/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
|
||||
k8s.io/apiserver v0.32.4 h1:Yf7sd/y+GOQKH1Qf6wUeayZrYXe2SKZ17Bcq7VQM5HQ=
|
||||
k8s.io/apiserver v0.32.4/go.mod h1:JFUMNtE2M5yqLZpIsgCb06SkVSW1YcxW1oyLSTfjXR8=
|
||||
k8s.io/client-go v0.32.4 h1:zaGJS7xoYOYumoWIFXlcVrsiYioRPrXGO7dBfVC5R6M=
|
||||
k8s.io/client-go v0.32.4/go.mod h1:k0jftcyYnEtwlFW92xC7MTtFv5BNcZBr+zn9jPlT9Ic=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
|
||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/controller-runtime v0.15.3 h1:L+t5heIaI3zeejoIyyvLQs5vTVu/67IU2FfisVzFlBc=
|
||||
sigs.k8s.io/controller-runtime v0.15.3/go.mod h1:kp4jckA4vTx281S/0Yk2LFEEQe67mjg+ev/yknv47Ds=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/kubernetes v1.33.0 h1:BP5Y5yIzUZVeBuE/ESZvnw6TNxjXbLsCckIkljE+R0U=
|
||||
k8s.io/kubernetes v1.33.0/go.mod h1:2nWuPk0seE4+6sd0x60wQ6rYEXcV7SoeMbU0YbFm/5k=
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
|
||||
sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
|
@ -1,11 +1,4 @@
# Based on https://torstenwalter.de/openshift/nginx/2017/08/04/nginx-on-openshift.html
FROM nginxinc/nginx-unprivileged:stable-alpine

FROM nginx:stable

# support running as an arbitrary user which belongs to the root group
RUN chmod g+rwx /var/cache/nginx /var/run /var/log/nginx
# users are not allowed to listen on privileged ports
RUN sed -i.bak 's/listen\(.*\)80;/listen 8080;/' /etc/nginx/conf.d/default.conf
USER nginx
EXPOSE 8080
# comment out the user directive, as the master process is run as a user in OpenShift anyhow
RUN sed -i.bak 's/^user/#user/' /etc/nginx/nginx.conf
@ -1,17 +0,0 @@
all: image

REGISTRY?=quay.io
VERSION?=latest
ORG?=tigeradev
NAME?=nginx

.PHONY: image
image:
	docker build -t $(REGISTRY)/$(ORG)/$(NAME):$(VERSION) .

.PHONY: push
push: image
	docker push $(REGISTRY)/$(ORG)/$(NAME):$(VERSION)

clean:
	-docker rmi $(REGISTRY)/$(ORG)/$(NAME):$(VERSION)
@ -1,3 +1,24 @@
FROM alpine:3.21 AS build-env
RUN apk add --no-cache build-base git autoconf automake perl perl-doc

# Version 0.4.11 requires ipv6, 0.4.10 does not support ipv6 at all.
# To permit running on nodes where IPv6 is disabled in the kernel, we use 0.4.10.
# We get versions by git hash for integrity reasons.
# 0.4.11 = c706363815a38ff2c5cbc07b73e2cfaaa59bae0f
# 0.4.10 = aa644b22fff9e939e549a9759629be58b3c5cac2
RUN git clone --single-branch https://github.com/linux-rdma/qperf.git
WORKDIR /qperf
RUN git checkout aa644b22fff9e939e549a9759629be58b3c5cac2

# Build qperf from source
RUN ./cleanup && \
    ./autogen.sh && \
    ./configure && \
    make

FROM alpine:3.21
RUN apk add --no-cache iperf3
RUN apk add --no-cache --repository http://dl-3.alpinelinux.org/alpine/edge/testing/ qperf
RUN apk add --no-cache iperf3 curl tcpdump
COPY --from=build-env /qperf/src/qperf /usr/bin/qperf
RUN chmod +x /usr/bin/qperf

USER perf
@ -1,17 +0,0 @@
all: image

REGISTRY?=quay.io
VERSION?=latest
ORG?=tigeradev
NAME?=perf

.PHONY: image
image:
	docker build -t $(REGISTRY)/$(ORG)/$(NAME):$(VERSION) .

.PHONY: push
push: image
	docker push $(REGISTRY)/$(ORG)/$(NAME):$(VERSION)

clean:
	-docker rmi $(REGISTRY)/$(ORG)/$(NAME):$(VERSION)
@ -1,3 +1,3 @@
# Perf

`perf` is an Alpine container used for performance testing. It contains `iperf3` and `qperf`.
`perf` is an Alpine container used for performance testing. It contains `iperf3`, `qperf` and `curl`.
@ -1,3 +1,16 @@
ARG GO_VERSION=1.24.3

FROM golang:${GO_VERSION} AS builder

WORKDIR /build
COPY go.* /build
ADD images/ttfr /build/ttfr
WORKDIR /build/ttfr

RUN go mod download
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -o /pingo .

FROM scratch
COPY bin/pingo .
COPY --from=builder /pingo /pingo
USER pingo
CMD ["/pingo"]
@ -1,25 +0,0 @@
all: image

REGISTRY?=quay.io
VERSION?=latest
ORG?=tigeradev
NAME?=ttfr

.PHONY: image
image: binary
	docker build -t $(REGISTRY)/$(ORG)/$(NAME):$(VERSION) .

.PHONY: binary
binary: pingo.go test
	CGO_ENABLED=0 go build -o bin/pingo .

.PHONY: test
test:
	go test -v .

push: image
	docker push $(REGISTRY)/$(ORG)/$(NAME):$(VERSION)

clean:
	-docker rmi $(REGISTRY)/$(ORG)/$(NAME):$(VERSION)
	-rm bin/pingo
@ -79,7 +79,7 @@ func (t *HTTPTarget) Ping() error {
		method = http.MethodHead
		url = t.url
	case "http":
		// randomly generate method, status for the request with equal likely hood for all status listed
		// randomly generate method and status for the request, with equal likelihood for all statuses listed
		method = httpRequestMethods[rand.Intn(len(httpRequestMethods))]
		status := httpStatusCodes[rand.Intn(len(httpStatusCodes))]
		// creates a distribution for user agents, %d = 5 more likely than %d = 1
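The distribution comment above refers to a frequency-weighted pick: an entry with weight 5 is five times more likely to be chosen than one with weight 1. A minimal sketch of that pattern, with an illustrative agent list (the real list, weights and helper names are not in this hunk):

```go
package main

import (
	"fmt"
	"math/rand"
)

// buildWeighted repeats each entry weight-many times, so indexing the
// result with a uniform random number draws proportionally to the weights.
func buildWeighted(weights map[string]int) []string {
	var pool []string
	for agent, w := range weights {
		for i := 0; i < w; i++ {
			pool = append(pool, agent)
		}
	}
	return pool
}

func main() {
	pool := buildWeighted(map[string]int{"agent-a": 5, "agent-b": 1})
	fmt.Println(pool[rand.Intn(len(pool))]) // "agent-a" roughly 5x as often
}
```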
@ -176,6 +176,7 @@ func CalculateTTFR(target Target) {
	for {
		// Do each response on its own goroutine so we can have multiple responses in flight.
		go func() {
			log.Info("Sending ping to ", target)
			err := target.Ping()
			if err == nil {
				select {
@ -262,6 +263,7 @@ func main() {
	// If adding more, may be worth using https://github.com/kelseyhightower/envconfig.
	var err error
	reachableAddr := getEnvParam("ADDRESS", "localhost")
	log.Info("Using reachable address ", reachableAddr)
	unreachableAddr := getEnvParam("UNREACHABLE_ADDRESS", "")
	port := getEnvParam("PORT", "5005")
	prometheusEndpointsBlob := getEnvParam("PROM_GATEWAYS", "")
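getEnvParam itself is not part of this diff; judging by the call sites above it is a lookup-with-default helper, which would conventionally be written like the sketch below (an assumption, not the file's actual code):

```go
package main

import (
	"fmt"
	"os"
)

// getEnvParam returns the value of the named environment variable, or the
// given default when it is unset.
func getEnvParam(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return fallback
}

func main() {
	fmt.Println(getEnvParam("PORT", "5005")) // "5005" unless PORT is set
}
```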
@ -297,6 +299,7 @@ func main() {
		urlLengthFactor = 1
	}
	// If a SLEEPTIME is configured, sleep for that time before starting.
	log.Info("Sleeping for ", sleepTime, " seconds")
	if sleepTime > 0 {
		time.Sleep(time.Duration(sleepTime * float64(time.Second)))
		startTime = time.Now()
@ -306,6 +309,7 @@ func main() {
	// These metrics aren't Prometheus best practice: can/can't reach would be better
	// distinguished by a label, not by label + metric name, for example. Leaving it for
	// compatibility.
	log.Info("Setting up Prometheus metrics")
	registry := prometheus.NewRegistry()
	reachLabels := prometheus.Labels{
		"node": nodeName,
@ -378,6 +382,7 @@ func main() {
	var prometheusEndpoints []string
	var pusher *push.Pusher
	if prometheusEndpointsBlob != "" {
		log.Info("Will push results to Prometheus push gateway ", prometheusEndpointsBlob)
		err = json.Unmarshal([]byte(prometheusEndpointsBlob), &prometheusEndpoints)
		if err != nil || len(prometheusEndpoints) == 0 {
			log.Fatal("fatal error ", err, " decoding Prometheus endpoints ", prometheusEndpointsBlob)
@ -395,6 +400,7 @@ func main() {
	case "http", "tcp":
		// Create a non-default HTTP transport that doesn't cache TCP connections
		// and limits the dial and GET timeout to half the configured total each.
		log.Info("Setting up HTTP transport")
		halfTimeout := time.Duration(timeout * float64(time.Second) / 2)
		transport := &http.Transport{
			DisableKeepAlives: true,
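The hunk cuts off inside the transport literal. A self-contained sketch of a client with the properties the comment describes - a fresh TCP connection per request, and the total budget split between the dial and the request - with the caveat that everything beyond `DisableKeepAlives` is an assumption rather than the file's exact code:

```go
package main

import (
	"net"
	"net/http"
	"time"
)

// newNonCachingClient opens a new TCP connection for every request and
// gives the dial and the request half of the total timeout each.
func newNonCachingClient(total time.Duration) *http.Client {
	half := total / 2
	transport := &http.Transport{
		DisableKeepAlives: true, // no connection reuse between pings
		DialContext: (&net.Dialer{
			Timeout: half, // dialing gets half the budget
		}).DialContext,
		ResponseHeaderTimeout: half, // the request gets the other half
	}
	return &http.Client{Transport: transport, Timeout: total}
}
```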
@ -420,6 +426,7 @@ func main() {
		logResponses: true,
	}
	case "udp":
		log.Info("Setting up UDP transport")
		reachTarget = &UDPTarget{
			url:        fmt.Sprintf("%s:%s", reachableAddr, port),
			sentMetric: numPings,
@ -434,34 +441,35 @@ func main() {
	}

	if reachableAddr != "" {
		log.Info("Starting connectivity check to ", reachableAddr, ":", port, " rate ", connRate)
		CalculateTTFR(reachTarget)
		go CheckConnectivity(reachTarget, connRate, quitAfter)
	}

	if unreachableAddr != "" {
		log.Info("Starting connectivity check to ", unreachableAddr, ":", port, " rate ", failRate)
		go CheckConnectivity(failTarget, failRate, quitAfter)
	}

	log.Info("Started everything!")
	// Report to Prometheus every 10 seconds.
	if prometheusEndpointsBlob != "" {
		DoEvery(func() {
			gather, err := registry.Gather()
			if err != nil {
				log.Fatal("error gathering metrics: ", err)
			}
			for _, metric := range gather {
				if *metric.Name == "ttfr_seconds" {
					log.Info(`{"ttfr_seconds": `, *metric.Metric[0].Gauge.Value, "}")
				}
			}
	DoEvery(func() {
		if prometheusEndpointsBlob != "" {
			err = pusher.Push()
			if err != nil {
				log.Fatal("error pushing response success update to gateway: ", err)
			}
		}, 10*time.Second, quitAfter)
	} else {
			gather, err := registry.Gather()
			for _, metric := range gather {
				if *metric.Name == "ttfr_seconds" {
					log.Info(`{"ttfr_seconds": `, *metric.Metric[0].Gauge.Value, "}")
				}
			}
			if err != nil {
				log.Fatal("error gathering metrics: ", err)
			}
		}
	}, 10*time.Second, quitAfter)
}

func randString(n int) string {
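DoEvery's implementation is not visible in this diff; from its call shape (callback, period, overall lifetime in seconds) a ticker-based helper such as the following would fit - a sketch under those assumptions, not the repository's code:

```go
package main

import (
	"fmt"
	"time"
)

// DoEvery runs f every period until quitAfter seconds have elapsed.
func DoEvery(f func(), period time.Duration, quitAfter float64) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	deadline := time.After(time.Duration(quitAfter * float64(time.Second)))
	for {
		select {
		case <-ticker.C:
			f()
		case <-deadline:
			return
		}
	}
}

func main() {
	DoEvery(func() { fmt.Println("tick") }, 200*time.Millisecond, 1)
}
```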
@ -15,14 +15,20 @@
package main

import (
	"bytes"
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"regexp"
	"strconv"
	"testing"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/assert"

	log "github.com/sirupsen/logrus"
)

func TestHTTPTarget_Ping(t *testing.T) {
@ -143,7 +149,6 @@ func Test_noPushGW(t *testing.T) {
		t.Fatal(err)
	}
	basicEnv := []envVar{
		{key: "PROM_GATEWAYS", value: ""},
		{key: "ADDRESS", value: host},
		{key: "PORT", value: port},
		{key: "QUIT_AFTER", value: "1"},
@ -160,7 +165,57 @@ func Test_noPushGW(t *testing.T) {
		t.Setenv(envvar.key, envvar.value)
	}
	t.Run(tt.name, func(t *testing.T) {
		// run main and capture logs from it
		var str bytes.Buffer
		log.SetOutput(&str)

		main()

		capturedLogs := str.String()
		if capturedLogs == "" {
			t.Errorf("Expected logs to be captured, but got none")
		}
		assert.Contains(t, capturedLogs, "ttfr_seconds")
		r := regexp.MustCompile(`{\\"ttfr_seconds\\": ([0-9].*\.[0-9].*)}`)
		matches := r.FindStringSubmatch(capturedLogs)
		if len(matches) < 2 {
			t.Errorf("Expected regex to match, but didn't get enough matches: %v from %v", matches, capturedLogs)
		}
		ttfrString := matches[1]
		if len(ttfrString) == 0 {
			t.Errorf("Expected regex to match, but got none")
		}
		ttfrSec, err := strconv.ParseFloat(ttfrString, 64)
		if err != nil {
			t.Errorf("Expected to parse float, but got error: %v", err)
		}
		if ttfrSec == 0 {
			t.Errorf("Expected ttfrSec to be non-zero, but got %v", ttfrSec)
		}
	})
	}
}

func Test_regex(t *testing.T) {
	capturedLogs := "time=\"2025-05-02T14:04:07+01:00\" level=info msg=\"Pingo started at 2025-05-02 14:04:07.029670625 +0100 BST m=+0.000956863\"\ntime=\"2025-05-02T14:04:07+01:00\" level=info msg=\"Response status code 200 protocol tcp\"\ntime=\"2025-05-02T14:04:07+01:00\" level=info msg=\"Response status code 200 protocol tcp\"\ntime=\"2025-05-02T14:04:07+01:00\" level=info msg=\"TTFR found: was 0.016565863\"\ntime=\"2025-05-02T14:04:07+01:00\" level=info msg=\"Started everything!\"\ntime=\"2025-05-02T14:04:07+01:00\" level=info msg=\"Starting connectivity check to &{0xc00021c5a0 http://127.0.0.1:40379 tcp 0xc0002001e0 0xc000200300 false} rate 1\"\ntime=\"2025-05-02T14:04:07+01:00\" level=info msg=\"Response status code 200 protocol tcp\"\ntime=\"2025-05-02T14:04:07+01:00\" level=info msg=\"{\\\"ttfr_seconds\\\": 0.016565863}\"\n"
	r := regexp.MustCompile(`{\\"ttfr_seconds\\": ([0-9].*\.[0-9].*)}`)
	matches := r.FindStringSubmatch(capturedLogs)
	if len(matches) < 2 {
		t.Errorf("Expected regex to match, but didn't get enough matches: %v", matches)
	}
	ttfrString := matches[1]
	if ttfrString == "" {
		t.Error("Expected regex to match, but got none")
	}
	ttfrSec, err := strconv.ParseFloat(ttfrString, 64)
	if err != nil {
		t.Errorf("Expected to parse float, but got error: %v", err)
	}
	if ttfrSec == 0 {
		t.Errorf("Expected ttfrSec to be non-zero, but got %v", ttfrSec)
	}
	// Check that ttfrSec equals the value embedded in the sample logs
	if ttfrSec != 0.016565863 {
		t.Errorf("Expected ttfrSec to be 0.016565863, but got %v", ttfrSec)
	}
}
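The doubled backslashes in the test pattern exist because logrus's text formatter quotes the msg field, so the JSON quotes show up escaped in the captured output. A minimal, runnable illustration (the logged value is arbitrary):

```go
package main

import (
	"bytes"
	"fmt"
	"regexp"

	log "github.com/sirupsen/logrus"
)

func main() {
	var buf bytes.Buffer
	log.SetOutput(&buf)
	log.Info(`{"ttfr_seconds": `, 0.0166, "}")
	// The buffer now holds something like:
	//   time="..." level=info msg="{\"ttfr_seconds\": 0.0166}"
	// hence the \\" in the pattern below.
	r := regexp.MustCompile(`{\\"ttfr_seconds\\": ([0-9.]+)}`)
	fmt.Println(r.FindStringSubmatch(buf.String())) // [... 0.0166]
}
```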
@ -0,0 +1,9 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: tb-e2e
nodes:
- role: control-plane
- role: worker
networking:
  disableDefaultCNI: true
  podSubnet: 192.168.0.0/16
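For context: the `make e2e-test` target in the CI workflow presumably hands this file to `kind create cluster --config <path>` (the exact invocation is not shown in this diff). `disableDefaultCNI: true` brings the two-node cluster up without a CNI so the benchmark can install Calico itself, and `podSubnet: 192.168.0.0/16` matches Calico's default IPv4 pool.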
@ -30,7 +30,6 @@ import (
	log "github.com/sirupsen/logrus"
	v3 "github.com/tigera/api/pkg/apis/projectcalico/v3"
	operatorv1 "github.com/tigera/operator/api/v1"
	v1 "github.com/tigera/operator/api/v1"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
@ -43,17 +42,26 @@ func ConfigureCluster(ctx context.Context, cfg config.Config, clients config.Cli
	log.Debug("entering configureCluster function")
	err := updateEncap(ctx, cfg, clients, testConfig.Encap)
	if err != nil {
		return fmt.Errorf("failed to update encapsulation")
		log.WithError(err).Error("failed to update encapsulation")
		return err
	}
	if testConfig.Dataplane == config.DataPlaneBPF {
		err = enableBPF(ctx, cfg, clients)
		if err != nil {
			return fmt.Errorf("failed to enable BPF")
			log.WithError(err).Error("failed to enable BPF")
			return err
		}
	} else if testConfig.Dataplane == config.DataPlaneIPTables {
		err = enableIptables(ctx, clients)
		if err != nil {
			return fmt.Errorf("failed to enable iptables")
			log.WithError(err).Error("failed to enable iptables")
			return err
		}
	} else if testConfig.Dataplane == config.DataPlaneNftables {
		err = enableNftables(ctx, clients)
		if err != nil {
			log.WithError(err).Error("failed to enable nftables")
			return fmt.Errorf("failed to enable nftables")
		}
	} else if testConfig.Dataplane == config.DataPlaneUnset {
		log.Info("No dataplane specified, using whatever is already set")
@ -65,7 +73,8 @@ func ConfigureCluster(ctx context.Context, cfg config.Config, clients config.Cli
	if testConfig.DNSPerf.Mode != config.DNSPerfModeUnset {
		err = patchFelixConfig(ctx, clients, testConfig)
		if err != nil {
			return fmt.Errorf("failed to patch felixconfig")
			log.WithError(err).Error("failed to patch felixconfig")
			return err
		}
	} else {
		log.Warn("No DNSPerfMode specified, using whatever is already set")
@ -74,7 +83,8 @@ func ConfigureCluster(ctx context.Context, cfg config.Config, clients config.Cli
	if testConfig.CalicoNodeCPULimit != "" {
		err = SetCalicoNodeCPULimit(ctx, clients, testConfig.CalicoNodeCPULimit)
		if err != nil {
			return fmt.Errorf("failed to set calico-node CPU limit")
			log.WithError(err).Error("failed to set calico-node CPU limit")
			return err
		}
	} else {
		log.Warn("No CalicoNodeCPULimit specified, using whatever is already set")
@ -88,29 +98,33 @@ func patchFelixConfig(ctx context.Context, clients config.Clients, testConfig co
	felixconfig := &v3.FelixConfiguration{}
	err := clients.CtrlClient.Get(ctx, ctrlclient.ObjectKey{Name: "default"}, felixconfig)
	if err != nil {
		return fmt.Errorf("failed to get felixconfig")
		log.WithError(err).Error("failed to get felixconfig")
		return err
	}
	log.Debug("felixconfig is", felixconfig)
	dnsPolicyMode := testConfig.DNSPerf.Mode
	// patching felixconfig to use DNS policy mode
	log.Infof("Patching felixconfig to use %s dnspolicymode", dnsPolicyMode)
	v3PolicyMode := v3.DNSPolicyModeNoDelay
	if dnsPolicyMode == "DelayDNSResponse" {
		v3PolicyMode = v3.DNSPolicyModeDelayDNSResponse
	} else if dnsPolicyMode == "DelayDeniedPacket" {
		v3PolicyMode = v3.DNSPolicyModeDelayDeniedPacket
	}
	// Waiting on the API repo update to add this.
	/* else if dnsPolicyMode == "Inline" {
		v3PolicyMode = v3.DNSPolicyModeInline
	} */
	if testConfig.Dataplane == "iptables" {
	v3BPFDNSPolicyMode := v3.BPFDNSPolicyModeNoDelay
	if testConfig.Dataplane == config.DataPlaneIPTables {
		if dnsPolicyMode == "DelayDNSResponse" {
			v3PolicyMode = v3.DNSPolicyModeDelayDNSResponse
		} else if dnsPolicyMode == "DelayDeniedPacket" {
			v3PolicyMode = v3.DNSPolicyModeDelayDeniedPacket
		} else if dnsPolicyMode == "Inline" {
			v3PolicyMode = v3.DNSPolicyModeInline
		}
		felixconfig.Spec.DNSPolicyMode = &v3PolicyMode
	} else if testConfig.Dataplane == config.DataPlaneBPF {
		if dnsPolicyMode == "Inline" {
			v3BPFDNSPolicyMode = v3.BPFDNSPolicyModeInline
		}
		if dnsPolicyMode == "NoDelay" {
			v3BPFDNSPolicyMode = v3.BPFDNSPolicyModeNoDelay
		}
		felixconfig.Spec.BPFDNSPolicyMode = &v3BPFDNSPolicyMode
	}
	// Waiting on the API repo update to add this.
	/* else if testConfig.Dataplane == "bpf" {
		felixconfig.Spec.BPFDNSPolicyMode = &v3PolicyMode
	} */
	err = clients.CtrlClient.Update(ctx, felixconfig)

	return err
@ -157,11 +171,11 @@ func SetCalicoNodeCPULimit(ctx context.Context, clients config.Clients, limit st
	if limit == "0" {
		installation.Spec.CalicoNodeDaemonSet = nil
	} else {
		installation.Spec.CalicoNodeDaemonSet = &v1.CalicoNodeDaemonSet{
			Spec: &v1.CalicoNodeDaemonSetSpec{
				Template: &v1.CalicoNodeDaemonSetPodTemplateSpec{
					Spec: &v1.CalicoNodeDaemonSetPodSpec{
						Containers: []v1.CalicoNodeDaemonSetContainer{
		installation.Spec.CalicoNodeDaemonSet = &operatorv1.CalicoNodeDaemonSet{
			Spec: &operatorv1.CalicoNodeDaemonSetSpec{
				Template: &operatorv1.CalicoNodeDaemonSetPodTemplateSpec{
					Spec: &operatorv1.CalicoNodeDaemonSetPodSpec{
						Containers: []operatorv1.CalicoNodeDaemonSetContainer{
							{
								Name: "calico-node",
								Resources: &corev1.ResourceRequirements{
@ -183,16 +197,10 @@ func SetCalicoNodeCPULimit(ctx context.Context, clients config.Clients, limit st
		return err
	}

	// delete all calico-node pods so they all restart in parallel, since this is going to be slow if they update one-by-one
	err = utils.DeletePodsWithLabel(ctx, clients, "calico-system", "k8s-app=calico-node")
	err = waitForTigeraStatus(ctx, clients, 600, true)
	if err != nil {
		log.Warning("failed to delete calico-node pods")
		// we're not going to return an error here, since the pods will eventually restart, just slower
	}

	err = waitForTigeraStatus(ctx, clients)
	if err != nil {
		return fmt.Errorf("error waiting for tigera status")
		log.WithError(err).Error("error waiting for tigera status")
		return err
	}
	return err
}
@ -341,6 +349,8 @@ func GetClusterDetails(ctx context.Context, clients config.Clients) (Details, er
	details.Dataplane = "iptables"
	} else if *installation.Status.Computed.CalicoNetwork.LinuxDataplane == "VPP" {
		details.Dataplane = "vpp"
	} else if *installation.Status.Computed.CalicoNetwork.LinuxDataplane == "Nftables" {
		details.Dataplane = "nftables"
	} else {
		details.Dataplane = "unknown"
	}
@ -427,7 +437,8 @@ func SetupStandingConfig(ctx context.Context, clients config.Clients, testConfig
	log.Info("Waiting for all pods to be running")
	err = utils.WaitForDeployment(ctx, clients, deployment)
	if err != nil {
		return fmt.Errorf("error waiting for pods to deploy in standing-deployment")
		log.WithError(err).Error("error waiting for pods to deploy in standing-deployment")
		return err
	}

	// Deploy services
@ -435,17 +446,20 @@ func SetupStandingConfig(
	deployment = makeDeployment(namespace, "standing-svc", 10, false, webServerImage, []string{})
	deployment, err = utils.GetOrCreateDeployment(ctx, clients, deployment)
	if err != nil {
		return fmt.Errorf("error creating deployment standing-svc")
		log.WithError(err).Error("error creating deployment standing-svc")
		return err
	}
	err = utils.ScaleDeployment(ctx, clients, deployment, 10) // When deployment exists but isn't scaled right, this might be needed.
	if err != nil {
		return fmt.Errorf("error scaling deployment standing-svc")
		log.WithError(err).Error("error scaling deployment standing-svc")
		return err
	}
	// wait for pods to deploy
	log.Info("Waiting for all pods to be running")
	err = utils.WaitForDeployment(ctx, clients, deployment)
	if err != nil {
		return fmt.Errorf("error waiting for pods to deploy in standing-svc")
		log.WithError(err).Error("error waiting for pods to deploy in standing-svc")
		return err
	}
	// Spin up multiple workers on a channel to create services, because a single thread is limited to 5 actions per second
	const numThreads = 10
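The worker-pool pattern that comment describes - fanning several goroutines out over a channel so service creation is not throttled by a single client - looks roughly like this sketch (function and variable names here are illustrative, not the repository's):

```go
package main

import (
	"fmt"
	"sync"
)

// createAll distributes service names across numThreads workers so a
// per-client rate limit no longer bounds total throughput.
func createAll(names []string, numThreads int, create func(string) error) {
	work := make(chan string)
	var wg sync.WaitGroup
	for i := 0; i < numThreads; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for name := range work {
				if err := create(name); err != nil {
					fmt.Println("create failed:", name, err)
				}
			}
		}()
	}
	for _, n := range names {
		work <- n
	}
	close(work)
	wg.Wait()
}
```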
@ -22,6 +22,7 @@ import (

	"github.com/projectcalico/tiger-bench/pkg/config"
	"github.com/projectcalico/tiger-bench/pkg/utils"
	yaml "gopkg.in/yaml.v2"

	"github.com/sethvargo/go-retry"
	log "github.com/sirupsen/logrus"
@ -34,63 +35,32 @@
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

func enableBPF(ctx context.Context, cfg config.Config, clients config.Clients) error {
	// enable BPF
	log.Debug("entering enableBPF function")

	installation := &operatorv1.Installation{}
	log.Debug("Getting installation")
func enableNftables(ctx context.Context, clients config.Clients) error {
	// enable Nftables
	log.Debug("entering enableNftables function")
	childCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	installation := &operatorv1.Installation{}
	log.Debug("Getting installation")
	err := clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "default"}, installation)
	if err != nil {
		return fmt.Errorf("failed to get installation")
	}
	if *installation.Spec.CalicoNetwork.LinuxDataplane == operatorv1.LinuxDataplaneBPF {
		log.Info("BPF already enabled")

	err = setKubeProxyMode(childCtx, clients, "nftables")
	if err != nil {
		log.WithError(err).Error("failed to set kube-proxy mode to nftables")
		return err
	}
	oldDataplane := installation.Spec.CalicoNetwork.LinuxDataplane

	if *oldDataplane == operatorv1.LinuxDataplaneNftables {
		log.Info("Nftables already enabled")
		return nil
	}
	var host string
	var port string
	if cfg.K8sAPIHost == "" || cfg.K8sAPIPort == "" {
		// get apiserver host and port from kubernetes service endpoints
		kubesvc := &corev1.Endpoints{}
		err := clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "kubernetes", Namespace: "default"}, kubesvc)
		if err != nil {
			return fmt.Errorf("failed to get kubernetes service endpoints")
		}
		log.Infof("first kubernetes service endpoint IP is %v", kubesvc.Subsets[0].Addresses[0].IP)
		log.Infof("first kubernetes service endpoint port is %v", kubesvc.Subsets[0].Ports[0].Port)
		host = kubesvc.Subsets[0].Addresses[0].IP
		port = strconv.FormatInt(int64(kubesvc.Subsets[0].Ports[0].Port), 10)
	} else {
		log.Infof("Using user-provided k8s API host %s and port %s", cfg.K8sAPIHost, cfg.K8sAPIPort)
		host = cfg.K8sAPIHost
		port = cfg.K8sAPIPort
	}
	// if it doesn't exist already, create configMap with k8s endpoint data in it
	err = createOrUpdateCM(childCtx, clients, host, port)
	if err != nil {
		return fmt.Errorf("failed to create or update configMap")
	}

	// kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}'
	patch := []byte(`{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}`)
	proxyds := &appsv1.DaemonSet{}
	log.Debug("Getting kube-proxy ds")
	err = clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Namespace: "kube-system", Name: "kube-proxy"}, proxyds)
	if err != nil {
		return fmt.Errorf("failed to get kube-proxy ds")
	}
	log.Debugf("patching with %v", string(patch[:]))
	log.Info("enabling BPF dataplane")
	err = clients.CtrlClient.Patch(childCtx, proxyds, ctrlclient.RawPatch(ctrlclient.Merge.Type(), patch))
	if err != nil {
		return fmt.Errorf("failed to patch kube-proxy ds")
	}

	// kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"BPF"}}}'
	patch = []byte(`{"spec":{"calicoNetwork":{"linuxDataplane":"BPF"}}}`)
	// kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"Nftables"}}}'
	patch := []byte(`{"spec":{"calicoNetwork":{"linuxDataplane":"Nftables"}}}`)

	installation = &operatorv1.Installation{}
	log.Debug("Getting installation")
@ -99,44 +69,24 @@ func enableBPF(ctx context.Context, cfg config.Config, clients config.Clients) e
		return fmt.Errorf("failed to get installation")
	}
	log.Debugf("patching with %v", string(patch[:]))
	log.Info("enabling Nftables dataplane")
	err = clients.CtrlClient.Patch(childCtx, installation, ctrlclient.RawPatch(ctrlclient.Merge.Type(), patch))
	if err != nil {
		return fmt.Errorf("failed to patch installation")
	}
	err = waitForTigeraStatus(ctx, clients)
	if err != nil {
		return fmt.Errorf("error waiting for tigera status")
	}
	return nil
}

func enableIptables(ctx context.Context, clients config.Clients) error {
	// enable iptables
	log.Debug("entering enableIptables function")
	childCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	installation := &operatorv1.Installation{}
	log.Debug("Getting installation")
	err := clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "default"}, installation)
	if err != nil {
		return fmt.Errorf("failed to get installation")
	}
	if *installation.Spec.CalicoNetwork.LinuxDataplane == operatorv1.LinuxDataplaneIptables {
		log.Info("IPtables already enabled")
		return nil
	}

	// kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"Iptables"}}}'
	patch := []byte(`{"spec":{"calicoNetwork":{"linuxDataplane":"Iptables"}}}`)
	// This is a workaround for an operator nftables bug: https://github.com/tigera/operator/pull/3926. Remove this when the bug is fixed.
	// kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxPolicySetupTimeoutSeconds":null}}}'
	patch = []byte(`{"spec":{"calicoNetwork":{"linuxPolicySetupTimeoutSeconds":null}}}`)

	installation = &operatorv1.Installation{}
	log.Debug("Getting installation")
	log.Debug("Getting installation to patch linuxPolicySetupTimeoutSeconds")
	err = clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "default"}, installation)
	if err != nil {
		return fmt.Errorf("failed to get installation")
	}
	log.Debugf("patching with %v", string(patch[:]))
	log.Info("enabling iptables dataplane")
	log.Info("Making sure linuxPolicySetupTimeoutSeconds is Null")
	err = clients.CtrlClient.Patch(childCtx, installation, ctrlclient.RawPatch(ctrlclient.Merge.Type(), patch))
	if err != nil {
		return fmt.Errorf("failed to patch installation")
@ -156,14 +106,294 @@ func enableIptables(ctx context.Context, clients config.Clients) error {
		return fmt.Errorf("failed to patch kube-proxy ds")
	}

	err = waitForTigeraStatus(ctx, clients)
	err = waitForTigeraStatus(ctx, clients, 900, true)
	if err != nil {
		return fmt.Errorf("error waiting for tigera status")
	}

	// Clear out any residual iptables rules. This is a workaround, which should not be needed once nftables is GA
	err = wipeIPTables(ctx, clients)
	if err != nil {
		log.WithError(err).Error("failed to wipe iptables")
	}

	return nil
}

func setKubeProxyMode(ctx context.Context, clients config.Clients, mode string) error {
	log.Debug("entering setKubeProxyMode function")
	kubecm := &corev1.ConfigMap{}
	err := clients.CtrlClient.Get(ctx, ctrlclient.ObjectKey{Namespace: "kube-system", Name: "kube-proxy"}, kubecm)
	log.Debug("Verifying that the kube-proxy config is available")
	if err != nil {
		return fmt.Errorf("failed to get proxymode")
	}
	configStr, ok := kubecm.Data["config.conf"]
	if !ok {
		return fmt.Errorf("config.conf not found in kube-proxy configmap")
	}

	var configMap map[string]interface{}
	err = yaml.Unmarshal([]byte(configStr), &configMap)
	if err != nil {
		return fmt.Errorf("failed to parse YAML: %w", err)
	}

	currentmode, ok := configMap["mode"].(string)
	if !ok {
		return fmt.Errorf("mode field not found or not a string")
	}
	log.Info("Verifying that kube-proxy is running in the desired mode")
	if currentmode != mode {
		log.Infof("kube-proxy mode is %s, attempting to update to %s", currentmode, mode)
		configMap["mode"] = mode
		configStr, err := yaml.Marshal(configMap)
		if err != nil {
			log.Error("failed to marshal YAML")
			return fmt.Errorf("failed to marshal YAML: %w", err)
		}
		kubecm.Data["config.conf"] = string(configStr)
		log.Info("Patching kube-proxy configmap")
		err = clients.CtrlClient.Update(ctx, kubecm)
		if err != nil {
			log.Error("failed to patch kube-proxy configmap")
			return fmt.Errorf("failed to patch kube-proxy configmap: %w", err)
		}
		log.Info("Patching kube-proxy configmap succeeded")
		// Restart kube-proxy pods to pick up the new config
		err = utils.DeletePodsWithLabel(ctx, clients, "kube-system", "k8s-app=kube-proxy")
		if err != nil {
			log.Error("failed to delete kube-proxy pods")
			return fmt.Errorf("failed to restart kube-proxy pods: %w", err)
		}
	} else {
		log.Infof("kube-proxy mode is already %s", currentmode)
	}
	return nil
}

func wipeIPTables(ctx context.Context, clients config.Clients) error {
	log.Debug("entering wipeIPTables function")
	// cmd := `iptables-legacy-save | awk '/^[*]/ { print $1 } /^:[A-Z]+ [^-]/ { print $1 " ACCEPT" ; } /COMMIT/ { print $0; }' | iptables-legacy-restore; iptables-save-nft | awk '/^[*]/ { print $1 } /^:[A-Z]+ [^-]/ { print $1 " ACCEPT" ; } /COMMIT/ { print $0; }' | iptables-restore-nft`
	cmd := `iptables-legacy -F -t raw; iptables-legacy -F -t filter; iptables-legacy -F -t mangle; iptables-legacy -F -t nat; iptables-nft -F -t raw; iptables-nft -F -t filter; iptables-nft -F -t mangle; iptables-nft -F -t nat`
	err := runCommandInNodePods(ctx, clients, cmd)
	if err != nil {
		log.WithError(err).Error("failed to run command ", cmd)
		return fmt.Errorf("failed to run command %s: %w", cmd, err)
	}
	time.Sleep(30 * time.Second)
	log.Info("Done wiping iptables rules")
	return nil
}

func wipeNFTables(ctx context.Context, clients config.Clients) error {
	log.Debug("entering wipeNFTables function")
	cmd := `nft flush ruleset`
	err := runCommandInNodePods(ctx, clients, cmd)
	if err != nil {
		log.WithError(err).Error("failed to run command ", cmd)
		return fmt.Errorf("failed to run command %s: %w", cmd, err)
	}
	time.Sleep(30 * time.Second)
	log.Info("Done wiping nftables rules")
	return nil
}

func runCommandInNodePods(ctx context.Context, clients config.Clients, cmd string) error {
	log.Debug("entering runCommandInNodePods function")
	pods, err := getCalicoNodePods(ctx, clients)
	if err != nil {
		log.WithError(err).Error("failed to get calico-node pods")
		return err
	}
	for _, pod := range pods {
		log.Info("wiping rules via pod ", pod.Name)
		var stdout string
		var stderr string
		var err error

		stdout, stderr, err = utils.RetryinPod(ctx, clients, &pod, cmd, 10)
		log.Info("stdout: ", stdout)
		log.Info("stderr: ", stderr)
		if err != nil {
			log.WithError(err).Error("failed to run command ", cmd)
			return fmt.Errorf("%s", stderr)
		}
	}
	return nil
}

func getCalicoNodePods(ctx context.Context, clients config.Clients) ([]corev1.Pod, error) {
	log.Debug("entering getCalicoNodePods function")
	pods := &corev1.PodList{}
	err := clients.CtrlClient.List(ctx, pods, ctrlclient.InNamespace("calico-system"), ctrlclient.MatchingLabels{"k8s-app": "calico-node"})
	if err != nil {
		log.WithError(err).Error("failed to list calico-node pods")
		return nil, err
	}
	return pods.Items, nil
}

func enableBPF(ctx context.Context, cfg config.Config, clients config.Clients) error {
	// enable BPF
	log.Debug("entering enableBPF function")

	installation := &operatorv1.Installation{}
	log.Debug("Getting installation")
	childCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	err := clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "default"}, installation)
	if err != nil {
		log.WithError(err).Error("failed to get installation")
		return err
	}
	if *installation.Spec.CalicoNetwork.LinuxDataplane == operatorv1.LinuxDataplaneBPF {
		log.Info("BPF already enabled")
		return nil
	}
	var host string
	var port string
	if cfg.K8sAPIHost == "" || cfg.K8sAPIPort == "" {
		// get apiserver host and port from kubernetes service endpoints
		kubesvc := &corev1.Endpoints{}
		err := clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "kubernetes", Namespace: "default"}, kubesvc)
		if err != nil {
			log.WithError(err).Error("failed to get kubernetes service endpoints")
			return err
		}
		log.Infof("first kubernetes service endpoint IP is %v", kubesvc.Subsets[0].Addresses[0].IP)
		log.Infof("first kubernetes service endpoint port is %v", kubesvc.Subsets[0].Ports[0].Port)
		host = kubesvc.Subsets[0].Addresses[0].IP
		port = strconv.FormatInt(int64(kubesvc.Subsets[0].Ports[0].Port), 10)
	} else {
		log.Infof("Using user-provided k8s API host %s and port %s", cfg.K8sAPIHost, cfg.K8sAPIPort)
		host = cfg.K8sAPIHost
		port = cfg.K8sAPIPort
	}
	// if it doesn't exist already, create configMap with k8s endpoint data in it
	err = createOrUpdateCM(childCtx, clients, host, port)
	if err != nil {
		log.WithError(err).Error("failed to create or update configMap")
		return err
	}

	// kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}'
	patch := []byte(`{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}`)
	proxyds := &appsv1.DaemonSet{}
	log.Debug("Getting kube-proxy ds")
	err = clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Namespace: "kube-system", Name: "kube-proxy"}, proxyds)
	if err != nil {
		log.WithError(err).Error("failed to get kube-proxy ds")
		return err
	}
	log.Debugf("patching with %v", string(patch[:]))
	log.Info("enabling BPF dataplane")
	err = clients.CtrlClient.Patch(childCtx, proxyds, ctrlclient.RawPatch(ctrlclient.Merge.Type(), patch))
	if err != nil {
		log.WithError(err).Error("failed to patch kube-proxy ds")
		return err
	}

	// kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"BPF"}}}'
	patch = []byte(`{"spec":{"calicoNetwork":{"linuxDataplane":"BPF"}}}`)

	installation = &operatorv1.Installation{}
	log.Debug("Getting installation")
	err = clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "default"}, installation)
	if err != nil {
		log.WithError(err).Error("failed to get installation")
		return err
	}
	log.Debugf("patching with %v", string(patch[:]))
	err = clients.CtrlClient.Patch(childCtx, installation, ctrlclient.RawPatch(ctrlclient.Merge.Type(), patch))
	if err != nil {
		log.WithError(err).Error("failed to patch installation")
		return err
	}
	err = waitForTigeraStatus(ctx, clients, 600, true)
	if err != nil {
		log.WithError(err).Error("error waiting for tigera status")
		return err
	}
	return nil
}

func enableIptables(ctx context.Context, clients config.Clients) error {
	// enable iptables
	log.Debug("entering enableIptables function")
	childCtx, cancel := context.WithTimeout(ctx, 90*time.Second)
	defer cancel()
	installation := &operatorv1.Installation{}
	log.Debug("Getting installation")
	err := clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "default"}, installation)
	if err != nil {
		log.WithError(err).Error("failed to get installation")
		return err
	}

	err = setKubeProxyMode(childCtx, clients, "iptables")
	if err != nil {
		log.WithError(err).Error("failed to set kube-proxy mode to iptables")
		return err
	}
	oldDataplane := installation.Spec.CalicoNetwork.LinuxDataplane

	if *oldDataplane == operatorv1.LinuxDataplaneIptables {
		log.Info("IPtables already enabled")
		return nil
	}

	// kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"Iptables"}}}'
	patch := []byte(`{"spec":{"calicoNetwork":{"linuxDataplane":"Iptables"}}}`)

	installation = &operatorv1.Installation{}
	log.Debug("Getting installation")
	err = clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "default"}, installation)
	if err != nil {
		log.WithError(err).Error("failed to get installation")
		return err
	}
	log.Debugf("patching with %v", string(patch[:]))
	log.Info("enabling iptables dataplane")
	err = clients.CtrlClient.Patch(childCtx, installation, ctrlclient.RawPatch(ctrlclient.Merge.Type(), patch))
	if err != nil {
		log.WithError(err).Error("failed to patch installation")
		return err
	}

	// kubectl patch ds -n kube-system kube-proxy --type merge -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": null}}}}}'
	patch = []byte(`{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": null}}}}}`)
	proxyds := &appsv1.DaemonSet{}
	log.Debug("Getting kube-proxy ds")
	err = clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Namespace: "kube-system", Name: "kube-proxy"}, proxyds)
	if err != nil {
		log.WithError(err).Error("failed to get kube-proxy ds")
		return err
	}
	log.Debugf("patching with %v", string(patch[:]))
	err = clients.CtrlClient.Patch(childCtx, proxyds, ctrlclient.RawPatch(ctrlclient.Merge.Type(), patch))
	if err != nil {
		log.WithError(err).Error("failed to patch kube-proxy ds")
		return err
	}

	err = waitForTigeraStatus(ctx, clients, 900, true)
	if err != nil {
		log.WithError(err).Error("error waiting for tigera status")
		return err
	}

	// Clear out any residual nftables rules. This is a workaround, which should not be needed once nftables is GA
	err = wipeNFTables(ctx, clients)
	if err != nil {
		log.WithError(err).Error("failed to wipe nftables")
	}

	return nil
}

func createOrUpdateCM(ctx context.Context, clients config.Clients, host string, port string) error {
	log.Debug("entering createOrUpdateCM function")
	// if it doesn't exist already, create configMap with k8s endpoint data in it
	configMapName := "kubernetes-services-endpoint"
	namespace := "tigera-operator"
@ -193,13 +423,22 @@

}

func waitForTigeraStatus(ctx context.Context, clients config.Clients) error {
func waitForTigeraStatus(ctx context.Context, clients config.Clients, timeout int, deleteCalicoNodePods bool) error {

	if deleteCalicoNodePods {
		// delete all calico-node pods so they all restart in parallel, since this is going to be slow if they update one-by-one
		err := utils.DeletePodsWithLabel(ctx, clients, "calico-system", "k8s-app=calico-node")
		if err != nil {
			log.Warning("failed to delete calico-node pods")
			// we're not going to return an error here, since the pods will eventually restart, just slower
		}
	}

	// wait for tigera status
	timeout := 600 * time.Second
	log.Debug("entering waitForTigeraStatus function")
	apiStatus := &operatorv1.TigeraStatus{}
	calicoStatus := &operatorv1.TigeraStatus{}
	childCtx, cancel := context.WithTimeout(ctx, timeout)
	childCtx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Second)
	defer cancel()
	time.Sleep(7 * time.Second) // give the operator time to update the status following whatever might have changed
@ -208,11 +447,13 @@ func waitForTigeraStatus(ctx context.Context, clients config.Clients) error {
		time.Sleep(10 * time.Second)
		err := clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "apiserver"}, apiStatus)
		if err != nil {
			return fmt.Errorf("failed to get apiserver status")
			log.WithError(err).Error("failed to get apiserver status")
			return err
		}
		err = clients.CtrlClient.Get(childCtx, ctrlclient.ObjectKey{Name: "calico"}, calicoStatus)
		if err != nil {
			return fmt.Errorf("failed to get calico status")
			log.WithError(err).Error("failed to get calico status")
			return err
		}
		for _, apiCondition := range apiStatus.Status.Conditions {
			log.Debugf("apiserver condition: %v", apiCondition)
@ -243,21 +484,24 @@ func updateEncap(ctx context.Context, cfg config.Config, clients config.Clients,
		patch = []byte(`{"spec":{"ipipMode":"Never","vxlanMode":"Never"}}`)
		err = patchInstallation(ctx, clients, "None")
		if err != nil {
			return fmt.Errorf("failed to patch installation")
			log.WithError(err).Error("failed to patch installation")
			return err
		}
	} else if encap == config.EncapIPIP {
		// kubectl patch ippool default-ipv4-ippool -p '{"spec": {"ipipMode": "Always"}, {vxlanMode: "Never"}}'
		patch = []byte(`{"spec":{"ipipMode":"Always","vxlanMode":"Never"}}`)
		err = patchInstallation(ctx, clients, "IPIP")
		if err != nil {
			return fmt.Errorf("failed to patch installation")
			log.WithError(err).Error("failed to patch installation")
			return err
		}
	} else if encap == config.EncapVXLAN {
		// kubectl patch ippool default-ipv4-ippool -p '{"spec": {"ipipMode": "Never"}, {vxlanMode: "Always"}}'
		patch = []byte(`{"spec":{"ipipMode":"Never","vxlanMode":"Always"}}`)
		err = patchInstallation(ctx, clients, "VXLAN")
		if err != nil {
			return fmt.Errorf("failed to patch installation")
			log.WithError(err).Error("failed to patch installation")
			return err
		}
	} else if encap == config.EncapUnset {
		log.Info("No encapsulation specified, using whatever is already set")
@ -269,12 +513,14 @@ func updateEncap(ctx context.Context, cfg config.Config, clients config.Clients,
		log.Debug("Calico version is less than v3.28.0, patching IPPool")
		err = patchIPPool(ctx, clients, patch)
		if err != nil {
			return fmt.Errorf("failed to patch IPPool")
			log.WithError(err).Error("failed to patch IPPool")
			return err
		}
	}
	err = waitForTigeraStatus(ctx, clients)
	err = waitForTigeraStatus(ctx, clients, 600, false)
	if err != nil {
		return fmt.Errorf("error waiting for tigera status")
		log.WithError(err).Error("error waiting for tigera status")
		return err
	}
	return nil
}
@ -293,7 +539,8 @@ func patchInstallation(ctx context.Context, clients config.Clients, encap string
	installation := &operatorv1.Installation{}
	err := clients.CtrlClient.Get(ctx, ctrlclient.ObjectKey{Name: "default"}, installation)
	if err != nil {
		return fmt.Errorf("failed to get installation")
		log.WithError(err).Error("failed to get installation")
		return err
	}
	log.Debug("installation is", installation)
	installation.Spec.CalicoNetwork.IPPools[0].Encapsulation = v1encap
@ -324,6 +571,7 @@ func patchIPPool(ctx context.Context, clients config.Clients, patch []byte) erro
}

func makeSvc(namespace string, depname, svcname string) corev1.Service {
	log.Debug("entering makeSvc function")
	svcname = utils.SanitizeString(svcname)
	svc := corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
@ -350,6 +598,7 @@ func makeSvc(namespace string, depname, svcname string) corev1.Service {
}

func makeDeployment(namespace string, depname string, replicas int32, hostnetwork bool, image string, args []string) appsv1.Deployment {
	log.Debug("entering makeDeployment function")
	depname = utils.SanitizeString(depname)
	dep := appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
@ -376,11 +625,35 @@ func makeDeployment(namespace string, depname string, replicas int32, hostnetwor
			},
		},
		Spec: corev1.PodSpec{
			AutomountServiceAccountToken: utils.BoolPtr(false),
			SecurityContext: &corev1.PodSecurityContext{
				RunAsNonRoot: utils.BoolPtr(true),
				RunAsGroup:   utils.Int64Ptr(1000),
				RunAsUser:    utils.Int64Ptr(1000),
				SeccompProfile: &corev1.SeccompProfile{
					Type: corev1.SeccompProfileTypeRuntimeDefault,
				},
			},
			Containers: []corev1.Container{
				{
					Name:  depname,
					Image: image,
					Args:  args,
					SecurityContext: &corev1.SecurityContext{
						Privileged:               utils.BoolPtr(false),
						AllowPrivilegeEscalation: utils.BoolPtr(false),
						ReadOnlyRootFilesystem:   utils.BoolPtr(false),
						Capabilities: &corev1.Capabilities{
							Drop: []corev1.Capability{"ALL"},
						},
					},
					Ports: []corev1.ContainerPort{
						{
							Name:          "http",
							ContainerPort: 8080,
							Protocol:      corev1.ProtocolTCP,
						},
					},
				},
			},
			HostNetwork: hostnetwork,
@ -30,6 +30,7 @@ import (
	"golang.org/x/net/proxy"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/yaml"
@ -59,7 +60,7 @@ type Config struct {
	LogLevel       string `envconfig:"LOG_LEVEL" default:"info"`
	WebServerImage string `envconfig:"WEBSERVER_IMAGE" default:"quay.io/tigeradev/tiger-bench-nginx:latest"`
	PerfImage      string `envconfig:"PERF_IMAGE" default:"quay.io/tigeradev/tiger-bench-perf:latest"`
	TTFRImage      string `envconfig:"TTFR_IMAGE" default:"quay.io/tigeradev/ttfr:latest"`
	TTFRImage      string `envconfig:"TTFR_IMAGE" default:"quay.io/tigeradev/tiger-bench-ttfr:latest"`
	TestConfigs    testConfigs
}
@ -80,6 +81,7 @@ const (
|
|||
TestKindDNSPerf TestKind = "dnsperf"
|
||||
TestKindIperf TestKind = "iperf"
|
||||
TestKindQperf TestKind = "thruput-latency"
|
||||
TestKindTTFR TestKind = "ttfr"
|
||||
)
|
||||
|
||||
// Encap represents the encapsulation type to use.
|
||||
|
@ -100,6 +102,7 @@ type DataPlane string
|
|||
const (
|
||||
DataPlaneIPTables DataPlane = "iptables"
|
||||
DataPlaneBPF DataPlane = "bpf"
|
||||
DataPlaneNftables DataPlane = "nftables"
|
||||
DataPlaneUnset DataPlane = ""
|
||||
)
|
||||
|
||||
|
@ -117,36 +120,47 @@ const (
|
|||
|
||||
// TestConfig represents a test to run on a cluster, and the configuration for the test.
|
||||
type TestConfig struct {
|
||||
TestKind TestKind `validate:"required,oneof=dnsperf iperf thruput-latency"`
|
||||
TestKind TestKind `validate:"required,oneof=dnsperf iperf thruput-latency ttfr"`
|
||||
Encap Encap `validate:"omitempty,oneof=none vxlan ipip"`
|
||||
Dataplane DataPlane `validate:"omitempty,oneof=iptables bpf"`
|
||||
Dataplane DataPlane `validate:"omitempty,oneof=iptables bpf nftables"`
|
||||
NumPolicies int `validate:"gte=0"`
|
||||
NumServices int `validate:"gte=0"`
|
||||
NumPods int `validate:"gte=0"`
|
||||
HostNetwork bool
|
||||
TestNamespace string `default:"testns"`
|
||||
Iterations int `validate:"gte=0"`
|
||||
Iterations int `default:"1" validate:"gte=0"`
|
||||
Duration int `default:"60"`
|
||||
DNSPerf *DNSConfig `validate:"required_if=TestKind dnsperf"`
|
||||
Perf *PerfConfig `validate:"required_if=TestType thruput-latency,required_if=TestType iperf"`
|
||||
TTFRConfig *TTFRConfig `validate:"required_if=TestType ttfr"`
|
||||
CalicoNodeCPULimit string
|
||||
LeaveStandingConfig bool
|
||||
}
|
||||
|
||||
// PerfConfig details which tests to run in thruput-latency and iperf tests.
|
||||
type PerfConfig struct {
|
||||
Direct bool // Whether to do a direct pod-pod test
|
||||
Service bool // Whether to do a pod-service-pod test
|
||||
External bool // Whether to test from this container to the external IP for an external-service-pod test
|
||||
ControlPort int // The port to use for the control connection in tests. Used by qperf tests.
|
||||
TestPort int // The port to use for the test connection in tests. Used by qperf and iperf tests
|
||||
Direct bool // Whether to do a direct pod-pod test
|
||||
Service bool // Whether to do a pod-service-pod test
|
||||
External bool // Whether to test from this container to the external IP for an external-service-pod test
|
||||
ControlPort int // The port to use for the control connection in tests. Used by qperf tests.
|
||||
TestPort int // The port to use for the test connection in tests. Used by qperf and iperf tests
|
||||
ExternalIPOrFQDN string // The external IP or DNS name to connect to for an external-service-pod test
|
||||
}
|
||||
|
||||
// DNSConfig contains the configuration specific to DNSPerf tests.
|
||||
type DNSConfig struct {
|
||||
NumDomains int `validate:"gte=0"`
|
||||
Mode DNSPerfMode `validate:"omitempty,oneof=Inline NoDelay DelayDeniedPacket DelayDNSResponse"`
|
||||
NumDomains int `validate:"gte=0"`
|
||||
Mode DNSPerfMode `validate:"omitempty,oneof=Inline NoDelay DelayDeniedPacket DelayDNSResponse"`
|
||||
RunStress bool `default:"true" validate:"omitempty"`
|
||||
TestDNSPolicy bool `default:"true" validate:"omitempty"`
|
||||
NumTargetPods int `default:"100" validate:"gte=1"`
|
||||
TargetType string `default:"pod" validate:"omitempty,oneof=pod service"`
|
||||
}
|
||||
|
||||
// TTFRConfig contains the configuration specific to TTFR tests.
|
||||
type TTFRConfig struct {
|
||||
TestPodsPerNode int `validate:"gte=0"`
|
||||
Rate float64 `validate:"gte=0"`
|
||||
}
|
||||
|
||||
// New returns a new instance of Config.
|
||||
|
@ -234,16 +248,13 @@ func defaultAndValidate(cfg *Config) error {
|
|||
tcfg.TestNamespace = "testns"
|
||||
}
|
||||
if tcfg.TestKind == "dnsperf" {
|
||||
if tcfg.DNSPerf.NumDomains == 0 {
|
||||
return fmt.Errorf("non-zero NumDomains is required for a dnsperf test")
|
||||
}
|
||||
if tcfg.DNSPerf.Mode == "" {
|
||||
return fmt.Errorf("Mode is required for a dnsperf test")
|
||||
if tcfg.DNSPerf.NumDomains < 0 {
|
||||
return fmt.Errorf("NumDomains must be non-negative for a dnsperf test")
|
||||
}
|
||||
}
|
||||
if tcfg.TestKind == "thruput-latency" || tcfg.TestKind == "iperf" {
|
||||
if tcfg.Perf == nil {
|
||||
tcfg.Perf = &PerfConfig{true, true, false, 32000, 0, ""} // Default so that old configs don't break
|
||||
tcfg.Perf = &PerfConfig{true, true, false, 32000, 32001, ""} // Default so that old configs don't break
|
||||
continue
|
||||
}
|
||||
if tcfg.Perf.External {
|
||||
|
@ -266,6 +277,16 @@ func defaultAndValidate(cfg *Config) error {
|
|||
}
|
||||
}
|
||||
}
|
||||
if tcfg.TestKind == "dnsperf" {
|
||||
if tcfg.DNSPerf.TestDNSPolicy {
|
||||
if tcfg.DNSPerf.Mode == DNSPerfModeUnset {
|
||||
return fmt.Errorf("Mode must be set for a dnsperf test with TestDNSPolicy enabled")
|
||||
}
|
||||
if tcfg.DNSPerf.NumDomains < 0 {
|
||||
return fmt.Errorf("NumDomains must be non-negative for a dnsperf test with TestDNSPolicy enabled")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -277,14 +298,18 @@ func newClientSet(config Config) (*kubernetes.Clientset, ctrlclient.Client) {
|
|||
if err != nil {
|
||||
log.WithError(err).Panic("failed to build config")
|
||||
}
|
||||
kconfig.QPS = 100
|
||||
kconfig.Burst = 200
|
||||
kconfig.QPS = 1000
|
||||
kconfig.Burst = 2000
|
||||
clientset, err := kubernetes.NewForConfig(kconfig)
|
||||
if err != nil {
|
||||
log.WithError(err).Panic("failed to create clientset")
|
||||
}
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
err = networkingv1.AddToScheme(scheme)
|
||||
if err != nil {
|
||||
log.WithError(err).Panic("failed to add networkingv1 to scheme")
|
||||
}
|
||||
err = operatorv1.AddToScheme(scheme)
|
||||
if err != nil {
|
||||
log.WithError(err).Panic("failed to add operatorv1 to scheme")
|
||||
|
|
|
@@ -139,6 +139,8 @@ func TestDefaults(t *testing.T) {
    assert.Equal(t, true, cfg.TestConfigs[0].Perf.Service)
    assert.Equal(t, false, cfg.TestConfigs[0].Perf.External)
    assert.Equal(t, "testns", cfg.TestConfigs[0].TestNamespace)
    assert.Equal(t, 32000, cfg.TestConfigs[0].Perf.ControlPort)
    assert.Equal(t, 32001, cfg.TestConfigs[0].Perf.TestPort)
}

func TestInvalidTestKind(t *testing.T) {

@@ -336,7 +338,9 @@ func TestDNSMissingMode(t *testing.T) {
    fileContent := `
- testKind: dnsperf
  dnsperf:
    testDNSPolicy: true
    NumDomains: 4
    NumTargetPods: 10
`
    filePath := "/tmp/test_configs.yaml"
    err := os.WriteFile(filePath, []byte(fileContent), 0644)

@@ -347,7 +351,7 @@ func TestDNSMissingMode(t *testing.T) {
    cfg.TestConfigFile = filePath
    err = loadTestConfigsFromFile(&cfg)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "Mode is required for a dnsperf test")
    assert.Contains(t, err.Error(), "Mode must be set for a dnsperf test with TestDNSPolicy enabled")
}
func TestDNSMissingNumDomains(t *testing.T) {
    fileContent := `

@@ -364,7 +368,7 @@ func TestDNSMissingNumDomains(t *testing.T) {
    cfg.TestConfigFile = filePath
    err = loadTestConfigsFromFile(&cfg)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "non-zero NumDomains is required for a dnsperf test")
    assert.Contains(t, err.Error(), "Field validation for 'NumTargetPods' failed on the 'gte' tag")
}
func TestDNSBasic(t *testing.T) {
    fileContent := `

@@ -372,6 +376,7 @@ func TestDNSBasic(t *testing.T) {
  dnsperf:
    Mode: Inline
    NumDomains: 10
    NumTargetPods: 5
`
    filePath := "/tmp/test_configs.yaml"
    err := os.WriteFile(filePath, []byte(fileContent), 0644)

@@ -0,0 +1,125 @@
# DNSPerf tests

This test has 2 use cases:
- to test DNS Policy performance in Calico Enterprise clusters
- to test the latency of a service using curl

Example Config:
```
- testKind: dnsperf
  Dataplane: iptables
  TestNamespace: dnstest
  numServices: 1000
  iterations: 1
  duration: 60
  DNSPerf:
    numDomains: 0
    RunStress: false
    TestDNSPolicy: false
    numTargetPods: 10
    targetType: service
```
`testKind` sets the test to be a dnsperf test.
Setting `Dataplane` causes the tool to reconfigure your cluster to use a particular dataplane. Leave it blank to test whatever your cluster already uses. Valid values: <blank>, `bpf`, `iptables`, `nftables`.
`numServices` causes the tool to set up "standing config", which includes services. This takes the form of 10 pods, each backing N services (where N is the number you set).
`iterations` is not used by dnsperf tests.
`duration` defines the length of time (in seconds) for which the test is run.
`DNSPerf` is the name of the dnsperf-specific config:
`numDomains` - defines the number of domains that should be added to the DNS policy.
`RunStress` - controls whether the test should run some control-plane stress while the curls are executed. This is useful when testing DNS Policy, because it makes calico-node do work.
`TestDNSPolicy` - controls whether or not the test should be a DNS policy test.
`numTargetPods` - controls the number of target pods that should be created. Curls will be round-robined across the targets. Must be at least 1.
`targetType` - controls whether the target passed to curl is a pod FQDN or a service FQDN. Valid values: `pod`, `service`.


## Operation
The test operates by execing into a test pod and running a curl command. That curl command looks something like this:
```
curl -m 8 -w '{"time_lookup": %{time_namelookup}, "time_connect": %{time_connect}}\n' -s -o /dev/null http://service.cluster.local:8080
```
The curl therefore outputs a lookup time and a connect time, which are recorded by the test. The lookup time is the time between curl sending a DNS request for the target FQDN and getting a response from CoreDNS. The connect time is the time taken from the DNS response to completion of the TCP 3-way handshake.
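
To make that output concrete, here is a minimal Go sketch of decoding one such line. The struct below is illustrative only, and is not necessarily the tool's internal `CurlResult` type:
```
package main

import (
	"encoding/json"
	"fmt"
)

// curlTimes mirrors the two fields emitted by the -w template above.
type curlTimes struct {
	TimeLookup  float64 `json:"time_lookup"`
	TimeConnect float64 `json:"time_connect"`
}

func main() {
	line := `{"time_lookup": 0.023875, "time_connect": 0.024678}`
	var t curlTimes
	if err := json.Unmarshal([]byte(line), &t); err != nil {
		panic(err)
	}
	// curl measures time_connect from the start of the request, so it
	// includes the lookup; subtracting gives the pure TCP connect time.
	fmt.Printf("lookup=%.6fs connect=%.6fs\n", t.TimeLookup, t.TimeConnect-t.TimeLookup)
}
```
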
The test cycles round, creating test pods, running the curl command in them, and tearing them down.


## Result
Example result:
```
[
  {
    "config": {
      "TestKind": "dnsperf",
      "Encap": "",
      "Dataplane": "iptables",
      "NumPolicies": 0,
      "NumServices": 1000,
      "NumPods": 0,
      "HostNetwork": false,
      "TestNamespace": "dnstest",
      "Iterations": 1,
      "Duration": 60,
      "DNSPerf": {
        "NumDomains": 0,
        "Mode": "",
        "RunStress": false,
        "TestDNSPolicy": false,
        "NumTargetPods": 10,
        "TargetType": "service"
      },
      "Perf": null,
      "TTFRConfig": null,
      "CalicoNodeCPULimit": "",
      "LeaveStandingConfig": false
    },
    "ClusterDetails": {
      "Cloud": "unknown",
      "Provisioner": "kubeadm",
      "NodeType": "linux",
      "NodeOS": "Ubuntu 20.04.6 LTS",
      "NodeKernel": "5.15.0-1081-gcp",
      "NodeArch": "amd64",
      "NumNodes": 4,
      "Dataplane": "iptables",
      "IPFamily": "ipv4",
      "Encapsulation": "VXLANCrossSubnet",
      "WireguardEnabled": false,
      "Product": "calico",
      "CalicoVersion": "v3.30.0-0.dev-852-g389eae30ae5d",
      "K8SVersion": "v1.33.1",
      "CRIVersion": "containerd://1.7.27",
      "CNIOption": "Calico"
    },
    "dnsperf": {
      "LookupTime": {
        "min": 0.009491,
        "max": 0.037909,
        "avg": 0.022658038461538466,
        "P50": 0.023875,
        "P75": 0.025672,
        "P90": 0.02856,
        "P99": 0.03434,
        "datapoints": 104
      },
      "ConnectTime": {
        "min": 0.00018300000000000087,
        "max": 0.0031160000000000007,
        "avg": 0.001034471153846154,
        "P50": 0.0008029999999999982,
        "P75": 0.0014260000000000002,
        "P90": 0.0020359999999999996,
        "P99": 0.0029660000000000016,
        "datapoints": 104
      },
      "DuplicateSYN": 0,
      "DuplicateSYNACK": 0,
      "FailedCurls": 0,
      "SuccessfulCurls": 104
    }
  }
]
```
The `dnsperf` section contains statistical summaries of the curl results for LookupTime and ConnectTime.

`DuplicateSYN` gives the number of duplicate SYN packets seen in the tcpdump (useful for DNS Policy performance). tcpdump is only run when TestDNSPolicy=true.
`DuplicateSYNACK` gives the number of duplicate SYNACK packets seen in the tcpdump (useful for DNS Policy performance). tcpdump is only run when TestDNSPolicy=true.
`FailedCurls` and `SuccessfulCurls` show the total number of failed and successful curl attempts during that test.
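
To pull the headline numbers out of a results file programmatically, a minimal sketch follows. It assumes `results.json` holds the array shown above, and the structs decode only the fields used here:
```
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// summary covers just the percentile fields read below.
type summary struct {
	P50 float64 `json:"P50"`
	P99 float64 `json:"P99"`
}

// entry covers just the dnsperf portion of each result.
type entry struct {
	DNSPerf struct {
		LookupTime      summary `json:"LookupTime"`
		ConnectTime     summary `json:"ConnectTime"`
		SuccessfulCurls int     `json:"SuccessfulCurls"`
		FailedCurls     int     `json:"FailedCurls"`
	} `json:"dnsperf"`
}

func main() {
	raw, err := os.ReadFile("results.json")
	if err != nil {
		panic(err)
	}
	var results []entry
	if err := json.Unmarshal(raw, &results); err != nil {
		panic(err)
	}
	for _, r := range results {
		d := r.DNSPerf
		fmt.Printf("lookup P50/P99: %f/%f s, connect P50/P99: %f/%f s, ok=%d failed=%d\n",
			d.LookupTime.P50, d.LookupTime.P99,
			d.ConnectTime.P50, d.ConnectTime.P99,
			d.SuccessfulCurls, d.FailedCurls)
	}
}
```
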
@@ -19,7 +19,6 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "sort"
    "strconv"
    "strings"
    "sync"

@@ -29,6 +28,7 @@ import (
    log "github.com/sirupsen/logrus"

    "github.com/projectcalico/tiger-bench/pkg/config"
    "github.com/projectcalico/tiger-bench/pkg/stats"
    "github.com/projectcalico/tiger-bench/pkg/utils"

    v3 "github.com/tigera/api/pkg/apis/projectcalico/v3"

@@ -49,8 +49,8 @@ type CurlResult struct {

// Results holds the results from this test
type Results struct {
    LookupTime map[int]float64 // will be a set of percentiles
    ConnectTime map[int]float64 // will be a set of percentiles
    LookupTime stats.ResultSummary
    ConnectTime stats.ResultSummary
    DuplicateSYN int
    DuplicateSYNACK int
    FailedCurls int

@@ -113,84 +113,110 @@ func MakeDNSPolicy(namespace string, name string, numDomains int) v3.NetworkPoli
}

// RunDNSPerfTests runs a DNS performance test
func RunDNSPerfTests(ctx context.Context, clients config.Clients, testDuration int, namespace string, webServerImage string, perfImage string) (*Results, error) {
func RunDNSPerfTests(ctx context.Context, clients config.Clients, testConfig *config.TestConfig, webServerImage string, perfImage string) (*Results, error) {

    var results Results
    log.Debug("entering RunDNSPerfTests function")
    // setup a deployment to scale up and down repeatedly (to eat felix cpu)
    scaleDep, err := utils.GetOrCreateDeployment(ctx, clients,
        makeDeployment(
            namespace,
            "dnsscale",
            int32(0),
            false,
            []string{"default-pool"},
            webServerImage,
            []string{"sh", "-c", "while true; do echo `date`: MARK; sleep 10; done"},
        ),
    )
    if err != nil {
        return &results, err
    }
    // setup test pods (daemonset)
    testpods, err := DeployDNSPerfPods(ctx, clients, false, "dnsperf", namespace, perfImage)
    if err != nil {
        return &results, err
    }
    // setup tcpdump on nodes (deploy network tools as host-networked daemonset, figure out main interface, run tcpdump)
    tcpdumppods, err := DeployDNSPerfPods(ctx, clients, true, "tcpdump", namespace, perfImage)
    if err != nil {
        return &results, err
    }
    // setup target pods
    for i := 0; i < 4; i++ {
        thisname := fmt.Sprintf("headless%d", i)
        _, err = utils.GetOrCreateDeployment(ctx, clients,
    var scaleDep appsv1.Deployment
    if testConfig.DNSPerf.RunStress {
        // setup a deployment to scale up and down repeatedly (to eat felix cpu)
        var err error
        scaleDep, err = utils.GetOrCreateDeployment(ctx, clients,
            makeDeployment(
                namespace,
                thisname,
                int32(25),
                testConfig.TestNamespace,
                "dnsscale",
                int32(0),
                false,
                []string{"infrastructure"},
                []string{"default-pool"},
                webServerImage,
                []string{},
                []string{"sh", "-c", "while true; do echo `date`: MARK; sleep 10; done"},
            ),
        )
        if err != nil {
            return &results, err
        }
    }

    _, err = utils.WaitForTestPods(ctx, clients, namespace, "app=dnsperf")
    // setup test pods (daemonset)
    testpods, err := DeployDNSPerfPods(ctx, clients, false, "dnsperf", testConfig.TestNamespace, perfImage)
    if err != nil {
        return &results, err
    }
    // setup tcpdump on nodes (deploy network tools as host-networked daemonset, figure out main interface, run tcpdump)
    tcpdumppods, err := DeployDNSPerfPods(ctx, clients, true, "tcpdump", testConfig.TestNamespace, perfImage)
    if err != nil {
        return &results, err
    }
    // setup target pods
    thisname := fmt.Sprintf("headless%d", 0)
    _, err = utils.GetOrCreateDeployment(ctx, clients,
        makeDeployment(
            testConfig.TestNamespace,
            thisname,
            int32(testConfig.DNSPerf.NumTargetPods),
            false,
            []string{"infrastructure"},
            webServerImage,
            []string{},
        ),
    )
    if err != nil {
        return &results, err
    }

    testdomains, err := getPodFQDNs(ctx, clients, namespace)
    _, err = utils.WaitForTestPods(ctx, clients, testConfig.TestNamespace, "app=dnsperf")
    if err != nil {
        return &results, err
    }

    var testdomains []string
    if testConfig.DNSPerf.TargetType == "pod" {
        log.Info("Using pod FQDNs as targets")
        testdomains, err = getPodFQDNs(ctx, clients, testConfig.TestNamespace)
        if err != nil {
            return &results, err
        }
    } else {
        log.Info("Using service FQDNs as targets")
        svcs := corev1.ServiceList{}
        err := clients.CtrlClient.List(ctx, &svcs, ctrlclient.InNamespace(testConfig.TestNamespace))
        if err != nil {
            log.WithError(err).Error("failed to list services")
            return &results, err
        }
        for _, svc := range svcs.Items {
            testdomains = append(testdomains, fmt.Sprintf("%s.%s.svc.cluster.local", svc.Name, testConfig.TestNamespace))
        }
    }
    if len(testdomains) == 0 {
        log.Info("No test domains found, skipping test")
        return &results, fmt.Errorf("no test domains found")
    }

    err = checkTestPods(ctx, clients, testpods)
    if err != nil {
        return &results, err
    }

    testctx, cancel := context.WithTimeout(ctx, time.Duration(testDuration)*time.Second)
    testctx, cancel := context.WithTimeout(ctx, time.Duration(testConfig.Duration)*time.Second)
    defer cancel()
    log.Debugf("Created test context: %+v", testctx)
    // kick off per-node threads to run tcpdump
    for i, pod := range tcpdumppods {
        go func() {
            err = runTCPDump(testctx, clients, &pod, testpods[i], testDuration+60)
            if err != nil {
                log.WithError(err).Error("failed to run tcpdump")
            }
        }()
    }
    log.Info("tcpdump threads started")

    go scaleDeploymentLoop(testctx, clients, scaleDep, int32(24), 10*time.Second)
    if testConfig.DNSPerf.TestDNSPolicy {
        // kick off per-node threads to run tcpdump
        for i, pod := range tcpdumppods {
            go func() {
                err = runTCPDump(testctx, clients, &pod, testpods[i], testConfig.Duration+60)
                if err != nil {
                    log.WithError(err).Error("failed to run tcpdump")
                }
            }()
        }
        log.Info("tcpdump threads started")
    }

    if testConfig.DNSPerf.RunStress {
        go scaleDeploymentLoop(testctx, clients, scaleDep, int32(24), 10*time.Second)
    }

    // kick off per-node threads to run curl commands
    var rawresults []CurlResult

@@ -203,18 +229,18 @@ func RunDNSPerfTests(ctx context.Context, clients config.Clients, testDuration i
        for {
            domain := testdomains[i%(len(testdomains))]
            result, err := runDNSPerfTest(testctx, &pod, domain)
            if testctx.Err() != nil {
                // Probably ctx expiry or cancellation, don't append result or log errors in this case
                break
            }
            if err != nil {
                log.WithError(err).Errorf("failed to run curl to %s", domain)
            } else if result.Success {
                // Since ConnectTime includes LookupTime, we need to subtract LookupTime from ConnectTime to get the actual connect time
                result.ConnectTime = result.ConnectTime - result.LookupTime
            }
            log.Infof("appending result: %+v", result)
            log.Debugf("appending result: %+v", result)
            rawresults = append(rawresults, result)
            if testctx.Err() != nil {
                // Probably ctx expiry or cancellation
                break
            }
            log.Debugf("current test context: %+v", testctx)
            i++
        }

@@ -222,18 +248,20 @@ func RunDNSPerfTests(ctx context.Context, clients config.Clients, testDuration i
    }
    wg.Wait()

    log.Infof("rawresults: %+v", rawresults)
    log.Debugf("rawresults: %+v", rawresults)
    results = processResults(rawresults)

    // add up the duplicate SYN numbers from each tcpdump pod
    results.DuplicateSYN = 0
    for _, pod := range tcpdumppods {
        duplicateSYN, duplicateSYNACK, err := countDuplicateSYN(ctx, &pod)
        if err != nil {
            return &results, err
    if testConfig.DNSPerf.TestDNSPolicy {
        // add up the duplicate SYN numbers from each tcpdump pod
        results.DuplicateSYN = 0
        for _, pod := range tcpdumppods {
            duplicateSYN, duplicateSYNACK, err := countDuplicateSYN(ctx, &pod)
            if err != nil {
                return &results, err
            }
            results.DuplicateSYN += duplicateSYN
            results.DuplicateSYNACK += duplicateSYNACK
        }
        results.DuplicateSYN += duplicateSYN
        results.DuplicateSYNACK += duplicateSYNACK
    }
    log.Infof("Results: %+v", results)
    return &results, nil

@@ -261,8 +289,8 @@ func getPodFQDNs(ctx context.Context, clients config.Clients, namespace string)
func processResults(rawresults []CurlResult) Results {
    log.Debug("entering processResults function")
    results := Results{
        LookupTime: map[int]float64{},
        ConnectTime: map[int]float64{},
        LookupTime: stats.ResultSummary{},
        ConnectTime: stats.ResultSummary{},
        DuplicateSYN: 0,
        FailedCurls: 0,
        SuccessfulCurls: 0,

@@ -284,19 +312,19 @@ func processResults(rawresults []CurlResult) Results {
            results.FailedCurls++
        }
    }
    sort.Float64s(lookupTimes)
    sort.Float64s(connectTimes)

    // Now we have sorted slices, we can calculate percentiles by picking the value at the appropriate index
    // (e.g. if we had 100 results, the 50th percentile would be the value at index 50, etc.)
    percentiles := []int{50, 75, 90, 95, 99}
    results.LookupTime = make(map[int]float64)
    results.ConnectTime = make(map[int]float64)
    for _, p := range percentiles {
        results.LookupTime[p] = lookupTimes[int(float64(p)/100*float64(len(lookupTimes)))]
        log.Infof("lookupTime: %d percentile: %f", p, results.LookupTime[p])
        results.ConnectTime[p] = connectTimes[int(float64(p)/100*float64(len(connectTimes)))]
        log.Infof("connectTime: %d percentile: %f", p, results.ConnectTime[p])
    if len(lookupTimes) == 0 {
        log.Info("No successful curls, skipping percentiles")
        return results
    }
    var err error
    results.LookupTime, err = stats.SummarizeResults(lookupTimes)
    if err != nil {
        log.WithError(err).Error("failed to summarize lookup times")
        return results
    }
    results.ConnectTime, err = stats.SummarizeResults(connectTimes)
    if err != nil {
        log.WithError(err).Error("failed to summarize connect times")
    }
    return results
}

@@ -311,10 +339,12 @@ func runDNSPerfTest(ctx context.Context, srcPod *corev1.Pod, target string) (Cur
    result.Target = target
    result.Success = true
    cmdfrag := `curl -m 8 -w '{"time_lookup": %{time_namelookup}, "time_connect": %{time_connect}}\n' -s -o /dev/null`
    cmd := fmt.Sprintf("%s %s", cmdfrag, target)
    cmd := fmt.Sprintf("%s %s:8080", cmdfrag, target)
    stdout, _, err := utils.ExecCommandInPod(ctx, srcPod, cmd, 10)
    if err != nil {
        log.WithError(err).Error("failed to run curl command")
        if ctx.Err() == nil { // Only log error if context is still valid
            log.WithError(err).Error("failed to run curl command")
        }
        result.Success = false
    } else {
        err = json.Unmarshal([]byte(stdout), &result)

@@ -376,7 +406,7 @@ func runTCPDump(ctx context.Context, clients config.Clients, pod *corev1.Pod, te
    log.Infof("nic=%s", nic)

    // run tcpdump command until timeout
    cmd = fmt.Sprintf(`tcpdump -s0 -w dump.cap -i %s port 80`, nic)
    cmd = fmt.Sprintf(`tcpdump -s0 -w dump.cap -i %s port 8080`, nic)
    var out string
    out, _, err = utils.ExecCommandInPod(ctx, pod, cmd, timeout+30)
    if err != nil {

@@ -403,7 +433,7 @@ func countDuplicateSYN(ctx context.Context, pod *corev1.Pod) (int, int, error) {
            log.Info("tcpdump file was truncated, ignoring")
            return processTCPDumpOutput(stdout)
        } else {
            log.Infof("Hit error running command, retrying: %s", stderr)
            log.WithError(err).Infof("Hit error running command %s, retrying. stderr: %s stdout: %s", cmd, stderr, stdout)
        }
        time.Sleep(1 * time.Second)
    }

@@ -509,6 +539,15 @@ func makeDNSPerfPod(nodename string, namespace string, podname string, image str
            Namespace: namespace,
        },
        Spec: corev1.PodSpec{
            AutomountServiceAccountToken: utils.BoolPtr(false),
            SecurityContext: &corev1.PodSecurityContext{
                RunAsNonRoot: utils.BoolPtr(true),
                RunAsGroup: utils.Int64Ptr(1000),
                RunAsUser: utils.Int64Ptr(1000),
                SeccompProfile: &corev1.SeccompProfile{
                    Type: corev1.SeccompProfileTypeRuntimeDefault,
                },
            },
            Containers: []corev1.Container{
                {
                    Name: "dnsperf",

@@ -517,6 +556,15 @@ func makeDNSPerfPod(nodename string, namespace string, podname string, image str
                        "sh", "-c",
                        "while true; do echo `date`: MARK; sleep 10; done",
                    },
                    SecurityContext: &corev1.SecurityContext{
                        Privileged: utils.BoolPtr(false),
                        AllowPrivilegeEscalation: utils.BoolPtr(false),
                        ReadOnlyRootFilesystem: utils.BoolPtr(false),
                        Capabilities: &corev1.Capabilities{
                            Drop: []corev1.Capability{"ALL"},
                        },
                    },
                    ImagePullPolicy: corev1.PullAlways,
                },
            },
            NodeName: nodename,

@@ -586,11 +634,36 @@ func makeDeployment(namespace string, depname string, replicas int32, hostnetwor
            },
        },
        Spec: corev1.PodSpec{
            AutomountServiceAccountToken: utils.BoolPtr(false),
            SecurityContext: &corev1.PodSecurityContext{
                RunAsNonRoot: utils.BoolPtr(true),
                RunAsGroup: utils.Int64Ptr(1000),
                RunAsUser: utils.Int64Ptr(1000),
                SeccompProfile: &corev1.SeccompProfile{
                    Type: corev1.SeccompProfileTypeRuntimeDefault,
                },
            },
            Containers: []corev1.Container{
                {
                    Name: depname,
                    Image: image,
                    Args: args,
                    SecurityContext: &corev1.SecurityContext{
                        Privileged: utils.BoolPtr(false),
                        AllowPrivilegeEscalation: utils.BoolPtr(false),
                        ReadOnlyRootFilesystem: utils.BoolPtr(false),
                        Capabilities: &corev1.Capabilities{
                            Drop: []corev1.Capability{"ALL"},
                        },
                    },
                    Ports: []corev1.ContainerPort{
                        {
                            Name: "http",
                            ContainerPort: 8080,
                            Protocol: corev1.ProtocolTCP,
                        },
                    },
                    ImagePullPolicy: corev1.PullAlways,
                },
            },
            HostNetwork: hostnetwork,

@@ -622,7 +695,7 @@ func scaleDeploymentLoop(ctx context.Context, clients config.Clients, deployment
        log.Warning("failed to scale deployment up")
    }
    if ctx.Err() != nil {
        log.Info("Context expired? Quitting scaleDeploymentLoop")
        log.Info("Context expired. Quitting scaleDeploymentLoop")
        return
    }
    time.Sleep(sleeptime)

@@ -19,11 +19,11 @@ import (
    "reflect"
    testing "testing"

    "github.com/projectcalico/tiger-bench/pkg/stats"
    "github.com/stretchr/testify/require"
    v3 "github.com/tigera/api/pkg/apis/projectcalico/v3"
    "github.com/tigera/api/pkg/lib/numorstring"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "github.com/stretchr/testify/require"

)

func TestProcessTCPDumpOutput(t *testing.T) {

@@ -70,7 +70,7 @@ func TestProcessTCPDumpOutput(t *testing.T) {
    `
    expectedsyn = 2
    expectedsynack = 0
    resultsyn, resultsynack, err= processTCPDumpOutput(input)
    resultsyn, resultsynack, err = processTCPDumpOutput(input)
    require.NoError(t, err)
    if resultsyn != expectedsyn {
        panic("Unexpected number of duplicate SYN packets")

@@ -245,8 +245,8 @@ func TestProcessResults(t *testing.T) {
    // Test case 1: Empty input
    var rawResults []CurlResult
    expected := Results{
        LookupTime: map[int]float64{},
        ConnectTime: map[int]float64{},
        LookupTime: stats.ResultSummary{},
        ConnectTime: stats.ResultSummary{},
        DuplicateSYN: 0,
        FailedCurls: 0,
        SuccessfulCurls: 0,

@@ -279,21 +279,28 @@ func TestProcessResults(t *testing.T) {
        },
    }
    expected = Results{
        LookupTime: map[int]float64{
            50: 0.234,
            75: 0.345,
            90: 0.345,
            95: 0.345,
            99: 0.345,
        LookupTime: stats.ResultSummary{
            Min: 0.123,
            Max: 0.345,
            Average: 0.23399999999999999,
            P50: 0.234,
            P75: 0.345,
            P90: 0.345,
            P99: 0.345,
            NumDataPoints: 3,
        },
        ConnectTime: map[int]float64{
            50: 0.567,
            75: 0.678,
            90: 0.678,
            95: 0.678,
            99: 0.678,
        ConnectTime: stats.ResultSummary{
            Min: 0.456,
            Max: 0.678,
            Average: 0.5670000000000001,
            P50: 0.567,
            P75: 0.678,
            P90: 0.678,
            P99: 0.678,
            NumDataPoints: 3,
        },
        DuplicateSYN: 0,
        DuplicateSYNACK: 0,
        FailedCurls: 0,
        SuccessfulCurls: 3,
    }

@@ -325,21 +332,28 @@ func TestProcessResults(t *testing.T) {
        },
    }
    expected = Results{
        LookupTime: map[int]float64{
            50: 0.234,
            75: 0.234,
            90: 0.234,
            95: 0.234,
            99: 0.234,
        LookupTime: stats.ResultSummary{
            Min: 0.234,
            Max: 0.234,
            Average: 0.234,
            P50: 0.234,
            P75: 0.234,
            P90: 0.234,
            P99: 0.234,
            NumDataPoints: 1,
        },
        ConnectTime: map[int]float64{
            50: 0.567,
            75: 0.567,
            90: 0.567,
            95: 0.567,
            99: 0.567,
        ConnectTime: stats.ResultSummary{
            Min: 0.567,
            Max: 0.567,
            Average: 0.567,
            P50: 0.567,
            P75: 0.567,
            P90: 0.567,
            P99: 0.567,
            NumDataPoints: 1,
        },
        DuplicateSYN: 0,
        DuplicateSYNACK: 0,
        FailedCurls: 2,
        SuccessfulCurls: 1,
    }

@@ -20,24 +20,31 @@ import (
    "github.com/projectcalico/tiger-bench/pkg/config"
    "github.com/projectcalico/tiger-bench/pkg/dnsperf"
    "github.com/projectcalico/tiger-bench/pkg/results"
    "github.com/projectcalico/tiger-bench/pkg/stats"
    "github.com/stretchr/testify/require"
)

func TestCreateESDoc(t *testing.T) {
    dnsperf := dnsperf.Results{
        LookupTime: map[int]float64{
            50: 0.003640,
            75: 0.004745,
            90: 0.006914,
            95: 0.008833,
            99: 0.013641,
        LookupTime: stats.ResultSummary{
            Min: 0.001,
            Max: 0.013641,
            Average: 0.004745,
            P50: 0.003640,
            P75: 0.004745,
            P90: 0.006914,
            P99: 0.013641,
            NumDataPoints: 17,
        },
        ConnectTime: map[int]float64{
            50: 0.000439,
            75: 0.000542,
            90: 0.000880,
            95: 0.001406,
            99: 0.003917,
        ConnectTime: stats.ResultSummary{
            Min: 0.000239,
            Max: 0.003917,
            Average: 0.000542,
            P50: 0.000439,
            P75: 0.000542,
            P90: 0.000880,
            P99: 0.003917,
            NumDataPoints: 17,
        },
        DuplicateSYN: 101,
        DuplicateSYNACK: 0,

@@ -54,14 +61,14 @@ func TestCreateESDoc(t *testing.T) {
        NumPods: 10,
        CalicoNodeCPULimit: "40m",
        DNSPerf: &config.DNSConfig{
            NumDomains: 0,
            Mode: "Inline",
            NumDomains: 0,
            Mode: "Inline",
        },
    },
    DNSPerf: &dnsperf,
}

    expectedDoc := `{"config":{"TestKind":"dnsperf","Encap":"none","Dataplane":"bpf","NumPolicies":30,"NumServices":20,"NumPods":10,"HostNetwork":false,"TestNamespace":"","Iterations":0,"Duration":0,"DNSPerf":{"NumDomains":0,"Mode":"Inline"},"Perf":null,"CalicoNodeCPULimit":"40m","LeaveStandingConfig":false},"ClusterDetails":{"Cloud":"","Provisioner":"","NodeType":"","NodeOS":"","NodeKernel":"","NodeArch":"","NumNodes":0,"Dataplane":"","IPFamily":"","Encapsulation":"","WireguardEnabled":false,"Product":"","CalicoVersion":"","K8SVersion":"","CRIVersion":"","CNIOption":""},"dnsperf":{"LookupTime":{"50":0.00364,"75":0.004745,"90":0.006914,"95":0.008833,"99":0.013641},"ConnectTime":{"50":0.000439,"75":0.000542,"90":0.00088,"95":0.001406,"99":0.003917},"DuplicateSYN":101,"DuplicateSYNACK":0,"FailedCurls":16,"SuccessfulCurls":1326}}`
    expectedDoc := `{"config":{"TestKind":"dnsperf","Encap":"none","Dataplane":"bpf","NumPolicies":30,"NumServices":20,"NumPods":10,"HostNetwork":false,"TestNamespace":"","Iterations":0,"Duration":0,"DNSPerf":{"NumDomains":0,"Mode":"Inline","RunStress":false,"TestDNSPolicy":false,"NumTargetPods":0,"TargetType":""},"Perf":null,"TTFRConfig":null,"CalicoNodeCPULimit":"40m","LeaveStandingConfig":false},"ClusterDetails":{"Cloud":"","Provisioner":"","NodeType":"","NodeOS":"","NodeKernel":"","NodeArch":"","NumNodes":0,"Dataplane":"","IPFamily":"","Encapsulation":"","WireguardEnabled":false,"Product":"","CalicoVersion":"","K8SVersion":"","CRIVersion":"","CNIOption":""},"dnsperf":{"LookupTime":{"min":0.001,"max":0.013641,"avg":0.004745,"P50":0.00364,"P75":0.004745,"P90":0.006914,"P99":0.013641,"datapoints":17},"ConnectTime":{"min":0.000239,"max":0.003917,"avg":0.000542,"P50":0.000439,"P75":0.000542,"P90":0.00088,"P99":0.003917,"datapoints":17},"DuplicateSYN":101,"DuplicateSYNACK":0,"FailedCurls":16,"SuccessfulCurls":1326}}`

    doc, err := createESDoc(result)
    require.NoError(t, err)

@@ -77,7 +84,7 @@ func TestCreateESDocBlank(t *testing.T) {
        DNSPerf: &dnsperf,
    }

    expectedDoc := `{"config":{"TestKind":"","Encap":"","Dataplane":"","NumPolicies":0,"NumServices":0,"NumPods":0,"HostNetwork":false,"TestNamespace":"","Iterations":0,"Duration":0,"DNSPerf":null,"Perf":null,"CalicoNodeCPULimit":"","LeaveStandingConfig":false},"ClusterDetails":{"Cloud":"","Provisioner":"","NodeType":"","NodeOS":"","NodeKernel":"","NodeArch":"","NumNodes":0,"Dataplane":"","IPFamily":"","Encapsulation":"","WireguardEnabled":false,"Product":"","CalicoVersion":"","K8SVersion":"","CRIVersion":"","CNIOption":""},"dnsperf":{"LookupTime":null,"ConnectTime":null,"DuplicateSYN":0,"DuplicateSYNACK":0,"FailedCurls":0,"SuccessfulCurls":0}}`
    expectedDoc := `{"config":{"TestKind":"","Encap":"","Dataplane":"","NumPolicies":0,"NumServices":0,"NumPods":0,"HostNetwork":false,"TestNamespace":"","Iterations":0,"Duration":0,"DNSPerf":null,"Perf":null,"TTFRConfig":null,"CalicoNodeCPULimit":"","LeaveStandingConfig":false},"ClusterDetails":{"Cloud":"","Provisioner":"","NodeType":"","NodeOS":"","NodeKernel":"","NodeArch":"","NumNodes":0,"Dataplane":"","IPFamily":"","Encapsulation":"","WireguardEnabled":false,"Product":"","CalicoVersion":"","K8SVersion":"","CRIVersion":"","CNIOption":""},"dnsperf":{"LookupTime":{},"ConnectTime":{},"DuplicateSYN":0,"DuplicateSYNACK":0,"FailedCurls":0,"SuccessfulCurls":0}}`
    doc, err := createESDoc(result)
    require.NoError(t, err)

@@ -269,7 +269,8 @@ func DeployIperfPods(ctx context.Context, clients config.Clients, namespace stri
    nodelist := &corev1.NodeList{}
    err := clients.CtrlClient.List(ctx, nodelist)
    if err != nil {
        return fmt.Errorf("failed to list nodes: %w", err)
        log.WithError(err).Error("failed to list nodes")
        return err
    }
    for _, node := range nodelist.Items {
        if node.Labels["tigera.io/test-nodepool"] == "default-pool" {

@@ -278,7 +279,7 @@ func DeployIperfPods(ctx context.Context, clients config.Clients, namespace stri
            log.Debugf("found nodename: %s", nodename)
            podname := fmt.Sprintf("iperf-srv-%s", nodename)
            cmd := fmt.Sprintf("iperf3 -s -p %d", port)
            pod := makePod(nodename, namespace, podname, hostnet, image, cmd)
            pod := makePod(nodename, namespace, podname, hostnet, image, cmd, port)
            _, err = utils.GetOrCreatePod(ctx, clients, pod)
            if err != nil {
                log.WithError(err).Error("error making iperf pod")

@@ -314,7 +315,7 @@ func parseIperfOutput(stdout string) (int, float64, string, error) {
    return retransmits, throughput, "Mbits/sec", nil
}

func makePod(nodename string, namespace string, podname string, hostnetwork bool, image string, command string) corev1.Pod {
func makePod(nodename string, namespace string, podname string, hostnetwork bool, image string, command string, port int) corev1.Pod {
    podname = utils.SanitizeString(podname)
    pod := corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{

@@ -326,6 +327,15 @@ func makePod(nodename string, namespace string, podname string, hostnetwork bool
            Namespace: namespace,
        },
        Spec: corev1.PodSpec{
            AutomountServiceAccountToken: utils.BoolPtr(false),
            SecurityContext: &corev1.PodSecurityContext{
                RunAsNonRoot: utils.BoolPtr(true),
                RunAsGroup: utils.Int64Ptr(1000),
                RunAsUser: utils.Int64Ptr(1000),
                SeccompProfile: &corev1.SeccompProfile{
                    Type: corev1.SeccompProfileTypeRuntimeDefault,
                },
            },
            Containers: []corev1.Container{
                {
                    Name: "iperf",

@@ -335,10 +345,26 @@ func makePod(nodename string, namespace string, podname string, hostnetwork bool
                        "-c",
                        command,
                    },
                    SecurityContext: &corev1.SecurityContext{
                        Privileged: utils.BoolPtr(false),
                        AllowPrivilegeEscalation: utils.BoolPtr(false),
                        ReadOnlyRootFilesystem: utils.BoolPtr(false),
                        Capabilities: &corev1.Capabilities{
                            Drop: []corev1.Capability{"ALL"},
                        },
                    },
                    ImagePullPolicy: corev1.PullAlways,
                    Ports: []corev1.ContainerPort{
                        {
                            Name: "test-port",
                            ContainerPort: int32(port),
                            Protocol: corev1.ProtocolTCP,
                        },
                    },
                },
            },
            NodeName: nodename,
            RestartPolicy: "OnFailure",
            RestartPolicy: corev1.RestartPolicyOnFailure,
            HostNetwork: hostnetwork,
        },
    }

@@ -363,6 +389,7 @@ func makeSvc(namespace string, podname string, port int) corev1.Service {
            },
            Ports: []corev1.ServicePort{
                {
                    Name: "test-port",
                    Port: int32(port),
                },
            },

@@ -46,11 +46,12 @@ func DeployPolicies(ctx context.Context, clients config.Clients, numPolicies int

    // deploy policies
    currentNumPolicies, err := countPolicies(ctx, clients, namespace, "policy-")
    log.Info("Current number of policies: ", currentNumPolicies)
    if err != nil {
        return err
    }
    if numPolicies > currentNumPolicies {
        // create policies
        // If we do not have enough policies, create them
        podSelector := metav1.LabelSelector{
            MatchExpressions: []metav1.LabelSelectorRequirement{
                {Key: "app", Operator: metav1.LabelSelectorOpExists},

@@ -95,12 +96,12 @@ func DeployPolicies(ctx context.Context, clients config.Clients, numPolicies int
        return overallError

    } else if numPolicies < currentNumPolicies {
        // delete policies
        // if we have too many policies, delete some
        // Spin up a channel with multiple threads to delete policies, because a single thread is limited to 5 per second

        // make a list of ints from currentNumPolicies to numPolicies
        var policyIndexes []int
        for i := currentNumPolicies; i > numPolicies; i-- {
        for i := currentNumPolicies-1; i >= numPolicies; i-- {
            policyIndexes = append(policyIndexes, i)
        }
        var wg sync.WaitGroup

@@ -315,7 +315,8 @@ func DeployQperfPods(ctx context.Context, clients config.Clients, namespace stri
    nodelist := &corev1.NodeList{}
    err := clients.CtrlClient.List(ctx, nodelist)
    if err != nil {
        return fmt.Errorf("failed to list nodes: %w", err)
        log.WithError(err).Error("failed to list nodes")
        return err
    }
    for _, node := range nodelist.Items {
        if node.Labels["tigera.io/test-nodepool"] == "default-pool" {

@@ -323,7 +324,7 @@ func DeployQperfPods(ctx context.Context, clients config.Clients, namespace stri
            nodename := node.ObjectMeta.Name
            log.Debugf("found nodename: %s", nodename)
            podname := fmt.Sprintf("qperf-srv-%s", nodename)
            pod := makeQperfPod(nodename, namespace, podname, image, hostnet, controlPort)
            pod := makeQperfPod(nodename, namespace, podname, image, hostnet, controlPort, testPort)
            _, err = utils.GetOrCreatePod(ctx, clients, pod)
            if err != nil {
                log.WithError(err).Error("error making qperf pod")

@@ -341,9 +342,9 @@ func DeployQperfPods(ctx context.Context, clients config.Clients, namespace stri
    return nil
}

func makeQperfPod(nodename string, namespace string, podname string, image string, hostnetwork bool, port int) corev1.Pod {
func makeQperfPod(nodename string, namespace string, podname string, image string, hostnetwork bool, controlPort int, testPort int) corev1.Pod {
    podname = utils.SanitizeString(podname)
    controlPortStr := strconv.Itoa(port)
    controlPortStr := strconv.Itoa(controlPort)

    pod := corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{

@@ -355,6 +356,15 @@ func makeQperfPod(nodename string, namespace string, podname string, image strin
            Namespace: namespace,
        },
        Spec: corev1.PodSpec{
            AutomountServiceAccountToken: utils.BoolPtr(false),
            SecurityContext: &corev1.PodSecurityContext{
                RunAsNonRoot: utils.BoolPtr(true),
                RunAsGroup: utils.Int64Ptr(1000),
                RunAsUser: utils.Int64Ptr(1000),
                SeccompProfile: &corev1.SeccompProfile{
                    Type: corev1.SeccompProfileTypeRuntimeDefault,
                },
            },
            Containers: []corev1.Container{
                {
                    Name: "qperf",

@@ -364,6 +374,26 @@ func makeQperfPod(nodename string, namespace string, podname string, image strin
                        "-lp",
                        controlPortStr,
                    },
                    SecurityContext: &corev1.SecurityContext{
                        Privileged: utils.BoolPtr(false),
                        AllowPrivilegeEscalation: utils.BoolPtr(false),
                        ReadOnlyRootFilesystem: utils.BoolPtr(true),
                        Capabilities: &corev1.Capabilities{
                            Drop: []corev1.Capability{"ALL"},
                        },
                    },
                    Ports: []corev1.ContainerPort{
                        {
                            Name: "control",
                            ContainerPort: int32(controlPort),
                            Protocol: corev1.ProtocolTCP,
                        },
                        {
                            Name: "data",
                            ContainerPort: int32(testPort),
                            Protocol: corev1.ProtocolTCP,
                        },
                    },
                },
            },
            NodeName: nodename,

@@ -20,6 +20,7 @@ import (
    "github.com/projectcalico/tiger-bench/pkg/dnsperf"
    "github.com/projectcalico/tiger-bench/pkg/iperf"
    "github.com/projectcalico/tiger-bench/pkg/qperf"
    "github.com/projectcalico/tiger-bench/pkg/ttfr"
    // "github.com/projectcalico/tiger-bench/pkg/stats"
)

@@ -29,7 +30,7 @@ type Result struct {
    ClusterDetails cluster.Details `json:"ClusterDetails"`
    // CalicoNodeCPU stats.MinMaxAvg `json:"CalicoNodeCPU,omitempty"`
    // CalicoNodeMemory stats.MinMaxAvg `json:"CalicoNodeMemory,omitempty"`
    TTFR map[string]string `json:"ttfr,omitempty"`
    TTFR []*ttfr.ResultSummary `json:"ttfr,omitempty"`
    IPerf *iperf.ResultSummary `json:"iperf,omitempty"`
    QPerf *qperf.ResultSummary `json:"thruput-latency,omitempty"`
    DNSPerf *dnsperf.Results `json:"dnsperf,omitempty"`

@@ -87,14 +87,15 @@ type MinMaxAvg struct {

// ResultSummary holds a statistical summary of a set of results
type ResultSummary struct {
    Min float64 `json:"min,omitempty"`
    Max float64 `json:"max,omitempty"`
    Average float64 `json:"avg,omitempty"`
    P50 float64 `json:"P50,omitempty"`
    P75 float64 `json:"P75,omitempty"`
    P90 float64 `json:"P90,omitempty"`
    P99 float64 `json:"P99,omitempty"`
    Unit string `json:"unit,omitempty"`
    Min float64 `json:"min,omitempty"`
    Max float64 `json:"max,omitempty"`
    Average float64 `json:"avg,omitempty"`
    P50 float64 `json:"P50,omitempty"`
    P75 float64 `json:"P75,omitempty"`
    P90 float64 `json:"P90,omitempty"`
    P99 float64 `json:"P99,omitempty"`
    Unit string `json:"unit,omitempty"`
    NumDataPoints int `json:"datapoints,omitempty"`
}

// SummarizeResults summarizes the results

@@ -102,6 +103,7 @@ func SummarizeResults(results []float64) (ResultSummary, error) {
    log.Debug("Entering summarizeResults function")
    var err error
    summary := ResultSummary{}
    summary.NumDataPoints = len(results)
    if len(results) == 0 {
        log.Warning("No results to summarize")
        return summary, fmt.Errorf("no results to summarize")

@@ -118,6 +120,7 @@ func SummarizeResults(results []float64) (ResultSummary, error) {
        log.WithError(err).Warning("Error summarizing stats")
        return summary, err
    }
    log.Debugf("Summary: %+v", summary)
    return summary, nil
}

@@ -89,6 +89,7 @@ func TestSummarizeResults(t *testing.T) {
            P75: 4,
            P90: 5,
            P99: 5,
            NumDataPoints: 5,
        },
        err: false,
    },

@@ -103,6 +104,7 @@
            P75: -2,
            P90: -1,
            P99: -1,
            NumDataPoints: 5,
        },
        err: false,
    },

@@ -117,6 +119,7 @@
            P75: 2,
            P90: 4,
            P99: 4,
            NumDataPoints: 5,
        },
        err: false,
    },

@@ -131,6 +134,7 @@
            P75: 0,
            P90: 0,
            P99: 0,
            NumDataPoints: 0,
        },
        err: true,
    },

@@ -145,6 +149,7 @@
            P75: 42,
            P90: 42,
            P99: 42,
            NumDataPoints: 1,
        },
        err: false,
    },

@@ -158,13 +163,14 @@ func TestSummarizeResults(t *testing.T) {
        } else {
            assert.NoError(t, recderr)
        }
        assert.InDelta(t, tt.expected.Min, result.Min, 0.0001)
        assert.InDelta(t, tt.expected.Max, result.Max, 0.0001)
        assert.Equal(t, tt.expected.Min, result.Min)
        assert.Equal(t, tt.expected.Max, result.Max)
        assert.InDelta(t, tt.expected.Average, result.Average, 0.0001)
        assert.InDelta(t, tt.expected.P50, result.P50, 0.0001)
        assert.InDelta(t, tt.expected.P75, result.P75, 0.0001)
        assert.InDelta(t, tt.expected.P90, result.P90, 0.0001)
        assert.InDelta(t, tt.expected.P99, result.P99, 0.0001)
        assert.Equal(t, tt.expected.P50, result.P50)
        assert.Equal(t, tt.expected.P75, result.P75)
        assert.Equal(t, tt.expected.P90, result.P90)
        assert.Equal(t, tt.expected.P99, result.P99)
        assert.Equal(t, tt.expected.NumDataPoints, result.NumDataPoints)
    })
}
}

@@ -0,0 +1,400 @@
// Copyright (c) 2024-2025 Tigera, Inc. All rights reserved.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This "time to first response" (TTFR) test spins up a server pod on each
// node in the cluster, and then spins up client pods on each node in the
// cluster. The client pods start and send requests to the server pod, and
// record the amount of time it takes before they get a response. This is
// sometimes* a useful proxy for how long it's taking for Calico to program the
// rules for that pod (since pods start with a deny-all rule and calico-node
// must program the correct rules before it can talk to anything).
//
// * if `linuxPolicySetupTimeoutSeconds` is set in the CalicoNetworkSpec in
// the Installation resource, then pod startup will be delayed until policy
// is applied.
// This can be handy if your application pod wants its first request to
// always succeed.
// This is a Calico-specific feature that is not part of the CNI spec. See
// the [Calico documentation](https://docs.tigera.io/calico/latest/reference/configure-cni-plugins#enabling-policy-setup-timeout)
// for more information on this feature and how to enable it.
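//
// For illustration only (a hedged sketch based on the description above, not
// part of the recorded change): enabling that feature via the operator looks
// roughly like this in the Installation resource:
//
//	apiVersion: operator.tigera.io/v1
//	kind: Installation
//	metadata:
//	  name: default
//	spec:
//	  calicoNetwork:
//	    linuxPolicySetupTimeoutSeconds: 10
//
// See the linked docs for the authoritative field placement.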
package ttfr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/projectcalico/tiger-bench/pkg/config"
|
||||
"github.com/projectcalico/tiger-bench/pkg/stats"
|
||||
"github.com/projectcalico/tiger-bench/pkg/utils"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
)
|
||||
|
||||
// Results holds the results returned from ttfr
|
||||
type Results struct {
|
||||
TTFR []float64 `json:"ttfr,omitempty"`
|
||||
}
|
||||
|
||||
// ResultSummary holds a statistical summary of the results
|
||||
type ResultSummary struct {
|
||||
TTFRSummary stats.ResultSummary `json:"ttfrSummary,omitempty"`
|
||||
}
|
||||
|
||||
// RunTTFRTest runs a ttfr test
|
||||
func RunTTFRTest(ctx context.Context, clients config.Clients, testconfig *config.TestConfig, cfg config.Config) (Results, error) {
|
||||
ttfrResults := Results{}
|
||||
|
||||
nodelist := &corev1.NodeList{}
|
||||
err := clients.CtrlClient.List(ctx, nodelist, &client.ListOptions{
|
||||
LabelSelector: labels.SelectorFromSet(labels.Set{
|
||||
"tigera.io/test-nodepool": "default-pool",
|
||||
}),
|
||||
})
|
||||
if err != nil {
|
||||
return ttfrResults, fmt.Errorf("failed to list nodes: %w", err)
|
||||
}
|
||||
if len(nodelist.Items) == 0 {
|
||||
return ttfrResults, fmt.Errorf("no nodes found with label tigera.io/test-nodepool=default-pool")
|
||||
}
|
||||
targets := make([]string, len(nodelist.Items))
|
||||
for i, node := range nodelist.Items {
|
||||
// For each node in the cluster (with the test label):
|
||||
// Create server pod on this node
|
||||
podname := fmt.Sprintf("ttfr-srv-%.2d", i)
|
||||
pod := makePod(node.ObjectMeta.Name, testconfig.TestNamespace, podname, testconfig.HostNetwork, cfg.WebServerImage)
|
||||
_, err = utils.GetOrCreatePod(ctx, clients, pod)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error making server pod")
|
||||
return ttfrResults, err
|
||||
}
|
||||
// Wait for the server pod to be ready
|
||||
pods, err := utils.WaitForTestPods(ctx, clients, testconfig.TestNamespace, fmt.Sprintf("pod=%s", podname))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error waiting for server pod to be ready")
|
||||
return ttfrResults, err
|
||||
}
|
||||
if len(pods) == 0 {
|
||||
log.Error("no server pod found")
|
||||
}
|
||||
podIP := pods[0].Status.PodIP
|
||||
log.Infof("Server pod IP: %s", podIP)
|
||||
targets[i] = podIP
|
||||
}
|
||||
startTime := time.Now()
|
||||
endtime := startTime.Add(time.Duration(testconfig.Duration) * time.Second)
|
||||
period := 1000.0 / testconfig.TTFRConfig.Rate
|
||||
log.Debug("period: ", period, "ms")
|
||||
nextTime := startTime.Add(time.Duration(period) * time.Millisecond)
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Make slices to hold ttfrs and errors
|
||||
var ttfrs []float64
|
||||
var errors []error
|
||||
|
||||
numThreads := len(nodelist.Items) * testconfig.TTFRConfig.TestPodsPerNode
|
||||
sem := make(chan struct{}, numThreads)
|
||||
|
||||
outer:
|
||||
for loopcount := 0; true; loopcount++ {
|
||||
// For each node in the cluster (with the test label):
|
||||
for n, node := range nodelist.Items {
|
||||
// For each pod
|
||||
for p := range testconfig.TTFRConfig.TestPodsPerNode {
|
||||
sem <- struct{}{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer func() { <-sem }()
|
||||
// Create a pod on this node
|
||||
podname := fmt.Sprintf("ttfr-%.2d-%.2d-%.2d", loopcount, n, p)
|
||||
pod := makeTestPod(node.ObjectMeta.Name, testconfig.TestNamespace, podname, testconfig.HostNetwork, cfg.TTFRImage, targets[n])
|
||||
defer func() {
|
||||
// delete the pod
|
||||
_ = clients.CtrlClient.Delete(ctx, &pod)
|
||||
}()
|
||||
err = clients.CtrlClient.Create(ctx, &pod)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error making pod")
|
||||
ttfrs = append(ttfrs, 99999)
|
||||
errors = append(errors, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Wait for the pod to be ready
|
||||
_, err = utils.WaitForTestPods(ctx, clients, testconfig.TestNamespace, fmt.Sprintf("pod=%s", podname))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error waiting for pod to be ready")
|
||||
ttfrs = append(ttfrs, 99999)
|
||||
errors = append(errors, err)
|
||||
return
|
||||
}
|
||||
|
||||
ttfrSec, err := getPodTTFR(ctx, clients, pod)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "not found") {
|
||||
err = fmt.Errorf("pod not found: %s", pod.ObjectMeta.Name)
|
||||
ttfrs = append(ttfrs, 99999)
|
||||
errors = append(errors, err)
|
||||
return
|
||||
}
|
||||
err = fmt.Errorf("error getting pod TTFR: %w", err)
|
||||
ttfrs = append(ttfrs, 99999)
|
||||
errors = append(errors, err)
|
||||
return
|
||||
}
|
||||
ttfrs = append(ttfrs, ttfrSec)
|
||||
errors = append(errors, err)
|
||||
}()
|
||||
delay := time.Until(nextTime)
|
||||
if delay > 0 {
|
||||
log.Debugf("Sleeping for %s", delay)
|
||||
time.Sleep(delay)
|
||||
} else {
|
||||
log.Warning("unable to keep up with rate")
|
||||
if numThreads-len(sem) == 0 {
|
||||
log.Info("Not enough free pods to make requested rate, blocking until one is freed")
|
||||
}
|
||||
}
|
||||
nextTime = nextTime.Add(time.Duration(period) * time.Millisecond)
|
||||
// if we are at the end of the test, break out of the loop
|
||||
if time.Now().After(endtime) {
|
||||
log.Info("Time's up, stopping test (but allowing pods already requested to finish)")
|
||||
break outer
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
    wg.Wait()
    // we now have a slice of errors, and a matching slice of ttfrs.
    log.Debugf("Errors: %+v", errors)
    numerrs := 0
    numresults := 0
    for i, err := range errors {
        if err == nil {
            // copy all results that don't have an error to the results
            numresults++
            log.Debug("Copying over TTFR result: ", ttfrs[i])
            ttfrResults.TTFR = append(ttfrResults.TTFR, ttfrs[i])
        } else {
            numerrs++
            switch {
            case strings.Contains(err.Error(), "pod not found"):
                log.Info("error getting pod TTFR")
            case strings.Contains(err.Error(), "pod is deleting"):
                log.Info("Pod is deleting, skipping")
            default:
                log.WithError(err).Error("error getting pod TTFR")
            }
        }
    }
    log.Debugf("Test complete, got %d results and %d errors", numresults, numerrs)
    log.Debug("ttfrResults.TTFR length = ", len(ttfrResults.TTFR))
    return ttfrResults, nil
}

// getPodTTFR gets the TTFR from the pod logs (with retry), and deletes the pod when successful
func getPodTTFR(ctx context.Context, clients config.Clients, pod corev1.Pod) (float64, error) {
    // retry getting pod logs
    for j := 0; j < 20; j++ {
        // if pod isn't running yet, wait for it to be running
        podRunning, err := utils.IsPodRunning(ctx, clients, &pod)
        if !podRunning || err != nil {
            log.Info("Pod ", pod.ObjectMeta.Name, " is not running yet, retrying")
            time.Sleep(1 * time.Second)
            continue
        }
        logs, err := utils.GetPodLogs(ctx, clients, pod.ObjectMeta.Name, pod.ObjectMeta.Namespace)
        if err != nil {
            log.WithError(err).Error("error getting pod logs")
            if strings.Contains(err.Error(), "not found") {
                return 99999, err
            }
            time.Sleep(1 * time.Second)
            continue
        }
        // if we got a result: the ttfr container logs a JSON-escaped line
        // such as {\"ttfr_seconds\": 0.000716}, hence the escaped quotes.
        r := regexp.MustCompile(`{\\"ttfr_seconds\\": ([0-9].*\.[0-9].*)}`)
        results := r.FindStringSubmatch(logs)
        if len(results) == 0 {
            log.Info("No result found in logs")
            time.Sleep(1 * time.Second)
            continue
        }
        // Parse the result and append to list of results
        ttfrSec, err := strconv.ParseFloat(results[1], 64)
        if err != nil {
            log.WithError(err).Error("error parsing ttfr result")
            return ttfrSec, err
        }
        log.Info("TTFR result: ", ttfrSec, " from pod ", pod.ObjectMeta.Name)
        // delete the pod
        err = clients.CtrlClient.Delete(ctx, &pod)
        if err != nil {
            if strings.Contains(err.Error(), "not found") {
                return 99999, err
            }
            log.WithError(err).Error("error deleting pod")
            return ttfrSec, err
        }
        // Success! we made it all the way through without error
        return ttfrSec, nil
    }
    return 99999, fmt.Errorf("failed to get pod logs after 20 attempts for pod %s", pod.ObjectMeta.Name)
}
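
// Note: 99999 is the sentinel TTFR recorded for a failed attempt. Such
// entries always carry a non-nil error, so the tally loop in the caller
// filters them out and the sentinel never reaches the summary statistics.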

// SummarizeResults summarizes the results
func SummarizeResults(ttfrResults []*Results) ([]*ResultSummary, error) {
    log.Debug("Summarizing results")
    if len(ttfrResults) == 0 {
        return nil, fmt.Errorf("no results to summarize")
    }
    var resultSummaryList []*ResultSummary
    for _, result := range ttfrResults {
        // Summarize the results
        resultSummary := ResultSummary{}
        var err error
        // Calculate the summary statistics
        resultSummary.TTFRSummary, err = stats.SummarizeResults(result.TTFR)
        if err != nil {
            log.WithError(err).Error("error summarizing results")
            return nil, err
        }
        resultSummary.TTFRSummary.Unit = "seconds"
        // Add the summary to the list
        resultSummaryList = append(resultSummaryList, &resultSummary)
    }
    return resultSummaryList, nil
}

func makePod(nodename string, namespace string, podname string, hostnetwork bool, image string) corev1.Pod {
    podname = utils.SanitizeString(podname)
    pod := corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Labels: map[string]string{
                "app":  "ttfr",
                "pod":  podname,
                "node": nodename,
            },
            Name:      podname,
            Namespace: namespace,
        },
        Spec: corev1.PodSpec{
            AutomountServiceAccountToken: utils.BoolPtr(false),
            SecurityContext: &corev1.PodSecurityContext{
                RunAsNonRoot: utils.BoolPtr(true),
                RunAsGroup:   utils.Int64Ptr(1000),
                RunAsUser:    utils.Int64Ptr(1000),
                SeccompProfile: &corev1.SeccompProfile{
                    Type: corev1.SeccompProfileTypeRuntimeDefault,
                },
            },
            Containers: []corev1.Container{
                {
                    Name:  "ttfr",
                    Image: image,
                    SecurityContext: &corev1.SecurityContext{
                        Privileged:               utils.BoolPtr(false),
                        AllowPrivilegeEscalation: utils.BoolPtr(false),
                        ReadOnlyRootFilesystem:   utils.BoolPtr(false),
                    },
                    Ports: []corev1.ContainerPort{
                        {
                            Name:          "http",
                            ContainerPort: 8080,
                            Protocol:      corev1.ProtocolTCP,
                        },
                    },
                },
            },
            NodeName:      nodename,
            RestartPolicy: "OnFailure",
            HostNetwork:   hostnetwork,
        },
    }
    return pod
}
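
// makePod (above) appears to build the long-lived ttfr server pod, while
// makeTestPod (below) builds the short-lived client pods: the
// ADDRESS/PORT/PROTOCOL env vars point each client at the server pod IP
// recorded in targets earlier.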

func makeTestPod(nodename string, namespace string, podname string, hostnetwork bool, image string, target string) corev1.Pod {
    podname = utils.SanitizeString(podname)
    pod := corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Labels: map[string]string{
                "app":  "ttfr",
                "pod":  podname,
                "node": nodename,
            },
            Name:      podname,
            Namespace: namespace,
        },
        Spec: corev1.PodSpec{
            SecurityContext: &corev1.PodSecurityContext{
                RunAsNonRoot: utils.BoolPtr(true),
                RunAsGroup:   utils.Int64Ptr(1000),
                RunAsUser:    utils.Int64Ptr(1000),
            },
            Containers: []corev1.Container{
                {
                    Name:  "ttfr",
                    Image: image,
                    Env: []corev1.EnvVar{
                        {
                            Name:  "ADDRESS",
                            Value: target,
                        },
                        {
                            Name:  "PORT",
                            Value: "8080",
                        },
                        {
                            Name:  "PROTOCOL",
                            Value: "http",
                        },
                    },
                    SecurityContext: &corev1.SecurityContext{
                        Privileged:               utils.BoolPtr(false),
                        AllowPrivilegeEscalation: utils.BoolPtr(false),
                        ReadOnlyRootFilesystem:   utils.BoolPtr(true),
                        Capabilities: &corev1.Capabilities{
                            Drop: []corev1.Capability{"ALL"},
                        },
                    },
                    Ports: []corev1.ContainerPort{
                        {
                            Name:          "http",
                            ContainerPort: 8080,
                            Protocol:      corev1.ProtocolTCP,
                        },
                    },
                },
            },
            NodeName:      nodename,
            RestartPolicy: "Always",
            HostNetwork:   hostnetwork,
        },
    }
    return pod
}

@@ -31,12 +31,14 @@ import (
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/remotecommand"
    "k8s.io/kubernetes/pkg/client/conditions"
)

// ExecCommandInPod executes a command in a pod

@@ -100,6 +102,7 @@ func DeletePodsWithLabel(ctx context.Context, clients config.Clients, namespace
        return err
    }
    for _, pod := range podlist.Items {
        log.Info("Deleting pod: ", pod.Name)
        err = clients.CtrlClient.Delete(ctx, &pod)
        if ctrlclient.IgnoreNotFound(err) != nil { // Since we're deleting pods, don't worry if they're already gone
            log.WithError(err).Errorf("failed to delete pod %v", pod.Name)
@ -214,6 +217,30 @@ func DeleteServicesWithPrefix(ctx context.Context, clients config.Clients, names
|
|||
return nil
|
||||
}
|
||||
|
||||
// DeleteNetPolsInNamespace deletes network policies in a namespace
|
||||
func DeleteNetPolsInNamespace(ctx context.Context, clients config.Clients, namespace string) error {
|
||||
log.Debug("Entering DeleteNetPolInNamespace function")
|
||||
netpols := &networkingv1.NetworkPolicyList{}
|
||||
err := clients.CtrlClient.List(ctx, netpols, ctrlclient.InNamespace(namespace))
|
||||
if err != nil {
|
||||
log.WithError(err).Error("failed to list network policies")
|
||||
return err
|
||||
}
|
||||
for _, netpol := range netpols.Items {
|
||||
log.Debug("Deleting network policy: ", netpol.Name)
|
||||
err = clients.CtrlClient.Delete(ctx, &netpol)
|
||||
if err != nil {
|
||||
if ctrlclient.IgnoreNotFound(err) == nil {
|
||||
log.Infof("didn't find existing network policy %s", netpol.Name)
|
||||
} else {
|
||||
log.WithError(err).Errorf("failed to delete network policy %v", netpol.Name)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetOrCreateDeployment gets or creates a deployment if it does not exist
|
||||
func GetOrCreateDeployment(ctx context.Context, clients config.Clients, deployment appsv1.Deployment) (appsv1.Deployment, error) {
|
||||
log.Debug("Entering GetOrCreateDeployment function")
|
||||
|

@@ -237,18 +264,30 @@ func GetOrCreateDeployment(ctx context.Context, clients config.Clients, deployment appsv1.Deployment
    return deployment, nil
}

// DeleteDeployment deletes a deployment, ignoring if it didn't exist
func DeleteDeployment(ctx context.Context, clients config.Clients, namespace string, deploymentName string) error {
    log.Debug("Entering DeleteDeployment function")
    err := clients.CtrlClient.Delete(ctx, &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: deploymentName, Namespace: namespace}})
// DeleteDeploymentsWithPrefix deletes deployments whose names start with the given prefix
func DeleteDeploymentsWithPrefix(ctx context.Context, clients config.Clients, namespace string, deploymentName string) error {
    log.Debug("Entering DeleteDeploymentsWithPrefix function")
    deployments := appsv1.DeploymentList{}
    err := clients.CtrlClient.List(ctx, &deployments, ctrlclient.InNamespace(namespace))
    if err != nil {
        if ctrlclient.IgnoreNotFound(err) == nil {
            log.Infof("didn't find existing deployment %s", deploymentName)
            return nil
        }
        log.WithError(err).Error("failed to delete deployment")
        log.WithError(err).Error("failed to list deployments")
        return err
    }
    return err
    for _, deployment := range deployments.Items {
        if strings.HasPrefix(deployment.Name, deploymentName) {
            log.Debug("Deleting deployment: ", deployment.Name)
            err = clients.CtrlClient.Delete(ctx, &deployment)
            if err != nil {
                if ctrlclient.IgnoreNotFound(err) == nil {
                    log.Infof("didn't find existing deployment %s", deployment.Name)
                } else {
                    log.WithError(err).Errorf("failed to delete deployment %v", deployment.Name)
                    return err
                }
            }
        }
    }
    return nil
}

// GetOrCreateDS gets or creates a daemonset if it does not exist

@@ -409,3 +448,50 @@ func ScaleDeployment(ctx context.Context, clients config.Clients, deployment appsv1.Deployment
    }
    return nil
}

// GetPodLogs retrieves logs from a pod
func GetPodLogs(ctx context.Context, clients config.Clients, podName string, namespace string) (string, error) {
    log.Debug("Entering GetPodLogs function")
    podLogOpts := corev1.PodLogOptions{}
    req := clients.Clientset.CoreV1().Pods(namespace).GetLogs(podName, &podLogOpts)
    logs, err := req.Stream(ctx)
    if err != nil {
        log.WithError(err).Error("failed to get pod logs")
        return "", err
    }
    defer logs.Close()
    buf := new(bytes.Buffer)
    _, err = buf.ReadFrom(logs)
    if err != nil {
        log.WithError(err).Error("failed to read pod logs")
        return "", err
    }
    return buf.String(), nil
}

// IsPodRunning checks if a pod is running
func IsPodRunning(ctx context.Context, clients config.Clients, pod *corev1.Pod) (bool, error) {
    log.Debug("Entering IsPodRunning function")
    pod, err := clients.Clientset.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
    if err != nil {
        return false, err
    }

    switch pod.Status.Phase {
    case corev1.PodRunning:
        return true, nil
    case corev1.PodFailed, corev1.PodSucceeded:
        return false, conditions.ErrPodCompleted
    }
    return false, nil
}

// Int64Ptr returns a pointer to the given int64 value.
func Int64Ptr(i int64) *int64 {
    return &i
}

// BoolPtr returns a pointer to the given bool value.
func BoolPtr(b bool) *bool {
    return &b
}
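
// A single generic helper, e.g. func Ptr[T any](v T) *T { return &v }, could
// replace Int64Ptr and BoolPtr on modern Go, but the explicit variants keep
// the existing call sites unchanged.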

@@ -0,0 +1 @@
PyYAML==6.0.2

@@ -0,0 +1,248 @@
[
  {
    "config": {
      "TestKind": "thruput-latency",
      "Encap": "",
      "Dataplane": "",
      "NumPolicies": 5,
      "NumServices": 10,
      "NumPods": 7,
      "HostNetwork": false,
      "TestNamespace": "testns",
      "Iterations": 1,
      "Duration": 10,
      "DNSPerf": null,
      "Perf": {
        "Direct": true,
        "Service": true,
        "External": false,
        "ControlPort": 32000,
        "TestPort": 32001,
        "ExternalIPOrFQDN": ""
      },
      "TTFRConfig": null,
      "CalicoNodeCPULimit": "",
      "LeaveStandingConfig": true
    },
    "ClusterDetails": {
      "Cloud": "unknown",
      "Provisioner": "kubeadm",
      "NodeType": "linux",
      "NodeOS": "Debian GNU/Linux 12 (bookworm)",
      "NodeKernel": "6.8.0-71-generic",
      "NodeArch": "amd64",
      "NumNodes": 2,
      "Dataplane": "iptables",
      "IPFamily": "ipv4",
      "Encapsulation": "VXLANCrossSubnet",
      "WireguardEnabled": false,
      "Product": "calico",
      "CalicoVersion": "v3.30.2",
      "K8SVersion": "v1.32.0",
      "CRIVersion": "containerd://1.7.24",
      "CNIOption": "Calico"
    },
    "thruput-latency": {
      "Latency": {
        "pod-pod": {
          "min": 15.8,
          "max": 15.8,
          "avg": 15.8,
          "P50": 15.8,
          "P75": 15.8,
          "P90": 15.8,
          "P99": 15.8,
          "unit": "us",
          "datapoints": 1
        },
        "pod-svc-pod": {
          "min": 16.5,
          "max": 16.5,
          "avg": 16.5,
          "P50": 16.5,
          "P75": 16.5,
          "P90": 16.5,
          "P99": 16.5,
          "unit": "us",
          "datapoints": 1
        },
        "ext-svc-pod": {}
      },
      "Throughput": {
        "pod-pod": {
          "min": 21600,
          "max": 21600,
          "avg": 21600,
          "P50": 21600,
          "P75": 21600,
          "P90": 21600,
          "P99": 21600,
          "unit": "Mb/sec",
          "datapoints": 1
        },
        "pod-svc-pod": {
          "min": 20200,
          "max": 20200,
          "avg": 20200,
          "P50": 20200,
          "P75": 20200,
          "P90": 20200,
          "P99": 20200,
          "unit": "Mb/sec",
          "datapoints": 1
        },
        "ext-svc-pod": {}
      }
    }
  },
  {
    "config": {
      "TestKind": "iperf",
      "Encap": "",
      "Dataplane": "",
      "NumPolicies": 6,
      "NumServices": 11,
      "NumPods": 6,
      "HostNetwork": false,
      "TestNamespace": "testns",
      "Iterations": 1,
      "Duration": 10,
      "DNSPerf": null,
      "Perf": {
        "Direct": true,
        "Service": true,
        "External": false,
        "ControlPort": 32000,
        "TestPort": 32001,
        "ExternalIPOrFQDN": ""
      },
      "TTFRConfig": null,
      "CalicoNodeCPULimit": "",
      "LeaveStandingConfig": true
    },
    "ClusterDetails": {
      "Cloud": "unknown",
      "Provisioner": "kubeadm",
      "NodeType": "linux",
      "NodeOS": "Debian GNU/Linux 12 (bookworm)",
      "NodeKernel": "6.8.0-71-generic",
      "NodeArch": "amd64",
      "NumNodes": 2,
      "Dataplane": "iptables",
      "IPFamily": "ipv4",
      "Encapsulation": "VXLANCrossSubnet",
      "WireguardEnabled": false,
      "Product": "calico",
      "CalicoVersion": "v3.30.2",
      "K8SVersion": "v1.32.0",
      "CRIVersion": "containerd://1.7.24",
      "CNIOption": "Calico"
    },
    "iperf": {
      "Retries": {
        "pod-pod": {
          "min": 56242,
          "max": 56242,
          "avg": 56242,
          "P50": 56242,
          "P75": 56242,
          "P90": 56242,
          "P99": 56242,
          "unit": "none",
          "datapoints": 1
        },
        "pod-svc-pod": {
          "min": 59817,
          "max": 59817,
          "avg": 59817,
          "P50": 59817,
          "P75": 59817,
          "P90": 59817,
          "P99": 59817,
          "unit": "none",
          "datapoints": 1
        },
        "ext-svc-pod": {}
      },
      "Throughput": {
        "pod-pod": {
          "min": 91701.305416915,
          "max": 91701.305416915,
          "avg": 91701.305416915,
          "P50": 91701.305416915,
          "P75": 91701.305416915,
          "P90": 91701.305416915,
          "P99": 91701.305416915,
          "datapoints": 1
        },
        "pod-svc-pod": {
          "min": 89655.18182698895,
          "max": 89655.18182698895,
          "avg": 89655.18182698895,
          "P50": 89655.18182698895,
          "P75": 89655.18182698895,
          "P90": 89655.18182698895,
          "P99": 89655.18182698895,
          "unit": "Mb/sec",
          "datapoints": 1
        },
        "ext-svc-pod": {}
      }
    }
  },
  {
    "config": {
      "TestKind": "ttfr",
      "Encap": "",
      "Dataplane": "",
      "NumPolicies": 7,
      "NumServices": 12,
      "NumPods": 8,
      "HostNetwork": false,
      "TestNamespace": "testns",
      "Iterations": 1,
      "Duration": 60,
      "DNSPerf": null,
      "Perf": null,
      "TTFRConfig": {
        "TestPodsPerNode": 53,
        "Rate": 2.5
      },
      "CalicoNodeCPULimit": "",
      "LeaveStandingConfig": false
    },
    "ClusterDetails": {
      "Cloud": "unknown",
      "Provisioner": "kubeadm",
      "NodeType": "linux",
      "NodeOS": "Debian GNU/Linux 12 (bookworm)",
      "NodeKernel": "6.8.0-71-generic",
      "NodeArch": "amd64",
      "NumNodes": 2,
      "Dataplane": "iptables",
      "IPFamily": "ipv4",
      "Encapsulation": "VXLANCrossSubnet",
      "WireguardEnabled": false,
      "Product": "calico",
      "CalicoVersion": "v3.30.2",
      "K8SVersion": "v1.32.0",
      "CRIVersion": "containerd://1.7.24",
      "CNIOption": "Calico"
    },
    "ttfr": [
      {
        "ttfrSummary": {
          "min": 0.00059141,
          "max": 0.002781802,
          "avg": 0.0007567828466666665,
          "P50": 0.000716177,
          "P75": 0.000787486,
          "P90": 0.000911463,
          "P99": 0.001219205,
          "unit": "seconds",
          "datapoints": 150
        }
      }
    ]
  }
]

@@ -0,0 +1,135 @@
package main

import (
    "encoding/json"
    "fmt"
    "os"
    "reflect"

    "gopkg.in/yaml.v2"
)

type ResultsFile []map[string]interface{}
type TestConfigs []map[string]interface{}
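
// The comparison below is structural rather than value-based: two results
// files match when every key is present in both with the same dynamic type,
// so changes in the measured numbers do not fail the e2e check.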

func main() {
    refPath := "results.json.reference"
    genPath := "results.json"
    testsPath := "e2e-testconfig.yaml"

    ref, err := readResults(refPath)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Reference file error: %v\n", err)
        os.Exit(1)
    }

    gen, err := readResults(genPath)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Generated file error: %v\n", err)
        os.Exit(1)
    }

    tests, err := readTests(testsPath)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Tests file error: %v\n", err)
        os.Exit(1)
    }
    numTests := len(tests)
    fmt.Println("Number of tests in e2e-testconfig.yaml:", numTests)
    if len(gen) != numTests {
        fmt.Fprintf(os.Stderr, "results.json should contain %d test results, found %d\n", numTests, len(gen))
        os.Exit(1)
    }

    if !similarStructure(ref, gen) {
        fmt.Fprintf(os.Stderr, "results.json structure does not match reference\n")
        os.Exit(1)
    }

    fmt.Println("results.json is valid and matches reference structure.")
}

func readResults(path string) (ResultsFile, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return nil, err
    }
    var results ResultsFile
    if err := json.Unmarshal(data, &results); err != nil {
        return nil, err
    }
    return results, nil
}

func readTests(path string) (TestConfigs, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return nil, err
    }
    var tests TestConfigs
    if err := yaml.Unmarshal(data, &tests); err != nil {
        return nil, err
    }
    return tests, nil
}

func similarStructure(a, b ResultsFile) bool {
    if len(a) != len(b) {
        return false
    }
    for i := range a {
        if !compareTypes(a[i], b[i]) {
            return false
        }
    }
    return true
}

func compareTypes(a, b map[string]interface{}) bool {
    if len(a) != len(b) {
        return false
    }
    for k, va := range a {
        vb, ok := b[k]
        if !ok {
            return false
        }
        if !sameType(va, vb) {
            return false
        }
    }
    return true
}

func sameType(a, b interface{}) bool {
    ta := reflect.TypeOf(a)
    tb := reflect.TypeOf(b)
    if ta == nil || tb == nil {
        return ta == tb
    }
    if ta.Kind() != tb.Kind() {
        return false
    }
    switch ta.Kind() {
    case reflect.Map:
        ma, ok1 := a.(map[string]interface{})
        mb, ok2 := b.(map[string]interface{})
        if !ok1 || !ok2 {
            return false
        }
        return compareTypes(ma, mb)
    case reflect.Slice:
        sa, ok1 := a.([]interface{})
        sb, ok2 := b.([]interface{})
        if !ok1 || !ok2 {
            return false
        }
        // Compare first element type if slices are non-empty
        if len(sa) > 0 && len(sb) > 0 {
            return sameType(sa[0], sb[0])
        }
        return len(sa) == len(sb)
    default:
        return ta.Kind() == tb.Kind()
    }
}