Compare commits

...

47 Commits

Author SHA1 Message Date
ChrisLiu 293619796d
bugfix(Kubernetes-HostPort): allow pod update when node not found (#260)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-07-11 20:37:35 +08:00
ChrisLiu cac4ba793e
enhance: network trigger time adapts to different time zones (#259)
* enhance: network trigger time adapts to different time zones

Signed-off-by: ChrisLiu <chrisliu1995@163.com>

* update config of kruise-game manager for using cert-manager

Signed-off-by: ChrisLiu <chrisliu1995@163.com>

---------

Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-07-10 16:39:53 +08:00
Kagaya 52d9a14c13
feat: add enable-cert-generation option (#245)
* add enable-cert-generation option

Signed-off-by: Kagaya <kagaya85@outlook.com>

* update webhook manifests config

Signed-off-by: Kagaya <kagaya85@outlook.com>

* e2e: install cert manager

Signed-off-by: Kagaya <kagaya85@outlook.com>

---------

Signed-off-by: Kagaya <kagaya85@outlook.com>
2025-07-08 21:33:54 +08:00
ChrisLiu f6d679cc75
bugfix: consider preDelete pods when scaling (#257)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-07-08 14:09:36 +08:00
Xuetao Song 90c0b68350
feat: support EnableMultiIngress for VKE (#251) 2025-07-07 17:22:38 +08:00
ChrisLiu b82d7e34f7
feat: add PreDeleteReplicas for GameServerSet status (#254)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-07-03 22:13:17 +08:00
ChrisLiu 1a1c256460
fix the meaning of CURRENT printcolumn when using kubectl (#253)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-07-03 21:51:54 +08:00
ChrisLiu 19d8ce0b2c
bugfix: gs state should be changed from PreDelete to Deleting (#252)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-07-03 21:19:23 +08:00
ChrisLiu 5095740248
AlibabaCloud-AutoNLBs support multi intranet type eip (#248)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-06-30 10:55:28 +08:00
ChrisLiu 7dfe07097b
feat: support new plugin named AlibabaCloud-AutoNLBs (#246)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-06-26 17:27:03 +08:00
ChrisLiu 0ff70733c6
feat: support user-defined number of controller workers (#247)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-06-23 19:17:37 +08:00
roc fbcb3953c0
Add PersistentVolumeClaimRetentionPolicy support to GameServerSet (#243)
Signed-off-by: roc <roc@imroc.cc>
2025-06-20 18:09:20 +08:00
ChrisLiu 94a15fdb38
feat: add annotation of state-last-changed-time (#238)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-06-16 23:48:09 +08:00
roc a6ed1d95c4
feat(Kubernetes-HostPort): support TCPUDP protocol (#244)
Signed-off-by: roc <roc@imroc.cc>
2025-06-16 23:47:42 +08:00
Xuetao Song 9a04f87f5e
feat: volcengine-clb plugin support EnableClbScatter 2025-06-16 14:22:35 +08:00
Xuetao Song f175e0d73c
fix duplicated port for Volcengine-CLB plugin (#240) 2025-06-13 14:04:32 +08:00
roc 1414654f46
Upgrade the TencentCloud-CLB plugin (#239)
* upgrade tencentcloud clb plugin

* deprecate DedicatedCLBListener CRD
* use CLBPortPool's pod annotation

Signed-off-by: roc <roc@imroc.cc>

* add comments

Signed-off-by: roc <roc@imroc.cc>

---------

Signed-off-by: roc <roc@imroc.cc>
2025-06-10 21:34:58 +08:00
lizhipeng629 7136738627
fix old svc remaining after pod recreation when using Volcengine-CLB (#233)
feat(*): check pod uid in svc

fix: add pod create time in svc

Co-authored-by: 李志朋 <lizhipeng.629@bytedance.com>
2025-06-06 17:00:09 +08:00
ChrisLiu 6dbab6be15
fix go-lint err (#237)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-06-06 13:32:19 +08:00
ChrisLiu 1ca95a5c36
cancel the limit of Ali NLB port range (#235)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-06-06 12:08:42 +08:00
ChrisLiu 40c7bba35e
enhance: AlibabaCloud-SLB-SharedPort plugin support managed services (#224)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-06-06 11:55:53 +08:00
ChrisLiu 51a82bd107
enhance: Kubernetes-HostPort support container port same as host (#230)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-06-06 11:49:42 +08:00
ChrisLiu a64b21eab5
enhance: activity of externalscaler relates to minAvailable (#228)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-05-19 18:08:11 +08:00
ChrisLiu 4e6ae2e2d0
fix the external scaler error when minAvailable is 0 (#227)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-05-19 17:37:40 +08:00
ChrisLiu f2044b8f1a
fix: update ppmHash when ServiceQualities changed (#226)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-05-19 14:04:51 +08:00
ChrisLiu 9c4ce841c3
fix: support auto-scaling when replicas is 0 (#225)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-05-19 11:51:35 +08:00
Kagaya 5180743458
feat: support minAvailable percentage type (#222)
Signed-off-by: Kagaya <kagaya85@outlook.com>
2025-05-12 20:04:27 +08:00
陈欣宇 7c51b24e6e
feat(metrics): improve observability for GameServersOpsStateCount metrics (#221)
* feat(metrics): improve observability

add gssName/namespace labels to the okg_gameservers_opsState_count metric to improve observability

* fix: remove gssName Compare

---------

Co-authored-by: 陈欣宇 <chenxinyu@YJ-IT-02836.local>
2025-05-06 20:49:41 +08:00
Xuetao Song fc88742857
add doc of Volcengine-EIP (#219) 2025-04-30 17:26:11 +08:00
Xuetao Song d04f8d0a7a
feat(*): add eip provider of VKE (#218) 2025-04-27 15:33:36 +08:00
ChrisLiu 5a272eaec3
enhance: add network ready condition for AlibabaCloud-Multi-NLBs plugin (#214)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-04-24 15:32:54 +08:00
ChrisLiu 6d5f041afc
enhance: support svc external traffic policy for AlibabaCloud-Multi-NLBs (#216)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-04-24 15:32:09 +08:00
berg 3da984ff96
ServiceQualities support serverless pod (#212) 2025-04-22 20:21:32 +08:00
ChrisLiu 897e706a85
update ci workflow to ubuntu-24.04 (#215)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-04-21 16:22:27 +08:00
Kagaya 624d17ff11
feat: support range type for ReserveIDs (#209) 2025-04-21 15:31:16 +08:00
ChrisLiu d038737580
feat: support multi groups for nlbs (#213)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-04-14 16:40:38 +08:00
Kagaya f2d02a6ab2
deps: update to k8s 0.30.10 (#210) 2025-04-14 15:04:10 +08:00
ChrisLiu 0bfc500fec
enhance: create service of ali-multi-nlbs in parallel (#207)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-03-24 10:51:48 +08:00
ChrisLiu 6133bab818
update workflow ci go cache to v4 (#206)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-03-12 20:33:59 +08:00
LHB6540 a2a0864f27
Add index-offset-scheduler (#205)
Co-authored-by: 李海彬 <lihaibin@goatgames.com>
2025-03-12 18:22:50 +08:00
ChrisLiu 0b3575947b
Increase the upper limit of ali-nlb ports (#204)
Signed-off-by: ChrisLiu <chrisliu1995@163.com>
2025-02-27 17:44:08 +08:00
Gao PeiLiang aaa63740a4
Add hwcloud provider and elb plugin (#201)
* add hwcloud ELB Network Plugin

* add hwcloud cloud provider register

* fix register error

* fix error

* add log

* fix hwcloud provider register error

* fix health check error

* only support using an existing ELB

* add docs

* add hwcloud elb config

* fix docs
2025-02-12 17:46:54 +08:00
Durgin 2ea11c1cb3
feat: add annotation of opsState-last-changed-time (#200)
- add annotation `game.kruise.io/opsState-last-changed-time`

#199
2025-02-08 18:01:06 +08:00
Gao PeiLiang 8079c29c22
alibabacloud slb supports mapping the same TCP and UDP port, e.g. 8000/TCPUDP (#197)
* create svc using port + protocol as the name, to fix using the same port with different protocols

* alibabacloud slb support TCP/UDP

* add log info

* fix alibabacloud slb init same port svc

* add doc

* clear log printing to avoid too much info
2025-02-06 16:58:17 +08:00
Gao PeiLiang f0c82f1b1f
add support svc external traffic policy for alibabacloud slb (#194)
* add test log

* add support svc external traffic policy for alibabacloud slb

* fix error

* add e2e test timeout

* add aliyun slb param ExternalTrafficPolicyType doc
2025-01-16 10:44:24 +08:00
roc 8c229c1191
add rbac role for tencentcloud provider (#193)
Signed-off-by: roc <roc@imroc.cc>
2025-01-08 19:07:39 +08:00
roc 65d230658e
add tencentcloud in config.yaml (#192)
Signed-off-by: roc <roc@imroc.cc>
2025-01-08 15:51:18 +08:00
89 changed files with 7955 additions and 2741 deletions

View File

@ -10,7 +10,7 @@ on:
env:
# Common versions
GO_VERSION: '1.21'
GO_VERSION: '1.22'
GOLANGCI_VERSION: 'v1.58'
DOCKER_BUILDX_VERSION: 'v0.4.2'
@ -23,7 +23,7 @@ env:
jobs:
golangci-lint:
runs-on: ubuntu-20.04
runs-on: ubuntu-24.04
steps:
- name: Checkout
uses: actions/checkout@v3
@ -34,7 +34,7 @@ jobs:
with:
go-version: ${{ env.GO_VERSION }}
- name: Cache Go Dependencies
uses: actions/cache@v2
uses: actions/cache@v4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@ -50,7 +50,7 @@ jobs:
skip-pkg-cache: true
unit-tests:
runs-on: ubuntu-20.04
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v3
with:
@ -62,7 +62,7 @@ jobs:
with:
go-version: ${{ env.GO_VERSION }}
- name: Cache Go Dependencies
uses: actions/cache@v2
uses: actions/cache@v4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}

View File

@ -10,16 +10,16 @@ on:
env:
# Common versions
GO_VERSION: '1.21'
KIND_ACTION_VERSION: 'v1.3.0'
KIND_VERSION: 'v0.14.0'
GO_VERSION: '1.22'
KIND_VERSION: 'v0.18.0'
KIND_IMAGE: 'kindest/node:v1.26.4'
KIND_CLUSTER_NAME: 'ci-testing'
CERT_MANAGER_VERSION: 'v1.18.2'
jobs:
game-kruise:
runs-on: ubuntu-20.04
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v3
with:
@ -40,6 +40,10 @@ jobs:
export IMAGE="openkruise/kruise-game-manager:e2e-${GITHUB_RUN_ID}"
docker build --pull --no-cache . -t $IMAGE
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
- name: Install Cert-Manager
run: |
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/${{ env.CERT_MANAGER_VERSION }}/cert-manager.yaml
kubectl -n cert-manager rollout status deploy/cert-manager-webhook --timeout=180s
- name: Install Kruise
run: |
set -ex

117
.github/workflows/e2e-1.30.yaml vendored Normal file
View File

@ -0,0 +1,117 @@
name: E2E-1.30
on:
push:
branches:
- master
- release-*
pull_request: {}
workflow_dispatch: {}
env:
# Common versions
GO_VERSION: '1.22'
KIND_VERSION: 'v0.22.0'
KIND_IMAGE: 'kindest/node:v1.30.8'
KIND_CLUSTER_NAME: 'ci-testing'
CERT_MANAGER_VERSION: 'v1.18.2'
jobs:
game-kruise:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v3
with:
submodules: true
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
- name: Setup Kind Cluster
uses: helm/kind-action@v1.12.0
with:
node_image: ${{ env.KIND_IMAGE }}
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
config: ./test/kind-conf.yaml
version: ${{ env.KIND_VERSION }}
- name: Build image
run: |
export IMAGE="openkruise/kruise-game-manager:e2e-${GITHUB_RUN_ID}"
docker build --pull --no-cache . -t $IMAGE
kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; }
- name: Install Cert-Manager
run: |
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/${{ env.CERT_MANAGER_VERSION }}/cert-manager.yaml
kubectl -n cert-manager rollout status deploy/cert-manager-webhook --timeout=180s
- name: Install Kruise
run: |
set -ex
kubectl cluster-info
make helm
helm repo add openkruise https://openkruise.github.io/charts/
helm repo update
helm install kruise openkruise/kruise --version 1.7.3
for ((i=1;i<10;i++));
do
set +e
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
set -e
if [ "$PODS" -eq "2" ]; then
break
fi
sleep 3
done
set +e
PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l)
set -e
if [ "$PODS" -eq "2" ]; then
echo "Wait for kruise-manager ready successfully"
else
echo "Timeout to wait for kruise-manager ready"
exit 1
fi
- name: Install Kruise Game
run: |
set -ex
kubectl cluster-info
IMG=openkruise/kruise-game-manager:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh
for ((i=1;i<10;i++));
do
set +e
PODS=$(kubectl get pod -n kruise-game-system | grep '1/1' | wc -l)
set -e
if [ "$PODS" -eq "1" ]; then
break
fi
sleep 3
done
set +e
PODS=$(kubectl get pod -n kruise-game-system | grep '1/1' | wc -l)
kubectl get node -o yaml
kubectl get all -n kruise-game-system -o yaml
set -e
if [ "$PODS" -eq "1" ]; then
echo "Wait for kruise-game ready successfully"
else
echo "Timeout to wait for kruise-game ready"
exit 1
fi
- name: Run E2E Tests
run: |
export KUBECONFIG=/home/runner/.kube/config
make ginkgo
set +e
./bin/ginkgo -timeout 60m -v test/e2e
retVal=$?
# kubectl get pod -n kruise-game-system --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-game-system
restartCount=$(kubectl get pod -n kruise-game-system --no-headers | awk '{print $4}')
if [ "${restartCount}" -eq "0" ];then
echo "Kruise-game has not restarted"
else
kubectl get pod -n kruise-game-system --no-headers
echo "Kruise-game has restarted, abort!!!"
kubectl get pod -n kruise-game-system --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-game-system
exit 1
fi
exit $retVal

1
.gitignore vendored
View File

@ -23,3 +23,4 @@ testbin/*
*.swp
*.swo
*~
.vscode

View File

@ -1,5 +1,5 @@
# Build the manager binary
FROM golang:1.21 as builder
FROM golang:1.22.12 AS builder
WORKDIR /workspace
# Copy the Go Modules manifests

View File

@ -2,7 +2,7 @@
# Image URL to use all building/pushing images targets
IMG ?= kruise-game-manager:test
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.24.1
ENVTEST_K8S_VERSION = 1.30.0
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@ -114,7 +114,7 @@ ENVTEST ?= $(LOCALBIN)/setup-envtest
## Tool Versions
KUSTOMIZE_VERSION ?= v4.5.5
CONTROLLER_TOOLS_VERSION ?= v0.9.0
CONTROLLER_TOOLS_VERSION ?= v0.16.5
KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
.PHONY: kustomize

View File

@ -33,6 +33,8 @@ const (
GameServerNetworkDisabled = "game.kruise.io/network-disabled"
GameServerNetworkStatus = "game.kruise.io/network-status"
GameServerNetworkTriggerTime = "game.kruise.io/network-trigger-time"
GameServerOpsStateLastChangedTime = "game.kruise.io/opsState-last-changed-time"
GameServerStateLastChangedTime = "game.kruise.io/state-last-changed-time"
)
// GameServerSpec defines the desired state of GameServer
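
The two new annotation keys are plain metadata on the GameServer object, so consumers can read them directly. A minimal, illustrative sketch follows; the GameServer literal and the timestamp value are hypothetical, and this diff does not define the timestamp format.

package main

import (
	"fmt"

	gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical GameServer carrying the two new annotations; the timestamp
	// format shown here is an assumption, not something this diff specifies.
	gs := &gamekruiseiov1alpha1.GameServer{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{
				gamekruiseiov1alpha1.GameServerOpsStateLastChangedTime: "2025-06-16 23:48:09",
				gamekruiseiov1alpha1.GameServerStateLastChangedTime:    "2025-06-16 23:48:09",
			},
		},
	}
	fmt.Println(gs.Annotations[gamekruiseiov1alpha1.GameServerStateLastChangedTime])
}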

View File

@ -52,12 +52,17 @@ type GameServerSetSpec struct {
// Important: Run "make" to regenerate code after modifying this file
GameServerTemplate GameServerTemplate `json:"gameServerTemplate,omitempty"`
ServiceName string `json:"serviceName,omitempty"`
ReserveGameServerIds []int `json:"reserveGameServerIds,omitempty"`
ReserveGameServerIds []intstr.IntOrString `json:"reserveGameServerIds,omitempty"`
ServiceQualities []ServiceQuality `json:"serviceQualities,omitempty"`
UpdateStrategy UpdateStrategy `json:"updateStrategy,omitempty"`
ScaleStrategy ScaleStrategy `json:"scaleStrategy,omitempty"`
Network *Network `json:"network,omitempty"`
Lifecycle *appspub.Lifecycle `json:"lifecycle,omitempty"`
// PersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from
// the StatefulSet VolumeClaimTemplates. This requires the
// StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.
// +optional
PersistentVolumeClaimRetentionPolicy *kruiseV1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy `json:"persistentVolumeClaimRetentionPolicy,omitempty"`
}
type GameServerTemplate struct {
@ -186,6 +191,7 @@ type GameServerSetStatus struct {
UpdatedReadyReplicas int32 `json:"updatedReadyReplicas,omitempty"`
MaintainingReplicas *int32 `json:"maintainingReplicas,omitempty"`
WaitToBeDeletedReplicas *int32 `json:"waitToBeDeletedReplicas,omitempty"`
PreDeleteReplicas *int32 `json:"preDeleteReplicas,omitempty"`
// LabelSelector is label selectors for query over pods that should match the replica count used by HPA.
LabelSelector string `json:"labelSelector,omitempty"`
}
@ -193,11 +199,12 @@ type GameServerSetStatus struct {
//+genclient
//+kubebuilder:object:root=true
//+kubebuilder:printcolumn:name="DESIRED",type="integer",JSONPath=".spec.replicas",description="The desired number of GameServers."
//+kubebuilder:printcolumn:name="CURRENT",type="integer",JSONPath=".status.replicas",description="The number of currently all GameServers."
//+kubebuilder:printcolumn:name="CURRENT",type="integer",JSONPath=".status.currentReplicas",description="The number of currently all GameServers."
//+kubebuilder:printcolumn:name="UPDATED",type="integer",JSONPath=".status.updatedReplicas",description="The number of GameServers updated."
//+kubebuilder:printcolumn:name="READY",type="integer",JSONPath=".status.readyReplicas",description="The number of GameServers ready."
//+kubebuilder:printcolumn:name="Maintaining",type="integer",JSONPath=".status.maintainingReplicas",description="The number of GameServers Maintaining."
//+kubebuilder:printcolumn:name="WaitToBeDeleted",type="integer",JSONPath=".status.waitToBeDeletedReplicas",description="The number of GameServers WaitToBeDeleted."
//+kubebuilder:printcolumn:name="PreDelete",type="integer",JSONPath=".status.preDeleteReplicas",description="The number of GameServers PreDelete."
//+kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp",description="The age of GameServerSet."
//+kubebuilder:subresource:status
//+kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector
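
Taken together, the spec and status hunks above add a range-capable reserveGameServerIds field, an optional PVC retention policy, and a PreDeleteReplicas counter surfaced as the new PreDelete print column. A minimal sketch of a spec using the new fields, assuming the range form of a reserve ID is a "start-end" string and that the retention-policy field names mirror the StatefulSet API:

package main

import (
	"fmt"

	kruiseV1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
	gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	spec := gamekruiseiov1alpha1.GameServerSetSpec{
		// ReserveGameServerIds is now []intstr.IntOrString, so single IDs and
		// (assumed) "start-end" range strings can be mixed.
		ReserveGameServerIds: []intstr.IntOrString{
			intstr.FromInt(1),
			intstr.FromString("3-5"),
		},
		// Field names and values are assumed to mirror the StatefulSet PVC
		// retention policy; the StatefulSetAutoDeletePVC feature gate must be on.
		PersistentVolumeClaimRetentionPolicy: &kruiseV1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
			WhenDeleted: "Delete",
			WhenScaled:  "Retain",
		},
	}
	fmt.Printf("%+v\n", spec)
}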

View File

@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kruise Authors.
@ -23,6 +22,7 @@ package v1alpha1
import (
"github.com/openkruise/kruise-api/apps/pub"
"github.com/openkruise/kruise-api/apps/v1beta1"
"k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
@ -190,7 +190,7 @@ func (in *GameServerSetSpec) DeepCopyInto(out *GameServerSetSpec) {
in.GameServerTemplate.DeepCopyInto(&out.GameServerTemplate)
if in.ReserveGameServerIds != nil {
in, out := &in.ReserveGameServerIds, &out.ReserveGameServerIds
*out = make([]int, len(*in))
*out = make([]intstr.IntOrString, len(*in))
copy(*out, *in)
}
if in.ServiceQualities != nil {
@ -212,6 +212,11 @@ func (in *GameServerSetSpec) DeepCopyInto(out *GameServerSetSpec) {
*out = new(pub.Lifecycle)
(*in).DeepCopyInto(*out)
}
if in.PersistentVolumeClaimRetentionPolicy != nil {
in, out := &in.PersistentVolumeClaimRetentionPolicy, &out.PersistentVolumeClaimRetentionPolicy
*out = new(v1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GameServerSetSpec.
@ -237,6 +242,11 @@ func (in *GameServerSetStatus) DeepCopyInto(out *GameServerSetStatus) {
*out = new(int32)
**out = **in
}
if in.PreDeleteReplicas != nil {
in, out := &in.PreDeleteReplicas, &out.PreDeleteReplicas
*out = new(int32)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GameServerSetStatus.

View File

@ -1,5 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kruise Authors.

View File

@ -0,0 +1,544 @@
/*
Copyright 2025 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package alibabacloud
import (
"context"
"fmt"
gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/cloudprovider"
cperrors "github.com/openkruise/kruise-game/cloudprovider/errors"
"github.com/openkruise/kruise-game/cloudprovider/utils"
"github.com/openkruise/kruise-game/pkg/util"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
log "k8s.io/klog/v2"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"strconv"
"strings"
"sync"
)
const (
AutoNLBsNetwork = "AlibabaCloud-AutoNLBs"
AliasAutoNLBs = "Auto-NLBs-Network"
ReserveNlbNumConfigName = "ReserveNlbNum"
EipTypesConfigName = "EipTypes"
ZoneMapsConfigName = "ZoneMaps"
MinPortConfigName = "MinPort"
MaxPortConfigName = "MaxPort"
BlockPortsConfigName = "BlockPorts"
NLBZoneMapsServiceAnnotationKey = "service.beta.kubernetes.io/alibaba-cloud-loadbalancer-zone-maps"
NLBAddressTypeAnnotationKey = "service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type"
IntranetEIPType = "intranet"
DefaultEIPType = "default"
)
type AutoNLBsPlugin struct {
gssMaxPodIndex map[string]int
mutex sync.RWMutex
}
type autoNLBsConfig struct {
minPort int32
maxPort int32
blockPorts []int32
zoneMaps string
reserveNlbNum int
targetPorts []int
protocols []corev1.Protocol
eipTypes []string
externalTrafficPolicy corev1.ServiceExternalTrafficPolicyType
*nlbHealthConfig
}
func (a *AutoNLBsPlugin) Name() string {
return AutoNLBsNetwork
}
func (a *AutoNLBsPlugin) Alias() string {
return AliasAutoNLBs
}
func (a *AutoNLBsPlugin) Init(c client.Client, options cloudprovider.CloudProviderOptions, ctx context.Context) error {
gssList := &gamekruiseiov1alpha1.GameServerSetList{}
err := c.List(ctx, gssList, &client.ListOptions{})
if err != nil {
log.Errorf("cannot list gameserverset in cluster because %s", err.Error())
return err
}
for _, gss := range gssList.Items {
if gss.Spec.Network != nil && gss.Spec.Network.NetworkType == AutoNLBsNetwork {
a.gssMaxPodIndex[gss.GetNamespace()+"/"+gss.GetName()] = int(*gss.Spec.Replicas)
nc, err := parseAutoNLBsConfig(gss.Spec.Network.NetworkConf)
if err != nil {
log.Errorf("pasrse config wronge because %s", err.Error())
return err
}
err = a.ensureServices(ctx, c, gss.GetNamespace(), gss.GetName(), nc)
if err != nil {
log.Errorf("ensure services error because %s", err.Error())
return err
}
}
}
return nil
}
func (a *AutoNLBsPlugin) OnPodAdded(c client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
networkManager := utils.NewNetworkManager(pod, c)
networkConfig := networkManager.GetNetworkConfig()
conf, err := parseAutoNLBsConfig(networkConfig)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.ParameterError, err.Error())
}
a.ensureMaxPodIndex(pod)
gssName := pod.GetLabels()[gamekruiseiov1alpha1.GameServerOwnerGssKey]
if err := a.ensureServices(ctx, c, pod.GetNamespace(), gssName, conf); err != nil {
return pod, cperrors.NewPluginError(cperrors.ApiCallError, err.Error())
}
containerPorts := make([]corev1.ContainerPort, 0)
podIndex := util.GetIndexFromGsName(pod.GetName())
for i, port := range conf.targetPorts {
if conf.protocols[i] == ProtocolTCPUDP {
containerPortTCP := corev1.ContainerPort{
ContainerPort: int32(port),
Protocol: corev1.ProtocolTCP,
Name: "tcp-" + strconv.Itoa(podIndex) + "-" + strconv.Itoa(port),
}
containerPortUDP := corev1.ContainerPort{
ContainerPort: int32(port),
Protocol: corev1.ProtocolUDP,
Name: "udp-" + strconv.Itoa(podIndex) + "-" + strconv.Itoa(port),
}
containerPorts = append(containerPorts, containerPortTCP, containerPortUDP)
} else {
containerPort := corev1.ContainerPort{
ContainerPort: int32(port),
Protocol: conf.protocols[i],
Name: strings.ToLower(string(conf.protocols[i])) + "-" + strconv.Itoa(podIndex) + "-" + strconv.Itoa(port),
}
containerPorts = append(containerPorts, containerPort)
}
}
pod.Spec.Containers[0].Ports = containerPorts
lenRange := int(conf.maxPort) - int(conf.minPort) - len(conf.blockPorts) + 1
svcIndex := podIndex / (lenRange / len(conf.targetPorts))
for _, eipType := range conf.eipTypes {
svcName := gssName + "-" + eipType + "-" + strconv.Itoa(svcIndex)
pod.Spec.ReadinessGates = append(pod.Spec.ReadinessGates, corev1.PodReadinessGate{
ConditionType: corev1.PodConditionType(PrefixReadyReadinessGate + svcName),
})
}
return pod, nil
}
func (a *AutoNLBsPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
networkManager := utils.NewNetworkManager(pod, c)
networkStatus, _ := networkManager.GetNetworkStatus()
networkConfig := networkManager.GetNetworkConfig()
conf, err := parseAutoNLBsConfig(networkConfig)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.ParameterError, err.Error())
}
if networkStatus == nil {
pod, err := networkManager.UpdateNetworkStatus(gamekruiseiov1alpha1.NetworkStatus{
CurrentNetworkState: gamekruiseiov1alpha1.NetworkNotReady,
}, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
_, readyCondition := util.GetPodConditionFromList(pod.Status.Conditions, corev1.PodReady)
if readyCondition == nil || readyCondition.Status == corev1.ConditionFalse {
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
var internalPorts []gamekruiseiov1alpha1.NetworkPort
var externalPorts []gamekruiseiov1alpha1.NetworkPort
endPoints := ""
podIndex := util.GetIndexFromGsName(pod.GetName())
lenRange := int(conf.maxPort) - int(conf.minPort) - len(conf.blockPorts) + 1
svcIndex := podIndex / (lenRange / len(conf.targetPorts))
for i, eipType := range conf.eipTypes {
svcName := pod.GetLabels()[gamekruiseiov1alpha1.GameServerOwnerGssKey] + "-" + eipType + "-" + strconv.Itoa(svcIndex)
svc := &corev1.Service{}
err := c.Get(ctx, types.NamespacedName{
Name: svcName,
Namespace: pod.GetNamespace(),
}, svc)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.ApiCallError, err.Error())
}
if svc.Status.LoadBalancer.Ingress == nil || len(svc.Status.LoadBalancer.Ingress) == 0 {
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
endPoints = endPoints + svc.Status.LoadBalancer.Ingress[0].Hostname + "/" + eipType
if i == len(conf.eipTypes)-1 {
for i, port := range conf.targetPorts {
if conf.protocols[i] == ProtocolTCPUDP {
portNameTCP := "tcp-" + strconv.Itoa(podIndex) + strconv.Itoa(port)
portNameUDP := "udp-" + strconv.Itoa(podIndex) + strconv.Itoa(port)
iPort := intstr.FromInt(port)
internalPorts = append(internalPorts, gamekruiseiov1alpha1.NetworkPort{
Name: portNameTCP,
Protocol: corev1.ProtocolTCP,
Port: &iPort,
}, gamekruiseiov1alpha1.NetworkPort{
Name: portNameUDP,
Protocol: corev1.ProtocolUDP,
Port: &iPort,
})
for _, svcPort := range svc.Spec.Ports {
if svcPort.Name == portNameTCP || svcPort.Name == portNameUDP {
ePort := intstr.FromInt32(svcPort.Port)
externalPorts = append(externalPorts, gamekruiseiov1alpha1.NetworkPort{
Name: portNameTCP,
Protocol: corev1.ProtocolTCP,
Port: &ePort,
}, gamekruiseiov1alpha1.NetworkPort{
Name: portNameUDP,
Protocol: corev1.ProtocolUDP,
Port: &ePort,
})
break
}
}
} else {
portName := strings.ToLower(string(conf.protocols[i])) + "-" + strconv.Itoa(podIndex) + "-" + strconv.Itoa(port)
iPort := intstr.FromInt(port)
internalPorts = append(internalPorts, gamekruiseiov1alpha1.NetworkPort{
Name: portName,
Protocol: conf.protocols[i],
Port: &iPort,
})
for _, svcPort := range svc.Spec.Ports {
if svcPort.Name == portName {
ePort := intstr.FromInt32(svcPort.Port)
externalPorts = append(externalPorts, gamekruiseiov1alpha1.NetworkPort{
Name: portName,
Protocol: conf.protocols[i],
Port: &ePort,
})
break
}
}
}
}
} else {
endPoints = endPoints + ","
}
}
networkStatus = &gamekruiseiov1alpha1.NetworkStatus{
InternalAddresses: []gamekruiseiov1alpha1.NetworkAddress{
{
IP: pod.Status.PodIP,
Ports: internalPorts,
},
},
ExternalAddresses: []gamekruiseiov1alpha1.NetworkAddress{
{
EndPoint: endPoints,
Ports: externalPorts,
},
},
CurrentNetworkState: gamekruiseiov1alpha1.NetworkReady,
}
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
func (a *AutoNLBsPlugin) OnPodDeleted(client client.Client, pod *corev1.Pod, ctx context.Context) cperrors.PluginError {
return nil
}
func init() {
autoNLBsPlugin := AutoNLBsPlugin{
mutex: sync.RWMutex{},
gssMaxPodIndex: make(map[string]int),
}
alibabaCloudProvider.registerPlugin(&autoNLBsPlugin)
}
func (a *AutoNLBsPlugin) ensureMaxPodIndex(pod *corev1.Pod) {
a.mutex.Lock()
defer a.mutex.Unlock()
podIndex := util.GetIndexFromGsName(pod.GetName())
gssNsName := pod.GetNamespace() + "/" + pod.GetLabels()[gamekruiseiov1alpha1.GameServerOwnerGssKey]
if podIndex > a.gssMaxPodIndex[gssNsName] {
a.gssMaxPodIndex[gssNsName] = podIndex
}
}
func (a *AutoNLBsPlugin) checkSvcNumToCreate(namespace, gssName string, config *autoNLBsConfig) int {
a.mutex.RLock()
defer a.mutex.RUnlock()
lenRange := int(config.maxPort) - int(config.minPort) - len(config.blockPorts) + 1
expectSvcNum := a.gssMaxPodIndex[namespace+"/"+gssName]/(lenRange/len(config.targetPorts)) + config.reserveNlbNum + 1
return expectSvcNum
}
func (a *AutoNLBsPlugin) ensureServices(ctx context.Context, client client.Client, namespace, gssName string, config *autoNLBsConfig) error {
expectSvcNum := a.checkSvcNumToCreate(namespace, gssName, config)
for _, eipType := range config.eipTypes {
for j := 0; j < expectSvcNum; j++ {
// get svc
svcName := gssName + "-" + eipType + "-" + strconv.Itoa(j)
svc := &corev1.Service{}
err := client.Get(ctx, types.NamespacedName{
Name: svcName,
Namespace: namespace,
}, svc)
if err != nil {
if errors.IsNotFound(err) {
// create svc
toAddSvc := a.consSvc(namespace, gssName, eipType, j, config)
if err := setSvcOwner(client, ctx, toAddSvc, namespace, gssName); err != nil {
return err
} else {
if err := client.Create(ctx, toAddSvc); err != nil {
return err
}
}
} else {
return err
}
}
}
}
return nil
}
func (a *AutoNLBsPlugin) consSvcPorts(svcIndex int, config *autoNLBsConfig) []corev1.ServicePort {
lenRange := int(config.maxPort) - int(config.minPort) - len(config.blockPorts) + 1
ports := make([]corev1.ServicePort, 0)
toAllocatedPort := config.minPort
portNumPerPod := lenRange / len(config.targetPorts)
for podIndex := svcIndex * portNumPerPod; podIndex < (svcIndex+1)*portNumPerPod; podIndex++ {
for i, protocol := range config.protocols {
if protocol == ProtocolTCPUDP {
svcPortTCP := corev1.ServicePort{
Name: "tcp-" + strconv.Itoa(podIndex) + "-" + strconv.Itoa(config.targetPorts[i]),
TargetPort: intstr.FromString("tcp-" + strconv.Itoa(podIndex) + "-" + strconv.Itoa(config.targetPorts[i])),
Port: toAllocatedPort,
Protocol: corev1.ProtocolTCP,
}
svcPortUDP := corev1.ServicePort{
Name: "udp-" + strconv.Itoa(podIndex) + "-" + strconv.Itoa(config.targetPorts[i]),
TargetPort: intstr.FromString("udp-" + strconv.Itoa(podIndex) + "-" + strconv.Itoa(config.targetPorts[i])),
Port: toAllocatedPort,
Protocol: corev1.ProtocolUDP,
}
ports = append(ports, svcPortTCP, svcPortUDP)
} else {
svcPort := corev1.ServicePort{
Name: strings.ToLower(string(protocol)) + "-" + strconv.Itoa(podIndex) + "-" + strconv.Itoa(config.targetPorts[i]),
TargetPort: intstr.FromString(strings.ToLower(string(protocol)) + "-" + strconv.Itoa(podIndex) + "-" + strconv.Itoa(config.targetPorts[i])),
Port: toAllocatedPort,
Protocol: protocol,
}
ports = append(ports, svcPort)
}
toAllocatedPort++
for util.IsNumInListInt32(toAllocatedPort, config.blockPorts) {
toAllocatedPort++
}
}
}
return ports
}
func (a *AutoNLBsPlugin) consSvc(namespace, gssName, eipType string, svcIndex int, conf *autoNLBsConfig) *corev1.Service {
loadBalancerClass := "alibabacloud.com/nlb"
svcAnnotations := map[string]string{
//SlbConfigHashKey: util.GetHash(conf),
NLBZoneMapsServiceAnnotationKey: conf.zoneMaps,
LBHealthCheckFlagAnnotationKey: conf.lBHealthCheckFlag,
}
if conf.lBHealthCheckFlag == "on" {
svcAnnotations[LBHealthCheckTypeAnnotationKey] = conf.lBHealthCheckType
svcAnnotations[LBHealthCheckConnectPortAnnotationKey] = conf.lBHealthCheckConnectPort
svcAnnotations[LBHealthCheckConnectTimeoutAnnotationKey] = conf.lBHealthCheckConnectTimeout
svcAnnotations[LBHealthCheckIntervalAnnotationKey] = conf.lBHealthCheckInterval
svcAnnotations[LBHealthyThresholdAnnotationKey] = conf.lBHealthyThreshold
svcAnnotations[LBUnhealthyThresholdAnnotationKey] = conf.lBUnhealthyThreshold
if conf.lBHealthCheckType == "http" {
svcAnnotations[LBHealthCheckDomainAnnotationKey] = conf.lBHealthCheckDomain
svcAnnotations[LBHealthCheckUriAnnotationKey] = conf.lBHealthCheckUri
svcAnnotations[LBHealthCheckMethodAnnotationKey] = conf.lBHealthCheckMethod
}
}
if strings.Contains(eipType, IntranetEIPType) {
svcAnnotations[NLBAddressTypeAnnotationKey] = IntranetEIPType
}
return &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: gssName + "-" + eipType + "-" + strconv.Itoa(svcIndex),
Namespace: namespace,
Annotations: svcAnnotations,
},
Spec: corev1.ServiceSpec{
Ports: a.consSvcPorts(svcIndex, conf),
Type: corev1.ServiceTypeLoadBalancer,
Selector: map[string]string{
gamekruiseiov1alpha1.GameServerOwnerGssKey: gssName,
},
LoadBalancerClass: &loadBalancerClass,
AllocateLoadBalancerNodePorts: ptr.To[bool](false),
ExternalTrafficPolicy: conf.externalTrafficPolicy,
},
}
}
func setSvcOwner(c client.Client, ctx context.Context, svc *corev1.Service, namespace, gssName string) error {
gss := &gamekruiseiov1alpha1.GameServerSet{}
err := c.Get(ctx, types.NamespacedName{
Namespace: namespace,
Name: gssName,
}, gss)
if err != nil {
return err
}
ownerRef := []metav1.OwnerReference{
{
APIVersion: gss.APIVersion,
Kind: gss.Kind,
Name: gss.GetName(),
UID: gss.GetUID(),
Controller: ptr.To[bool](true),
BlockOwnerDeletion: ptr.To[bool](true),
},
}
svc.OwnerReferences = ownerRef
return nil
}
func parseAutoNLBsConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) (*autoNLBsConfig, error) {
reserveNlbNum := 1
eipTypes := []string{"default"}
ports := make([]int, 0)
protocols := make([]corev1.Protocol, 0)
externalTrafficPolicy := corev1.ServiceExternalTrafficPolicyTypeLocal
zoneMaps := ""
blockPorts := make([]int32, 0)
minPort := int32(1000)
maxPort := int32(1499)
for _, c := range conf {
switch c.Name {
case PortProtocolsConfigName:
for _, pp := range strings.Split(c.Value, ",") {
ppSlice := strings.Split(pp, "/")
port, err := strconv.Atoi(ppSlice[0])
if err != nil {
return nil, fmt.Errorf("invalid PortProtocols %s", c.Value)
}
ports = append(ports, port)
if len(ppSlice) != 2 {
protocols = append(protocols, corev1.ProtocolTCP)
} else {
protocols = append(protocols, corev1.Protocol(ppSlice[1]))
}
}
case ExternalTrafficPolicyTypeConfigName:
if strings.EqualFold(c.Value, string(corev1.ServiceExternalTrafficPolicyTypeCluster)) {
externalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeCluster
}
case ReserveNlbNumConfigName:
reserveNlbNum, _ = strconv.Atoi(c.Value)
case EipTypesConfigName:
eipTypes = strings.Split(c.Value, ",")
case ZoneMapsConfigName:
zoneMaps = c.Value
case BlockPortsConfigName:
blockPorts = util.StringToInt32Slice(c.Value, ",")
case MinPortConfigName:
val, err := strconv.ParseInt(c.Value, 10, 32)
if err != nil {
return nil, fmt.Errorf("invalid MinPort %s", c.Value)
} else {
minPort = int32(val)
}
case MaxPortConfigName:
val, err := strconv.ParseInt(c.Value, 10, 32)
if err != nil {
return nil, fmt.Errorf("invalid MaxPort %s", c.Value)
} else {
maxPort = int32(val)
}
}
}
if minPort > maxPort {
return nil, fmt.Errorf("invalid MinPort %d and MaxPort %d", minPort, maxPort)
}
if zoneMaps == "" {
return nil, fmt.Errorf("invalid ZoneMaps, which can not be empty")
}
// check ports & protocols
if len(ports) == 0 || len(protocols) == 0 {
return nil, fmt.Errorf("invalid PortProtocols, which can not be empty")
}
nlbHealthConfig, err := parseNlbHealthConfig(conf)
if err != nil {
return nil, err
}
return &autoNLBsConfig{
blockPorts: blockPorts,
minPort: minPort,
maxPort: maxPort,
nlbHealthConfig: nlbHealthConfig,
reserveNlbNum: reserveNlbNum,
eipTypes: eipTypes,
protocols: protocols,
targetPorts: ports,
zoneMaps: zoneMaps,
externalTrafficPolicy: externalTrafficPolicy,
}, nil
}
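
The pod-to-service mapping in this plugin is pure arithmetic over the configured port range: one NLB Service covers (maxPort - minPort - len(blockPorts) + 1) / len(targetPorts) pods, and checkSvcNumToCreate keeps reserveNlbNum spare Services on top of whatever the highest pod index requires. A standalone re-derivation of that calculation, using the same numbers as case 0 of the test file that follows:

package main

import "fmt"

// Standalone re-derivation of the service-count arithmetic in
// checkSvcNumToCreate above, using the numbers from case 0 of the test below.
func main() {
	minPort, maxPort := 1000, 2500
	blockPorts := 0     // no blocked ports in this example
	targetPorts := 2    // ports exposed per pod (6666 and 8888 in the test)
	maxPodIndex := 1499 // highest pod index seen so far
	reserveNlbNum := 2  // spare NLBs kept ready ahead of scale-out

	lenRange := maxPort - minPort - blockPorts + 1 // 1501 allocatable ports per NLB
	podsPerSvc := lenRange / targetPorts           // 750 pods fit on one NLB
	expectSvcNum := maxPodIndex/podsPerSvc + reserveNlbNum + 1

	fmt.Println(expectSvcNum) // prints 4, matching the test's expectSvcNum
}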

View File

@ -0,0 +1,272 @@
/*
Copyright 2025 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package alibabacloud
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"reflect"
"sync"
"testing"
)
func TestIsNeedToCreateService(t *testing.T) {
tests := []struct {
ns string
gssName string
config *autoNLBsConfig
a *AutoNLBsPlugin
expectSvcNum int
}{
// case 0
{
ns: "default",
gssName: "pod",
config: &autoNLBsConfig{
protocols: []corev1.Protocol{
corev1.ProtocolTCP,
corev1.ProtocolUDP,
},
reserveNlbNum: 2,
targetPorts: []int{
6666,
8888,
},
maxPort: 2500,
minPort: 1000,
blockPorts: []int32{},
},
a: &AutoNLBsPlugin{
gssMaxPodIndex: map[string]int{
"default/pod": 1499,
},
mutex: sync.RWMutex{},
},
expectSvcNum: 4,
},
// case 1
{
ns: "default",
gssName: "pod",
config: &autoNLBsConfig{
protocols: []corev1.Protocol{
corev1.ProtocolTCP,
corev1.ProtocolTCP,
corev1.ProtocolUDP,
},
reserveNlbNum: 2,
targetPorts: []int{
6666,
7777,
8888,
},
maxPort: 1005,
minPort: 1000,
blockPorts: []int32{},
},
a: &AutoNLBsPlugin{
gssMaxPodIndex: map[string]int{
"default/pod": 1,
},
mutex: sync.RWMutex{},
},
expectSvcNum: 3,
},
}
for i, test := range tests {
a := test.a
expectSvcNum := a.checkSvcNumToCreate(test.ns, test.gssName, test.config)
if expectSvcNum != test.expectSvcNum {
t.Errorf("case %d: expect toAddSvcNum: %d, but got toAddSvcNum: %d", i, test.expectSvcNum, expectSvcNum)
}
}
}
func TestConsSvcPorts(t *testing.T) {
tests := []struct {
a *AutoNLBsPlugin
svcIndex int
config *autoNLBsConfig
expectSvcPorts []corev1.ServicePort
}{
// case 0
{
a: &AutoNLBsPlugin{
mutex: sync.RWMutex{},
},
svcIndex: 0,
config: &autoNLBsConfig{
protocols: []corev1.Protocol{
corev1.ProtocolTCP,
corev1.ProtocolUDP,
},
targetPorts: []int{
6666,
8888,
},
maxPort: 1003,
minPort: 1000,
blockPorts: []int32{},
},
expectSvcPorts: []corev1.ServicePort{
{
Name: "tcp-0-6666",
TargetPort: intstr.FromString("tcp-0-6666"),
Port: 1000,
Protocol: corev1.ProtocolTCP,
},
{
Name: "udp-0-8888",
TargetPort: intstr.FromString("udp-0-8888"),
Port: 1001,
Protocol: corev1.ProtocolUDP,
},
{
Name: "tcp-1-6666",
TargetPort: intstr.FromString("tcp-1-6666"),
Port: 1002,
Protocol: corev1.ProtocolTCP,
},
{
Name: "udp-1-8888",
TargetPort: intstr.FromString("udp-1-8888"),
Port: 1003,
Protocol: corev1.ProtocolUDP,
},
},
},
// case 1
{
a: &AutoNLBsPlugin{
mutex: sync.RWMutex{},
},
svcIndex: 1,
config: &autoNLBsConfig{
protocols: []corev1.Protocol{
corev1.ProtocolTCP,
corev1.ProtocolTCP,
corev1.ProtocolUDP,
},
targetPorts: []int{
6666,
7777,
8888,
},
maxPort: 1004,
minPort: 1000,
blockPorts: []int32{},
},
expectSvcPorts: []corev1.ServicePort{
{
Name: "tcp-1-6666",
TargetPort: intstr.FromString("tcp-1-6666"),
Port: 1000,
Protocol: corev1.ProtocolTCP,
},
{
Name: "tcp-1-7777",
TargetPort: intstr.FromString("tcp-1-7777"),
Port: 1001,
Protocol: corev1.ProtocolTCP,
},
{
Name: "udp-1-8888",
TargetPort: intstr.FromString("udp-1-8888"),
Port: 1002,
Protocol: corev1.ProtocolUDP,
},
},
},
// case 2
{
a: &AutoNLBsPlugin{
mutex: sync.RWMutex{},
},
svcIndex: 3,
config: &autoNLBsConfig{
protocols: []corev1.Protocol{
ProtocolTCPUDP,
},
targetPorts: []int{
6666,
},
maxPort: 1004,
minPort: 1000,
blockPorts: []int32{1002},
},
expectSvcPorts: []corev1.ServicePort{
{
Name: "tcp-12-6666",
TargetPort: intstr.FromString("tcp-12-6666"),
Port: 1000,
Protocol: corev1.ProtocolTCP,
},
{
Name: "udp-12-6666",
TargetPort: intstr.FromString("udp-12-6666"),
Port: 1000,
Protocol: corev1.ProtocolUDP,
},
{
Name: "tcp-13-6666",
TargetPort: intstr.FromString("tcp-13-6666"),
Port: 1001,
Protocol: corev1.ProtocolTCP,
},
{
Name: "udp-13-6666",
TargetPort: intstr.FromString("udp-13-6666"),
Port: 1001,
Protocol: corev1.ProtocolUDP,
},
{
Name: "tcp-14-6666",
TargetPort: intstr.FromString("tcp-14-6666"),
Port: 1003,
Protocol: corev1.ProtocolTCP,
},
{
Name: "udp-14-6666",
TargetPort: intstr.FromString("udp-14-6666"),
Port: 1003,
Protocol: corev1.ProtocolUDP,
},
{
Name: "tcp-15-6666",
TargetPort: intstr.FromString("tcp-15-6666"),
Port: 1004,
Protocol: corev1.ProtocolTCP,
},
{
Name: "udp-15-6666",
TargetPort: intstr.FromString("udp-15-6666"),
Port: 1004,
Protocol: corev1.ProtocolUDP,
},
},
},
}
for i, test := range tests {
svcPorts := test.a.consSvcPorts(test.svcIndex, test.config)
if !reflect.DeepEqual(svcPorts, test.expectSvcPorts) {
t.Errorf("case %d: expect svcPorts: %v, but got svcPorts: %v", i, test.expectSvcPorts, svcPorts)
}
}
}

View File

@ -52,6 +52,8 @@ const (
ServiceBelongNetworkTypeKey = "game.kruise.io/network-type"
ProtocolTCPUDP corev1.Protocol = "TCPUDP"
PrefixReadyReadinessGate = "service.readiness.alibabacloud.com/"
)
type MultiNlbsPlugin struct {
@ -146,7 +148,25 @@ func initMultiLBCache(svcList []corev1.Service, maxPort, minPort int32, blockPor
return podAllocate, cache
}
func (m *MultiNlbsPlugin) OnPodAdded(client client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
func (m *MultiNlbsPlugin) OnPodAdded(c client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
networkManager := utils.NewNetworkManager(pod, c)
networkConfig := networkManager.GetNetworkConfig()
conf, err := parseMultiNLBsConfig(networkConfig)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.ParameterError, err.Error())
}
var lbNames []string
for _, lbName := range conf.lbNames {
if !util.IsStringInList(lbName, lbNames) {
lbNames = append(lbNames, lbName)
}
}
for _, lbName := range lbNames {
pod.Spec.ReadinessGates = append(pod.Spec.ReadinessGates, corev1.PodReadinessGate{
ConditionType: corev1.PodConditionType(PrefixReadyReadinessGate + pod.GetName() + "-" + strings.ToLower(lbName)),
})
}
return pod, nil
}
@ -166,8 +186,13 @@ func (m *MultiNlbsPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx con
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
endPoints := ""
for i, lbId := range conf.idList[0] {
podNsName := pod.GetNamespace() + "/" + pod.GetName()
podLbsPorts, err := m.allocate(conf, podNsName)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ParameterError)
}
for _, lbId := range conf.idList[podLbsPorts.index] {
// get svc
lbName := conf.lbNames[lbId]
svc := &corev1.Service{}
@ -177,7 +202,28 @@ func (m *MultiNlbsPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx con
}, svc)
if err != nil {
if errors.IsNotFound(err) {
service, err := m.consSvc(conf, pod, lbName, c, ctx)
service, err := m.consSvc(podLbsPorts, conf, pod, lbName, c, ctx)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ParameterError)
}
return pod, cperrors.ToPluginError(c.Create(ctx, service), cperrors.ApiCallError)
}
return pod, cperrors.NewPluginError(cperrors.ApiCallError, err.Error())
}
}
endPoints := ""
for i, lbId := range conf.idList[podLbsPorts.index] {
// get svc
lbName := conf.lbNames[lbId]
svc := &corev1.Service{}
err = c.Get(ctx, types.NamespacedName{
Name: pod.GetName() + "-" + strings.ToLower(lbName),
Namespace: pod.GetNamespace(),
}, svc)
if err != nil {
if errors.IsNotFound(err) {
service, err := m.consSvc(podLbsPorts, conf, pod, lbName, c, ctx)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ParameterError)
}
@ -199,7 +245,7 @@ func (m *MultiNlbsPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx con
if err != nil {
return pod, cperrors.NewPluginError(cperrors.InternalError, err.Error())
}
service, err := m.consSvc(conf, pod, lbName, c, ctx)
service, err := m.consSvc(podLbsPorts, conf, pod, lbName, c, ctx)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ParameterError)
}
@ -224,6 +270,12 @@ func (m *MultiNlbsPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx con
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
_, readyCondition := util.GetPodConditionFromList(pod.Status.Conditions, corev1.PodReady)
if readyCondition == nil || readyCondition.Status == corev1.ConditionFalse {
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
// allow not ready containers
if util.IsAllowNotReadyContainers(networkManager.GetNetworkConfig()) {
@ -334,16 +386,11 @@ type multiNLBsConfig struct {
targetPorts []int
protocols []corev1.Protocol
isFixed bool
externalTrafficPolicy corev1.ServiceExternalTrafficPolicyType
*nlbHealthConfig
}
func (m *MultiNlbsPlugin) consSvc(conf *multiNLBsConfig, pod *corev1.Pod, lbName string, c client.Client, ctx context.Context) (*corev1.Service, error) {
podNsName := pod.GetNamespace() + "/" + pod.GetName()
podLbsPorts, err := m.allocate(conf, podNsName)
if err != nil {
return nil, err
}
func (m *MultiNlbsPlugin) consSvc(podLbsPorts *lbsPorts, conf *multiNLBsConfig, pod *corev1.Pod, lbName string, c client.Client, ctx context.Context) (*corev1.Service, error) {
var selectId string
for _, lbId := range podLbsPorts.lbIds {
if conf.lbNames[lbId] == lbName {
@ -412,7 +459,7 @@ func (m *MultiNlbsPlugin) consSvc(conf *multiNLBsConfig, pod *corev1.Pod, lbName
},
Spec: corev1.ServiceSpec{
AllocateLoadBalancerNodePorts: ptr.To[bool](false),
ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeLocal,
ExternalTrafficPolicy: conf.externalTrafficPolicy,
Type: corev1.ServiceTypeLoadBalancer,
Selector: map[string]string{
SvcSelectorKey: pod.GetName(),
@ -469,6 +516,9 @@ func (m *MultiNlbsPlugin) allocate(conf *multiNLBsConfig, nsName string) (*lbsPo
if index == -1 {
return nil, fmt.Errorf("no available ports found")
}
if index >= len(conf.idList) {
return nil, fmt.Errorf("NlbIdNames configuration have not synced")
}
for _, port := range ports {
m.cache[index][port-m.minPort] = true
}
@ -507,6 +557,7 @@ func parseMultiNLBsConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) (*multi
ports := make([]int, 0)
protocols := make([]corev1.Protocol, 0)
isFixed := false
externalTrafficPolicy := corev1.ServiceExternalTrafficPolicyTypeLocal
for _, c := range conf {
switch c.Name {
@ -551,6 +602,10 @@ func parseMultiNLBsConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) (*multi
return nil, fmt.Errorf("invalid Fixed %s", c.Value)
}
isFixed = v
case ExternalTrafficPolicyTypeConfigName:
if strings.EqualFold(c.Value, string(corev1.ServiceExternalTrafficPolicyTypeCluster)) {
externalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeCluster
}
}
}
@ -582,6 +637,7 @@ func parseMultiNLBsConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) (*multi
targetPorts: ports,
protocols: protocols,
isFixed: isFixed,
externalTrafficPolicy: externalTrafficPolicy,
nlbHealthConfig: nlbHealthConfig,
}, nil
}

View File

@ -45,6 +45,7 @@ const (
AliasSLB = "LB-Network"
SlbIdsConfigName = "SlbIds"
PortProtocolsConfigName = "PortProtocols"
ExternalTrafficPolicyTypeConfigName = "ExternalTrafficPolicyType"
SlbListenerOverrideKey = "service.beta.kubernetes.io/alibaba-cloud-loadbalancer-force-override-listeners"
SlbIdAnnotationKey = "service.beta.kubernetes.io/alibaba-cloud-loadbalancer-id"
SlbIdLabelKey = "service.k8s.alibaba/loadbalancer-id"
@ -79,6 +80,7 @@ type slbConfig struct {
protocols []corev1.Protocol
isFixed bool
externalTrafficPolicyType corev1.ServiceExternalTrafficPolicyType
lBHealthCheckSwitch string
lBHealthCheckProtocolPort string
lBHealthCheckFlag string
@ -142,10 +144,13 @@ func initLbCache(svcList []corev1.Service, minPort, maxPort int32, blockPorts []
var ports []int32
for _, port := range getPorts(svc.Spec.Ports) {
if port <= maxPort && port >= minPort {
value, ok := newCache[lbId][port]
if !ok || !value {
newCache[lbId][port] = true
ports = append(ports, port)
}
}
}
if len(ports) != 0 {
newPodAllocate[svc.GetNamespace()+"/"+svc.GetName()] = lbId + ":" + util.Int32SliceToString(ports, ",")
}
@ -408,6 +413,7 @@ func parseLbConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) (*slbConfig, e
protocols := make([]corev1.Protocol, 0)
isFixed := false
externalTrafficPolicy := corev1.ServiceExternalTrafficPolicyTypeCluster
lBHealthCheckSwitch := "on"
lBHealthCheckProtocolPort := ""
lBHealthCheckFlag := "off"
@ -447,6 +453,10 @@ func parseLbConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) (*slbConfig, e
continue
}
isFixed = v
case ExternalTrafficPolicyTypeConfigName:
if strings.EqualFold(c.Value, string(corev1.ServiceExternalTrafficPolicyTypeLocal)) {
externalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeLocal
}
case LBHealthCheckSwitchConfigName:
checkSwitch := strings.ToLower(c.Value)
if checkSwitch != "on" && checkSwitch != "off" {
@ -529,6 +539,7 @@ func parseLbConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) (*slbConfig, e
protocols: protocols,
targetPorts: ports,
isFixed: isFixed,
externalTrafficPolicyType: externalTrafficPolicy,
lBHealthCheckSwitch: lBHealthCheckSwitch,
lBHealthCheckFlag: lBHealthCheckFlag,
lBHealthCheckType: lBHealthCheckType,
@ -569,13 +580,30 @@ func (s *SlbPlugin) consSvc(sc *slbConfig, pod *corev1.Pod, c client.Client, ctx
svcPorts := make([]corev1.ServicePort, 0)
for i := 0; i < len(sc.targetPorts); i++ {
if sc.protocols[i] == ProtocolTCPUDP {
svcPorts = append(svcPorts, corev1.ServicePort{
Name: strconv.Itoa(sc.targetPorts[i]),
Name: fmt.Sprintf("%s-%s", strconv.Itoa(sc.targetPorts[i]), corev1.ProtocolTCP),
Port: ports[i],
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(sc.targetPorts[i]),
})
svcPorts = append(svcPorts, corev1.ServicePort{
Name: fmt.Sprintf("%s-%s", strconv.Itoa(sc.targetPorts[i]), corev1.ProtocolUDP),
Port: ports[i],
Protocol: corev1.ProtocolUDP,
TargetPort: intstr.FromInt(sc.targetPorts[i]),
})
} else {
svcPorts = append(svcPorts, corev1.ServicePort{
Name: fmt.Sprintf("%s-%s", strconv.Itoa(sc.targetPorts[i]), sc.protocols[i]),
Port: ports[i],
Protocol: sc.protocols[i],
TargetPort: intstr.FromInt(sc.targetPorts[i]),
})
}
}
svcAnnotations := map[string]string{
SlbListenerOverrideKey: "true",
@ -607,6 +635,7 @@ func (s *SlbPlugin) consSvc(sc *slbConfig, pod *corev1.Pod, c client.Client, ctx
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeLoadBalancer,
ExternalTrafficPolicy: sc.externalTrafficPolicyType,
Selector: map[string]string{
SvcSelectorKey: pod.GetName(),
},
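
With the port naming above, a PortProtocols entry such as 8000/TCPUDP (the example from commit #197 in this comparison) now produces two Service ports that share an external port and differ only in protocol, while parseLbConfig keeps Cluster as the default ExternalTrafficPolicyType and only switches to Local when the config asks for it. A small sketch of the resulting port list, where external port 600 is a hypothetical value standing in for whatever the SLB allocator hands out:

package main

import (
	"fmt"
	"strconv"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Hypothetical numbers: container port 8000 declared as "8000/TCPUDP",
	// mapped to an allocated external SLB port 600.
	targetPort, externalPort := 8000, int32(600)

	ports := []corev1.ServicePort{
		{
			Name:       fmt.Sprintf("%s-%s", strconv.Itoa(targetPort), corev1.ProtocolTCP),
			Port:       externalPort,
			Protocol:   corev1.ProtocolTCP,
			TargetPort: intstr.FromInt(targetPort),
		},
		{
			Name:       fmt.Sprintf("%s-%s", strconv.Itoa(targetPort), corev1.ProtocolUDP),
			Port:       externalPort,
			Protocol:   corev1.ProtocolUDP,
			TargetPort: intstr.FromInt(targetPort),
		},
	}
	for _, p := range ports {
		fmt.Println(p.Name) // 8000-TCP, then 8000-UDP
	}
}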

View File

@ -23,6 +23,8 @@ import (
const (
SlbSPNetwork = "AlibabaCloud-SLB-SharedPort"
SvcSLBSPLabel = "game.kruise.io/AlibabaCloud-SLB-SharedPort"
ManagedServiceNamesConfigName = "ManagedServiceNames"
ManagedServiceSelectorConfigName = "ManagedServiceSelector"
)
const (
@ -46,6 +48,9 @@ type lbSpConfig struct {
lbIds []string
ports []int
protocols []corev1.Protocol
managedServiceNames []string
managedServiceSelectorKey string
managedServiceSelectorValue string
}
func (s *SlbSpPlugin) OnPodAdded(c client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
@ -112,6 +117,7 @@ func (s *SlbSpPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx context
if networkManager.GetNetworkDisabled() && hasLabel {
newLabels := pod.GetLabels()
delete(newLabels, SlbIdLabelKey)
delete(newLabels, podNetConfig.managedServiceSelectorKey)
pod.Labels = newLabels
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
@ -121,6 +127,7 @@ func (s *SlbSpPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx context
// enable network
if !networkManager.GetNetworkDisabled() && !hasLabel {
pod.Labels[SlbIdLabelKey] = podSlbId
pod.Labels[podNetConfig.managedServiceSelectorKey] = podNetConfig.managedServiceSelectorValue
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
@ -144,6 +151,26 @@ func (s *SlbSpPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx context
return pod, cperrors.ToPluginError(err, cperrors.ApiCallError)
}
}
for _, svcName := range podNetConfig.managedServiceNames {
managedSvc := &corev1.Service{}
getErr := c.Get(ctx, types.NamespacedName{
Namespace: pod.GetNamespace(),
Name: svcName,
}, managedSvc)
if getErr != nil {
return pod, cperrors.ToPluginError(getErr, cperrors.ApiCallError)
}
toUpDateManagedSvc, err := utils.AllowNotReadyContainers(c, ctx, pod, managedSvc, true)
if err != nil {
return pod, err
}
if toUpDateManagedSvc {
err := c.Update(ctx, managedSvc)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ApiCallError)
}
}
}
}
// network ready
@ -311,18 +338,29 @@ func parseLbSpConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) *lbSpConfig
var lbIds []string
var ports []int
var protocols []corev1.Protocol
var managedServiceNames []string
var managedServiceSelectorKey string
var managedServiceSelectorValue string
for _, c := range conf {
switch c.Name {
case SlbIdsConfigName:
lbIds = parseLbIds(c.Value)
case PortProtocolsConfigName:
ports, protocols = parsePortProtocols(c.Value)
case ManagedServiceNamesConfigName:
managedServiceNames = strings.Split(c.Value, ",")
case ManagedServiceSelectorConfigName:
managedServiceSelectorKey = strings.Split(c.Value, "=")[0]
managedServiceSelectorValue = strings.Split(c.Value, "=")[1]
}
}
return &lbSpConfig{
lbIds: lbIds,
ports: ports,
protocols: protocols,
managedServiceNames: managedServiceNames,
managedServiceSelectorKey: managedServiceSelectorKey,
managedServiceSelectorValue: managedServiceSelectorValue,
}
}

View File

@ -106,11 +106,22 @@ func TestParseLbSpConfig(t *testing.T) {
Name: SlbIdsConfigName,
Value: "lb-xxa",
},
{
Name: ManagedServiceNamesConfigName,
Value: "service-clusterIp",
},
{
Name: ManagedServiceSelectorConfigName,
Value: "game=v1",
},
},
podNetConfig: &lbSpConfig{
lbIds: []string{"lb-xxa"},
ports: []int{80},
protocols: []corev1.Protocol{corev1.ProtocolTCP},
managedServiceNames: []string{"service-clusterIp"},
managedServiceSelectorKey: "game",
managedServiceSelectorValue: "v1",
},
},
}

View File

@ -17,13 +17,14 @@ limitations under the License.
package alibabacloud
import (
"reflect"
"sync"
"testing"
gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"reflect"
"sync"
"testing"
)
func TestAllocateDeAllocate(t *testing.T) {
@ -136,6 +137,7 @@ func TestParseLbConfig(t *testing.T) {
lbIds: []string{"xxx-A"},
targetPorts: []int{80},
protocols: []corev1.Protocol{corev1.ProtocolTCP},
externalTrafficPolicyType: corev1.ServiceExternalTrafficPolicyTypeCluster,
isFixed: false,
lBHealthCheckSwitch: "off",
lBHealthCheckFlag: "off",
@ -164,11 +166,16 @@ func TestParseLbConfig(t *testing.T) {
Name: FixedConfigName,
Value: "true",
},
{
Name: ExternalTrafficPolicyTypeConfigName,
Value: "Local",
},
},
slbConfig: &slbConfig{
lbIds: []string{"xxx-A", "xxx-B"},
targetPorts: []int{81, 82, 83},
protocols: []corev1.Protocol{corev1.ProtocolUDP, corev1.ProtocolTCP, corev1.ProtocolTCP},
externalTrafficPolicyType: corev1.ServiceExternalTrafficPolicyTypeLocal,
isFixed: true,
lBHealthCheckSwitch: "on",
lBHealthCheckFlag: "off",

View File

@ -43,6 +43,7 @@ import (
provideroptions "github.com/openkruise/kruise-game/cloudprovider/options"
"github.com/openkruise/kruise-game/cloudprovider/utils"
"github.com/openkruise/kruise-game/pkg/util"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
)
const (
@ -127,7 +128,9 @@ func watchTargetGroup(ctx context.Context) error {
utilruntime.Must(ackv1alpha1.AddToScheme(scheme))
utilruntime.Must(elbv2api.AddToScheme(scheme))
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
MetricsBindAddress: "0",
Metrics: metricsserver.Options{
BindAddress: "0",
},
Scheme: scheme,
})
if err != nil {
@ -138,14 +141,16 @@ func watchTargetGroup(ctx context.Context) error {
return fmt.Errorf("failed to get informer: %v", err)
}
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
if _, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
handleTargetGroupEvent(ctx, mgr.GetClient(), obj)
},
UpdateFunc: func(oldObj, newObj interface{}) {
handleTargetGroupEvent(ctx, mgr.GetClient(), newObj)
},
})
}); err != nil {
return fmt.Errorf("failed to add event handler: %v", err)
}
log.Info("Start to watch TargetGroups successfully")
return mgr.Start(ctx)

View File

@ -49,6 +49,7 @@ type CloudProviderConfig struct {
AmazonsWebServicesOptions CloudProviderOptions
TencentCloudOptions CloudProviderOptions
JdCloudOptions CloudProviderOptions
HwCloudOptions CloudProviderOptions
}
type tomlConfigs struct {
@ -58,6 +59,7 @@ type tomlConfigs struct {
AmazonsWebServices options.AmazonsWebServicesOptions `toml:"aws"`
TencentCloud options.TencentCloudOptions `toml:"tencentcloud"`
JdCloud options.JdCloudOptions `toml:"jdcloud"`
HwCloud options.HwCloudOptions `toml:"hwcloud"`
}
func (cf *ConfigFile) Parse() *CloudProviderConfig {
@ -73,6 +75,7 @@ func (cf *ConfigFile) Parse() *CloudProviderConfig {
AmazonsWebServicesOptions: config.AmazonsWebServices,
TencentCloudOptions: config.TencentCloud,
JdCloudOptions: config.JdCloud,
HwCloudOptions: config.HwCloud,
}
}

View File

@ -0,0 +1,799 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hwcloud
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"sync"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
log "k8s.io/klog/v2"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/cloudprovider"
cperrors "github.com/openkruise/kruise-game/cloudprovider/errors"
provideroptions "github.com/openkruise/kruise-game/cloudprovider/options"
"github.com/openkruise/kruise-game/cloudprovider/utils"
"github.com/openkruise/kruise-game/pkg/util"
)
const (
PortProtocolsConfigName = "PortProtocols"
ExternalTrafficPolicyTypeConfigName = "ExternalTrafficPolicyType"
PublishNotReadyAddressesConfigName = "PublishNotReadyAddresses"
ElbIdAnnotationKey = "kubernetes.io/elb.id"
ElbIdsConfigName = "ElbIds"
ElbClassAnnotationKey = "kubernetes.io/elb.class"
ElbClassConfigName = "ElbClass"
ElbAvailableZoneAnnotationKey = "kubernetes.io/elb.availability-zones"
ElbAvailableZoneAnnotationConfigName = "ElbAvailableZone"
ElbConnLimitAnnotationKey = "kubernetes.io/elb.connection-limit"
ElbConnLimitConfigName = "ElbConnLimit"
ElbSubnetAnnotationKey = "kubernetes.io/elb.subnet-id"
ElbSubnetConfigName = "ElbSubnetId"
ElbEipAnnotationKey = "kubernetes.io/elb.eip-id"
ElbEipConfigName = "ElbEipId"
ElbEipKeepAnnotationKey = "kubernetes.io/elb.keep-eip"
ElbEipKeepConfigName = "ElbKeepd"
ElbEipAutoCreateOptionAnnotationKey = "kubernetes.io/elb.eip-auto-create-option"
ElbEipAutoCreateOptionConfigName = "ElbEipAutoCreateOption"
ElbLbAlgorithmAnnotationKey = "kubernetes.io/elb.lb-algorithm"
ElbLbAlgorithmConfigName = "ElbLbAlgorithm"
ElbSessionAffinityFlagAnnotationKey = "kubernetes.io/elb.session-affinity-flag"
ElbSessionAffinityFlagConfigName = "ElbSessionAffinityFlag"
ElbSessionAffinityOptionAnnotationKey = "kubernetes.io/elb.session-affinity-option"
ElbSessionAffinityOptionConfigName = "ElbSessionAffinityOption"
ElbTransparentClientIPAnnotationKey = "kubernetes.io/elb.enable-transparent-client-ip"
ElbTransparentClientIPConfigName = "ElbTransparentClientIP"
ElbXForwardedHostAnnotationKey = "kubernetes.io/elb.x-forwarded-host"
ElbXForwardedHostConfigName = "ElbXForwardedHost"
ElbTlsRefAnnotationKey = "kubernetes.io/elb.default-tls-container-ref"
ElbTlsRefConfigName = "ElbTlsRef"
ElbIdleTimeoutAnnotationKey = "kubernetes.io/elb.idle-timeout"
ElbIdleTimeoutConfigName = "ElbIdleTimeout"
ElbRequestTimeoutAnnotationKey = "kubernetes.io/elb.request-timeout"
ElbRequestTimeoutConfigName = "ElbRequestTimeout"
ElbResponseTimeoutAnnotationKey = "kubernetes.io/elb.response-timeout"
ElbResponseTimeoutConfigName = "ElbResponseTimeout"
ElbEnableCrossVPCAnnotationKey = "kubernetes.io/elb.enable-cross-vpc"
ElbEnableCrossVPCConfigName = "ElbEnableCrossVPC"
ElbL4FlavorIDAnnotationKey = "kubernetes.io/elb.l4-flavor-id"
ElbL4FlavorIDConfigName = "ElbL4FlavorID"
ElbL7FlavorIDAnnotationKey = "kubernetes.io/elb.l7-flavor-id"
ElbL7FlavorIDConfigName = "ElbL7FlavorID"
LBHealthCheckSwitchAnnotationKey = "kubernetes.io/elb.health-check-flag"
LBHealthCheckSwitchConfigName = "LBHealthCheckFlag"
LBHealthCheckOptionAnnotationKey = "kubernetes.io/elb.health-check-option"
LBHealthCHeckOptionConfigName = "LBHealthCheckOption"
)
const (
ElbConfigHashKey = "game.kruise.io/network-config-hash"
SvcSelectorKey = "statefulset.kubernetes.io/pod-name"
ProtocolTCPUDP corev1.Protocol = "TCPUDP"
FixedConfigName = "Fixed"
ElbNetwork = "HwCloud-ELB"
AliasELB = "ELB-Network"
ElbClassDedicated = "dedicated"
ElbClassShared = "shared"
ElbLbAlgorithmRoundRobin = "ROUND_ROBIN"
ElbLbAlgorithmLeastConn = "LEAST_CONNECTIONS"
ElbLbAlgorithmSourceIP = "SOURCE_IP"
)
type portAllocated map[int32]bool
type ElbPlugin struct {
maxPort int32
minPort int32
blockPorts []int32
cache map[string]portAllocated
podAllocate map[string]string
mutex sync.RWMutex
}
type elbConfig struct {
lbIds []string
targetPorts []int
protocols []corev1.Protocol
isFixed bool
elbClass string
elbConnLimit int32
elbLbAlgorithm string
elbSessionAffinityFlag string
elbSessionAffinityOption string
elbTransparentClientIP bool
elbXForwardedHost bool
elbIdleTimeout int32
elbRequestTimeout int32
elbResponseTimeout int32
externalTrafficPolicyType corev1.ServiceExternalTrafficPolicyType
publishNotReadyAddresses bool
lBHealthCheckSwitch string
lBHealtchCheckOption string
}
func (s *ElbPlugin) Name() string {
return ElbNetwork
}
func (s *ElbPlugin) Alias() string {
return AliasELB
}
func (s *ElbPlugin) Init(c client.Client, options cloudprovider.CloudProviderOptions, ctx context.Context) error {
s.mutex.Lock()
defer s.mutex.Unlock()
elbOptions := options.(provideroptions.HwCloudOptions).ELBOptions
s.minPort = elbOptions.MinPort
s.maxPort = elbOptions.MaxPort
s.blockPorts = elbOptions.BlockPorts
svcList := &corev1.ServiceList{}
err := c.List(ctx, svcList)
if err != nil {
return err
}
s.cache, s.podAllocate = initLbCache(svcList.Items, s.minPort, s.maxPort, s.blockPorts)
log.Infof("[%s] podAllocate cache complete initialization: %v", ElbNetwork, s.podAllocate)
return nil
}
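// initLbCache rebuilds the per-ELB port allocation cache from existing LoadBalancer Services, marking block ports and already-used in-range ports as allocated.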
func initLbCache(svcList []corev1.Service, minPort, maxPort int32, blockPorts []int32) (map[string]portAllocated, map[string]string) {
newCache := make(map[string]portAllocated)
newPodAllocate := make(map[string]string)
for _, svc := range svcList {
lbId := svc.Annotations[ElbIdAnnotationKey]
if lbId != "" && svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
// init cache for that lb
if newCache[lbId] == nil {
newCache[lbId] = make(portAllocated, maxPort-minPort+1)
for i := minPort; i <= maxPort; i++ {
newCache[lbId][i] = false
}
}
// block ports
for _, blockPort := range blockPorts {
newCache[lbId][blockPort] = true
}
// fill in cache for that lb
var ports []int32
for _, port := range getPorts(svc.Spec.Ports) {
if port <= maxPort && port >= minPort {
value, ok := newCache[lbId][port]
if !ok || !value {
newCache[lbId][port] = true
ports = append(ports, port)
}
}
}
if len(ports) != 0 {
newPodAllocate[svc.GetNamespace()+"/"+svc.GetName()] = lbId + ":" + util.Int32SliceToString(ports, ",")
log.Infof("svc %s/%s allocate elb %s ports %v", svc.Namespace, svc.Name, lbId, ports)
}
}
}
return newCache, newPodAllocate
}
func (s *ElbPlugin) OnPodAdded(c client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
return pod, nil
}
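// OnPodUpdated reconciles the pod's LoadBalancer Service: it creates the Service if missing, rebuilds it when the network config hash changes, switches the Service type when the network is disabled or re-enabled, and publishes internal/external addresses once the ELB ingress is ready.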
func (s *ElbPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
networkManager := utils.NewNetworkManager(pod, c)
networkStatus, _ := networkManager.GetNetworkStatus()
if networkStatus == nil {
pod, err := networkManager.UpdateNetworkStatus(gamekruiseiov1alpha1.NetworkStatus{
CurrentNetworkState: gamekruiseiov1alpha1.NetworkNotReady,
}, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
networkConfig := networkManager.GetNetworkConfig()
sc, err := parseLbConfig(networkConfig)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ParameterError)
}
// get svc
svc := &corev1.Service{}
err = c.Get(ctx, types.NamespacedName{
Name: pod.GetName(),
Namespace: pod.GetNamespace(),
}, svc)
if err != nil {
if errors.IsNotFound(err) {
service, err := s.consSvc(sc, pod, c, ctx)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ParameterError)
}
return pod, cperrors.ToPluginError(c.Create(ctx, service), cperrors.ApiCallError)
}
return pod, cperrors.NewPluginError(cperrors.ApiCallError, err.Error())
}
// old svc remain
if svc.OwnerReferences[0].Kind == "Pod" && svc.OwnerReferences[0].UID != pod.UID {
log.Infof("[%s] waitting old svc %s/%s deleted. old owner pod uid is %s, but now is %s", ElbNetwork, svc.Namespace, svc.Name, svc.OwnerReferences[0].UID, pod.UID)
return pod, nil
}
// update svc
if util.GetHash(sc) != svc.GetAnnotations()[ElbConfigHashKey] {
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.InternalError, err.Error())
}
service, err := s.consSvc(sc, pod, c, ctx)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.ParameterError, err.Error())
}
return pod, cperrors.ToPluginError(c.Update(ctx, service), cperrors.ApiCallError)
}
// disable network
if networkManager.GetNetworkDisabled() && svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
svc.Spec.Type = corev1.ServiceTypeClusterIP
return pod, cperrors.ToPluginError(c.Update(ctx, svc), cperrors.ApiCallError)
}
// enable network
if !networkManager.GetNetworkDisabled() && svc.Spec.Type == corev1.ServiceTypeClusterIP {
svc.Spec.Type = corev1.ServiceTypeLoadBalancer
return pod, cperrors.ToPluginError(c.Update(ctx, svc), cperrors.ApiCallError)
}
// network not ready
if svc.Status.LoadBalancer.Ingress == nil {
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
// allow not ready containers
if util.IsAllowNotReadyContainers(networkManager.GetNetworkConfig()) {
toUpDateSvc, err := utils.AllowNotReadyContainers(c, ctx, pod, svc, false)
if err != nil {
return pod, err
}
if toUpDateSvc {
err := c.Update(ctx, svc)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ApiCallError)
}
}
}
// network ready
internalAddresses := make([]gamekruiseiov1alpha1.NetworkAddress, 0)
externalAddresses := make([]gamekruiseiov1alpha1.NetworkAddress, 0)
for _, port := range svc.Spec.Ports {
instrIPort := port.TargetPort
instrEPort := intstr.FromInt(int(port.Port))
internalAddress := gamekruiseiov1alpha1.NetworkAddress{
IP: pod.Status.PodIP,
Ports: []gamekruiseiov1alpha1.NetworkPort{
{
Name: instrIPort.String(),
Port: &instrIPort,
Protocol: port.Protocol,
},
},
}
externalAddress := gamekruiseiov1alpha1.NetworkAddress{
IP: svc.Status.LoadBalancer.Ingress[0].IP,
Ports: []gamekruiseiov1alpha1.NetworkPort{
{
Name: instrIPort.String(),
Port: &instrEPort,
Protocol: port.Protocol,
},
},
}
internalAddresses = append(internalAddresses, internalAddress)
externalAddresses = append(externalAddresses, externalAddress)
}
networkStatus.InternalAddresses = internalAddresses
networkStatus.ExternalAddresses = externalAddresses
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
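// OnPodDeleted releases the ports allocated to the pod; for fixed networks the allocation is kept while the owning GameServerSet still exists and only released once the GameServerSet is being deleted.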
func (s *ElbPlugin) OnPodDeleted(c client.Client, pod *corev1.Pod, ctx context.Context) cperrors.PluginError {
networkManager := utils.NewNetworkManager(pod, c)
networkConfig := networkManager.GetNetworkConfig()
sc, err := parseLbConfig(networkConfig)
if err != nil {
return cperrors.NewPluginError(cperrors.ParameterError, err.Error())
}
var podKeys []string
if sc.isFixed {
gss, err := util.GetGameServerSetOfPod(pod, c, ctx)
if err != nil && !errors.IsNotFound(err) {
return cperrors.ToPluginError(err, cperrors.ApiCallError)
}
// gss exists in cluster, do not deAllocate.
if err == nil && gss.GetDeletionTimestamp() == nil {
return nil
}
// gss does not exist in cluster, deAllocate all the ports related to it.
for key := range s.podAllocate {
gssName := pod.GetLabels()[gamekruiseiov1alpha1.GameServerOwnerGssKey]
if strings.Contains(key, pod.GetNamespace()+"/"+gssName) {
podKeys = append(podKeys, key)
}
}
} else {
podKeys = append(podKeys, pod.GetNamespace()+"/"+pod.GetName())
}
for _, podKey := range podKeys {
s.deAllocate(podKey)
}
return nil
}
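// allocate picks an ELB from lbIds that still has num free ports, reserves them in the cache, and records the allocation under the pod's namespaced name.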
func (s *ElbPlugin) allocate(lbIds []string, num int, nsName string) (string, []int32) {
s.mutex.Lock()
defer s.mutex.Unlock()
var ports []int32
var lbId string
// find lb with adequate ports
for _, slbId := range lbIds {
sum := 0
for i := s.minPort; i <= s.maxPort; i++ {
if !s.cache[slbId][i] {
sum++
}
if sum >= num {
lbId = slbId
break
}
}
}
if lbId == "" {
return "", nil
}
// select ports
for i := 0; i < num; i++ {
var port int32
if s.cache[lbId] == nil {
// init cache for new lb
s.cache[lbId] = make(portAllocated, s.maxPort-s.minPort+1)
for i := s.minPort; i <= s.maxPort; i++ {
s.cache[lbId][i] = false
}
// block ports
for _, blockPort := range s.blockPorts {
s.cache[lbId][blockPort] = true
}
}
for p, allocated := range s.cache[lbId] {
if !allocated {
port = p
break
}
}
s.cache[lbId][port] = true
ports = append(ports, port)
}
s.podAllocate[nsName] = lbId + ":" + util.Int32SliceToString(ports, ",")
log.Infof("pod %s allocate slb %s ports %v", nsName, lbId, ports)
return lbId, ports
}
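// deAllocate returns the pod's ports to the cache, keeps block ports reserved, and removes the pod's allocation record.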
func (s *ElbPlugin) deAllocate(nsName string) {
s.mutex.Lock()
defer s.mutex.Unlock()
allocatedPorts, exist := s.podAllocate[nsName]
if !exist {
return
}
slbPorts := strings.Split(allocatedPorts, ":")
lbId := slbPorts[0]
ports := util.StringToInt32Slice(slbPorts[1], ",")
for _, port := range ports {
s.cache[lbId][port] = false
}
// block ports
for _, blockPort := range s.blockPorts {
s.cache[lbId][blockPort] = true
}
delete(s.podAllocate, nsName)
log.Infof("pod %s deallocate slb %s ports %v", nsName, lbId, ports)
}
func init() {
elbPlugin := ElbPlugin{
mutex: sync.RWMutex{},
}
hwCloudProvider.registerPlugin(&elbPlugin)
}
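// parseLbConfig converts the network conf params into an elbConfig, validating values where possible and falling back to defaults otherwise.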
func parseLbConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) (*elbConfig, error) {
var lbIds []string
ports := make([]int, 0)
protocols := make([]corev1.Protocol, 0)
isFixed := false
externalTrafficPolicy := corev1.ServiceExternalTrafficPolicyTypeCluster
publishNotReadyAddresses := false
elbClass := ElbClassDedicated
elbConnLimit := int32(-1)
elbLbAlgorithm := ElbLbAlgorithmRoundRobin
elbSessionAffinityFlag := "off"
elbSessionAffinityOption := ""
elbTransparentClientIP := false
elbXForwardedHost := false
elbIdleTimeout := int32(-1)
elbRequestTimeout := int32(-1)
elbResponseTimeout := int32(-1)
lBHealthCheckSwitch := "on"
LBHealthCHeckOptionConfig := ""
for _, c := range conf {
switch c.Name {
case ElbIdsConfigName:
for _, slbId := range strings.Split(c.Value, ",") {
if slbId != "" {
lbIds = append(lbIds, slbId)
}
}
if len(lbIds) <= 0 {
return nil, fmt.Errorf("no elb id found, must specify at least one elb id")
}
case PortProtocolsConfigName:
for _, pp := range strings.Split(c.Value, ",") {
ppSlice := strings.Split(pp, "/")
port, err := strconv.Atoi(ppSlice[0])
if err != nil {
continue
}
ports = append(ports, port)
if len(ppSlice) != 2 {
protocols = append(protocols, corev1.ProtocolTCP)
} else {
protocols = append(protocols, corev1.Protocol(ppSlice[1]))
}
}
case FixedConfigName:
v, err := strconv.ParseBool(c.Value)
if err != nil {
continue
}
isFixed = v
case ExternalTrafficPolicyTypeConfigName:
if strings.EqualFold(c.Value, string(corev1.ServiceExternalTrafficPolicyTypeLocal)) {
externalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeLocal
}
case PublishNotReadyAddressesConfigName:
v, err := strconv.ParseBool(c.Value)
if err != nil {
continue
}
publishNotReadyAddresses = v
case ElbClassConfigName:
if strings.EqualFold(c.Value, string(ElbClassShared)) {
elbClass = ElbClassShared
}
case ElbConnLimitConfigName:
v, err := strconv.Atoi(c.Value)
if err != nil {
_ = fmt.Errorf("ignore invalid elb connection limit value: %s", c.Value)
continue
}
elbConnLimit = int32(v)
case ElbLbAlgorithmConfigName:
if strings.EqualFold(c.Value, ElbLbAlgorithmRoundRobin) {
elbLbAlgorithm = ElbLbAlgorithmRoundRobin
}
if strings.EqualFold(c.Value, ElbLbAlgorithmLeastConn) {
elbLbAlgorithm = ElbLbAlgorithmLeastConn
}
if strings.EqualFold(c.Value, ElbLbAlgorithmSourceIP) {
elbLbAlgorithm = ElbLbAlgorithmSourceIP
}
case ElbSessionAffinityFlagConfigName:
if strings.EqualFold(c.Value, "on") {
elbSessionAffinityFlag = "on"
}
case ElbSessionAffinityOptionConfigName:
if !json.Valid([]byte(c.Value)) {
return nil, fmt.Errorf("invalid elb session affinity option value: %s", c.Value)
}
elbSessionAffinityOption = c.Value
case ElbTransparentClientIPConfigName:
v, err := strconv.ParseBool(c.Value)
if err != nil {
_ = fmt.Errorf("ignore invalid elb transparent client ip value: %s", c.Value)
continue
}
elbTransparentClientIP = v
case ElbXForwardedHostConfigName:
v, err := strconv.ParseBool(c.Value)
if err != nil {
_ = fmt.Errorf("ignore invalid elb x forwarded host value: %s", c.Value)
continue
}
elbXForwardedHost = v
case ElbIdleTimeoutConfigName:
v, err := strconv.Atoi(c.Value)
if err != nil {
_ = fmt.Errorf("ignore invalid elb idle timeout value: %s", c.Value)
continue
}
if v >= 0 && v <= 4000 {
elbIdleTimeout = int32(v)
} else {
_ = fmt.Errorf("ignore invalid elb idle timeout value: %s", c.Value)
continue
}
case ElbRequestTimeoutConfigName:
v, err := strconv.Atoi(c.Value)
if err != nil {
_ = fmt.Errorf("ignore invalid elb request timeout value: %s", c.Value)
continue
}
if v >= 1 && v <= 300 {
elbRequestTimeout = int32(v)
} else {
_ = fmt.Errorf("ignore invalid elb request timeout value: %s", c.Value)
continue
}
case ElbResponseTimeoutConfigName:
v, err := strconv.Atoi(c.Value)
if err != nil {
_ = fmt.Errorf("ignore invalid elb response timeout value: %s", c.Value)
continue
}
if v >= 1 && v <= 300 {
elbResponseTimeout = int32(v)
} else {
_ = fmt.Errorf("ignore invalid elb response timeout value: %s", c.Value)
continue
}
case LBHealthCheckSwitchConfigName:
checkSwitch := strings.ToLower(c.Value)
if checkSwitch != "on" && checkSwitch != "off" {
return nil, fmt.Errorf("invalid lb health check switch value: %s", c.Value)
}
lBHealthCheckSwitch = checkSwitch
case LBHealthCHeckOptionConfigName:
if json.Valid([]byte(c.Value)) {
LBHealthCHeckOptionConfig = c.Value
} else {
return nil, fmt.Errorf("invalid lb health check option value: %s", c.Value)
}
}
}
return &elbConfig{
lbIds: lbIds,
protocols: protocols,
targetPorts: ports,
isFixed: isFixed,
externalTrafficPolicyType: externalTrafficPolicy,
publishNotReadyAddresses: publishNotReadyAddresses,
elbClass: elbClass,
elbConnLimit: elbConnLimit,
elbLbAlgorithm: elbLbAlgorithm,
elbSessionAffinityFlag: elbSessionAffinityFlag,
elbSessionAffinityOption: elbSessionAffinityOption,
elbTransparentClientIP: elbTransparentClientIP,
elbXForwardedHost: elbXForwardedHost,
elbIdleTimeout: elbIdleTimeout,
elbRequestTimeout: elbRequestTimeout,
elbResponseTimeout: elbResponseTimeout,
lBHealthCheckSwitch: lBHealthCheckSwitch,
lBHealtchCheckOption: LBHealthCHeckOptionConfig,
}, nil
}
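// getPorts collects the external port numbers of a Service.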
func getPorts(ports []corev1.ServicePort) []int32 {
var ret []int32
for _, port := range ports {
ret = append(ret, port.Port)
}
return ret
}
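// consSvc builds the LoadBalancer Service for the pod, reusing previously allocated ELB ports when present and expanding TCPUDP entries into separate TCP and UDP service ports.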
func (s *ElbPlugin) consSvc(sc *elbConfig, pod *corev1.Pod, c client.Client, ctx context.Context) (*corev1.Service, error) {
var ports []int32
var lbId string
podKey := pod.GetNamespace() + "/" + pod.GetName()
allocatedPorts, exist := s.podAllocate[podKey]
if exist {
slbPorts := strings.Split(allocatedPorts, ":")
lbId = slbPorts[0]
ports = util.StringToInt32Slice(slbPorts[1], ",")
} else {
lbId, ports = s.allocate(sc.lbIds, len(sc.targetPorts), podKey)
if lbId == "" && ports == nil {
return nil, fmt.Errorf("there are no avaialable ports for %v", sc.lbIds)
}
}
svcPorts := make([]corev1.ServicePort, 0)
for i := 0; i < len(sc.targetPorts); i++ {
if sc.protocols[i] == ProtocolTCPUDP {
svcPorts = append(svcPorts, corev1.ServicePort{
Name: fmt.Sprintf("%s-%s", strconv.Itoa(sc.targetPorts[i]), strings.ToLower(string(corev1.ProtocolTCP))),
Port: ports[i],
Protocol: corev1.ProtocolTCP,
TargetPort: intstr.FromInt(sc.targetPorts[i]),
})
svcPorts = append(svcPorts, corev1.ServicePort{
Name: fmt.Sprintf("%s-%s", strconv.Itoa(sc.targetPorts[i]), strings.ToLower(string(corev1.ProtocolUDP))),
Port: ports[i],
Protocol: corev1.ProtocolUDP,
TargetPort: intstr.FromInt(sc.targetPorts[i]),
})
} else {
svcPorts = append(svcPorts, corev1.ServicePort{
Name: fmt.Sprintf("%s-%s", strconv.Itoa(sc.targetPorts[i]), strings.ToLower(string(sc.protocols[i]))),
Port: ports[i],
Protocol: sc.protocols[i],
TargetPort: intstr.FromInt(sc.targetPorts[i]),
})
}
}
svcAnnotations := map[string]string{
ElbIdAnnotationKey: lbId,
ElbConfigHashKey: util.GetHash(sc),
ElbClassAnnotationKey: sc.elbClass,
ElbLbAlgorithmAnnotationKey: sc.elbLbAlgorithm,
ElbSessionAffinityFlagAnnotationKey: sc.elbSessionAffinityFlag,
ElbSessionAffinityOptionAnnotationKey: sc.elbSessionAffinityOption,
ElbTransparentClientIPAnnotationKey: strconv.FormatBool(sc.elbTransparentClientIP),
ElbXForwardedHostAnnotationKey: strconv.FormatBool(sc.elbXForwardedHost),
LBHealthCheckSwitchAnnotationKey: sc.lBHealthCheckSwitch,
}
if sc.elbClass != ElbClassDedicated {
svcAnnotations[ElbConnLimitAnnotationKey] = strconv.Itoa(int(sc.elbConnLimit))
}
if sc.elbIdleTimeout != -1 {
svcAnnotations[ElbIdleTimeoutAnnotationKey] = strconv.Itoa(int(sc.elbIdleTimeout))
}
if sc.elbRequestTimeout != -1 {
svcAnnotations[ElbRequestTimeoutAnnotationKey] = strconv.Itoa(int(sc.elbRequestTimeout))
}
if sc.elbResponseTimeout != -1 {
svcAnnotations[ElbResponseTimeoutAnnotationKey] = strconv.Itoa(int(sc.elbResponseTimeout))
}
if sc.lBHealthCheckSwitch == "on" {
svcAnnotations[LBHealthCheckOptionAnnotationKey] = sc.lBHealtchCheckOption
}
svc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: pod.GetName(),
Namespace: pod.GetNamespace(),
Annotations: svcAnnotations,
OwnerReferences: getSvcOwnerReference(c, ctx, pod, sc.isFixed),
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeLoadBalancer,
ExternalTrafficPolicy: sc.externalTrafficPolicyType,
PublishNotReadyAddresses: sc.publishNotReadyAddresses,
Selector: map[string]string{
SvcSelectorKey: pod.GetName(),
},
Ports: svcPorts,
},
}
return svc, nil
}
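// getSvcOwnerReference makes the pod own the Service by default; for fixed networks ownership is transferred to the GameServerSet so the Service survives pod recreation.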
func getSvcOwnerReference(c client.Client, ctx context.Context, pod *corev1.Pod, isFixed bool) []metav1.OwnerReference {
ownerReferences := []metav1.OwnerReference{
{
APIVersion: pod.APIVersion,
Kind: pod.Kind,
Name: pod.GetName(),
UID: pod.GetUID(),
Controller: ptr.To[bool](true),
BlockOwnerDeletion: ptr.To[bool](true),
},
}
if isFixed {
gss, err := util.GetGameServerSetOfPod(pod, c, ctx)
if err == nil {
ownerReferences = []metav1.OwnerReference{
{
APIVersion: gss.APIVersion,
Kind: gss.Kind,
Name: gss.GetName(),
UID: gss.GetUID(),
Controller: ptr.To[bool](true),
BlockOwnerDeletion: ptr.To[bool](true),
},
}
}
}
return ownerReferences
}

View File

@ -0,0 +1,45 @@
package hwcloud
import (
"github.com/openkruise/kruise-game/cloudprovider"
"k8s.io/klog/v2"
)
const (
HwCloud = "HwCloud"
)
var (
hwCloudProvider = &Provider{
plugins: make(map[string]cloudprovider.Plugin),
}
)
type Provider struct {
plugins map[string]cloudprovider.Plugin
}
func (ap *Provider) Name() string {
return HwCloud
}
func (ap *Provider) ListPlugins() (map[string]cloudprovider.Plugin, error) {
if ap.plugins == nil {
return make(map[string]cloudprovider.Plugin), nil
}
return ap.plugins, nil
}
// registerPlugin registers a network plugin to the HuaweiCloud provider
func (ap *Provider) registerPlugin(plugin cloudprovider.Plugin) {
name := plugin.Name()
if name == "" {
klog.Fatal("empty plugin name")
}
ap.plugins[name] = plugin
}
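// NewHwCloudProvider returns the shared HuaweiCloud provider instance.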
func NewHwCloudProvider() (cloudprovider.CloudProvider, error) {
return hwCloudProvider, nil
}

View File

@ -18,6 +18,11 @@ package kubernetes
import (
"context"
"net"
"strconv"
"strings"
"sync"
gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/cloudprovider"
"github.com/openkruise/kruise-game/cloudprovider/errors"
@ -29,11 +34,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
log "k8s.io/klog/v2"
"net"
"sigs.k8s.io/controller-runtime/pkg/client"
"strconv"
"strings"
"sync"
)
const (
@ -42,6 +43,8 @@ const (
// Its corresponding value format is as follows, containerName:port1/protocol1,port2/protocol2,... e.g. game-server:25565/TCP
// When no protocol is specified, TCP is used by default
ContainerPortsKey = "ContainerPorts"
PortSameAsHost = "SameAsHost"
ProtocolTCPUDP = "TCPUDP"
)
type HostPortPlugin struct {
@ -103,12 +106,30 @@ func (hpp *HostPortPlugin) OnPodAdded(c client.Client, pod *corev1.Pod, ctx cont
if ports, ok := containerPortsMap[container.Name]; ok {
containerPorts := container.Ports
for i, port := range ports {
containerPort := corev1.ContainerPort{
ContainerPort: port,
HostPort: hostPorts[numToAlloc-1],
Protocol: containerProtocolsMap[container.Name][i],
// -1 means same as host
if port == -1 {
port = hostPorts[numToAlloc-1]
}
protocol := containerProtocolsMap[container.Name][i]
hostPort := hostPorts[numToAlloc-1]
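// TCPUDP expands into two container ports that share the same host port, one TCP and one UDP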
if protocol == ProtocolTCPUDP {
containerPorts = append(containerPorts,
corev1.ContainerPort{
ContainerPort: port,
HostPort: hostPort,
Protocol: corev1.ProtocolTCP,
}, corev1.ContainerPort{
ContainerPort: port,
HostPort: hostPort,
Protocol: corev1.ProtocolUDP,
})
} else {
containerPorts = append(containerPorts, corev1.ContainerPort{
ContainerPort: port,
HostPort: hostPort,
Protocol: protocol,
})
}
containerPorts = append(containerPorts, containerPort)
numToAlloc--
}
containers[cIndex].Ports = containerPorts
@ -125,6 +146,9 @@ func (hpp *HostPortPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx co
Name: pod.Spec.NodeName,
}, node)
if err != nil {
if k8serrors.IsNotFound(err) {
return pod, nil
}
return pod, errors.NewPluginError(errors.ApiCallError, err.Error())
}
nodeIp := getAddress(node)
@ -338,10 +362,16 @@ func parseConfig(conf []gamekruiseiov1alpha1.NetworkConfParams, pod *corev1.Pod)
for _, portString := range strings.Split(cpSlice[1], ",") {
ppSlice := strings.Split(portString, "/")
// handle port
port, err := strconv.ParseInt(ppSlice[0], 10, 32)
var port int64
var err error
if ppSlice[0] == PortSameAsHost {
port = -1
} else {
port, err = strconv.ParseInt(ppSlice[0], 10, 32)
if err != nil {
continue
}
}
numToAlloc++
ports = append(ports, int32(port))
// handle protocol

View File

@ -18,6 +18,8 @@ package manager
import (
"context"
"github.com/openkruise/kruise-game/cloudprovider/hwcloud"
"github.com/openkruise/kruise-game/cloudprovider/jdcloud"
"github.com/openkruise/kruise-game/apis/v1alpha1"
@ -161,5 +163,17 @@ func NewProviderManager() (*ProviderManager, error) {
}
}
if configs.HwCloudOptions.Valid() && configs.HwCloudOptions.Enabled() {
// build and register hw cloud provider
hp, err := hwcloud.NewHwCloudProvider()
if err != nil {
log.Errorf("Failed to initialize hwcloud provider.because of %s", err.Error())
} else {
pm.RegisterCloudProvider(hp, configs.HwCloudOptions)
}
} else {
log.Warningf("HwCloudProvider is not enabled, enable flag is %v, config valid flag is %v", configs.HwCloudOptions.Enabled(), configs.HwCloudOptions.Valid())
}
return pm, nil
}

View File

@ -39,13 +39,7 @@ func (o AlibabaCloudOptions) Valid() bool {
return false
}
}
if int(nlbOptions.MaxPort-nlbOptions.MinPort)-len(nlbOptions.BlockPorts) >= 500 {
return false
}
if nlbOptions.MinPort <= 0 {
return false
}
return true
return nlbOptions.MinPort > 0
}
func (o AlibabaCloudOptions) Enabled() bool {

View File

@ -0,0 +1,32 @@
package options
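// HwCloudOptions configures the HuaweiCloud provider; ELBOptions bounds the ELB port range used by the HwCloud-ELB plugin.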
type HwCloudOptions struct {
Enable bool `toml:"enable"`
ELBOptions ELBOptions `toml:"elb"`
}
type ELBOptions struct {
MaxPort int32 `toml:"max_port"`
MinPort int32 `toml:"min_port"`
BlockPorts []int32 `toml:"block_ports"`
}
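// Valid checks the ELB port range: block ports must fall strictly inside (MinPort, MaxPort), the allocatable range must not exceed 200 ports, and MinPort must be positive.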
func (o HwCloudOptions) Valid() bool {
elbOptions := o.ELBOptions
for _, blockPort := range elbOptions.BlockPorts {
if blockPort >= elbOptions.MaxPort || blockPort <= elbOptions.MinPort {
return false
}
}
if int(elbOptions.MaxPort-elbOptions.MinPort)-len(elbOptions.BlockPorts) > 200 {
return false
}
if elbOptions.MinPort <= 0 {
return false
}
return true
}
func (o HwCloudOptions) Enabled() bool {
return o.Enable
}

View File

@ -2,28 +2,12 @@ package options
type TencentCloudOptions struct {
Enable bool `toml:"enable"`
CLBOptions TencentCloudCLBOptions `toml:"clb"`
}
type TencentCloudCLBOptions struct {
MaxPort int32 `toml:"max_port"`
MinPort int32 `toml:"min_port"`
}
func (o TencentCloudOptions) Valid() bool {
clbOptions := o.CLBOptions
if clbOptions.MaxPort > 65535 {
return false
}
if clbOptions.MinPort < 1 {
return false
}
return true
}
func (o TencentCloudOptions) Enabled() bool {
return o.Enable
}
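// Valid always returns true: the upgraded CLB plugin no longer relies on a port range in the provider options.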
func (o TencentCloudOptions) Valid() bool {
return true
}

View File

@ -1,93 +0,0 @@
/*
Copyright 2024.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// DedicatedCLBListenerSpec defines the desired state of DedicatedCLBListener
type DedicatedCLBListenerSpec struct {
// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Value is immutable"
LbId string `json:"lbId"`
// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Value is immutable"
// +optional
LbRegion string `json:"lbRegion,omitempty"`
// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Value is immutable"
LbPort int64 `json:"lbPort"`
// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="Value is immutable"
// +kubebuilder:validation:Enum=TCP;UDP
Protocol string `json:"protocol"`
// +optional
ExtensiveParameters string `json:"extensiveParameters,omitempty"`
// +optional
TargetPod *TargetPod `json:"targetPod,omitempty"`
}
type TargetPod struct {
PodName string `json:"podName"`
TargetPort int64 `json:"targetPort"`
}
// DedicatedCLBListenerStatus defines the observed state of DedicatedCLBListener
type DedicatedCLBListenerStatus struct {
ListenerId string `json:"listenerId,omitempty"`
// +kubebuilder:validation:Enum=Bound;Available;Pending;Failed;Deleting
State string `json:"state,omitempty"`
Message string `json:"message,omitempty"`
Address string `json:"address,omitempty"`
}
const (
DedicatedCLBListenerStateBound = "Bound"
DedicatedCLBListenerStateAvailable = "Available"
DedicatedCLBListenerStatePending = "Pending"
DedicatedCLBListenerStateFailed = "Failed"
DedicatedCLBListenerStateDeleting = "Deleting"
)
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="LbId",type="string",JSONPath=".spec.lbId",description="CLB ID"
// +kubebuilder:printcolumn:name="LbPort",type="integer",JSONPath=".spec.lbPort",description="Port of CLB Listener"
// +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.targetPod.podName",description="Pod name of target pod"
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state",description="State of the dedicated clb listener"
// DedicatedCLBListener is the Schema for the dedicatedclblisteners API
type DedicatedCLBListener struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec DedicatedCLBListenerSpec `json:"spec,omitempty"`
Status DedicatedCLBListenerStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// DedicatedCLBListenerList contains a list of DedicatedCLBListener
type DedicatedCLBListenerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []DedicatedCLBListener `json:"items"`
}
func init() {
SchemeBuilder.Register(&DedicatedCLBListener{}, &DedicatedCLBListenerList{})
}

View File

@ -1,4 +0,0 @@
// Package v1alpha1 contains API Schema definitions for the tencentcloud v1alpha1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=networking.cloud.tencent.com
package v1alpha1

View File

@ -1,38 +0,0 @@
/*
Copyright 2024.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 contains API Schema definitions for the networking v1alpha1 API group
// +kubebuilder:validation:Required
// +kubebuilder:object:generate=true
// +groupName=networking.cloud.tencent.com
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "networking.cloud.tencent.com", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)

View File

@ -1,135 +0,0 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DedicatedCLBListener) DeepCopyInto(out *DedicatedCLBListener) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedCLBListener.
func (in *DedicatedCLBListener) DeepCopy() *DedicatedCLBListener {
if in == nil {
return nil
}
out := new(DedicatedCLBListener)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DedicatedCLBListener) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DedicatedCLBListenerList) DeepCopyInto(out *DedicatedCLBListenerList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]DedicatedCLBListener, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedCLBListenerList.
func (in *DedicatedCLBListenerList) DeepCopy() *DedicatedCLBListenerList {
if in == nil {
return nil
}
out := new(DedicatedCLBListenerList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DedicatedCLBListenerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DedicatedCLBListenerSpec) DeepCopyInto(out *DedicatedCLBListenerSpec) {
*out = *in
if in.TargetPod != nil {
in, out := &in.TargetPod, &out.TargetPod
*out = new(TargetPod)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedCLBListenerSpec.
func (in *DedicatedCLBListenerSpec) DeepCopy() *DedicatedCLBListenerSpec {
if in == nil {
return nil
}
out := new(DedicatedCLBListenerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DedicatedCLBListenerStatus) DeepCopyInto(out *DedicatedCLBListenerStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedCLBListenerStatus.
func (in *DedicatedCLBListenerStatus) DeepCopy() *DedicatedCLBListenerStatus {
if in == nil {
return nil
}
out := new(DedicatedCLBListenerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TargetPod) DeepCopyInto(out *TargetPod) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetPod.
func (in *TargetPod) DeepCopy() *TargetPod {
if in == nil {
return nil
}
out := new(TargetPod)
in.DeepCopyInto(out)
return out
}

View File

@ -2,26 +2,21 @@ package tencentcloud
import (
"context"
"encoding/json"
"fmt"
"reflect"
"slices"
"strconv"
"strings"
"sync"
"github.com/openkruise/kruise-game/pkg/util"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/intstr"
kruisev1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/cloudprovider"
cperrors "github.com/openkruise/kruise-game/cloudprovider/errors"
provideroptions "github.com/openkruise/kruise-game/cloudprovider/options"
"github.com/openkruise/kruise-game/cloudprovider/tencentcloud/apis/v1alpha1"
"github.com/openkruise/kruise-game/cloudprovider/utils"
"github.com/openkruise/kruise-game/pkg/util"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
log "k8s.io/klog/v2"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@ -30,21 +25,13 @@ const (
AliasCLB = "CLB-Network"
ClbIdsConfigName = "ClbIds"
PortProtocolsConfigName = "PortProtocols"
MinPortConfigName = "MinPort"
MaxPortConfigName = "MaxPort"
OwnerPodKey = "game.kruise.io/owner-pod"
TargetPortKey = "game.kruise.io/target-port"
CLBPortMappingAnnotation = "networking.cloud.tencent.com/clb-port-mapping"
EnableCLBPortMappingAnnotation = "networking.cloud.tencent.com/enable-clb-port-mapping"
CLBPortMappingResultAnnotation = "networking.cloud.tencent.com/clb-port-mapping-result"
CLBPortMappingStatuslAnnotation = "networking.cloud.tencent.com/clb-port-mapping-status"
)
type portAllocated map[int32]bool
type ClbPlugin struct {
maxPort int32
minPort int32
cache map[string]portAllocated
podAllocate map[string][]string
mutex sync.RWMutex
}
type ClbPlugin struct{}
type portProtocol struct {
port int
@ -52,10 +39,15 @@ type portProtocol struct {
}
type clbConfig struct {
lbIds []string
targetPorts []portProtocol
}
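// portMapping is one entry parsed from the CLB port-mapping result annotation on the pod.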
type portMapping struct {
Port int `json:"port"`
Protocol string `json:"protocol"`
Address string `json:"address"`
}
func (p *ClbPlugin) Name() string {
return ClbNetwork
}
@ -65,83 +57,27 @@ func (p *ClbPlugin) Alias() string {
}
func (p *ClbPlugin) Init(c client.Client, options cloudprovider.CloudProviderOptions, ctx context.Context) error {
p.mutex.Lock()
defer p.mutex.Unlock()
clbOptions := options.(provideroptions.TencentCloudOptions).CLBOptions
p.minPort = clbOptions.MinPort
p.maxPort = clbOptions.MaxPort
listenerList := &v1alpha1.DedicatedCLBListenerList{}
err := c.List(ctx, listenerList)
if err != nil {
return err
}
p.cache, p.podAllocate = initLbCache(listenerList.Items, p.minPort, p.maxPort)
log.Infof("[%s] podAllocate cache complete initialization: %v", ClbNetwork, p.podAllocate)
return nil
}
func initLbCache(listenerList []v1alpha1.DedicatedCLBListener, minPort, maxPort int32) (map[string]portAllocated, map[string][]string) {
newCache := make(map[string]portAllocated)
newPodAllocate := make(map[string][]string)
for _, lis := range listenerList {
podName, exist := lis.GetLabels()[OwnerPodKey]
if !exist || podName == "" {
continue
}
if lis.Spec.LbPort > int64(maxPort) || lis.Spec.LbPort < int64(minPort) {
continue
}
lbId := lis.Spec.LbId
if newCache[lbId] == nil {
newCache[lbId] = make(portAllocated, maxPort-minPort)
for i := minPort; i < maxPort; i++ {
newCache[lbId][i] = false
}
}
newCache[lbId][int32(lis.Spec.LbPort)] = true
podKey := lis.GetNamespace() + "/" + podName
newPodAllocate[podKey] = append(newPodAllocate[podKey], fmt.Sprintf("%s:%d", lbId, lis.Spec.LbPort))
}
return newCache, newPodAllocate
}
func (p *ClbPlugin) OnPodAdded(c client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
return pod, nil
}
func (p *ClbPlugin) deleteListener(ctx context.Context, c client.Client, lis *v1alpha1.DedicatedCLBListener) cperrors.PluginError {
err := c.Delete(ctx, lis)
if err != nil {
return cperrors.NewPluginError(cperrors.ApiCallError, err.Error())
}
if pm := p.cache[lis.Spec.LbId]; pm != nil {
pm[int32(lis.Spec.LbPort)] = false
}
var podName string
if targetPod := lis.Spec.TargetPod; targetPod != nil {
podName = targetPod.PodName
} else if lis.Labels != nil && lis.Labels[TargetPortKey] != "" && lis.Labels[OwnerPodKey] != "" {
podName = lis.Labels[OwnerPodKey]
} else {
return nil
}
target := fmt.Sprintf("%s/%d", lis.Spec.LbId, lis.Spec.LbPort)
p.podAllocate[podName] = slices.DeleteFunc(p.podAllocate[podName], func(el string) bool {
return el == target
})
return nil
return p.reconcile(c, pod, ctx)
}
func (p *ClbPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
if pod.DeletionTimestamp != nil {
return pod, nil
}
return p.reconcile(c, pod, ctx)
}
// reconcile ensures the CLB port-mapping annotations on the pod are correct and, once the mapping status is Ready, publishes the mapped addresses to the network status.
func (p *ClbPlugin) reconcile(c client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
networkManager := utils.NewNetworkManager(pod, c)
networkStatus, _ := networkManager.GetNetworkStatus()
if networkStatus == nil {
pod, err := networkManager.UpdateNetworkStatus(kruisev1alpha1.NetworkStatus{
CurrentNetworkState: kruisev1alpha1.NetworkNotReady,
CurrentNetworkState: kruisev1alpha1.NetworkWaiting,
}, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
@ -150,263 +86,101 @@ func (p *ClbPlugin) OnPodUpdated(c client.Client, pod *corev1.Pod, ctx context.C
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ParameterError)
}
gss, err := util.GetGameServerSetOfPod(pod, c, ctx)
if err != nil && !errors.IsNotFound(err) {
if err != nil && !apierrors.IsNotFound(err) {
return pod, cperrors.ToPluginError(err, cperrors.ApiCallError)
}
// get related dedicated clb listeners
listeners := &v1alpha1.DedicatedCLBListenerList{}
if err := c.List(
ctx, listeners,
client.InNamespace(pod.Namespace),
client.MatchingLabels{
OwnerPodKey: pod.Name,
kruisev1alpha1.GameServerOwnerGssKey: gss.Name,
},
); err != nil {
return pod, cperrors.NewPluginError(cperrors.ApiCallError, err.Error())
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
// reconcile
lisMap := make(map[portProtocol]v1alpha1.DedicatedCLBListener)
for _, lis := range listeners.Items {
// ignore deleting dedicated clb listener
if lis.DeletionTimestamp != nil {
continue
pod.Annotations[CLBPortMappingAnnotation] = getClbPortMappingAnnotation(clbConf, gss)
enableCLBPortMapping := "true"
if networkManager.GetNetworkDisabled() {
enableCLBPortMapping = "false"
}
// old dedicated clb listener remain
if lis.OwnerReferences[0].Kind == "Pod" && lis.OwnerReferences[0].UID != pod.UID {
log.Infof("[%s] waitting old dedicated clb listener %s/%s deleted. old owner pod uid is %s, but now is %s", ClbNetwork, lis.Namespace, lis.Name, lis.OwnerReferences[0].UID, pod.UID)
return pod, nil
pod.Annotations[EnableCLBPortMappingAnnotation] = enableCLBPortMapping
if pod.Annotations[CLBPortMappingStatuslAnnotation] == "Ready" {
if result := pod.Annotations[CLBPortMappingResultAnnotation]; result != "" {
mappings := []portMapping{}
if err := json.Unmarshal([]byte(result), &mappings); err != nil {
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
targetPod := lis.Spec.TargetPod
if targetPod != nil && targetPod.PodName == pod.Name {
port := portProtocol{
port: int(targetPod.TargetPort),
protocol: lis.Spec.Protocol,
}
lisMap[port] = lis
} else if targetPod == nil && (lis.Labels != nil && lis.Labels[TargetPortKey] != "") {
targetPort, err := strconv.Atoi(lis.Labels[TargetPortKey])
if err != nil {
log.Warningf("[%s] invalid dedicated clb listener target port annotation %s/%s: %s", ClbNetwork, lis.Namespace, lis.Name, err.Error())
continue
}
port := portProtocol{
port: targetPort,
protocol: lis.Spec.Protocol,
}
// lower priority than targetPod is not nil
if _, exists := lisMap[port]; !exists {
lisMap[port] = lis
}
}
}
if len(mappings) != 0 {
internalAddresses := make([]kruisev1alpha1.NetworkAddress, 0)
externalAddresses := make([]kruisev1alpha1.NetworkAddress, 0)
for _, port := range clbConf.targetPorts {
if lis, ok := lisMap[port]; !ok { // no dedicated clb listener, try to create one
if networkManager.GetNetworkDisabled() {
for _, mapping := range mappings {
ss := strings.Split(mapping.Address, ":")
if len(ss) != 2 {
continue
}
// ensure not ready while creating the listener
networkStatus.CurrentNetworkState = kruisev1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.InternalError, err.Error())
}
// allocate and create new listener bound to pod
newLis, err := p.consLis(clbConf, pod, port, gss.Name)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
err = c.Create(ctx, newLis)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.ApiCallError, err.Error())
}
} else { // already created dedicated clb listener bound to pod
delete(lisMap, port)
if networkManager.GetNetworkDisabled() { // disable network
// deregister pod if networkDisabled is true
if lis.Spec.TargetPod != nil {
lis.Spec.TargetPod = nil
err = c.Update(ctx, &lis)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ApiCallError)
}
}
} else { // enable network
if lis.Spec.TargetPod == nil { // ensure target pod is bound to dedicated clb listener
lis.Spec.TargetPod = &v1alpha1.TargetPod{
PodName: pod.Name,
TargetPort: int64(port.port),
}
err = c.Update(ctx, &lis)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.ApiCallError)
}
} else {
// recreate dedicated clb listener if necessary (config changed)
if !slices.Contains(clbConf.lbIds, lis.Spec.LbId) || lis.Spec.LbPort > int64(p.maxPort) || lis.Spec.LbPort < int64(p.minPort) || lis.Spec.Protocol != port.protocol || lis.Spec.TargetPod.TargetPort != int64(port.port) {
// ensure not ready while recreating the listener
networkStatus.CurrentNetworkState = kruisev1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.InternalError, err.Error())
}
// delete old listener
err := p.deleteListener(ctx, c, &lis)
if err != nil {
return pod, err
}
// allocate and create new listener bound to pod
if newLis, err := p.consLis(clbConf, pod, port, gss.Name); err != nil {
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
} else {
err := c.Create(ctx, newLis)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.ApiCallError, err.Error())
}
}
} else { // dedicated clb listener is desired, check status
if lis.Status.State == v1alpha1.DedicatedCLBListenerStateBound && lis.Status.Address != "" { // network ready
ss := strings.Split(lis.Status.Address, ":")
if len(ss) != 2 {
return pod, cperrors.NewPluginError(cperrors.InternalError, fmt.Sprintf("invalid dedicated clb listener address %s", lis.Status.Address))
}
lbIP := ss[0]
lbPort, err := strconv.Atoi(ss[1])
if err != nil {
return pod, cperrors.NewPluginError(cperrors.InternalError, fmt.Sprintf("invalid dedicated clb listener port %s", ss[1]))
continue
}
instrIPort := intstr.FromInt(int(port.port))
port := mapping.Port
instrIPort := intstr.FromInt(port)
instrEPort := intstr.FromInt(lbPort)
portName := instrIPort.String()
protocol := corev1.Protocol(mapping.Protocol)
internalAddresses = append(internalAddresses, kruisev1alpha1.NetworkAddress{
IP: pod.Status.PodIP,
Ports: []kruisev1alpha1.NetworkPort{
{
Name: instrIPort.String(),
Name: portName,
Port: &instrIPort,
Protocol: corev1.Protocol(port.protocol),
Protocol: protocol,
},
},
})
externalAddresses = append(externalAddresses, kruisev1alpha1.NetworkAddress{
IP: ss[0],
IP: lbIP,
Ports: []kruisev1alpha1.NetworkPort{
{
Name: instrIPort.String(),
Name: portName,
Port: &instrEPort,
Protocol: corev1.Protocol(port.protocol),
Protocol: protocol,
},
},
})
} else { // network not ready
networkStatus.CurrentNetworkState = kruisev1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.InternalError, err.Error())
}
}
}
}
}
}
}
// other dedicated clb listener is not used, delete it
for _, lis := range lisMap {
err := p.deleteListener(ctx, c, &lis)
if err != nil {
return pod, err
}
}
// set network status to ready when all lb port is ready
if len(externalAddresses) == len(clbConf.targetPorts) {
// change network status to ready if necessary
if !reflect.DeepEqual(externalAddresses, networkStatus.ExternalAddresses) || networkStatus.CurrentNetworkState != kruisev1alpha1.NetworkReady {
networkStatus.InternalAddresses = internalAddresses
networkStatus.ExternalAddresses = externalAddresses
networkStatus.CurrentNetworkState = kruisev1alpha1.NetworkReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.InternalError, err.Error())
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
}
}
}
}
return pod, nil
}
func (p *ClbPlugin) OnPodDeleted(c client.Client, pod *corev1.Pod, ctx context.Context) cperrors.PluginError {
p.deAllocate(pod.GetNamespace() + "/" + pod.GetName())
return nil
}
func (p *ClbPlugin) consLis(clbConf *clbConfig, pod *corev1.Pod, port portProtocol, gssName string) (*v1alpha1.DedicatedCLBListener, error) {
lbId, lbPort := p.allocate(clbConf.lbIds, pod.GetNamespace()+"/"+pod.GetName())
if lbId == "" {
return nil, fmt.Errorf("there are no avaialable ports for %v", clbConf.lbIds)
}
lis := &v1alpha1.DedicatedCLBListener{
ObjectMeta: metav1.ObjectMeta{
GenerateName: pod.Name + "-",
Namespace: pod.Namespace,
Labels: map[string]string{
OwnerPodKey: pod.Name, // used to select pod related dedicated clb listener
TargetPortKey: strconv.Itoa(port.port), // used to recover clb pod binding when networkDisabled set from true to false
kruisev1alpha1.GameServerOwnerGssKey: gssName,
},
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: pod.APIVersion,
Kind: pod.Kind,
Name: pod.GetName(),
UID: pod.GetUID(),
Controller: ptr.To[bool](true),
BlockOwnerDeletion: ptr.To[bool](true),
},
},
},
Spec: v1alpha1.DedicatedCLBListenerSpec{
LbId: lbId,
LbPort: int64(lbPort),
Protocol: port.protocol,
TargetPod: &v1alpha1.TargetPod{
PodName: pod.Name,
TargetPort: int64(port.port),
},
},
}
return lis, nil
}
func init() {
clbPlugin := ClbPlugin{
mutex: sync.RWMutex{},
}
clbPlugin := ClbPlugin{}
tencentCloudProvider.registerPlugin(&clbPlugin)
}
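// getClbPortMappingAnnotation renders the desired CLB port mapping, one "<port> <protocol> <pool>" line per target port, with the pool name derived from the GameServerSet namespace and name.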
func getClbPortMappingAnnotation(clbConf *clbConfig, gss *kruisev1alpha1.GameServerSet) string {
poolName := fmt.Sprintf("%s-%s", gss.Namespace, gss.Name)
var buf strings.Builder
for _, pp := range clbConf.targetPorts {
buf.WriteString(fmt.Sprintf("%d %s %s\n", pp.port, pp.protocol, poolName))
}
return buf.String()
}
var ErrMissingPortProtocolsConfig = fmt.Errorf("missing %s config", PortProtocolsConfigName)
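// parseLbConfig parses ClbIds and PortProtocols into a clbConfig; PortProtocols is required.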
func parseLbConfig(conf []kruisev1alpha1.NetworkConfParams) (*clbConfig, error) {
var lbIds []string
ports := []portProtocol{}
for _, c := range conf {
switch c.Name {
case ClbIdsConfigName:
for _, clbId := range strings.Split(c.Value, ",") {
if clbId != "" {
lbIds = append(lbIds, clbId)
}
}
case PortProtocolsConfigName:
for _, pp := range strings.Split(c.Value, ",") {
ppSlice := strings.Split(pp, "/")
@ -425,67 +199,10 @@ func parseLbConfig(conf []kruisev1alpha1.NetworkConfParams) (*clbConfig, error)
}
}
}
if len(ports) == 0 {
return nil, ErrMissingPortProtocolsConfig
}
return &clbConfig{
lbIds: lbIds,
targetPorts: ports,
}, nil
}
func (p *ClbPlugin) allocate(lbIds []string, podKey string) (string, int32) {
p.mutex.Lock()
defer p.mutex.Unlock()
var lbId string
var port int32
// find avaialable port
for _, clbId := range lbIds {
for i := p.minPort; i < p.maxPort; i++ {
if !p.cache[clbId][i] {
lbId = clbId
port = i
break
}
}
}
// update cache
if lbId != "" {
if p.cache[lbId] == nil { // init lb cache if not exist
p.cache[lbId] = make(portAllocated, p.maxPort-p.minPort)
for i := p.minPort; i < p.maxPort; i++ {
p.cache[lbId][i] = false
}
}
p.cache[lbId][port] = true
p.podAllocate[podKey] = append(p.podAllocate[podKey], fmt.Sprintf("%s:%d", lbId, port))
log.Infof("pod %s allocate clb %s port %d", podKey, lbId, port)
}
return lbId, port
}
func (p *ClbPlugin) deAllocate(podKey string) {
p.mutex.Lock()
defer p.mutex.Unlock()
allocatedPorts, exist := p.podAllocate[podKey]
if !exist {
return
}
for _, port := range allocatedPorts {
ss := strings.Split(port, ":")
if len(ss) != 2 {
log.Errorf("bad allocated port cache format %s", port)
continue
}
lbId := ss[0]
lbPort, err := strconv.Atoi(ss[1])
if err != nil {
log.Errorf("failed to parse allocated port %s: %s", port, err.Error())
continue
}
p.cache[lbId][int32(lbPort)] = false
log.Infof("pod %s deallocate clb %s ports %d", podKey, lbId, lbPort)
}
delete(p.podAllocate, podKey)
}

View File

@ -2,50 +2,11 @@ package tencentcloud
import (
"reflect"
"sync"
"testing"
kruisev1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/cloudprovider/tencentcloud/apis/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestAllocateDeAllocate(t *testing.T) {
test := struct {
lbIds []string
clb *ClbPlugin
podKey string
}{
lbIds: []string{"lb-xxx"},
clb: &ClbPlugin{
maxPort: int32(712),
minPort: int32(512),
cache: make(map[string]portAllocated),
podAllocate: make(map[string][]string),
mutex: sync.RWMutex{},
},
podKey: "xxx/xxx",
}
lbId, port := test.clb.allocate(test.lbIds, test.podKey)
if _, exist := test.clb.podAllocate[test.podKey]; !exist {
t.Errorf("podAllocate[%s] is empty after allocated", test.podKey)
}
if port > test.clb.maxPort || port < test.clb.minPort {
t.Errorf("allocate port %d, unexpected", port)
}
if test.clb.cache[lbId][port] == false {
t.Errorf("Allocate port %d failed", port)
}
test.clb.deAllocate(test.podKey)
if test.clb.cache[lbId][port] == true {
t.Errorf("deAllocate port %d failed", port)
}
if _, exist := test.clb.podAllocate[test.podKey]; exist {
t.Errorf("podAllocate[%s] is not empty after deallocated", test.podKey)
}
}
func TestParseLbConfig(t *testing.T) {
tests := []struct {
conf []kruisev1alpha1.NetworkConfParams
@ -63,7 +24,6 @@ func TestParseLbConfig(t *testing.T) {
},
},
clbConfig: &clbConfig{
lbIds: []string{"xxx-A"},
targetPorts: []portProtocol{
{
port: 80,
@ -84,7 +44,6 @@ func TestParseLbConfig(t *testing.T) {
},
},
clbConfig: &clbConfig{
lbIds: []string{"xxx-A", "xxx-B"},
targetPorts: []portProtocol{
{
port: 81,
@ -113,78 +72,3 @@ func TestParseLbConfig(t *testing.T) {
}
}
}
func TestInitLbCache(t *testing.T) {
test := struct {
listenerList []v1alpha1.DedicatedCLBListener
minPort int32
maxPort int32
cache map[string]portAllocated
podAllocate map[string][]string
}{
minPort: 512,
maxPort: 712,
cache: map[string]portAllocated{
"xxx-A": map[int32]bool{
666: true,
},
"xxx-B": map[int32]bool{
555: true,
},
},
podAllocate: map[string][]string{
"ns-0/name-0": {"xxx-A:666"},
"ns-1/name-1": {"xxx-B:555"},
},
listenerList: []v1alpha1.DedicatedCLBListener{
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
OwnerPodKey: "name-0",
},
Namespace: "ns-0",
Name: "name-0-xxx",
},
Spec: v1alpha1.DedicatedCLBListenerSpec{
LbId: "xxx-A",
LbPort: 666,
Protocol: "TCP",
TargetPod: &v1alpha1.TargetPod{
PodName: "name-0",
TargetPort: 80,
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
OwnerPodKey: "name-1",
},
Namespace: "ns-1",
Name: "name-1-xxx",
},
Spec: v1alpha1.DedicatedCLBListenerSpec{
LbId: "xxx-B",
LbPort: 555,
Protocol: "TCP",
TargetPod: &v1alpha1.TargetPod{
PodName: "name-1",
TargetPort: 80,
},
},
},
},
}
actualCache, actualPodAllocate := initLbCache(test.listenerList, test.minPort, test.maxPort)
for lb, pa := range test.cache {
for port, isAllocated := range pa {
if actualCache[lb][port] != isAllocated {
t.Errorf("lb %s port %d isAllocated, expect: %t, actual: %t", lb, port, isAllocated, actualCache[lb][port])
}
}
}
if !reflect.DeepEqual(actualPodAllocate, test.podAllocate) {
t.Errorf("podAllocate expect %v, but actully got %v", test.podAllocate, actualPodAllocate)
}
}

View File

@ -56,6 +56,8 @@ const (
ClbSchedulerKey = "service.beta.kubernetes.io/volcengine-loadbalancer-scheduler"
ClbSchedulerWRR = "wrr"
SvcSelectorKey = "statefulset.kubernetes.io/pod-name"
EnableClbScatterConfigName = "EnableClbScatter"
EnableMultiIngressConfigName = "EnableMultiIngress"
)
type portAllocated map[int32]bool
@ -67,6 +69,7 @@ type ClbPlugin struct {
cache map[string]portAllocated
podAllocate map[string]string
mutex sync.RWMutex
lastScatterIdx int // added: round-robin index for scattering allocations across CLBs
}
type clbConfig struct {
@ -76,6 +79,8 @@ type clbConfig struct {
isFixed bool
annotations map[string]string
allocateLoadBalancerNodePorts bool
enableClbScatter bool // added: toggle for scattering allocations across CLBs
enableMultiIngress bool // added: toggle for multiple ingress IPs
}
func (c *ClbPlugin) Name() string {
@ -87,10 +92,12 @@ func (c *ClbPlugin) Alias() string {
}
func (c *ClbPlugin) Init(client client.Client, options cloudprovider.CloudProviderOptions, ctx context.Context) error {
log.Infof("[CLB] Init called, options: %+v", options)
c.mutex.Lock()
defer c.mutex.Unlock()
clbOptions, ok := options.(provideroptions.VolcengineOptions)
if !ok {
log.Errorf("[CLB] failed to convert options to clbOptions: %+v", options)
return cperrors.ToPluginError(fmt.Errorf("failed to convert options to clbOptions"), cperrors.InternalError)
}
c.minPort = clbOptions.CLBOptions.MinPort
@ -100,10 +107,12 @@ func (c *ClbPlugin) Init(client client.Client, options cloudprovider.CloudProvid
svcList := &corev1.ServiceList{}
err := client.List(ctx, svcList)
if err != nil {
log.Errorf("[CLB] client.List failed: %v", err)
return err
}
c.cache, c.podAllocate = initLbCache(svcList.Items, c.minPort, c.maxPort, c.blockPorts)
log.Infof("[CLB] Init finished, minPort=%d, maxPort=%d, blockPorts=%v, svcCount=%d", c.minPort, c.maxPort, c.blockPorts, len(svcList.Items))
return nil
}
@ -142,24 +151,35 @@ func initLbCache(svcList []corev1.Service, minPort, maxPort int32, blockPorts []
}
func (c *ClbPlugin) OnPodAdded(client client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
log.Infof("[CLB] OnPodAdded called for pod %s/%s", pod.GetNamespace(), pod.GetName())
return pod, nil
}
func (c *ClbPlugin) OnPodUpdated(client client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, cperrors.PluginError) {
log.Infof("[CLB] OnPodUpdated called for pod %s/%s", pod.GetNamespace(), pod.GetName())
networkManager := utils.NewNetworkManager(pod, client)
networkStatus, err := networkManager.GetNetworkStatus()
if err != nil {
log.Errorf("[CLB] GetNetworkStatus failed: %v", err)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
networkConfig := networkManager.GetNetworkConfig()
log.V(4).Infof("[CLB] NetworkConfig: %+v", networkConfig)
config := parseLbConfig(networkConfig)
log.V(4).Infof("[CLB] Parsed clbConfig: %+v", config)
if networkStatus == nil {
log.Infof("[CLB] networkStatus is nil, set NetworkNotReady for pod %s/%s", pod.GetNamespace(), pod.GetName())
pod, err := networkManager.UpdateNetworkStatus(gamekruiseiov1alpha1.NetworkStatus{
CurrentNetworkState: gamekruiseiov1alpha1.NetworkNotReady,
}, pod)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
networkStatus = &gamekruiseiov1alpha1.NetworkStatus{
CurrentNetworkState: gamekruiseiov1alpha1.NetworkNotReady,
}
}
// get svc
svc := &corev1.Service{}
@ -169,35 +189,53 @@ func (c *ClbPlugin) OnPodUpdated(client client.Client, pod *corev1.Pod, ctx cont
}, svc)
if err != nil {
if errors.IsNotFound(err) {
return pod, cperrors.ToPluginError(client.Create(ctx, c.consSvc(config, pod, client, ctx)), cperrors.ApiCallError)
log.Infof("[CLB] Service not found for pod %s/%s, will create new svc", pod.GetNamespace(), pod.GetName())
svc, err := c.consSvc(config, pod, client, ctx)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
return pod, cperrors.ToPluginError(client.Create(ctx, svc), cperrors.ApiCallError)
}
log.Errorf("[CLB] client.Get svc failed: %v", err)
return pod, cperrors.NewPluginError(cperrors.ApiCallError, err.Error())
}
if len(svc.OwnerReferences) > 0 && svc.OwnerReferences[0].Kind == "Pod" && svc.OwnerReferences[0].UID != pod.UID {
log.Infof("[CLB] waiting old svc %s/%s deleted. old owner pod uid is %s, but now is %s", svc.Namespace, svc.Name, svc.OwnerReferences[0].UID, pod.UID)
return pod, nil
}
// update svc
if util.GetHash(config) != svc.GetAnnotations()[ClbConfigHashKey] {
log.Infof("[CLB] config hash changed for pod %s/%s, updating svc", pod.GetNamespace(), pod.GetName())
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
if err != nil {
return pod, cperrors.NewPluginError(cperrors.InternalError, err.Error())
}
return pod, cperrors.ToPluginError(client.Update(ctx, c.consSvc(config, pod, client, ctx)), cperrors.ApiCallError)
newSvc, err := c.consSvc(config, pod, client, ctx)
if err != nil {
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
return pod, cperrors.ToPluginError(client.Update(ctx, newSvc), cperrors.ApiCallError)
}
// disable network
if networkManager.GetNetworkDisabled() && svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
log.V(4).Infof("[CLB] Network disabled, set svc type to ClusterIP for pod %s/%s", pod.GetNamespace(), pod.GetName())
svc.Spec.Type = corev1.ServiceTypeClusterIP
return pod, cperrors.ToPluginError(client.Update(ctx, svc), cperrors.ApiCallError)
}
// enable network
if !networkManager.GetNetworkDisabled() && svc.Spec.Type == corev1.ServiceTypeClusterIP {
log.V(4).Infof("[CLB] Network enabled, set svc type to LoadBalancer for pod %s/%s", pod.GetNamespace(), pod.GetName())
svc.Spec.Type = corev1.ServiceTypeLoadBalancer
return pod, cperrors.ToPluginError(client.Update(ctx, svc), cperrors.ApiCallError)
}
// network not ready
if len(svc.Status.LoadBalancer.Ingress) == 0 {
log.Infof("[CLB] svc %s/%s has no ingress, network not ready", svc.Namespace, svc.Name)
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkNotReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
@ -205,6 +243,7 @@ func (c *ClbPlugin) OnPodUpdated(client client.Client, pod *corev1.Pod, ctx cont
// allow not ready containers
if util.IsAllowNotReadyContainers(networkManager.GetNetworkConfig()) {
log.V(4).Infof("[CLB] AllowNotReadyContainers enabled for pod %s/%s", pod.GetNamespace(), pod.GetName())
toUpDateSvc, err := utils.AllowNotReadyContainers(client, ctx, pod, svc, false)
if err != nil {
return pod, err
@ -219,21 +258,43 @@ func (c *ClbPlugin) OnPodUpdated(client client.Client, pod *corev1.Pod, ctx cont
}
// network ready
networkReady(svc, pod, networkStatus, config)
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
func networkReady(svc *corev1.Service, pod *corev1.Pod, networkStatus *gamekruiseiov1alpha1.NetworkStatus, config *clbConfig) {
internalAddresses := make([]gamekruiseiov1alpha1.NetworkAddress, 0)
externalAddresses := make([]gamekruiseiov1alpha1.NetworkAddress, 0)
// check whether multi-ingress-IP support is enabled
if config.enableMultiIngress && len(svc.Status.LoadBalancer.Ingress) > 1 {
// multi-ingress-IP mode: create a separate external address for each ingress IP
for _, ingress := range svc.Status.LoadBalancer.Ingress {
for _, port := range svc.Spec.Ports {
instrIPort := port.TargetPort
instrEPort := intstr.FromInt(int(port.Port))
internalAddress := gamekruiseiov1alpha1.NetworkAddress{
IP: pod.Status.PodIP,
// each ingress IP gets its own external address
externalAddress := gamekruiseiov1alpha1.NetworkAddress{
IP: ingress.IP,
Ports: []gamekruiseiov1alpha1.NetworkPort{
{
Name: instrIPort.String(),
Port: &instrIPort,
Port: &instrEPort,
Protocol: port.Protocol,
},
},
}
externalAddresses = append(externalAddresses, externalAddress)
}
}
} else {
// single-ingress-IP mode (original logic)
if len(svc.Status.LoadBalancer.Ingress) > 0 {
for _, port := range svc.Spec.Ports {
instrIPort := port.TargetPort
instrEPort := intstr.FromInt(int(port.Port))
externalAddress := gamekruiseiov1alpha1.NetworkAddress{
IP: svc.Status.LoadBalancer.Ingress[0].IP,
Ports: []gamekruiseiov1alpha1.NetworkPort{
@ -244,29 +305,48 @@ func (c *ClbPlugin) OnPodUpdated(client client.Client, pod *corev1.Pod, ctx cont
},
},
}
internalAddresses = append(internalAddresses, internalAddress)
externalAddresses = append(externalAddresses, externalAddress)
}
}
}
// internal addresses logic remains unchanged
for _, port := range svc.Spec.Ports {
instrIPort := port.TargetPort
internalAddress := gamekruiseiov1alpha1.NetworkAddress{
IP: pod.Status.PodIP,
Ports: []gamekruiseiov1alpha1.NetworkPort{
{
Name: instrIPort.String(),
Port: &instrIPort,
Protocol: port.Protocol,
},
},
}
internalAddresses = append(internalAddresses, internalAddress)
}
networkStatus.InternalAddresses = internalAddresses
networkStatus.ExternalAddresses = externalAddresses
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkReady
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
return pod, cperrors.ToPluginError(err, cperrors.InternalError)
}
func (c *ClbPlugin) OnPodDeleted(client client.Client, pod *corev1.Pod, ctx context.Context) cperrors.PluginError {
log.Infof("[CLB] OnPodDeleted called for pod %s/%s", pod.GetNamespace(), pod.GetName())
networkManager := utils.NewNetworkManager(pod, client)
networkConfig := networkManager.GetNetworkConfig()
sc := parseLbConfig(networkConfig)
var podKeys []string
if sc.isFixed {
log.Infof("[CLB] isFixed=true, check gss for pod %s/%s", pod.GetNamespace(), pod.GetName())
gss, err := util.GetGameServerSetOfPod(pod, client, ctx)
if err != nil && !errors.IsNotFound(err) {
return cperrors.ToPluginError(err, cperrors.ApiCallError)
}
// gss exists in cluster, do not deAllocate.
if err == nil && gss.GetDeletionTimestamp() == nil {
log.Infof("[CLB] gss exists, skip deAllocate for pod %s/%s", pod.GetNamespace(), pod.GetName())
return nil
}
// gss not exists in cluster, deAllocate all the ports related to it.
@ -281,21 +361,67 @@ func (c *ClbPlugin) OnPodDeleted(client client.Client, pod *corev1.Pod, ctx cont
}
for _, podKey := range podKeys {
log.Infof("[CLB] deAllocate for podKey %s", podKey)
c.deAllocate(podKey)
}
return nil
}
func (c *ClbPlugin) allocate(lbIds []string, num int, nsName string) (string, []int32) {
func (c *ClbPlugin) allocate(lbIds []string, num int, nsName string, enableClbScatter ...bool) (string, []int32, error) {
c.mutex.Lock()
defer c.mutex.Unlock()
log.Infof("[CLB] allocate called, lbIds=%v, num=%d, nsName=%s, scatter=%v", lbIds, num, nsName, enableClbScatter)
if len(lbIds) == 0 {
return "", nil, fmt.Errorf("no load balancer IDs provided")
}
var ports []int32
var lbId string
useScatter := false
if len(enableClbScatter) > 0 {
useScatter = enableClbScatter[0]
}
// find lb with adequate ports
if useScatter && len(lbIds) > 0 {
log.V(4).Infof("[CLB] scatter enabled, round robin from idx %d", c.lastScatterIdx)
// round-robin allocation
startIdx := c.lastScatterIdx % len(lbIds)
for i := 0; i < len(lbIds); i++ {
idx := (startIdx + i) % len(lbIds)
clbId := lbIds[idx]
if c.cache[clbId] == nil {
// we assume that an empty cache is always allocatable
c.newCacheForSingleLb(clbId)
lbId = clbId
c.lastScatterIdx = idx + 1 // start from the next CLB next time
break
}
sum := 0
for p := c.minPort; p < c.maxPort; p++ {
if !c.cache[clbId][p] {
sum++
}
if sum >= num {
lbId = clbId
c.lastScatterIdx = idx + 1 // start from the next CLB next time
break
}
}
if lbId != "" {
break
}
}
} else {
log.V(4).Infof("[CLB] scatter disabled, use default order")
// original logic
for _, clbId := range lbIds {
if c.cache[clbId] == nil {
c.newCacheForSingleLb(clbId)
lbId = clbId
break
}
sum := 0
for i := c.minPort; i < c.maxPort; i++ {
if !c.cache[clbId][i] {
@ -306,44 +432,60 @@ func (c *ClbPlugin) allocate(lbIds []string, num int, nsName string) (string, []
break
}
}
if lbId != "" {
break
}
}
}
// select ports
for i := 0; i < num; i++ {
var port int32
if lbId == "" {
return "", nil, fmt.Errorf("unable to find load balancer with %d available ports", num)
}
// Find available ports sequentially
portCount := 0
for port := c.minPort; port < c.maxPort && portCount < num; port++ {
if !c.cache[lbId][port] {
c.cache[lbId][port] = true
ports = append(ports, port)
portCount++
}
}
// Check if we found enough ports
if len(ports) < num {
// Rollback: release allocated ports
for _, port := range ports {
c.cache[lbId][port] = false
}
return "", nil, fmt.Errorf("insufficient available ports on load balancer %s: found %d, need %d", lbId, len(ports), num)
}
c.podAllocate[nsName] = lbId + ":" + util.Int32SliceToString(ports, ",")
log.Infof("[CLB] pod %s allocate clb %s ports %v", nsName, lbId, ports)
return lbId, ports, nil
}
// newCacheForSingleLb initializes the port allocation cache for a single load balancer. MUST BE CALLED IN LOCK STATE
func (c *ClbPlugin) newCacheForSingleLb(lbId string) {
if c.cache[lbId] == nil {
c.cache[lbId] = make(portAllocated, c.maxPort-c.minPort)
for i := c.minPort; i < c.maxPort; i++ {
c.cache[lbId] = make(portAllocated, c.maxPort-c.minPort+1)
for i := c.minPort; i <= c.maxPort; i++ {
c.cache[lbId][i] = false
}
// block ports
for _, blockPort := range c.blockPorts {
c.cache[lbId][blockPort] = true
}
}
for p, allocated := range c.cache[lbId] {
if !allocated {
port = p
break
}
}
c.cache[lbId][port] = true
ports = append(ports, port)
}
c.podAllocate[nsName] = lbId + ":" + util.Int32SliceToString(ports, ",")
log.Infof("pod %s allocate clb %s ports %v", nsName, lbId, ports)
return lbId, ports
}
func (c *ClbPlugin) deAllocate(nsName string) {
c.mutex.Lock()
defer c.mutex.Unlock()
log.Infof("[CLB] deAllocate called for nsName=%s", nsName)
allocatedPorts, exist := c.podAllocate[nsName]
if !exist {
log.Warningf("[CLB] deAllocate: nsName=%s not found in podAllocate", nsName)
return
}
@ -370,18 +512,25 @@ func init() {
}
func parseLbConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) *clbConfig {
log.Infof("[CLB] parseLbConfig called, conf=%+v", conf)
var lbIds []string
ports := make([]int, 0)
protocols := make([]corev1.Protocol, 0)
isFixed := false
allocateLoadBalancerNodePorts := true
annotations := map[string]string{}
enableClbScatter := false
enableMultiIngress := false
for _, c := range conf {
switch c.Name {
case ClbIdsConfigName:
seenIds := make(map[string]struct{})
for _, clbId := range strings.Split(c.Value, ",") {
if clbId != "" {
if _, exists := seenIds[clbId]; !exists {
lbIds = append(lbIds, clbId)
seenIds[clbId] = struct{}{}
}
}
}
case PortProtocolsConfigName:
@ -419,6 +568,16 @@ func parseLbConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) *clbConfig {
log.Warningf("clb annotation %s is invalid", annoKV[0])
}
}
case EnableClbScatterConfigName:
v, err := strconv.ParseBool(c.Value)
if err == nil {
enableClbScatter = v
}
case EnableMultiIngressConfigName:
v, err := strconv.ParseBool(c.Value)
if err == nil {
enableMultiIngress = v
}
}
}
return &clbConfig{
@ -428,6 +587,8 @@ func parseLbConfig(conf []gamekruiseiov1alpha1.NetworkConfParams) *clbConfig {
isFixed: isFixed,
annotations: annotations,
allocateLoadBalancerNodePorts: allocateLoadBalancerNodePorts,
enableClbScatter: enableClbScatter,
enableMultiIngress: enableMultiIngress,
}
}
@ -439,7 +600,7 @@ func getPorts(ports []corev1.ServicePort) []int32 {
return ret
}
func (c *ClbPlugin) consSvc(config *clbConfig, pod *corev1.Pod, client client.Client, ctx context.Context) *corev1.Service {
func (c *ClbPlugin) consSvc(config *clbConfig, pod *corev1.Pod, client client.Client, ctx context.Context) (*corev1.Service, error) {
var ports []int32
var lbId string
podKey := pod.GetNamespace() + "/" + pod.GetName()
@ -449,13 +610,19 @@ func (c *ClbPlugin) consSvc(config *clbConfig, pod *corev1.Pod, client client.Cl
lbId = clbPorts[0]
ports = util.StringToInt32Slice(clbPorts[1], ",")
} else {
lbId, ports = c.allocate(config.lbIds, len(config.targetPorts), podKey)
var err error
lbId, ports, err = c.allocate(config.lbIds, len(config.targetPorts), podKey, config.enableClbScatter)
if err != nil {
log.Errorf("[CLB] pod %s allocate clb failed: %v", podKey, err)
return nil, err
}
}
svcPorts := make([]corev1.ServicePort, 0)
for i := 0; i < len(config.targetPorts); i++ {
portName := fmt.Sprintf("%d-%s", config.targetPorts[i], strings.ToLower(string(config.protocols[i])))
svcPorts = append(svcPorts, corev1.ServicePort{
Name: strconv.Itoa(config.targetPorts[i]),
Name: portName,
Port: ports[i],
Protocol: config.protocols[i],
TargetPort: intstr.FromInt(config.targetPorts[i]),
@ -488,7 +655,7 @@ func (c *ClbPlugin) consSvc(config *clbConfig, pod *corev1.Pod, client client.Cl
AllocateLoadBalancerNodePorts: ptr.To[bool](config.allocateLoadBalancerNodePorts),
},
}
return svc
return svc, nil
}
func getSvcOwnerReference(c client.Client, ctx context.Context, pod *corev1.Pod, isFixed bool) []metav1.OwnerReference {

View File

@ -18,15 +18,20 @@ package volcengine
import (
"context"
"k8s.io/utils/ptr"
"encoding/json"
"reflect"
"sync"
"testing"
"k8s.io/utils/ptr"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/pkg/util"
@ -51,7 +56,10 @@ func TestAllocateDeAllocate(t *testing.T) {
num: 3,
}
lbId, ports := test.clb.allocate(test.lbIds, test.num, test.podKey)
lbId, ports, err := test.clb.allocate(test.lbIds, test.num, test.podKey)
if err != nil {
t.Errorf("allocate failed: %v", err)
}
if _, exist := test.clb.podAllocate[test.podKey]; !exist {
t.Errorf("podAllocate[%s] is empty after allocated", test.podKey)
}
@ -138,6 +146,18 @@ func TestParseLbConfig(t *testing.T) {
}
}
func TestParseLbConfig_EnableClbScatter(t *testing.T) {
conf := []gamekruiseiov1alpha1.NetworkConfParams{
{Name: ClbIdsConfigName, Value: "clb-1,clb-2"},
{Name: PortProtocolsConfigName, Value: "80,81"},
{Name: EnableClbScatterConfigName, Value: "true"},
}
sc := parseLbConfig(conf)
if !sc.enableClbScatter {
t.Errorf("enableClbScatter expect true, got false")
}
}
func TestInitLbCache(t *testing.T) {
test := struct {
svcList []corev1.Service
@ -330,7 +350,7 @@ func TestClbPlugin_consSvc(t *testing.T) {
SvcSelectorKey: "test-pod",
},
Ports: []corev1.ServicePort{{
Name: "82",
Name: "82-tcp",
Port: 80,
Protocol: "TCP",
TargetPort: intstr.IntOrString{
@ -351,8 +371,755 @@ func TestClbPlugin_consSvc(t *testing.T) {
cache: tt.fields.cache,
podAllocate: tt.fields.podAllocate,
}
if got := c.consSvc(tt.args.config, tt.args.pod, tt.args.client, tt.args.ctx); !reflect.DeepEqual(got, tt.want) {
if got, _ := c.consSvc(tt.args.config, tt.args.pod, tt.args.client, tt.args.ctx); !reflect.DeepEqual(got, tt.want) {
t.Errorf("consSvc() = %v, want %v", got, tt.want)
}
}
}
func TestAllocateScatter(t *testing.T) {
clb := &ClbPlugin{
maxPort: 120,
minPort: 100,
cache: map[string]portAllocated{"clb-1": {}, "clb-2": {}},
podAllocate: make(map[string]string),
mutex: sync.RWMutex{},
}
// initialize the cache
for _, id := range []string{"clb-1", "clb-2"} {
clb.cache[id] = make(portAllocated)
for i := clb.minPort; i < clb.maxPort; i++ {
clb.cache[id][i] = false
}
}
lbIds := []string{"clb-1", "clb-2"}
// allocate 4 times in a row; round robin should alternate clb-1, clb-2, clb-1, clb-2
results := make([]string, 0)
for i := 0; i < 4; i++ {
lbId, _, err := clb.allocate(lbIds, 1, "ns/pod"+string(rune(i)), true)
if err != nil {
t.Errorf("error when allocating ports")
}
results = append(results, lbId)
}
if !(results[0] != results[1] && results[0] == results[2] && results[1] == results[3]) {
t.Errorf("scatter allocate not round robin: %v", results)
}
}
func TestAllocate2(t *testing.T) {
tests := []struct {
name string
lbIds []string
num int
nsName string
clb *ClbPlugin
enableClbScatter bool
wantLbId string
wantPortsLen int
wantErr bool
}{
{
name: "no load balancer IDs",
lbIds: []string{},
num: 1,
nsName: "default/test-pod",
clb: &ClbPlugin{mutex: sync.RWMutex{}},
wantErr: true,
wantLbId: "",
},
{
name: "normal allocation without scatter",
lbIds: []string{"lb-1", "lb-2"},
num: 2,
nsName: "default/test-pod",
clb: &ClbPlugin{
maxPort: 600,
minPort: 500,
cache: map[string]portAllocated{},
podAllocate: map[string]string{},
mutex: sync.RWMutex{},
},
wantLbId: "lb-1",
wantPortsLen: 2,
wantErr: false,
},
{
name: "allocation with scatter enabled",
lbIds: []string{"lb-1", "lb-2"},
num: 2,
nsName: "default/test-pod",
clb: &ClbPlugin{
maxPort: 600,
minPort: 500,
cache: map[string]portAllocated{},
podAllocate: map[string]string{},
mutex: sync.RWMutex{},
lastScatterIdx: 0,
},
enableClbScatter: true,
wantLbId: "lb-1", // First allocation should go to lb-1
wantPortsLen: 2,
wantErr: false,
},
{
name: "insufficient ports available",
lbIds: []string{"lb-1"},
num: 10, // Request more ports than available
nsName: "default/test-pod",
clb: &ClbPlugin{
maxPort: 505, // Only 5 ports available (500-504)
minPort: 500,
cache: map[string]portAllocated{
"lb-1": map[int32]bool{
502: true, // One port already allocated
},
},
podAllocate: map[string]string{},
mutex: sync.RWMutex{},
},
wantErr: true,
},
{
name: "allocate multiple ports",
lbIds: []string{"lb-1"},
num: 3,
nsName: "default/test-pod",
clb: &ClbPlugin{
maxPort: 510,
minPort: 500,
cache: map[string]portAllocated{},
podAllocate: map[string]string{},
mutex: sync.RWMutex{},
},
wantLbId: "lb-1",
wantPortsLen: 3,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotLbId, gotPorts, err := tt.clb.allocate(tt.lbIds, tt.num, tt.nsName, tt.enableClbScatter)
// Check error
if (err != nil) != tt.wantErr {
t.Errorf("allocate() error = %v, wantErr %v", err, tt.wantErr)
return
}
// If we expect an error, we don't need to check the other conditions
if tt.wantErr {
return
}
// Check lbId
if gotLbId != tt.wantLbId {
t.Errorf("allocate() gotLbId = %v, want %v", gotLbId, tt.wantLbId)
}
// Check number of ports
if len(gotPorts) != tt.wantPortsLen {
t.Errorf("allocate() got %d ports, want %d", len(gotPorts), tt.wantPortsLen)
}
// Check if ports are within range
for _, port := range gotPorts {
if port < tt.clb.minPort || port >= tt.clb.maxPort {
t.Errorf("allocated port %d out of range [%d, %d)", port, tt.clb.minPort, tt.clb.maxPort)
}
// Verify the port is marked as allocated in cache
if !tt.clb.cache[gotLbId][port] {
t.Errorf("port %d not marked as allocated in cache", port)
}
}
// Check if podAllocate map is updated
if allocStr, ok := tt.clb.podAllocate[tt.nsName]; !ok {
t.Errorf("podAllocate not updated for %s", tt.nsName)
} else {
expected := gotLbId + ":" + util.Int32SliceToString(gotPorts, ",")
if allocStr != expected {
t.Errorf("podAllocate[%s] = %s, want %s", tt.nsName, allocStr, expected)
}
}
})
}
}
func TestClbPlugin_OnPodUpdated(t *testing.T) {
baseAnnotations := map[string]string{
gamekruiseiov1alpha1.GameServerNetworkType: "clb",
}
// Create test cases
tests := []struct {
name string
serviceExists bool
serviceOwnerUID types.UID
networkStatus *gamekruiseiov1alpha1.NetworkStatus
networkConfig []gamekruiseiov1alpha1.NetworkConfParams
serviceType corev1.ServiceType
hasIngress bool
networkDisabled bool
expectNetworkReady bool
expectErr bool
}{
{
name: "Service not found",
serviceExists: false,
networkStatus: &gamekruiseiov1alpha1.NetworkStatus{
CurrentNetworkState: gamekruiseiov1alpha1.NetworkNotReady,
},
networkConfig: []gamekruiseiov1alpha1.NetworkConfParams{
{Name: ClbIdsConfigName, Value: "clb-test"},
{Name: PortProtocolsConfigName, Value: "80"},
},
expectErr: false,
},
{
name: "Service exists but owned by another pod",
serviceExists: true,
serviceOwnerUID: "other-uid",
networkStatus: &gamekruiseiov1alpha1.NetworkStatus{
CurrentNetworkState: gamekruiseiov1alpha1.NetworkNotReady,
},
networkConfig: []gamekruiseiov1alpha1.NetworkConfParams{
{Name: ClbIdsConfigName, Value: "clb-test"},
{Name: PortProtocolsConfigName, Value: "80"},
},
expectErr: false,
},
{
name: "Network disabled",
serviceExists: true,
serviceType: corev1.ServiceTypeLoadBalancer,
networkStatus: &gamekruiseiov1alpha1.NetworkStatus{
CurrentNetworkState: gamekruiseiov1alpha1.NetworkNotReady,
},
networkConfig: []gamekruiseiov1alpha1.NetworkConfParams{
{Name: ClbIdsConfigName, Value: "clb-test"},
{Name: PortProtocolsConfigName, Value: "80"},
},
networkDisabled: true,
expectErr: false,
},
{
name: "Network ready",
serviceExists: true,
serviceType: corev1.ServiceTypeLoadBalancer,
hasIngress: true,
networkStatus: &gamekruiseiov1alpha1.NetworkStatus{
CurrentNetworkState: gamekruiseiov1alpha1.NetworkNotReady,
},
networkConfig: []gamekruiseiov1alpha1.NetworkConfParams{
{Name: ClbIdsConfigName, Value: "clb-test"},
{Name: PortProtocolsConfigName, Value: "80"},
},
expectNetworkReady: true,
expectErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// build the annotations dynamically
ann := make(map[string]string)
for k, v := range baseAnnotations {
ann[k] = v
}
if tt.networkConfig != nil {
confBytes, _ := json.Marshal(tt.networkConfig)
ann[gamekruiseiov1alpha1.GameServerNetworkConf] = string(confBytes)
}
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Namespace: "default",
UID: "test-uid",
Annotations: ann,
},
Status: corev1.PodStatus{
PodIP: "192.168.1.1",
},
}
var svc *corev1.Service
if tt.serviceExists {
svc = &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
},
Spec: corev1.ServiceSpec{
Type: tt.serviceType,
Ports: []corev1.ServicePort{
{
Port: 80,
TargetPort: intstr.FromInt(8080),
Protocol: corev1.ProtocolTCP,
},
},
},
}
if tt.serviceOwnerUID != "" {
svc.OwnerReferences = []metav1.OwnerReference{
{
Kind: "Pod",
UID: tt.serviceOwnerUID,
},
}
}
if tt.hasIngress {
svc.Status.LoadBalancer.Ingress = []corev1.LoadBalancerIngress{
{IP: "192.168.1.100"},
}
}
}
scheme := runtime.NewScheme()
_ = corev1.AddToScheme(scheme)
_ = gamekruiseiov1alpha1.AddToScheme(scheme)
builder := fake.NewClientBuilder().WithScheme(scheme)
if svc != nil {
builder = builder.WithObjects(svc)
}
fakeClient := builder.Build()
clb := &ClbPlugin{
maxPort: 600,
minPort: 500,
cache: make(map[string]portAllocated),
podAllocate: make(map[string]string),
mutex: sync.RWMutex{},
}
resultPod, err := clb.OnPodUpdated(fakeClient, pod, context.Background())
if (err != nil) != tt.expectErr {
t.Errorf("OnPodUpdated() error = %v, expectErr %v", err, tt.expectErr)
}
_ = resultPod
})
}
}
func TestClbPlugin_OnPodDeleted(t *testing.T) {
ctx := context.Background()
// non-fixed case
clb := &ClbPlugin{
podAllocate: map[string]string{
"ns1/pod1": "clb-xxx:100",
"ns2/pod2": "clb-xxx:101",
},
cache: map[string]portAllocated{
"clb-xxx": {
100: true,
101: true,
},
},
}
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
Namespace: "ns1",
Annotations: map[string]string{
gamekruiseiov1alpha1.GameServerNetworkType: "clb",
gamekruiseiov1alpha1.GameServerNetworkConf: `[{"Name":"ClbIds","Value":"clb-xxx"},{"Name":"PortProtocols","Value":"100"}]`,
},
},
}
fakeClient := fake.NewClientBuilder().Build()
_ = clb.OnPodDeleted(fakeClient, pod, ctx)
if _, ok := clb.podAllocate["ns1/pod1"]; ok {
t.Errorf("OnPodDeleted should deAllocate podKey ns1/pod1")
}
// fixed case: gss does not exist, all related keys should be deAllocated
clb2 := &ClbPlugin{
podAllocate: map[string]string{
"ns2/gss2": "clb-xxx:201",
},
cache: map[string]portAllocated{
"clb-xxx": {
200: true,
201: true,
},
},
}
pod2 := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod2",
Namespace: "ns2",
Annotations: map[string]string{
gamekruiseiov1alpha1.GameServerNetworkType: "clb",
gamekruiseiov1alpha1.GameServerNetworkConf: `[{"Name":"ClbIds","Value":"clb-xxx"},{"Name":"PortProtocols","Value":"200"},{"Name":"Fixed","Value":"true"}]`,
},
Labels: map[string]string{
gamekruiseiov1alpha1.GameServerOwnerGssKey: "gss1",
},
},
}
fakeClient2 := fake.NewClientBuilder().Build() // does not contain the gss, simulating not found
_ = clb2.OnPodDeleted(fakeClient2, pod2, ctx)
if _, ok := clb2.podAllocate["ns2/gss1"]; ok {
t.Errorf("OnPodDeleted should deAllocate podKey ns2/gss1 for fixed case (gss not found)")
}
// fixed case: gss exists with no deletion timestamp, should NOT deAllocate
gss := &gamekruiseiov1alpha1.GameServerSet{
ObjectMeta: metav1.ObjectMeta{
Name: "gss1",
Namespace: "ns2",
},
}
clb3 := &ClbPlugin{
podAllocate: map[string]string{
"ns2/gss1": "clb-xxx:200",
},
cache: map[string]portAllocated{
"clb-xxx": {
200: true,
101: true,
},
},
}
pod3 := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod3",
Namespace: "ns2",
Annotations: map[string]string{
gamekruiseiov1alpha1.GameServerNetworkType: "clb",
gamekruiseiov1alpha1.GameServerNetworkConf: `[{"Name":"ClbIds","Value":"clb-xxx"},{"Name":"PortProtocols","Value":"200"},{"Name":"Fixed","Value":"true"}]`,
},
Labels: map[string]string{
gamekruiseiov1alpha1.GameServerOwnerGssKey: "gss1",
},
},
}
scheme := runtime.NewScheme()
_ = gamekruiseiov1alpha1.AddToScheme(scheme)
fakeClient3 := fake.NewClientBuilder().WithScheme(scheme).WithObjects(gss).Build()
_ = clb3.OnPodDeleted(fakeClient3, pod3, ctx)
if _, ok := clb3.podAllocate["ns2/gss1"]; !ok {
t.Errorf("OnPodDeleted should NOT deAllocate podKey ns2/gss1 for fixed case (gss exists)")
}
}
func TestNetworkReady(t *testing.T) {
tests := []struct {
name string
svc *corev1.Service
pod *corev1.Pod
config *clbConfig
expectedInternalLen int
expectedExternalLen int
expectedExternalIPs []string
}{
{
name: "单 ingress IP 模式 - enableMultiIngress=false",
svc: &corev1.Service{
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "port1",
Port: 8080,
TargetPort: intstr.FromInt(80),
Protocol: corev1.ProtocolTCP,
},
{
Name: "port2",
Port: 9090,
TargetPort: intstr.FromInt(90),
Protocol: corev1.ProtocolTCP,
},
},
},
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{IP: "1.2.3.4"},
{IP: "5.6.7.8"}, // 多个 ingress但只使用第一个
},
},
},
},
pod: &corev1.Pod{
Status: corev1.PodStatus{
PodIP: "10.0.0.1",
},
},
config: &clbConfig{
enableMultiIngress: false, // multi-ingress-IP mode disabled
},
expectedInternalLen: 2, // 2 ports -> 2 internal addresses
expectedExternalLen: 2, // 2 ports -> 2 external addresses, but only the first ingress IP is used
expectedExternalIPs: []string{"1.2.3.4", "1.2.3.4"}, // both use the first IP
},
{
name: "多 ingress IP 模式 - enableMultiIngress=true 多个 ingress",
svc: &corev1.Service{
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "port1",
Port: 8080,
TargetPort: intstr.FromInt(80),
Protocol: corev1.ProtocolTCP,
},
{
Name: "port2",
Port: 9090,
TargetPort: intstr.FromInt(90),
Protocol: corev1.ProtocolTCP,
},
},
},
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{IP: "1.2.3.4"},
{IP: "5.6.7.8"},
},
},
},
},
pod: &corev1.Pod{
Status: corev1.PodStatus{
PodIP: "10.0.0.1",
},
},
config: &clbConfig{
enableMultiIngress: true, // multi-ingress-IP mode enabled
},
expectedInternalLen: 2, // 2 ports -> 2 internal addresses
expectedExternalLen: 4, // 2 ingress IPs x 2 ports = 4 external addresses
expectedExternalIPs: []string{"1.2.3.4", "1.2.3.4", "5.6.7.8", "5.6.7.8"}, // every ingress IP paired with every port
},
{
name: "多 ingress IP 模式 - enableMultiIngress=true 但只有一个 ingress",
svc: &corev1.Service{
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "port1",
Port: 8080,
TargetPort: intstr.FromInt(80),
Protocol: corev1.ProtocolTCP,
},
},
},
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{IP: "1.2.3.4"}, // 只有一个 ingress IP
},
},
},
},
pod: &corev1.Pod{
Status: corev1.PodStatus{
PodIP: "10.0.0.1",
},
},
config: &clbConfig{
enableMultiIngress: true, // multi-ingress-IP mode enabled, but with only one ingress the single-IP path is taken
},
expectedInternalLen: 1, // 1 port -> 1 internal address
expectedExternalLen: 1, // 1 port -> 1 external address (only one ingress IP, so the single-IP path is taken)
expectedExternalIPs: []string{"1.2.3.4"},
},
{
name: "空端口列表",
svc: &corev1.Service{
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{}, // empty port list
},
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{IP: "1.2.3.4"},
},
},
},
},
pod: &corev1.Pod{
Status: corev1.PodStatus{
PodIP: "10.0.0.1",
},
},
config: &clbConfig{
enableMultiIngress: true,
},
expectedInternalLen: 0, // no ports, no addresses
expectedExternalLen: 0,
expectedExternalIPs: []string{},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
networkStatus := &gamekruiseiov1alpha1.NetworkStatus{}
// call the function under test
networkReady(tt.svc, tt.pod, networkStatus, tt.config)
// verify the network state
if networkStatus.CurrentNetworkState != gamekruiseiov1alpha1.NetworkReady {
t.Errorf("Expected NetworkReady, got %v", networkStatus.CurrentNetworkState)
}
// verify the number of internal addresses
if len(networkStatus.InternalAddresses) != tt.expectedInternalLen {
t.Errorf("Expected %d internal addresses, got %d", tt.expectedInternalLen, len(networkStatus.InternalAddresses))
}
// verify the number of external addresses
if len(networkStatus.ExternalAddresses) != tt.expectedExternalLen {
t.Errorf("Expected %d external addresses, got %d", tt.expectedExternalLen, len(networkStatus.ExternalAddresses))
}
// verify the IPs of the external addresses
actualExternalIPs := make([]string, len(networkStatus.ExternalAddresses))
for i, addr := range networkStatus.ExternalAddresses {
actualExternalIPs[i] = addr.IP
}
if !reflect.DeepEqual(actualExternalIPs, tt.expectedExternalIPs) {
t.Errorf("Expected external IPs %v, got %v", tt.expectedExternalIPs, actualExternalIPs)
}
// verify that every internal address IP is the pod IP
for _, addr := range networkStatus.InternalAddresses {
if addr.IP != tt.pod.Status.PodIP {
t.Errorf("Expected internal IP %s, got %s", tt.pod.Status.PodIP, addr.IP)
}
}
// verify the correctness of port protocols and port numbers
if len(tt.svc.Spec.Ports) > 0 {
// verify the ports of the internal addresses
for i, internalAddr := range networkStatus.InternalAddresses {
if len(internalAddr.Ports) != 1 {
t.Errorf("Expected 1 port per internal address, got %d", len(internalAddr.Ports))
continue
}
expectedTargetPort := tt.svc.Spec.Ports[i].TargetPort
if *internalAddr.Ports[0].Port != expectedTargetPort {
t.Errorf("Expected internal port %v, got %v", expectedTargetPort, *internalAddr.Ports[0].Port)
}
if internalAddr.Ports[0].Protocol != tt.svc.Spec.Ports[i].Protocol {
t.Errorf("Expected protocol %v, got %v", tt.svc.Spec.Ports[i].Protocol, internalAddr.Ports[0].Protocol)
}
}
// verify the ports of the external addresses (the check depends on the mode)
if tt.config.enableMultiIngress && len(tt.svc.Status.LoadBalancer.Ingress) > 1 {
// multi-ingress-IP mode: every ingress IP carries all ports
expectedPortCount := len(tt.svc.Spec.Ports) * len(tt.svc.Status.LoadBalancer.Ingress)
if len(networkStatus.ExternalAddresses) != expectedPortCount {
t.Errorf("Expected %d external addresses in multi-ingress mode, got %d", expectedPortCount, len(networkStatus.ExternalAddresses))
}
} else {
// single-ingress-IP mode: only one IP, one address per port
expectedPortCount := len(tt.svc.Spec.Ports)
if len(networkStatus.ExternalAddresses) != expectedPortCount {
t.Errorf("Expected %d external addresses in single-ingress mode, got %d", expectedPortCount, len(networkStatus.ExternalAddresses))
}
}
}
})
}
}
func TestNetworkReady_EdgeCases(t *testing.T) {
tests := []struct {
name string
svc *corev1.Service
pod *corev1.Pod
config *clbConfig
expectPanic bool
}{
{
name: "没有 ingress IP",
svc: &corev1.Service{
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "port1",
Port: 8080,
TargetPort: intstr.FromInt(80),
Protocol: corev1.ProtocolTCP,
},
},
},
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{}, // empty ingress list
},
},
},
pod: &corev1.Pod{
Status: corev1.PodStatus{
PodIP: "10.0.0.1",
},
},
config: &clbConfig{
enableMultiIngress: false,
},
expectPanic: false, // changed: a panic is no longer expected
},
{
name: "没有 ingress IP",
svc: &corev1.Service{
Spec: corev1.ServiceSpec{
Ports: []corev1.ServicePort{
{
Name: "port1",
Port: 8080,
TargetPort: intstr.FromInt(80),
Protocol: corev1.ProtocolTCP,
},
},
},
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{}, // empty ingress list
},
},
},
pod: &corev1.Pod{
Status: corev1.PodStatus{
PodIP: "10.0.0.1",
},
},
config: &clbConfig{
enableMultiIngress: true,
},
expectPanic: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
networkStatus := &gamekruiseiov1alpha1.NetworkStatus{}
if tt.expectPanic {
defer func() {
if r := recover(); r == nil {
t.Errorf("Expected panic but didn't get one")
}
}()
}
// call the function under test
networkReady(tt.svc, tt.pod, networkStatus, tt.config)
if !tt.expectPanic {
// if no panic is expected, verify the basic state
if networkStatus.CurrentNetworkState != gamekruiseiov1alpha1.NetworkReady {
t.Errorf("Expected NetworkReady, got %v", networkStatus.CurrentNetworkState)
}
}
})
}
}
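A minimal sketch of how the two new switches are wired through the Volcengine plugin's parseLbConfig (conf keys and value formats taken from the tests above; the surrounding GameServerSet fields are omitted):
conf := []gamekruiseiov1alpha1.NetworkConfParams{
	{Name: ClbIdsConfigName, Value: "clb-1,clb-2"},
	{Name: PortProtocolsConfigName, Value: "80,81"},
	{Name: EnableClbScatterConfigName, Value: "true"},   // round-robin new pods across clb-1 and clb-2
	{Name: EnableMultiIngressConfigName, Value: "true"}, // one external address per ingress IP
}
sc := parseLbConfig(conf)
// sc.enableClbScatter and sc.enableMultiIngress are both true;
// consSvc then passes sc.enableClbScatter on to allocate().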

View File

@ -0,0 +1,206 @@
package volcengine
import (
"context"
"encoding/json"
"fmt"
"strconv"
gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/cloudprovider"
"github.com/openkruise/kruise-game/cloudprovider/errors"
"github.com/openkruise/kruise-game/cloudprovider/utils"
corev1 "k8s.io/api/core/v1"
log "k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
EIPNetwork = "Volcengine-EIP"
AliasSEIP = "EIP-Network"
ReleaseStrategyConfigName = "ReleaseStrategy"
PoolIdConfigName = "PoolId"
ResourceGroupIdConfigName = "ResourceGroupId"
BandwidthConfigName = "Bandwidth"
BandwidthPackageIdConfigName = "BandwidthPackageId"
ChargeTypeConfigName = "ChargeType"
DescriptionConfigName = "Description"
VkeAnnotationPrefix = "vke.volcengine.com"
UseExistEIPAnnotationKey = "vke.volcengine.com/primary-eip-id"
WithEIPAnnotationKey = "vke.volcengine.com/primary-eip-allocate"
EipAttributeAnnotationKey = "vke.volcengine.com/primary-eip-attributes"
EipStatusKey = "vke.volcengine.com/allocated-eips"
DefaultEipConfig = "{\"type\": \"Elastic\"}"
)
type eipStatus struct {
EipId string `json:"EipId,omitempty"` // EIP instance ID
EipAddress string `json:"EipAddress,omitempty"` // public address of the EIP instance
EniId string `json:"EniId,omitempty"` // ID of the pod's elastic network interface (ENI)
EniIp string `json:"niIp,omitempty"` // private IPv4 address of the pod's ENI
}
type EipPlugin struct {
}
func (E EipPlugin) Name() string {
return EIPNetwork
}
func (E EipPlugin) Alias() string {
return AliasSEIP
}
func (E EipPlugin) Init(client client.Client, options cloudprovider.CloudProviderOptions, ctx context.Context) error {
log.Infof("Initializing Volcengine EIP plugin")
return nil
}
func (E EipPlugin) OnPodAdded(client client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, errors.PluginError) {
log.Infof("begin to handle PodAdded for pod name %s, namespace %s", pod.Name, pod.Namespace)
networkManager := utils.NewNetworkManager(pod, client)
// get the network configuration parameters
networkConfs := networkManager.GetNetworkConfig()
log.Infof("pod %s/%s network configs: %+v", pod.Namespace, pod.Name, networkConfs)
if networkManager.GetNetworkType() != EIPNetwork {
log.Infof("pod %s/%s network type is not %s, skipping", pod.Namespace, pod.Name, EIPNetwork)
return pod, nil
}
log.Infof("processing pod %s/%s with Volcengine EIP network", pod.Namespace, pod.Name)
// check whether UseExistEIPAnnotationKey is configured
eipID := ""
if pod.Annotations == nil {
log.Infof("pod %s/%s has no annotations, initializing", pod.Namespace, pod.Name)
pod.Annotations = make(map[string]string)
}
eipConfig := make(map[string]interface{})
// extract parameters from the config
for _, conf := range networkConfs {
log.Infof("processing network config for pod %s/%s: %s=%s", pod.Namespace, pod.Name, conf.Name, conf.Value)
switch conf.Name {
case UseExistEIPAnnotationKey:
pod.Annotations[UseExistEIPAnnotationKey] = conf.Value
eipID = conf.Value
log.Infof("pod %s/%s using existing EIP ID: %s", pod.Namespace, pod.Name, eipID)
case "billingType":
var err error
eipConfig[conf.Name], err = strconv.ParseInt(conf.Value, 10, 64)
if err != nil {
log.Infof("failed to parse billingType for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return pod, errors.ToPluginError(err, errors.InternalError)
}
log.Infof("pod %s/%s billingType set to: %v", pod.Namespace, pod.Name, eipConfig[conf.Name])
case "bandwidth":
var err error
eipConfig[conf.Name], err = strconv.ParseInt(conf.Value, 10, 64)
if err != nil {
log.Infof("failed to parse bandwidth for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return pod, errors.ToPluginError(err, errors.InternalError)
}
log.Infof("pod %s/%s bandwidth set to: %v", pod.Namespace, pod.Name, eipConfig[conf.Name])
default:
eipConfig[conf.Name] = conf.Value
log.Infof("pod %s/%s setting %s to: %v", pod.Namespace, pod.Name, conf.Name, conf.Value)
}
}
// update the pod annotations
if eipID != "" {
// use the existing EIP
log.Infof("pod %s/%s using existing EIP ID: %s", pod.Namespace, pod.Name, eipID)
pod.Annotations[UseExistEIPAnnotationKey] = eipID
} else {
// use the creation path for a new EIP
if len(eipConfig) == 0 {
eipConfig["description"] = "Created by the OKG Volcengine EIP plugin. Do not delete or modify."
}
configs, _ := json.Marshal(eipConfig)
log.Infof("pod %s/%s allocating new EIP with config: %s", pod.Namespace, pod.Name, string(configs))
pod.Annotations[WithEIPAnnotationKey] = DefaultEipConfig
pod.Annotations[EipAttributeAnnotationKey] = string(configs)
}
log.Infof("completed OnPodAdded for pod %s/%s", pod.Namespace, pod.Name)
return pod, nil
}
func (E EipPlugin) OnPodUpdated(client client.Client, pod *corev1.Pod, ctx context.Context) (*corev1.Pod, errors.PluginError) {
log.Infof("begin to handle PodUpdated for pod name %s, namespace %s", pod.Name, pod.Namespace)
networkManager := utils.NewNetworkManager(pod, client)
networkStatus, _ := networkManager.GetNetworkStatus()
if networkStatus == nil {
log.Infof("network status is nil for pod %s/%s, updating to waiting state", pod.Namespace, pod.Name)
pod, err := networkManager.UpdateNetworkStatus(gamekruiseiov1alpha1.NetworkStatus{
CurrentNetworkState: gamekruiseiov1alpha1.NetworkWaiting,
}, pod)
if err != nil {
log.Infof("failed to update network status for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return pod, errors.ToPluginError(err, errors.InternalError)
}
return pod, nil
}
podEipStatus := []eipStatus{}
if str, ok := pod.Annotations[EipStatusKey]; ok {
log.Infof("found EIP status annotation for pod %s/%s: %s", pod.Namespace, pod.Name, str)
err := json.Unmarshal([]byte(str), &podEipStatus)
if err != nil {
log.Infof("failed to unmarshal EipStatusKey for pod %s/%s: %v", pod.Namespace, pod.Name, err)
return pod, errors.ToPluginError(fmt.Errorf("failed to unmarshal EipStatusKey, err: %w", err), errors.ParameterError)
}
log.Infof("updating network status for pod %s/%s, internal IP: %s, external IP: %s",
pod.Namespace, pod.Name, podEipStatus[0].EniIp, podEipStatus[0].EipAddress)
var internalAddresses []gamekruiseiov1alpha1.NetworkAddress
var externalAddresses []gamekruiseiov1alpha1.NetworkAddress
for _, eipStatus := range podEipStatus {
internalAddresses = append(internalAddresses, gamekruiseiov1alpha1.NetworkAddress{
IP: eipStatus.EniIp,
})
externalAddresses = append(externalAddresses, gamekruiseiov1alpha1.NetworkAddress{
IP: eipStatus.EipAddress,
})
}
networkStatus.InternalAddresses = internalAddresses
networkStatus.ExternalAddresses = externalAddresses
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkReady
log.Infof("network for pod %s/%s is ready, EIP: %s", pod.Namespace, pod.Name, podEipStatus[0].EipAddress)
pod, err = networkManager.UpdateNetworkStatus(*networkStatus, pod)
if err != nil {
log.Infof("failed to update network status for pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
return pod, errors.ToPluginError(err, errors.InternalError)
}
log.Infof("no EIP status found for pod %s/%s, waiting for allocation", pod.Namespace, pod.Name)
return pod, nil
}
func (E EipPlugin) OnPodDeleted(client client.Client, pod *corev1.Pod, ctx context.Context) errors.PluginError {
log.Infof("handling pod deletion for pod %s/%s", pod.Namespace, pod.Name)
// 检查是否需要额外处理
if pod.Annotations != nil {
if eipID, ok := pod.Annotations[UseExistEIPAnnotationKey]; ok {
log.Infof("pod %s/%s being deleted had existing EIP ID: %s", pod.Namespace, pod.Name, eipID)
}
if _, ok := pod.Annotations[WithEIPAnnotationKey]; ok {
log.Infof("pod %s/%s being deleted had allocated EIP", pod.Namespace, pod.Name)
}
}
log.Infof("completed deletion handling for pod %s/%s", pod.Namespace, pod.Name)
return nil
}
func init() {
volcengineProvider.registerPlugin(&EipPlugin{})
}
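A hedged usage sketch for the EIP plugin (parameter names are the ones handled in OnPodAdded above; the values are illustrative only):
// reuse an existing EIP:
conf := []gamekruiseiov1alpha1.NetworkConfParams{
	{Name: UseExistEIPAnnotationKey, Value: "eip-12345"},
}
// or describe a new one; bandwidth and billingType are parsed as int64,
// any other key is passed through into the EIP attributes JSON:
conf = []gamekruiseiov1alpha1.NetworkConfParams{
	{Name: "bandwidth", Value: "100"},
	{Name: "billingType", Value: "2"},
	{Name: "isp", Value: "BGP"},
}
// The first form sets pod.Annotations[UseExistEIPAnnotationKey];
// the second sets WithEIPAnnotationKey and EipAttributeAnnotationKey on the pod.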

View File

@ -0,0 +1,182 @@
package volcengine
import (
"context"
"encoding/json"
"testing"
"github.com/openkruise/kruise-game/apis/v1alpha1"
gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/cloudprovider/alibabacloud/apis/v1beta1"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
func TestEipPlugin_Init(t *testing.T) {
plugin := EipPlugin{}
assert.Equal(t, EIPNetwork, plugin.Name())
assert.Equal(t, AliasSEIP, plugin.Alias())
err := plugin.Init(nil, nil, context.Background())
assert.NoError(t, err)
}
func TestEipPlugin_OnPodAdded_UseExistingEIP(t *testing.T) {
// create a test pod
networkConf := []v1alpha1.NetworkConfParams{}
networkConf = append(networkConf, v1alpha1.NetworkConfParams{
Name: UseExistEIPAnnotationKey,
Value: "eip-12345",
})
jsonStr, _ := json.Marshal(networkConf)
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Namespace: "default",
Annotations: map[string]string{
v1alpha1.GameServerNetworkType: EIPNetwork,
v1alpha1.GameServerNetworkConf: string(jsonStr),
},
},
}
// create a fake client
scheme := runtime.NewScheme()
_ = corev1.AddToScheme(scheme)
fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()
// run the test
plugin := EipPlugin{}
updatedPod, err := plugin.OnPodAdded(fakeClient, pod, context.Background())
// check the results
assert.NoError(t, err)
assert.Equal(t, "eip-12345", updatedPod.Annotations[UseExistEIPAnnotationKey])
assert.Equal(t, EIPNetwork, updatedPod.Annotations[v1alpha1.GameServerNetworkType])
jErr := json.Unmarshal([]byte(updatedPod.Annotations[v1alpha1.GameServerNetworkConf]), &networkConf)
assert.NoError(t, jErr)
}
func addKvToParams(networkConf []v1alpha1.NetworkConfParams, keys []string, values []string) []v1alpha1.NetworkConfParams {
// iterate over keys and values and append them to the conf params
for i := 0; i < len(keys); i++ {
networkConf = append(networkConf, v1alpha1.NetworkConfParams{
Name: keys[i],
Value: values[i],
})
}
return networkConf
}
func TestEipPlugin_OnPodAdded_NewEIP(t *testing.T) {
networkConf := []v1alpha1.NetworkConfParams{}
networkConf = addKvToParams(networkConf, []string{"name", "isp", "bandwidth", "description", "billingType"},
[]string{"eip-demo", "BGP", "100", "demo for pods eip", "2"})
jsonStr, _ := json.Marshal(networkConf)
// create a test pod with the relevant annotations
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Namespace: "default",
Annotations: map[string]string{
v1alpha1.GameServerNetworkType: EIPNetwork,
v1alpha1.GameServerNetworkConf: string(jsonStr),
},
},
}
// create a fake client
scheme := runtime.NewScheme()
_ = corev1.AddToScheme(scheme)
fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()
// run the test
plugin := EipPlugin{}
updatedPod, err := plugin.OnPodAdded(fakeClient, pod, context.Background())
// check the results
assert.NoError(t, err)
assert.Equal(t, DefaultEipConfig, updatedPod.Annotations[WithEIPAnnotationKey])
assert.Equal(t, EIPNetwork, updatedPod.Annotations[v1alpha1.GameServerNetworkType])
attributeStr, ok := pod.Annotations[EipAttributeAnnotationKey]
assert.True(t, ok)
attributes := make(map[string]interface{})
jErr := json.Unmarshal([]byte(attributeStr), &attributes)
assert.NoError(t, jErr)
assert.Equal(t, "eip-demo", attributes["name"])
assert.Equal(t, "BGP", attributes["isp"])
assert.Equal(t, float64(100), attributes["bandwidth"])
assert.Equal(t, "demo for pods eip", attributes["description"])
assert.Equal(t, float64(2), attributes["billingType"])
}
func TestEipPlugin_OnPodUpdated_WithNetworkStatus(t *testing.T) {
// create a test pod with a network status
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Namespace: "default",
Annotations: map[string]string{
v1alpha1.GameServerNetworkType: EIPNetwork,
"cloud.kruise.io/network-status": `{"currentNetworkState":"Waiting"}`,
},
},
Status: corev1.PodStatus{},
}
// create a fake client whose scheme includes PodEIP
scheme := runtime.NewScheme()
_ = corev1.AddToScheme(scheme)
_ = v1beta1.AddToScheme(scheme)
_ = gamekruiseiov1alpha1.AddToScheme(scheme)
fakeClient := fake.NewClientBuilder().
WithScheme(scheme).
WithObjects(pod).
Build()
// run the test
plugin := EipPlugin{}
// Ensure network status includes EIP information
networkStatus := &v1alpha1.NetworkStatus{}
networkStatus.ExternalAddresses = []v1alpha1.NetworkAddress{{IP: "203.0.113.1"}}
networkStatus.InternalAddresses = []v1alpha1.NetworkAddress{{IP: "10.0.0.1"}}
networkStatus.CurrentNetworkState = gamekruiseiov1alpha1.NetworkReady
networkStatusBytes, jErr := json.Marshal(networkStatus)
assert.NoError(t, jErr)
pod.Annotations[v1alpha1.GameServerNetworkStatus] = string(networkStatusBytes)
updatedPod, err := plugin.OnPodUpdated(fakeClient, pod, context.Background())
assert.NoError(t, err)
// re-read the network status that OnPodUpdated wrote back to the pod annotations
jErr = json.Unmarshal([]byte(updatedPod.Annotations[v1alpha1.GameServerNetworkStatus]), &networkStatus)
assert.NoError(t, jErr)
// check the results
assert.Contains(t, updatedPod.Annotations[v1alpha1.GameServerNetworkStatus], "Ready")
assert.Contains(t, updatedPod.Annotations[v1alpha1.GameServerNetworkStatus], "203.0.113.1")
assert.Contains(t, updatedPod.Annotations[v1alpha1.GameServerNetworkStatus], "10.0.0.1")
}
func TestEipPlugin_OnPodDeleted(t *testing.T) {
plugin := EipPlugin{}
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
Namespace: "default",
Annotations: map[string]string{
v1alpha1.GameServerNetworkType: EIPNetwork,
"cloud.kruise.io/network-status": `{"currentNetworkState":"Waiting"}`,
},
},
Status: corev1.PodStatus{},
}
err := plugin.OnPodDeleted(nil, pod, context.Background())
assert.Nil(t, err)
}

View File

@ -0,0 +1,31 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned-issuer
namespace: system
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: cert
namespace: system
spec:
commonName: kruise-game-controller-manager
dnsNames:
- $(SERVICE_NAME).$(SERVICE_NAMESPACE)
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
secretName: kruise-game-certs
usages:
- server auth
- client auth
privateKey:
algorithm: RSA
size: 2048
rotationPolicy: Never
issuerRef:
name: selfsigned-issuer
kind: Issuer
group: cert-manager.io

View File

@ -0,0 +1,5 @@
resources:
- certificate.yaml
configurations:
- kustomizeconfig.yaml

View File

@ -0,0 +1,16 @@
# This configuration is for teaching kustomize how to update name ref and var substitution
nameReference:
- kind: Issuer
group: cert-manager.io
fieldSpecs:
- kind: Certificate
group: cert-manager.io
path: spec/issuerRef/name
varReference:
- kind: Certificate
group: cert-manager.io
path: spec/commonName
- kind: Certificate
group: cert-manager.io
path: spec/dnsNames

View File

@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.16.5
name: poddnats.alibabacloud.com
spec:
group: alibabacloud.com
@ -21,14 +20,19 @@ spec:
description: PodDNAT is the Schema for the poddnats API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object

View File

@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.16.5
name: podeips.alibabacloud.com
spec:
group: alibabacloud.com
@ -21,14 +20,19 @@ spec:
description: PodEIP is the Schema for the podeips API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
controller-gen.kubebuilder.io/version: v0.16.5
name: dedicatedclblisteners.networking.cloud.tencent.com
spec:
group: networking.cloud.tencent.com
@ -39,14 +38,19 @@ spec:
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object

View File

@ -20,7 +20,7 @@ bases:
# crd/kustomization.yaml
- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
# - ../prometheus
@ -38,39 +38,39 @@ patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service
- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
objref:
kind: Certificate
group: cert-manager.io
version: v1
name: cert # this name should match the one in certificate.yaml
fieldref:
fieldpath: metadata.namespace
- name: CERTIFICATE_NAME
objref:
kind: Certificate
group: cert-manager.io
version: v1
name: cert # this name should match the one in certificate.yaml
- name: SERVICE_NAMESPACE # namespace of the service
objref:
kind: Service
version: v1
name: webhook-service
fieldref:
fieldpath: metadata.namespace
- name: SERVICE_NAME
objref:
kind: Service
version: v1
name: webhook-service

View File

@ -9,15 +9,16 @@ spec:
containers:
- name: manager
ports:
- containerPort: 9443
- containerPort: 9876
name: webhook-server
protocol: TCP
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
- mountPath: /tmp/webhook-certs/
name: cert
readOnly: true
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert
secretName: kruise-game-certs
optional: false

View File

@ -1,8 +1,15 @@
# This patch adds annotations to the admission webhook configs and
# the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
apiVersion: admissionregistration.k8s.io/v1beta1
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: mutating-webhook-configuration
name: mutating-webhook
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: validating-webhook
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)

View File

@ -15,6 +15,13 @@ max_port = 1502
min_port = 1000
block_ports = [1025, 1434, 1068]
[hwcloud]
enable = true
[hwcloud.elb]
max_port = 700
min_port = 500
block_ports = []
[volcengine]
enable = true
[volcengine.clb]
@ -34,3 +41,8 @@ enable = false
max_port = 700
min_port = 500
[tencentcloud]
enable = true
[tencentcloud.clb]
min_port = 700
max_port = 750

View File

@ -41,6 +41,7 @@ spec:
- --provider-config=/etc/kruise-game/config.toml
- --api-server-qps=5
- --api-server-qps-burst=10
- --enable-cert-generation=false
image: controller:latest
name: manager
env:

View File

@ -2,7 +2,6 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
@ -13,19 +12,49 @@ rules:
- create
- patch
- apiGroups:
- admissionregistration.k8s.io
- ""
resources:
- mutatingwebhookconfigurations
- nodes
- persistentvolumeclaims
- persistentvolumes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
- persistentvolumeclaims/status
- persistentvolumes/status
verbs:
- get
- apiGroups:
- ""
resources:
- pods
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/status
- services/status
verbs:
- get
- patch
- update
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
verbs:
- create
@ -38,19 +67,6 @@ rules:
- alibabacloud.com
resources:
- poddnats
verbs:
- get
- list
- watch
- apiGroups:
- alibabacloud.com
resources:
- poddnats/status
verbs:
- get
- apiGroups:
- alibabacloud.com
resources:
- podeips
verbs:
- get
@ -59,6 +75,7 @@ rules:
- apiGroups:
- alibabacloud.com
resources:
- poddnats/status
- podeips/status
verbs:
- get
@ -76,17 +93,6 @@ rules:
- apps.kruise.io
resources:
- podprobemarkers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- apps.kruise.io
resources:
- statefulsets
verbs:
- create
@ -104,88 +110,6 @@ rules:
- get
- patch
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- get
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- get
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumes/status
verbs:
- get
- apiGroups:
- ""
resources:
- pods
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- services
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- get
- patch
- update
- apiGroups:
- elbv2.k8s.aws
resources:
@ -201,16 +125,6 @@ rules:
- elbv2.services.k8s.aws
resources:
- listeners
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- elbv2.services.k8s.aws
resources:
- targetgroups
verbs:
- create
@ -223,31 +137,6 @@ rules:
- game.kruise.io
resources:
- gameservers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- game.kruise.io
resources:
- gameservers/finalizers
verbs:
- update
- apiGroups:
- game.kruise.io
resources:
- gameservers/status
verbs:
- get
- patch
- update
- apiGroups:
- game.kruise.io
resources:
- gameserversets
verbs:
- create
@ -260,17 +149,37 @@ rules:
- apiGroups:
- game.kruise.io
resources:
- gameservers/finalizers
- gameserversets/finalizers
verbs:
- update
- apiGroups:
- game.kruise.io
resources:
- gameservers/status
- gameserversets/status
verbs:
- get
- patch
- update
- apiGroups:
- networking.cloud.tencent.com
resources:
- dedicatedclblisteners
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- networking.cloud.tencent.com
resources:
- dedicatedclblisteners/status
verbs:
- get
- apiGroups:
- networking.k8s.io
resources:

View File

@ -0,0 +1,297 @@
---
# service account
apiVersion: v1
kind: ServiceAccount
metadata:
name: index-offset-scheduler
namespace: kruise-game-system
---
# clusterRole
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: 'true'
name: index-offset-scheduler
rules:
- apiGroups:
- ''
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- coordination.k8s.io
resourceNames:
- kube-scheduler
- index-offset-scheduler
resources:
- leases
verbs:
- get
- list
- update
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leasecandidates
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- pods
verbs:
- delete
- get
- list
- watch
- apiGroups:
- ''
resources:
- bindings
- pods/binding
verbs:
- create
- apiGroups:
- ''
resources:
- pods/status
verbs:
- patch
- update
- apiGroups:
- ''
resources:
- replicationcontrollers
- services
verbs:
- get
- list
- watch
- apiGroups:
- apps
- extensions
resources:
- replicasets
verbs:
- get
- list
- watch
- apiGroups:
- apps
resources:
- statefulsets
verbs:
- get
- list
- watch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- persistentvolumeclaims
- persistentvolumes
verbs:
- get
- list
- watch
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- storage.k8s.io
resources:
- csinodes
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- csidrivers
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- csistoragecapacities
verbs:
- get
- list
- watch
- apiGroups:
- ""
resourceNames:
- kube-scheduler
- index-offset-scheduler
resources:
- endpoints
verbs:
- delete
- get
- patch
- update
---
# ClusterRoleBinding: index-offset-scheduler
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: index-offset-scheduler-as-kube-scheduler
subjects:
- kind: ServiceAccount
name: index-offset-scheduler
namespace: kruise-game-system
roleRef:
kind: ClusterRole
name: index-offset-scheduler
apiGroup: rbac.authorization.k8s.io
---
# ClusterRoleBinding: system:volume-scheduler
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: index-offset-scheduler-as-volume-scheduler
subjects:
- kind: ServiceAccount
name: index-offset-scheduler
namespace: kruise-game-system
roleRef:
kind: ClusterRole
name: system:volume-scheduler
apiGroup: rbac.authorization.k8s.io
---
# RoleBinding: apiserver
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: index-offset-scheduler-extension-apiserver-authentication-reader
namespace: kube-system
roleRef:
kind: Role
name: extension-apiserver-authentication-reader
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: index-offset-scheduler
namespace: kruise-game-system
---
# configmap
apiVersion: v1
kind: ConfigMap
metadata:
name: index-offset-scheduler-config
namespace: kruise-game-system
data:
scheduler-config.yaml: |
# stable v1 after version 1.25
apiVersion: kubescheduler.config.k8s.io/v1
kind: KubeSchedulerConfiguration
leaderElection:
leaderElect: false
resourceNamespace: kruise-game-system
resourceName: index-offset-scheduler
profiles:
- schedulerName: index-offset-scheduler
plugins:
score:
enabled:
- name: index-offset-scheduler
---
# deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: index-offset-scheduler
namespace: kruise-game-system
labels:
app: index-offset-scheduler
spec:
replicas: 1
selector:
matchLabels:
app: index-offset-scheduler
template:
metadata:
labels:
app: index-offset-scheduler
spec:
serviceAccountName: index-offset-scheduler
containers:
- name: scheduler
# change to your own image if needed
image: openkruise/kruise-game-scheduler-index-offset:v1.0
imagePullPolicy: Always
command:
- /app/index-offset-scheduler
- --config=/etc/kubernetes/scheduler-config.yaml
- --v=5
resources:
requests:
cpu: 100m
memory: 50Mi
limits:
cpu: 500m
memory: 512Mi
volumeMounts:
- name: config
mountPath: /etc/kubernetes
# imagePullSecrets:
# - name: <your image pull secret>
volumes:
- name: config
configMap:
name: index-offset-scheduler-config

View File

@ -0,0 +1,2 @@
resources:
- index-offset-scheduler.yaml

View File

@ -1,2 +1,6 @@
resources:
- manifests.yaml
- service.yaml
configurations:
- kustomizeconfig.yaml

View File

@ -0,0 +1,24 @@
# the following config is for teaching kustomize where to look when substituting vars.
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: MutatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/name
- kind: ValidatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/name
namespace:
- kind: MutatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/namespace
create: true
- kind: ValidatingWebhookConfiguration
group: admissionregistration.k8s.io
path: webhooks/clientConfig/service/namespace
create: true
varReference:
- path: metadata/annotations

View File

@ -0,0 +1,65 @@
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: mutating-webhook
webhooks:
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: kruise-game-system
path: /mutate-v1-pod
failurePolicy: Fail
matchPolicy: Equivalent
name: mgameserverset.kb.io
rules:
- operations:
- CREATE
- UPDATE
- DELETE
apiGroups:
- ""
apiVersions:
- v1
resources:
- pods
objectSelector:
matchExpressions:
- key: game.kruise.io/owner-gss
operator: Exists
sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: validating-webhook
webhooks:
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
name: webhook-service
namespace: kruise-game-system
path: /validate-v1alpha1-gss
failurePolicy: Fail
matchPolicy: Equivalent
name: vgameserverset.kb.io
namespaceSelector: {}
objectSelector: {}
rules:
- apiGroups:
- game.kruise.io
apiVersions:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- gameserversets
sideEffects: None
timeoutSeconds: 10

View File

@ -7,6 +7,6 @@ metadata:
spec:
ports:
- port: 443
targetPort: 9876
targetPort: webhook-server
selector:
control-plane: controller-manager

View File

@ -133,6 +133,7 @@ OpenKruiseGame supports the following network plugins:
- AlibabaCloud-NATGW
- AlibabaCloud-SLB
- AlibabaCloud-SLB-SharedPort
- Volcengine-EIP
---
@ -421,7 +422,7 @@ SlbIds
PortProtocols
- Meaning: the ports in the pod to be exposed and the protocols. You can specify multiple ports and protocols.
- Value: in the format of port1/protocol1,port2/protocol2,... The protocol names must be in uppercase letters.
- Value: in the format of port1/protocol1,port2/protocol2,... (to expose the same port over multiple protocols, use the form 8000/TCPUDP). The protocol names must be in uppercase letters.
- Configuration change supported or not: yes.
Fixed
@ -430,6 +431,12 @@ Fixed
- Value: false or true.
- Configuration change supported or not: yes.
ExternalTrafficPolicyType
- Meaning: the Service load-balancer forwarding type. If set to Local, the load balancer only forwards traffic to Pods on the local node, so the client source IP is preserved without SNAT.
- Value: Local/Cluster. The default value is Cluster.
- Configuration change supported or not: no. It may interact with the fixed IP/port mapping relationship, so changing it is not recommended.
AllowNotReadyContainers
- Meaning: the names of containers that are allowed to be not ready during in-place updates; traffic to them will not be cut off.
@ -1074,3 +1081,258 @@ The network status of GameServer would be as follows:
networkType: TencentCloud-CLB
```
---
### HwCloud-ELB
#### Plugin name
`HwCloud-ELB`
#### Cloud Provider
HwCloud
#### Plugin description
- HwCloud-ELB enables game servers to be accessed from the Internet by using Layer 4 Load Balancer (ELB) of Huawei Cloud. ELB is a type of Server Load Balancer (SLB). HwCloud-ELB uses different ports of the same ELB instance to forward Internet traffic to different game servers. The ELB instance only forwards traffic, but does not implement load balancing.
- This network plugin supports network isolation.
#### Network parameters
ElbIds
- Meaning: the ELB instance IDs. You can specify multiple IDs; at least one is required.
- Value: in the format of elbId-0,elbId-1,... An example value is "lb-9zeo7prq1m25ctpfrw1m7,lb-bp1qz7h50yd3w58h2f8je"
- Configuration change supported or not: yes. You can append new elbIds at the end. However, it is recommended not to change existing elbIds that are in use.
PortProtocols
- Meaning: the ports in the pod to be exposed and the protocols. You can specify multiple ports and protocols.
- Value: in the format of port1/protocol1,port2/protocol2,... (to expose the same port over multiple protocols, use the form 8000/TCPUDP). The protocol names must be in uppercase letters.
- Configuration change supported or not: yes.
Fixed
- Meaning: whether the mapping relationship is fixed. If the mapping relationship is fixed, the mapping relationship remains unchanged even if the pod is deleted and recreated.
- Value: false or true.
- Configuration change supported or not: yes.
AllowNotReadyContainers
- Meaning: the names of containers that are allowed to be not ready during in-place updates; traffic to them will not be cut off.
- Value: {containerName_0},{containerName_1},... Example: sidecar
- Configuration change supported or not: It cannot be changed during the in-place updating process.
ExternalTrafficPolicyType
- Meaning: the Service load-balancer forwarding type. If set to Local, the load balancer only forwards traffic to Pods on the local node, so the client source IP is preserved without SNAT.
- Value: Local/Cluster. The default value is Cluster.
- Configuration change supported or not: no. It may interact with the fixed IP/port mapping relationship, so changing it is not recommended.
The LB configuration parameters below are consistent with the Huawei Cloud CCM: https://github.com/kubernetes-sigs/cloud-provider-huaweicloud/blob/master/docs/usage-guide.md
LBHealthCheckFlag
- Meaning: Whether to enable health check
- Format: "on" means on, "off" means off. Default is on
- Whether to support changes: Yes
LBHealthCheckOption
- Meaning: health check configuration
- Format: a JSON string like {"delay": 3, "timeout": 15, "max_retries": 3}
- Whether to support changes: Yes
ElbClass
- Meaning: the Huawei Cloud ELB class
- Format: dedicated or shared (default dedicated)
- Whether to support changes: No
ElbConnLimit
- Meaning: the ELB connection limit; only effective with the shared ELB class
- Format: the value ranges from -1 to 2147483647. The default value is -1
- Whether to support changes: No
ElbLbAlgorithm
- Meaning: Specifies the load balancing algorithm of the backend server group
- Format: ROUND_ROBIN, LEAST_CONNECTIONS, or SOURCE_IP. Default ROUND_ROBIN
- Whether to support changes: Yes
ElbSessionAffinityFlag
- Meaning: Specifies whether to enable session affinity
- Format: on, off default off
- Whether to support changes: Yes
ElbSessionAffinityOption
- Meaning: Specifies the sticky session timeout duration in minutes.
- Format: json string like {"type": "SOURCE_IP", "persistence_timeout": 15}
- Whether to support changes: Yes
ElbTransparentClientIP
- Meaning: Specifies whether to pass source IP addresses of the clients to backend servers
- Format: true or false default false
- Whether to support changes: Yes
ElbXForwardedHost
- Meaning: Specifies whether to rewrite the X-Forwarded-Host header
- Format: true or false default false
- Whether to support changes: Yes
ElbIdleTimeout
- Meaning: Specifies the idle timeout for the listener
- Format: 0 to 4000. If not set, the LB default value is used
- Whether to support changes: Yes
ElbRequestTimeout
- Meaning: Specifies the request timeout for the listener.
- Format: 1 to 300. If not set, the LB default value is used
- Whether to support changes: Yes
ElbResponseTimeout
- Meaning: Specifies the response timeout for the listener
- Format: 1 to 300. If not set, the LB default value is used
- Whether to support changes: Yes
#### Plugin configuration
```
[hwcloud]
enable = true
[hwcloud.elb]
max_port = 700
min_port = 500
block_ports = []
```
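The parameters above are documented without a GameServerSet example. A minimal sketch follows, assuming a cluster where the HwCloud-ELB plugin and the Huawei Cloud CCM are installed; the name, ELB ID, ports, and image are illustrative placeholders, not values from this changeset:
```yaml
apiVersion: game.kruise.io/v1alpha1
kind: GameServerSet
metadata:
  name: elb-nginx              # hypothetical name
  namespace: default
spec:
  replicas: 2
  updateStrategy:
    rollingUpdate:
      podUpdatePolicy: InPlaceIfPossible
  network:
    networkType: HwCloud-ELB
    networkConf:
      - name: ElbIds
        value: "elb-xxxxxxxx"  # placeholder ELB instance ID
      - name: PortProtocols
        value: "80/TCP"        # container port to expose
      - name: Fixed
        value: "true"          # keep the port mapping across pod recreation
      - name: LBHealthCheckFlag
        value: "on"
  gameServerTemplate:
    spec:
      containers:
        - image: nginx
          name: nginx
```
The external ports on the ELB instance are expected to be allocated from the min_port/max_port range configured under [hwcloud.elb] above.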
---
### Volcengine-EIP
#### Plugin name
`Volcengine-EIP`
#### Cloud Provider
Volcengine
#### Plugin description
- Allocates or binds a dedicated Elastic IP (EIP) from Volcengine for each GameServer. You can specify an existing EIP via annotation or `networkConf`, or let the system allocate a new EIP automatically.
- The exposed public access port is consistent with the port listened to in the container. Security group policies need to be configured by the user.
- Suitable for game server scenarios that require public network access.
- Requires the `vpc-cni-controlplane` component to be installed in the cluster. For details, see [component documentation](https://www.volcengine.com/docs/6460/101015).
#### Network parameters
> For more parameters, refer to: https://www.volcengine.com/docs/6460/1152127
name
- EIP name. If not specified, the system will generate one automatically.
- Whether to support changes: no.
isp
- EIP type.
- Whether to support changes: no.
projectName
- Meaning: Project name to which the EIP belongs. Default is `default`.
- Whether to support changes: no.
bandwidth
- Meaning: Peak bandwidth in Mbps. Optional.
- Whether to support changes: no.
bandwidthPackageId
- Meaning: Shared bandwidth package ID to bind. Optional. If not set, EIP will not be bound to a shared bandwidth package.
- Whether to support changes: no.
billingType
- Meaning: EIP billing type.
- Value:
- 2: (default) Pay-by-bandwidth.
- 3: Pay-by-traffic.
- Whether to support changes: no.
description
- Meaning: Description of the EIP resource.
- Whether to support changes: no.
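These parameters can be passed through the GameServerSet's `networkConf`; a minimal sketch of the network block is shown below (the values are illustrative assumptions, not defaults):
```yaml
network:
  networkType: Volcengine-EIP
  networkConf:
    - name: bandwidth
      value: "5"                      # peak bandwidth in Mbps (illustrative)
    - name: billingType
      value: "3"                      # pay-by-traffic (illustrative)
    - name: description
      value: "eip for game server"    # free-form description (illustrative)
```
The full GameServerSet example further below omits `networkConf` entirely, in which case the plugin falls back to the defaults described above.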
#### Annotation parameters
- `vke.volcengine.com/primary-eip-id`: Specify an existing EIP ID. The Pod will bind this EIP at startup.
#### Plugin configuration
None
#### Example
```yaml
apiVersion: game.kruise.io/v1alpha1
kind: GameServerSet
metadata:
name: eip-nginx
namespace: default
spec:
replicas: 1
updateStrategy:
rollingUpdate:
podUpdatePolicy: InPlaceIfPossible
network:
networkType: Volcengine-EIP
gameServerTemplate:
spec:
containers:
- image: nginx
name: nginx
```
The network status of the generated GameServer is as follows:
```yaml
networkStatus:
createTime: "2025-01-17T10:10:18Z"
currentNetworkState: Ready
desiredNetworkState: Ready
externalAddresses:
- ip: 106.xx.xx.xx
internalAddresses:
- ip: 192.168.1.51
lastTransitionTime: "2025-01-17T10:10:18Z"
networkType: Volcengine-EIP
```
Pod annotation example:
```yaml
metadata:
annotations:
vke.volcengine.com/primary-eip-id: eip-xxx
vke.volcengine.com/primary-eip-attributes: '{"bandwidth":3,"billingType":"2"}'
```
The EIP resource will be named `{pod namespace}/{pod name}` in the Volcengine console, corresponding one-to-one with each GameServer.
---

View File

@ -139,6 +139,7 @@ EOF
- AlibabaCloud-SLB-SharedPort
- AlibabaCloud-NLB-SharedPort
- Volcengine-CLB
- Volcengine-EIP
---
@ -424,7 +425,7 @@ SlbIds
PortProtocols
- Meaning: the ports in the pod to be exposed and the protocols. You can specify multiple ports/protocols.
- Format: port1/protocol1,port2/protocol2,... (the protocol names must be in uppercase letters)
- Format: port1/protocol1,port2/protocol2,... (to expose the same port over multiple protocols, use the form 8000/TCPUDP) (the protocol names must be in uppercase letters)
- Configuration change supported or not: yes
Fixed
@ -433,6 +434,12 @@ Fixed
- Format: false / true
- Configuration change supported or not: yes
ExternalTrafficPolicyType
- Meaning: whether the Service load balancer forwards traffic only to local instances. If set to Local, a Local-type Service is created; together with the cloud-manager configuring only the corresponding Node, the client source IP can be preserved.
- Format: Local/Cluster. Default is Cluster
- Configuration change supported or not: no. It is related to whether the IP/port mapping is fixed; changing it is not recommended
AllowNotReadyContainers
- Meaning: the names of containers that are allowed to keep serving traffic during in-place updates. Multiple names can be specified.
@ -874,6 +881,125 @@ status:
In addition, the generated EIP resources are named {pod namespace}/{pod name} in the Alibaba Cloud console, corresponding one-to-one with each game server.
---
### Volcengine-EIP
#### Plugin name
`Volcengine-EIP`
#### Cloud Provider
Volcengine
#### Plugin description
- Allocates or binds a dedicated Volcengine Elastic IP (EIP) for each GameServer. An existing EIP can be specified via annotation or networkConf, or a new EIP can be allocated automatically.
- The exposed public access ports are the same as the ports the container listens on. Security group policies must be configured by the user.
- Suitable for scenarios such as game servers that require public network access.
- Requires the `vpc-cni-controlplane` component to be installed in the cluster. For details, see the [component documentation](https://www.volcengine.com/docs/6460/101015).
#### Network parameters
> For more parameters, refer to: https://www.volcengine.com/docs/6460/1152127
name
- EIP name. If not specified, the system generates one automatically.
- Whether to support changes: no.
isp
- EIP line type.
- Whether to support changes: no.
projectName
- Meaning: the name of the project to which the EIP belongs. Default is default.
- Whether to support changes: no.
bandwidth
- Meaning: peak bandwidth in Mbps. Optional.
- Whether to support changes: no.
bandwidthPackageId
- Meaning: the shared bandwidth package ID to bind. Optional; if not set, the EIP is not bound to a shared bandwidth package.
- Whether to support changes: no.
billingType
- Meaning: the EIP billing type.
- Values:
- 2: (default) pay-as-you-go, billed by bandwidth cap.
- 3: pay-as-you-go, billed by actual traffic.
- Whether to support changes: no.
description
- Meaning: description of the EIP resource.
- Whether to support changes: no.
#### Annotation parameters
- `vke.volcengine.com/primary-eip-id`: specifies an existing EIP ID. The Pod binds this EIP at startup.
#### Plugin configuration
#### Example
```yaml
apiVersion: game.kruise.io/v1alpha1
kind: GameServerSet
metadata:
name: eip-nginx
namespace: default
spec:
replicas: 1
updateStrategy:
rollingUpdate:
podUpdatePolicy: InPlaceIfPossible
network:
networkType: Volcengine-EIP
gameServerTemplate:
spec:
containers:
- image: nginx
name: nginx
```
The networkStatus field of the generated GameServer is as follows:
```yaml
networkStatus:
createTime: "2025-01-17T10:10:18Z"
currentNetworkState: Ready
desiredNetworkState: Ready
externalAddresses:
- ip: 106.xx.xx.xx
internalAddresses:
- ip: 192.168.1.51
lastTransitionTime: "2025-01-17T10:10:18Z"
networkType: Volcengine-EIP
```
Pod annotation example:
```yaml
metadata:
annotations:
vke.volcengine.com/primary-eip-id: eip-xxx
vke.volcengine.com/primary-eip-attributes: '{"bandwidth":3,"billingType":"2"}'
```
The EIP resource is named `{pod namespace}/{pod name}` in the Volcengine console, corresponding one-to-one with each game server.
---
### AlibabaCloud-NLB-SharedPort
#### Plugin name
@ -1015,6 +1141,148 @@ min_port = 1000
max_port = 1100
```
---
### HwCloud-ELB
#### Plugin name
`HwCloud-ELB`
#### Cloud Provider
HwCloud
#### Plugin description
- HwCloud-ELB uses Huawei Cloud Elastic Load Balancer (ELB) as the entity that carries external traffic. In this mode, different game servers are exposed through different ports of the same ELB instance; the ELB only forwards traffic and does not perform load balancing.
- Requires installing https://github.com/kubernetes-sigs/cloud-provider-huaweicloud.
- Network isolation supported: yes.
#### Network parameters
ElbIds
- Meaning: the ELB IDs. Multiple IDs can be specified (at least one is required)
- Format: for example "lb-9zeo7prq1m25ctpfrw1m7,lb-bp1qz7h50yd3w58h2f8je"
- Whether to support changes: yes, append only
PortProtocols
- Meaning: the ports in the pod to be exposed and the protocols. You can specify multiple ports/protocols.
- Format: port1/protocol1,port2/protocol2,... (the protocol names must be in uppercase letters)
- Whether to support changes: yes.
Fixed
- Meaning: whether the access IP/ports are fixed. If so, the internal/external network mapping remains unchanged even if the pod is deleted and recreated
- Format: false / true
- Whether to support changes: yes
AllowNotReadyContainers
- Meaning: the names of containers that are allowed to keep serving traffic during in-place updates. Multiple names can be specified
- Format: {containerName_0},{containerName_1},... For example: sidecar
- Whether to support changes: cannot be changed during the in-place update process.
ExternalTrafficPolicyType
- Meaning: whether the Service load balancer forwards traffic only to local instances. If set to Local, a Local-type Service is created; together with the cloud-manager configuring only the corresponding Node, the client source IP can be preserved.
- Format: Local/Cluster. Default is Cluster
- Whether to support changes: no. It is related to whether the IP/port mapping is fixed; changing it is not recommended
The LB configuration parameters below are consistent with the Huawei Cloud CCM: https://github.com/kubernetes-sigs/cloud-provider-huaweicloud/blob/master/docs/usage-guide.md
LBHealthCheckFlag
- Meaning: whether to enable health check
- Format: on or off. Default on
- Whether to support changes: yes
LBHealthCheckOption
- Meaning: health check configuration
- Format: a JSON string like {"delay": 3, "timeout": 15, "max_retries": 3}. Default empty
- Whether to support changes: yes
ElbClass
- Meaning: the ELB class
- Format: dedicated or shared. Default dedicated
- Whether to support changes: no
ElbConnLimit
- Meaning: the connection limit of a shared-class ELB
- Format: -1 to 2147483647. Default -1 (unlimited)
- Whether to support changes: no
ElbLbAlgorithm
- Meaning: the load balancing algorithm of the backend server group
- Format: ROUND_ROBIN, LEAST_CONNECTIONS, or SOURCE_IP. Default ROUND_ROBIN
- Whether to support changes: yes
ElbSessionAffinityFlag
- Meaning: whether to enable session affinity
- Format: on or off. Default off
- Whether to support changes: no
ElbSessionAffinityOption
- Meaning: the session affinity timeout configuration
- Format: a JSON string like {"type": "SOURCE_IP", "persistence_timeout": 15}
- Whether to support changes: yes
ElbTransparentClientIP
- Meaning: whether to pass through the client source IP
- Format: true or false. Default false
- Whether to support changes: yes
ElbXForwardedHost
- Meaning: whether to rewrite the X-Forwarded-Host header
- Format: true or false. Default false
- Whether to support changes: yes
ElbIdleTimeout
- Meaning: the idle timeout for the listener; idle connections are eventually closed
- Format: 0 to 4000. If not set, the LB default configuration is used
- Whether to support changes: yes
ElbRequestTimeout
- Meaning: the HTTP/HTTPS request timeout
- Format: 1 to 300. If not set, the LB default configuration is used
- Whether to support changes: yes
ElbResponseTimeout
- Meaning: the HTTP/HTTPS response timeout
- Format: 1 to 300. If not set, the LB default configuration is used
- Whether to support changes: yes
#### Plugin configuration
```
[hwcloud]
enable = true
[hwcloud.elb]
max_port = 700
min_port = 500
block_ports = []
```
---
#### Example
```yaml

go.mod (53 changed lines)
View File

@ -1,8 +1,8 @@
module github.com/openkruise/kruise-game
go 1.21
go 1.22.0
toolchain go1.21.5
toolchain go1.22.12
require (
github.com/BurntSushi/toml v1.2.1
@ -10,38 +10,33 @@ require (
github.com/davecgh/go-spew v1.1.1
github.com/kr/pretty v0.3.1
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.30.0
github.com/openkruise/kruise-api v1.7.1
github.com/onsi/gomega v1.32.0
github.com/openkruise/kruise-api v1.8.0
github.com/prometheus/client_golang v1.18.0
github.com/stretchr/testify v1.8.4
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e
google.golang.org/grpc v1.58.3
google.golang.org/protobuf v1.33.0
k8s.io/api v0.29.0
k8s.io/apimachinery v0.29.0
k8s.io/client-go v0.29.0
k8s.io/code-generator v0.29.0
k8s.io/api v0.30.10
k8s.io/apimachinery v0.30.10
k8s.io/client-go v0.30.10
k8s.io/code-generator v0.30.10
k8s.io/klog/v2 v2.120.1
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
sigs.k8s.io/aws-load-balancer-controller v0.0.0-20240322180528-61e0135b77cd
sigs.k8s.io/controller-runtime v0.17.2
sigs.k8s.io/controller-runtime v0.18.6
)
require (
cloud.google.com/go/compute/metadata v0.3.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/aws-controllers-k8s/runtime v0.34.0 // indirect
github.com/aws/aws-sdk-go v1.50.20 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
@ -49,7 +44,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
@ -64,6 +59,7 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
@ -71,7 +67,6 @@ require (
github.com/spf13/pflag v1.0.5 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.24.0 // indirect
golang.org/x/oauth2 v0.17.0 // indirect
@ -88,20 +83,10 @@ require (
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.29.0 // indirect
k8s.io/component-base v0.29.0 // indirect
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
k8s.io/apiextensions-apiserver v0.30.1 // indirect
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
replace (
k8s.io/api => k8s.io/api v0.24.2
k8s.io/apimachinery => k8s.io/apimachinery v0.24.2
k8s.io/client-go => k8s.io/client-go v0.24.2
k8s.io/code-generator => k8s.io/code-generator v0.24.2
k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42
sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.12.3
)

go.sum (563 changed lines)
View File

@ -1,66 +1,5 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws-controllers-k8s/elbv2-controller v0.0.9 h1:llZSR3zUAYpc6RgQM0zVOdc21RTex9xWm0BM6llJugQ=
github.com/aws-controllers-k8s/elbv2-controller v0.0.9/go.mod h1:bdPik6wE6Zb0WFV38cR4lEfbV1mYdiduHcwkytrsfKQ=
github.com/aws-controllers-k8s/runtime v0.34.0 h1:pz8MTzz8bY9JMTSMjvWx9SAJ6bJQIEx5ZrXw6wS74mc=
@ -69,63 +8,30 @@ github.com/aws/aws-sdk-go v1.50.20 h1:xfAnSDVf/azIWTVQXQODp89bubvCS85r70O3nuQ4dn
github.com/aws/aws-sdk-go v1.50.20/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@ -133,108 +39,46 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54=
github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@ -242,45 +86,32 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/openkruise/kruise-api v1.7.1 h1:pF+tPHWY1SS0X7sXTOIHZ5sNb5h5MBy1D7h6bJI5yW8=
github.com/openkruise/kruise-api v1.7.1/go.mod h1:ZD94u+GSQGtKrDfFhMVpQhzjr7g7UlXhYfRoNp/EhJs=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
github.com/openkruise/kruise-api v1.8.0 h1:DoUb873uuf2Bhoajim+9tb/X0eFpwIxRydc4Awfeeiw=
github.com/openkruise/kruise-api v1.8.0/go.mod h1:XRpoTk7VFgh9r5HRUZurwhiC3cpCf5BX8X4beZLcIfA=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -288,221 +119,77 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
@@ -511,76 +198,19 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY=
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
@@ -590,96 +220,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw=
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -687,29 +231,19 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -717,58 +251,33 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI=
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0=
k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc=
k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM=
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA=
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
k8s.io/code-generator v0.24.2 h1:EGeRWzJrpwi6T6CvoNl0spM6fnAnOdCr0rz7H4NU1rk=
k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s=
k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks=
k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/api v0.30.10 h1:2YvzRF/BELgCvxbQqFKaan5hnj2+y7JOuqu2WpVk3gg=
k8s.io/api v0.30.10/go.mod h1:Hyz3ZuK7jVLJBUFvwzDSGwxHuDdsrGs5RzF16wfHIn4=
k8s.io/apiextensions-apiserver v0.30.1 h1:4fAJZ9985BmpJG6PkoxVRpXv9vmPUOVzl614xarePws=
k8s.io/apiextensions-apiserver v0.30.1/go.mod h1:R4GuSrlhgq43oRY9sF2IToFh7PVlF1JjfWdoG3pixk4=
k8s.io/apimachinery v0.30.10 h1:UflKuJeSSArttm05wjYP0GwpTlvjnMbDKFn6F7rKkKU=
k8s.io/apimachinery v0.30.10/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/client-go v0.30.10 h1:C0oWM82QMvosIl/IdJhWfTUb7rIxM52rNSutFBknAVY=
k8s.io/client-go v0.30.10/go.mod h1:OfTvt0yuo8VpMViOsgvYQb+tMJQLNWVBqXWkzdFXSq4=
k8s.io/code-generator v0.30.10 h1:1p47NC8/zijsgCuqI0F20ErxsWE3VLUGEEcxoiweMeo=
k8s.io/code-generator v0.30.10/go.mod h1:b5HvR9KGVjQOK1fbnZfP/FL4Qe3Zox5CfXJ5Wp7tqQo=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/aws-load-balancer-controller v0.0.0-20240322180528-61e0135b77cd h1:/3ifrMqLoWmX/XPNgQHvJOP+NmeNqH4BEzWHUd1O7EU=
sigs.k8s.io/aws-load-balancer-controller v0.0.0-20240322180528-61e0135b77cd/go.mod h1:M1AzTPNpGdaumE60FGNDHc2ZOqUdm8BBuiMbMEWYOx4=
sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio=
sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
sigs.k8s.io/controller-runtime v0.18.6 h1:UnEoLBLDpQwzJ2jYh6aTdiMhGjNDR7IdFn9YEqHIccc=
sigs.k8s.io/controller-runtime v0.18.6/go.mod h1:Dcsa9v8AEBWa3sQNJHsuWPT4ICv99irl5wj83NiC12U=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

main.go

@@ -36,6 +36,7 @@ import (
"k8s.io/client-go/rest"
elbv2api "sigs.k8s.io/aws-load-balancer-controller/apis/elbv2/v1beta1"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
@@ -43,14 +44,15 @@ import (
"github.com/openkruise/kruise-game/cloudprovider"
aliv1beta1 "github.com/openkruise/kruise-game/cloudprovider/alibabacloud/apis/v1beta1"
cpmanager "github.com/openkruise/kruise-game/cloudprovider/manager"
tencentv1alpha1 "github.com/openkruise/kruise-game/cloudprovider/tencentcloud/apis/v1alpha1"
kruisegameclientset "github.com/openkruise/kruise-game/pkg/client/clientset/versioned"
kruisegamevisions "github.com/openkruise/kruise-game/pkg/client/informers/externalversions"
controller "github.com/openkruise/kruise-game/pkg/controllers"
"github.com/openkruise/kruise-game/pkg/externalscaler"
"github.com/openkruise/kruise-game/pkg/metrics"
utilclient "github.com/openkruise/kruise-game/pkg/util/client"
"github.com/openkruise/kruise-game/pkg/util/client"
"github.com/openkruise/kruise-game/pkg/webhook"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
//+kubebuilder:scaffold:imports
)
@@ -69,7 +71,6 @@ func init() {
utilruntime.Must(kruiseV1alpha1.AddToScheme(scheme))
utilruntime.Must(aliv1beta1.AddToScheme(scheme))
utilruntime.Must(tencentv1alpha1.AddToScheme(scheme))
utilruntime.Must(ackv1alpha1.AddToScheme(scheme))
utilruntime.Must(elbv2api.AddToScheme(scheme))
@@ -121,8 +122,9 @@ func main() {
setRestConfig(restConfig)
mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
Metrics: metricsserver.Options{
BindAddress: metricsAddr,
},
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "game-kruise-manager",
@@ -137,9 +139,16 @@ func main() {
// if you are doing or is intended to do any operation such as perform cleanups
// after the manager stops then its usage might be unsafe.
// LeaderElectionReleaseOnCancel: true,
Namespace: namespace,
Cache: cache.Options{
SyncPeriod: syncPeriod,
NewClient: utilclient.NewClient,
DefaultNamespaces: getCacheNamespacesFromFlag(namespace),
},
WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{
Host: "0.0.0.0",
Port: webhook.GetPort(),
CertDir: webhook.GetCertDir(),
}),
NewCache: client.NewCache,
})
if err != nil {
setupLog.Error(err, "unable to start kruise-game-manager")
@ -226,3 +235,12 @@ func setRestConfig(c *rest.Config) {
c.Burst = apiServerBurstQPSFlag
}
}
func getCacheNamespacesFromFlag(ns string) map[string]cache.Config {
if ns == "" {
return nil
}
return map[string]cache.Config{
ns: {},
}
}
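For orientation, the changes above track the controller-runtime upgrade from v0.12 to v0.18: MetricsBindAddress, Port, Namespace and SyncPeriod move into the nested Metrics, WebhookServer and Cache options. A minimal, self-contained sketch of the new wiring (not the project's exact code; metricsAddr, namespace and syncPeriod are placeholders):

package main

import (
	"time"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
)

func newManager(metricsAddr, namespace string, syncPeriod *time.Duration) (ctrl.Manager, error) {
	// An empty namespace leaves DefaultNamespaces nil, i.e. the cache (and all watches) stays cluster-wide;
	// a non-empty value restricts caching to that single namespace, replacing the old Options.Namespace field.
	var namespaces map[string]cache.Config
	if namespace != "" {
		namespaces = map[string]cache.Config{namespace: {}}
	}
	return ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		// replaces MetricsBindAddress
		Metrics: metricsserver.Options{BindAddress: metricsAddr},
		// replaces the top-level SyncPeriod and Namespace fields
		Cache: cache.Options{
			SyncPeriod:        syncPeriod,
			DefaultNamespaces: namespaces,
		},
		// replaces the top-level Port (and Host/CertDir) fields
		WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{Port: 9443}),
	})
}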


@ -18,6 +18,7 @@ package gameserver
import (
"context"
"flag"
"reflect"
"time"
@ -48,6 +49,10 @@ import (
utildiscovery "github.com/openkruise/kruise-game/pkg/util/discovery"
)
func init() {
flag.IntVar(&concurrentReconciles, "gameserver-workers", concurrentReconciles, "Max concurrent workers for GameServer controller.")
}
var (
controllerKind = gamekruiseiov1alpha1.SchemeGroupVersion.WithKind("GameServer")
// leave it to batch size
@ -78,15 +83,17 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
klog.Error(err)
return err
}
if err = c.Watch(&source.Kind{Type: &gamekruiseiov1alpha1.GameServer{}}, &handler.EnqueueRequestForObject{}); err != nil {
if err = c.Watch(source.Kind(mgr.GetCache(),
&gamekruiseiov1alpha1.GameServer{},
&handler.TypedEnqueueRequestForObject[*gamekruiseiov1alpha1.GameServer]{})); err != nil {
klog.Error(err)
return err
}
if err = watchPod(c); err != nil {
if err = watchPod(mgr, c); err != nil {
klog.Error(err)
return err
}
if err = watchNode(c, mgr.GetClient()); err != nil {
if err = watchNode(mgr, c); err != nil {
klog.Error(err)
return err
}
@ -101,10 +108,10 @@ type GameServerReconciler struct {
recorder record.EventRecorder
}
func watchPod(c controller.Controller) error {
if err := c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.Funcs{
CreateFunc: func(createEvent event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
pod := createEvent.Object.(*corev1.Pod)
func watchPod(mgr manager.Manager, c controller.Controller) error {
if err := c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.TypedFuncs[*corev1.Pod]{
CreateFunc: func(ctx context.Context, createEvent event.TypedCreateEvent[*corev1.Pod], limitingInterface workqueue.RateLimitingInterface) {
pod := createEvent.Object
if _, exist := pod.GetLabels()[gamekruiseiov1alpha1.GameServerOwnerGssKey]; exist {
limitingInterface.Add(reconcile.Request{NamespacedName: types.NamespacedName{
Name: pod.GetName(),
@ -112,8 +119,8 @@ func watchPod(c controller.Controller) error {
}})
}
},
UpdateFunc: func(updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
newPod := updateEvent.ObjectNew.(*corev1.Pod)
UpdateFunc: func(ctx context.Context, updateEvent event.TypedUpdateEvent[*corev1.Pod], limitingInterface workqueue.RateLimitingInterface) {
newPod := updateEvent.ObjectNew
if _, exist := newPod.GetLabels()[gamekruiseiov1alpha1.GameServerOwnerGssKey]; exist {
limitingInterface.Add(reconcile.Request{NamespacedName: types.NamespacedName{
Name: newPod.GetName(),
@ -121,8 +128,8 @@ func watchPod(c controller.Controller) error {
}})
}
},
DeleteFunc: func(deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
pod := deleteEvent.Object.(*corev1.Pod)
DeleteFunc: func(ctx context.Context, deleteEvent event.TypedDeleteEvent[*corev1.Pod], limitingInterface workqueue.RateLimitingInterface) {
pod := deleteEvent.Object
if _, exist := pod.GetLabels()[gamekruiseiov1alpha1.GameServerOwnerGssKey]; exist {
limitingInterface.Add(reconcile.Request{
NamespacedName: types.NamespacedName{
@ -132,17 +139,20 @@ func watchPod(c controller.Controller) error {
})
}
},
}); err != nil {
})); err != nil {
return err
}
return nil
}
func watchNode(c controller.Controller, cli client.Client) error {
if err := c.Watch(&source.Kind{Type: &corev1.Node{}}, &handler.Funcs{
UpdateFunc: func(updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
nodeNew := updateEvent.ObjectNew.(*corev1.Node)
nodeOld := updateEvent.ObjectOld.(*corev1.Node)
func watchNode(mgr manager.Manager, c controller.Controller) error {
cli := mgr.GetClient()
// watch node condition change
if err := c.Watch(source.Kind(mgr.GetCache(), &corev1.Node{}, &handler.TypedFuncs[*corev1.Node]{
UpdateFunc: func(ctx context.Context, updateEvent event.TypedUpdateEvent[*corev1.Node], limitingInterface workqueue.RateLimitingInterface) {
nodeNew := updateEvent.ObjectNew
nodeOld := updateEvent.ObjectOld
if reflect.DeepEqual(nodeNew.Status.Conditions, nodeOld.Status.Conditions) {
return
}
@ -166,7 +176,7 @@ func watchNode(c controller.Controller, cli client.Client) error {
})
}
},
}); err != nil {
})); err != nil {
return err
}
return nil
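The watch rewiring in this controller follows the same controller-runtime migration: the source now carries the cache and a typed handler, so event callbacks receive a context and an already-typed object instead of requiring type assertions. A condensed sketch of the pattern, reusing the v0.18 signatures that appear in the diff above (illustrative function name):

package gameserver

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"
)

// watchPodsSketch registers a typed Pod watch; the old style was
// c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.Funcs{...}) with type assertions inside.
func watchPodsSketch(mgr manager.Manager, c controller.Controller) error {
	return c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.TypedFuncs[*corev1.Pod]{
		UpdateFunc: func(ctx context.Context, e event.TypedUpdateEvent[*corev1.Pod], q workqueue.RateLimitingInterface) {
			// e.ObjectNew is already a *corev1.Pod; no assertion needed.
			q.Add(reconcile.Request{NamespacedName: types.NamespacedName{
				Name:      e.ObjectNew.GetName(),
				Namespace: e.ObjectNew.GetNamespace(),
			}})
		},
	}))
}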


@ -43,11 +43,12 @@ var (
)
const (
TimeFormat = "2006-01-02 15:04:05"
TimeFormat = time.RFC3339
)
const (
StateReason = "GsStateChanged"
GsNetworkStateReason = "GsNetworkState"
)
type Control interface {
@ -110,6 +111,7 @@ func (manager GameServerManager) SyncGsToPod() error {
}
if string(gs.Spec.OpsState) != podGsOpsState {
newLabels[gameKruiseV1alpha1.GameServerOpsStateKey] = string(gs.Spec.OpsState)
newAnnotations[gameKruiseV1alpha1.GameServerOpsStateLastChangedTime] = time.Now().Format(TimeFormat)
if podGsOpsState != "" {
eventType := corev1.EventTypeNormal
if gs.Spec.OpsState == gameKruiseV1alpha1.Maintaining {
@ -128,6 +130,11 @@ func (manager GameServerManager) SyncGsToPod() error {
var gsState gameKruiseV1alpha1.GameServerState
switch pod.Status.Phase {
case corev1.PodRunning:
// GameServer Deleting
if !pod.DeletionTimestamp.IsZero() {
gsState = gameKruiseV1alpha1.Deleting
break
}
// GameServer Updating
lifecycleState, exist := pod.GetLabels()[kruisePub.LifecycleStateKey]
if exist && lifecycleState == string(kruisePub.LifecycleStateUpdating) {
@ -144,12 +151,6 @@ func (manager GameServerManager) SyncGsToPod() error {
gsState = gameKruiseV1alpha1.PreDelete
break
}
// GameServer Deleting
if !pod.DeletionTimestamp.IsZero() {
gsState = gameKruiseV1alpha1.Deleting
break
}
// GameServer Ready / NotReady
_, condition := util.GetPodConditionFromList(pod.Status.Conditions, corev1.PodReady)
if condition != nil {
@ -174,14 +175,23 @@ func (manager GameServerManager) SyncGsToPod() error {
if gsState == gameKruiseV1alpha1.Crash {
eventType = corev1.EventTypeWarning
}
newAnnotations[gameKruiseV1alpha1.GameServerStateLastChangedTime] = time.Now().Format(TimeFormat)
manager.eventRecorder.Eventf(gs, eventType, StateReason, "State turn from %s to %s ", podGsState, string(gsState))
}
}
if pod.Annotations[gameKruiseV1alpha1.GameServerNetworkType] != "" {
oldTime, err := time.Parse(TimeFormat, pod.Annotations[gameKruiseV1alpha1.GameServerNetworkTriggerTime])
if (err == nil && time.Since(oldTime) > NetworkIntervalTime && time.Since(gs.Status.NetworkStatus.LastTransitionTime.Time) < NetworkTotalWaitTime) || (pod.Annotations[gameKruiseV1alpha1.GameServerNetworkTriggerTime] == "") {
if err != nil {
klog.Errorf("Failed to parse previous network trigger time for GameServer %s/%s: %v", gs.Namespace, gs.Name, err)
newAnnotations[gameKruiseV1alpha1.GameServerNetworkTriggerTime] = time.Now().Format(TimeFormat)
} else {
timeSinceOldTrigger := time.Since(oldTime)
timeSinceNetworkTransition := time.Since(gs.Status.NetworkStatus.LastTransitionTime.Time)
if timeSinceOldTrigger > NetworkIntervalTime && timeSinceNetworkTransition < NetworkTotalWaitTime {
klog.V(4).Infof("GameServer %s/%s network trigger conditions met, updating trigger time", gs.Namespace, gs.Name)
newAnnotations[gameKruiseV1alpha1.GameServerNetworkTriggerTime] = time.Now().Format(TimeFormat)
}
}
}
@ -309,10 +319,14 @@ func (manager GameServerManager) SyncPodToGs(gss *gameKruiseV1alpha1.GameServerS
func (manager GameServerManager) WaitOrNot() bool {
networkStatus := manager.gameServer.Status.NetworkStatus
alreadyWait := time.Since(networkStatus.LastTransitionTime.Time)
if networkStatus.DesiredNetworkState != networkStatus.CurrentNetworkState && alreadyWait < NetworkTotalWaitTime {
if networkStatus.DesiredNetworkState != networkStatus.CurrentNetworkState {
if alreadyWait < NetworkTotalWaitTime {
klog.Infof("GameServer %s/%s DesiredNetworkState: %s CurrentNetworkState: %s. %v remaining",
manager.gameServer.GetNamespace(), manager.gameServer.GetName(), networkStatus.DesiredNetworkState, networkStatus.CurrentNetworkState, NetworkTotalWaitTime-alreadyWait)
return true
} else {
manager.eventRecorder.Eventf(manager.gameServer, corev1.EventTypeWarning, GsNetworkStateReason, "Network wait timeout: waited %v, max %v", alreadyWait, NetworkTotalWaitTime)
}
}
return false
}
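The TimeFormat change is what makes the trigger time robust across time zones: the old "2006-01-02 15:04:05" layout carries no offset, so a timestamp written in one zone and parsed in another shifts by the zone difference, while RFC3339 embeds the offset. A small standalone illustration (not project code):

package main

import (
	"fmt"
	"time"
)

func main() {
	oldLayout := "2006-01-02 15:04:05" // previous TimeFormat: no zone information
	loc, _ := time.LoadLocation("Asia/Shanghai")
	written := time.Date(2025, 7, 10, 16, 0, 0, 0, loc)

	// Old layout: the +08:00 offset is dropped on Format and UTC is assumed on Parse,
	// so the round-tripped instant is eight hours off.
	parsedOld, _ := time.Parse(oldLayout, written.Format(oldLayout))
	fmt.Println(written.UTC().Equal(parsedOld)) // false

	// RFC3339 keeps the offset, so the instant survives the round trip.
	parsedNew, _ := time.Parse(time.RFC3339, written.Format(time.RFC3339))
	fmt.Println(written.Equal(parsedNew)) // true
}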


@ -2,6 +2,10 @@ package gameserver
import (
"context"
"reflect"
"strconv"
"testing"
kruiseV1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
kruiseV1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
gameKruiseV1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
@ -13,11 +17,8 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"reflect"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"strconv"
"testing"
)
var (
@ -929,7 +930,8 @@ func TestSyncPodToGs(t *testing.T) {
for i, test := range tests {
objs := []client.Object{test.gs, test.pod, test.node, test.gss}
c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
c := fake.NewClientBuilder().WithScheme(scheme).
WithObjects(objs...).WithStatusSubresource(objs...).Build()
manager := &GameServerManager{
client: c,
gameServer: test.gs,


@ -18,7 +18,9 @@ package gameserverset
import (
"context"
"flag"
kruisev1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
kruiseV1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -37,6 +39,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
@ -45,6 +48,10 @@ import (
utildiscovery "github.com/openkruise/kruise-game/pkg/util/discovery"
)
func init() {
flag.IntVar(&concurrentReconciles, "gameserverset-workers", concurrentReconciles, "Max concurrent workers for GameServerSet controller.")
}
var (
controllerKind = gamekruiseiov1alpha1.SchemeGroupVersion.WithKind("GameServerSet")
// leave it to batch size
@ -76,17 +83,28 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}
if err = c.Watch(&source.Kind{Type: &gamekruiseiov1alpha1.GameServerSet{}}, &handler.EnqueueRequestForObject{}); err != nil {
if err = c.Watch(source.Kind(mgr.GetCache(), &gamekruiseiov1alpha1.GameServerSet{}, &handler.TypedEnqueueRequestForObject[*gamekruiseiov1alpha1.GameServerSet]{})); err != nil {
klog.Error(err)
return err
}
if err = watchPod(c); err != nil {
if err = c.Watch(source.Kind(mgr.GetCache(), &kruisev1alpha1.PodProbeMarker{}, &handler.TypedEnqueueRequestForObject[*kruisev1alpha1.PodProbeMarker]{}, predicate.TypedFuncs[*kruisev1alpha1.PodProbeMarker]{
UpdateFunc: func(e event.TypedUpdateEvent[*kruisev1alpha1.PodProbeMarker]) bool {
oldScS := e.ObjectOld
newScS := e.ObjectNew
return oldScS.Status.ObservedGeneration != newScS.Status.ObservedGeneration
},
})); err != nil {
klog.Error(err)
return err
}
if err = watchWorkloads(c); err != nil {
if err = watchPod(mgr, c); err != nil {
klog.Error(err)
return err
}
if err = watchWorkloads(mgr, c); err != nil {
klog.Error(err)
return err
}
@ -95,11 +113,10 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
}
// watch pod
func watchPod(c controller.Controller) (err error) {
if err := c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.Funcs{
CreateFunc: func(createEvent event.CreateEvent, limitingInterface workqueue.RateLimitingInterface) {
pod := createEvent.Object.(*corev1.Pod)
func watchPod(mgr manager.Manager, c controller.Controller) (err error) {
if err := c.Watch(source.Kind(mgr.GetCache(), &corev1.Pod{}, &handler.TypedFuncs[*corev1.Pod]{
CreateFunc: func(ctx context.Context, createEvent event.TypedCreateEvent[*corev1.Pod], limitingInterface workqueue.RateLimitingInterface) {
pod := createEvent.Object
if gssName, exist := pod.GetLabels()[gamekruiseiov1alpha1.GameServerOwnerGssKey]; exist {
limitingInterface.Add(reconcile.Request{NamespacedName: types.NamespacedName{
Name: gssName,
@ -107,8 +124,8 @@ func watchPod(c controller.Controller) (err error) {
}})
}
},
UpdateFunc: func(updateEvent event.UpdateEvent, limitingInterface workqueue.RateLimitingInterface) {
pod := updateEvent.ObjectNew.(*corev1.Pod)
UpdateFunc: func(ctx context.Context, updateEvent event.TypedUpdateEvent[*corev1.Pod], limitingInterface workqueue.RateLimitingInterface) {
pod := updateEvent.ObjectNew
if gssName, exist := pod.GetLabels()[gamekruiseiov1alpha1.GameServerOwnerGssKey]; exist {
limitingInterface.Add(reconcile.Request{NamespacedName: types.NamespacedName{
Name: gssName,
@ -116,8 +133,8 @@ func watchPod(c controller.Controller) (err error) {
}})
}
},
DeleteFunc: func(deleteEvent event.DeleteEvent, limitingInterface workqueue.RateLimitingInterface) {
pod := deleteEvent.Object.(*corev1.Pod)
DeleteFunc: func(ctx context.Context, deleteEvent event.TypedDeleteEvent[*corev1.Pod], limitingInterface workqueue.RateLimitingInterface) {
pod := deleteEvent.Object
if gssName, exist := pod.GetLabels()[gamekruiseiov1alpha1.GameServerOwnerGssKey]; exist {
limitingInterface.Add(reconcile.Request{NamespacedName: types.NamespacedName{
Name: gssName,
@ -125,18 +142,20 @@ func watchPod(c controller.Controller) (err error) {
}})
}
},
}); err != nil {
})); err != nil {
return err
}
return nil
}
// watch workloads
func watchWorkloads(c controller.Controller) (err error) {
if err := c.Watch(&source.Kind{Type: &kruiseV1beta1.StatefulSet{}}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &gamekruiseiov1alpha1.GameServerSet{},
}); err != nil {
func watchWorkloads(mgr manager.Manager, c controller.Controller) (err error) {
if err := c.Watch(source.Kind(mgr.GetCache(), &kruiseV1beta1.StatefulSet{}, handler.TypedEnqueueRequestForOwner[*kruiseV1beta1.StatefulSet](
mgr.GetScheme(),
mgr.GetRESTMapper(),
&gamekruiseiov1alpha1.GameServerSet{},
handler.OnlyControllerOwner(),
))); err != nil {
return err
}
return nil
@ -177,6 +196,16 @@ func (r *GameServerSetReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return reconcile.Result{}, err
}
gsm := NewGameServerSetManager(gss, r.Client, r.recorder)
// In the serverless scenario the PodProbeMarker takes effect during the webhook phase, so the PodProbeMarker needs to be created in advance.
err, done := gsm.SyncPodProbeMarker()
if err != nil {
klog.Errorf("GameServerSet %s failed to synchronize PodProbeMarker in %s,because of %s.", namespacedName.Name, namespacedName.Namespace, err.Error())
return reconcile.Result{}, err
} else if !done {
return reconcile.Result{}, nil
}
// get advanced statefulset
asts := &kruiseV1beta1.StatefulSet{}
err = r.Get(ctx, namespacedName, asts)
@ -200,13 +229,14 @@ func (r *GameServerSetReconciler) Reconcile(ctx context.Context, req ctrl.Reques
Namespace: gss.GetNamespace(),
LabelSelector: labels.SelectorFromSet(map[string]string{
gamekruiseiov1alpha1.GameServerOwnerGssKey: gss.GetName(),
})})
}),
})
if err != nil {
klog.Errorf("failed to list GameServers of GameServerSet %s in %s.", gss.GetName(), gss.GetNamespace())
return reconcile.Result{}, err
}
gsm := NewGameServerSetManager(gss, asts, podList.Items, r.Client, r.recorder)
gsm.SyncStsAndPodList(asts, podList.Items)
// kill game servers
newReplicas := gsm.GetReplicasAfterKilling()
@ -241,12 +271,6 @@ func (r *GameServerSetReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return reconcile.Result{}, nil
}
err = gsm.SyncPodProbeMarker()
if err != nil {
klog.Errorf("GameServerSet %s failed to synchronize PodProbeMarker in %s,because of %s.", namespacedName.Name, namespacedName.Namespace, err.Error())
return reconcile.Result{}, err
}
// sync GameServerSet Status
err = gsm.SyncStatus()
if err != nil {
@ -265,7 +289,12 @@ func (r *GameServerSetReconciler) SetupWithManager(mgr ctrl.Manager) (c controll
}
func (r *GameServerSetReconciler) initAsts(gss *gamekruiseiov1alpha1.GameServerSet) error {
asts := &kruiseV1beta1.StatefulSet{}
asts := &kruiseV1beta1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps.kruise.io/v1beta1",
},
}
asts.Namespace = gss.GetNamespace()
asts.Name = gss.GetName()


@ -2,6 +2,9 @@ package gameserverset
import (
"context"
"reflect"
"testing"
appspub "github.com/openkruise/kruise-api/apps/pub"
kruiseV1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
gameKruiseV1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
@ -10,11 +13,10 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
"reflect"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"testing"
)
func TestInitAsts(t *testing.T) {
@ -114,7 +116,7 @@ func TestInitAsts(t *testing.T) {
},
Spec: gameKruiseV1alpha1.GameServerSetSpec{
Replicas: ptr.To[int32](4),
ReserveGameServerIds: []int{0},
ReserveGameServerIds: []intstr.IntOrString{intstr.FromInt(0)},
UpdateStrategy: gameKruiseV1alpha1.UpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: &gameKruiseV1alpha1.RollingUpdateStatefulSetStrategy{},
@ -143,7 +145,7 @@ func TestInitAsts(t *testing.T) {
},
Spec: kruiseV1beta1.StatefulSetSpec{
Replicas: ptr.To[int32](4),
ReserveOrdinals: []int{0},
ReserveOrdinals: []intstr.IntOrString{intstr.FromInt(0)},
PodManagementPolicy: apps.ParallelPodManagement,
ServiceName: "case1",
Selector: &metav1.LabelSelector{

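The ReserveGameServerIds and ReserveOrdinals fields shown above changed from []int to []intstr.IntOrString, matching the Advanced StatefulSet API, so a reserved ordinal can be written either as an integer or as a string. A minimal illustration of constructing and reading both forms (how string values are interpreted is left to the consuming controller and is not assumed here):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Reserved ordinals can now be expressed as ints or strings.
	reserve := []intstr.IntOrString{
		intstr.FromInt(0),
		intstr.FromString("3"), // string form
	}
	for _, r := range reserve {
		if r.Type == intstr.Int {
			fmt.Println("int ordinal:", r.IntValue())
		} else {
			fmt.Println("string ordinal:", r.StrVal)
		}
	}
}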

@ -18,6 +18,7 @@ package gameserverset
import (
"context"
kruisePub "github.com/openkruise/kruise-api/apps/pub"
kruiseV1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
kruiseV1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
corev1 "k8s.io/api/core/v1"
@ -26,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"k8s.io/klog/v2"
@ -45,10 +47,19 @@ type Control interface {
SyncStatus() error
IsNeedToScale() bool
IsNeedToUpdateWorkload() bool
SyncPodProbeMarker() error
SyncPodProbeMarker() (error, bool)
GetReplicasAfterKilling() *int32
SyncStsAndPodList(asts *kruiseV1beta1.StatefulSet, gsList []corev1.Pod)
}
const (
DefaultTimeoutSeconds = 5
DefaultInitialDelaySeconds = 10
DefaultPeriodSeconds = 3
DefaultSuccessThreshold = 1
DefaultFailureThreshold = 3
)
const (
ScaleReason = "Scale"
CreatePPMReason = "CreatePpm"
@ -65,16 +76,19 @@ type GameServerSetManager struct {
eventRecorder record.EventRecorder
}
func NewGameServerSetManager(gss *gameKruiseV1alpha1.GameServerSet, asts *kruiseV1beta1.StatefulSet, gsList []corev1.Pod, c client.Client, recorder record.EventRecorder) Control {
func NewGameServerSetManager(gss *gameKruiseV1alpha1.GameServerSet, c client.Client, recorder record.EventRecorder) Control {
return &GameServerSetManager{
gameServerSet: gss,
asts: asts,
podList: gsList,
client: c,
eventRecorder: recorder,
}
}
func (manager *GameServerSetManager) SyncStsAndPodList(asts *kruiseV1beta1.StatefulSet, gsList []corev1.Pod) {
manager.asts = asts
manager.podList = gsList
}
func (manager *GameServerSetManager) GetReplicasAfterKilling() *int32 {
gss := manager.gameServerSet
asts := manager.asts
@ -96,13 +110,18 @@ func (manager *GameServerSetManager) GetReplicasAfterKilling() *int32 {
return ptr.To[int32](*gss.Spec.Replicas - int32(toKill))
}
// IsNeedToScale checks if the GameServerSet need to scale,
// return True when the replicas or reserveGameServerIds is changed
func (manager *GameServerSetManager) IsNeedToScale() bool {
gss := manager.gameServerSet
asts := manager.asts
gssSpecReserveIds := util.GetReserveOrdinalIntSet(gss.Spec.ReserveGameServerIds)
// no need to scale
return !(*gss.Spec.Replicas == *asts.Spec.Replicas &&
util.IsSliceEqual(util.StringToIntSlice(gss.GetAnnotations()[gameKruiseV1alpha1.GameServerSetReserveIdsKey], ","), gss.Spec.ReserveGameServerIds))
util.StringToOrdinalIntSet(
gss.GetAnnotations()[gameKruiseV1alpha1.GameServerSetReserveIdsKey], ",",
).Equal(gssSpecReserveIds))
}
func (manager *GameServerSetManager) GameServerScale() error {
@ -111,33 +130,31 @@ func (manager *GameServerSetManager) GameServerScale() error {
c := manager.client
ctx := context.Background()
var podList []corev1.Pod
for _, pod := range manager.podList {
if pod.GetDeletionTimestamp() == nil {
podList = append(podList, pod)
}
}
podList = append(podList, manager.podList...)
currentReplicas := len(podList)
currentReplicas := int(*asts.Spec.Replicas)
expectedReplicas := int(*gss.Spec.Replicas)
as := gss.GetAnnotations()
reserveIds := util.StringToIntSlice(as[gameKruiseV1alpha1.GameServerSetReserveIdsKey], ",")
notExistIds := util.GetSliceInANotInB(asts.Spec.ReserveOrdinals, reserveIds)
gssReserveIds := gss.Spec.ReserveGameServerIds
specReserveIds := util.GetReserveOrdinalIntSet(asts.Spec.ReserveOrdinals)
reserveIds := util.GetReserveOrdinalIntSet(
util.StringToIntStrSlice(as[gameKruiseV1alpha1.GameServerSetReserveIdsKey], ","))
notExistIds := util.GetSetInANotInB(specReserveIds, reserveIds)
gssReserveIds := util.GetReserveOrdinalIntSet(gss.Spec.ReserveGameServerIds)
klog.Infof("GameServers %s/%s already has %d replicas, expect to have %d replicas; With newExplicit: %v; oldExplicit: %v; oldImplicit: %v",
gss.GetNamespace(), gss.GetName(), currentReplicas, expectedReplicas, gssReserveIds, reserveIds, notExistIds)
klog.Infof("GameServers %s/%s already has %d replicas, expect to have %d replicas; With newExplicit: %v; oldExplicit: %v; oldImplicit: %v; total pods num: %d",
gss.GetNamespace(), gss.GetName(), currentReplicas, expectedReplicas, gssReserveIds, reserveIds, notExistIds, len(podList))
manager.eventRecorder.Eventf(gss, corev1.EventTypeNormal, ScaleReason, "scale from %d to %d", currentReplicas, expectedReplicas)
newManageIds, newReserveIds := computeToScaleGs(gssReserveIds, reserveIds, notExistIds, expectedReplicas, podList)
if gss.Spec.GameServerTemplate.ReclaimPolicy == gameKruiseV1alpha1.DeleteGameServerReclaimPolicy {
err := SyncGameServer(gss, c, newManageIds, util.GetIndexListFromPodList(podList))
err := SyncGameServer(gss, c, newManageIds, util.GetIndexSetFromPodList(podList))
if err != nil {
return err
}
}
asts.Spec.ReserveOrdinals = newReserveIds
asts.Spec.ReserveOrdinals = util.OrdinalSetToIntStrSlice(newReserveIds)
asts.Spec.Replicas = gss.Spec.Replicas
asts.Spec.ScaleStrategy = &kruiseV1beta1.StatefulSetScaleStrategy{
MaxUnavailable: gss.Spec.ScaleStrategy.MaxUnavailable,
@ -152,8 +169,8 @@ func (manager *GameServerSetManager) GameServerScale() error {
gssReserveIds = newReserveIds
}
gssAnnotations := make(map[string]string)
gssAnnotations[gameKruiseV1alpha1.GameServerSetReserveIdsKey] = util.IntSliceToString(gssReserveIds, ",")
patchGss := map[string]interface{}{"spec": map[string]interface{}{"reserveGameServerIds": gssReserveIds}, "metadata": map[string]map[string]string{"annotations": gssAnnotations}}
gssAnnotations[gameKruiseV1alpha1.GameServerSetReserveIdsKey] = util.OrdinalSetToString(gssReserveIds)
patchGss := map[string]interface{}{"spec": map[string]interface{}{"reserveGameServerIds": util.OrdinalSetToIntStrSlice(gssReserveIds)}, "metadata": map[string]map[string]string{"annotations": gssAnnotations}}
patchGssBytes, _ := json.Marshal(patchGss)
err = c.Patch(ctx, gss, client.RawPatch(types.MergePatchType, patchGssBytes))
if err != nil {
@ -169,23 +186,37 @@ func (manager *GameServerSetManager) GameServerScale() error {
// notExistIds is the implicit id list.
// gssReserveIds is the newest explicit id list.
// pods is the pods that managed by gss now.
func computeToScaleGs(gssReserveIds, reserveIds, notExistIds []int, expectedReplicas int, pods []corev1.Pod) ([]int, []int) {
func computeToScaleGs(gssReserveIds, reserveIds, notExistIds sets.Set[int], expectedReplicas int, pods []corev1.Pod) (workloadManageIds sets.Set[int], newReserveIds sets.Set[int]) {
// 1. Get newest implicit list & explicit.
newAddExplicit := util.GetSliceInANotInB(gssReserveIds, reserveIds)
newDeleteExplicit := util.GetSliceInANotInB(reserveIds, gssReserveIds)
newImplicit := util.GetSliceInANotInB(notExistIds, newAddExplicit)
newImplicit = append(newImplicit, newDeleteExplicit...)
newAddExplicit := util.GetSetInANotInB(gssReserveIds, reserveIds)
newDeleteExplicit := util.GetSetInANotInB(reserveIds, gssReserveIds)
newImplicit := util.GetSetInANotInB(notExistIds, newAddExplicit)
newImplicit = newImplicit.Union(newDeleteExplicit)
newExplicit := gssReserveIds
// 2. Remove the pods ids is in newExplicit.
var workloadManageIds []int
workloadManageIds = sets.New[int]()
var newPods []corev1.Pod
deletingPodIds := sets.New[int]()
preDeletingPodIds := sets.New[int]()
for _, pod := range pods {
index := util.GetIndexFromGsName(pod.Name)
if util.IsNumInList(index, newExplicit) {
// if pod is deleting, exclude it.
if pod.GetDeletionTimestamp() != nil {
deletingPodIds.Insert(index)
continue
}
workloadManageIds = append(workloadManageIds, index)
// if pod is preDeleting, exclude it.
if lifecycleState, exist := pod.GetLabels()[kruisePub.LifecycleStateKey]; exist && lifecycleState == string(kruisePub.LifecycleStatePreparingDelete) {
preDeletingPodIds.Insert(index)
continue
}
if newExplicit.Has(index) {
continue
}
workloadManageIds.Insert(index)
newPods = append(newPods, pod)
}
@ -197,38 +228,38 @@ func computeToScaleGs(gssReserveIds, reserveIds, notExistIds []int, expectedRepl
num := 0
var toAdd []int
for i := 0; num < expectedReplicas-existReplicas; i++ {
if util.IsNumInList(i, workloadManageIds) || util.IsNumInList(i, newExplicit) {
if workloadManageIds.Has(i) || newExplicit.Has(i) || preDeletingPodIds.Has(i) {
continue
}
if util.IsNumInList(i, newImplicit) {
newImplicit = util.GetSliceInANotInB(newImplicit, []int{i})
if newImplicit.Has(i) {
newImplicit.Delete(i)
}
toAdd = append(toAdd, i)
num++
}
workloadManageIds = append(workloadManageIds, toAdd...)
workloadManageIds.Insert(toAdd...)
} else if existReplicas > expectedReplicas {
// Delete pods.
sortedGs := util.DeleteSequenceGs(newPods)
sort.Sort(sortedGs)
toDelete := util.GetIndexListFromPodList(sortedGs[:existReplicas-expectedReplicas])
workloadManageIds = util.GetSliceInANotInB(workloadManageIds, toDelete)
newImplicit = append(newImplicit, toDelete...)
toDelete := util.GetIndexSetFromPodList(sortedGs[:existReplicas-expectedReplicas])
workloadManageIds = util.GetSetInANotInB(workloadManageIds, toDelete)
newImplicit = newImplicit.Union(toDelete)
}
return workloadManageIds, append(newImplicit, newExplicit...)
return workloadManageIds, newImplicit.Union(newExplicit)
}
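The id bookkeeping above now uses sets.Set[int] from k8s.io/apimachinery/pkg/util/sets instead of []int plus the old slice helpers. A short sketch of the set operations relied on (difference, union, membership, equality); the values are arbitrary:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	reserved := sets.New(2, 3, 4)
	old := sets.New(2, 3)

	added := reserved.Difference(old)   // explicit ids newly reserved: {4}
	removed := old.Difference(reserved) // ids no longer reserved: {}
	fmt.Println(added.UnsortedList(), removed.Len())

	managed := sets.New[int]()
	managed.Insert(0, 1)
	managed.Delete(1)
	fmt.Println(managed.Has(0), managed.Union(added).Equal(sets.New(0, 4))) // true true
}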
func SyncGameServer(gss *gameKruiseV1alpha1.GameServerSet, c client.Client, newManageIds, oldManageIds []int) error {
func SyncGameServer(gss *gameKruiseV1alpha1.GameServerSet, c client.Client, newManageIds, oldManageIds sets.Set[int]) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
addIds := util.GetSliceInANotInB(newManageIds, oldManageIds)
deleteIds := util.GetSliceInANotInB(oldManageIds, newManageIds)
addIds := util.GetSetInANotInB(newManageIds, oldManageIds)
deleteIds := util.GetSetInANotInB(oldManageIds, newManageIds)
errch := make(chan error, len(addIds)+len(deleteIds))
var wg sync.WaitGroup
for _, gsId := range append(addIds, deleteIds...) {
for _, gsId := range addIds.Union(deleteIds).UnsortedList() {
wg.Add(1)
id := gsId
go func(ctx context.Context) {
@ -249,7 +280,7 @@ func SyncGameServer(gss *gameKruiseV1alpha1.GameServerSet, c client.Client, newM
return
}
if util.IsNumInList(id, addIds) && gs.GetLabels()[gameKruiseV1alpha1.GameServerDeletingKey] == "true" {
if addIds.Has(id) && gs.GetLabels()[gameKruiseV1alpha1.GameServerDeletingKey] == "true" {
gsLabels := make(map[string]string)
gsLabels[gameKruiseV1alpha1.GameServerDeletingKey] = "false"
patchGs := map[string]interface{}{"metadata": map[string]map[string]string{"labels": gsLabels}}
@ -266,7 +297,7 @@ func SyncGameServer(gss *gameKruiseV1alpha1.GameServerSet, c client.Client, newM
klog.Infof("GameServer %s/%s DeletingKey turn into false", gss.Namespace, gsName)
}
if util.IsNumInList(id, deleteIds) && gs.GetLabels()[gameKruiseV1alpha1.GameServerDeletingKey] != "true" {
if deleteIds.Has(id) && gs.GetLabels()[gameKruiseV1alpha1.GameServerDeletingKey] != "true" {
gsLabels := make(map[string]string)
gsLabels[gameKruiseV1alpha1.GameServerDeletingKey] = "true"
patchGs := map[string]interface{}{"metadata": map[string]map[string]string{"labels": gsLabels}}
@ -317,7 +348,7 @@ func (manager *GameServerSetManager) UpdateWorkload() error {
return retryErr
}
func (manager *GameServerSetManager) SyncPodProbeMarker() error {
func (manager *GameServerSetManager) SyncPodProbeMarker() (error, bool) {
gss := manager.gameServerSet
sqs := gss.Spec.ServiceQualities
c := manager.client
@ -332,27 +363,38 @@ func (manager *GameServerSetManager) SyncPodProbeMarker() error {
if err != nil {
if errors.IsNotFound(err) {
if sqs == nil {
return nil
return nil, true
}
// create ppm
manager.eventRecorder.Event(gss, corev1.EventTypeNormal, CreatePPMReason, "create PodProbeMarker")
return c.Create(ctx, createPpm(gss))
ppm = createPpm(gss)
klog.Infof("GameserverSet(%s/%s) create PodProbeMarker(%s)", gss.Namespace, gss.Name, ppm.Name)
return c.Create(ctx, ppm), false
}
return err
return err, false
}
// delete ppm
if sqs == nil {
return c.Delete(ctx, ppm)
klog.Infof("GameserverSet(%s/%s) ServiceQualities is empty, and delete PodProbeMarker", gss.Namespace, gss.Name)
return c.Delete(ctx, ppm), false
}
// update ppm
if util.GetHash(gss.Spec.ServiceQualities) != ppm.GetAnnotations()[gameKruiseV1alpha1.PpmHashKey] {
ppm.Spec.Probes = constructProbes(gss)
ppm.Annotations[gameKruiseV1alpha1.PpmHashKey] = util.GetHash(gss.Spec.ServiceQualities)
by, _ := json.Marshal(ppm.Spec.Probes)
manager.eventRecorder.Event(gss, corev1.EventTypeNormal, UpdatePPMReason, "update PodProbeMarker")
return c.Update(ctx, ppm)
klog.Infof("GameserverSet(%s/%s) update PodProbeMarker(%s) body(%s)", gss.Namespace, gss.Name, ppm.Name, string(by))
return c.Update(ctx, ppm), false
}
return nil
// Check the PodProbeMarker status to ensure the resource has been processed by kruise-manager.
if ppm.Generation != ppm.Status.ObservedGeneration {
klog.Infof("GameServerSet(%s/%s) PodProbeMarker(%s) status observedGeneration has not caught up yet, waiting", gss.Namespace, gss.Name, ppm.Name)
return nil, false
}
return nil, true
}
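SyncPodProbeMarker now reports a done flag in addition to the error, and the reconcile loop returns early until it is true; done only becomes true once the PodProbeMarker's status.observedGeneration has caught up with metadata.generation, i.e. once kruise-manager has processed the latest spec. A condensed sketch of that gate (illustrative helper name, not project code):

package gameserverset

import (
	kruiseV1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
)

// podProbeMarkerReady reports whether kruise-manager has observed the latest spec.
func podProbeMarkerReady(ppm *kruiseV1alpha1.PodProbeMarker) bool {
	// metadata.generation is bumped by the API server on spec changes;
	// status.observedGeneration is bumped by kruise-manager once it processes them.
	return ppm.Generation == ppm.Status.ObservedGeneration
}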
func constructProbes(gss *gameKruiseV1alpha1.GameServerSet) []kruiseV1alpha1.PodContainerProbe {
@ -366,6 +408,21 @@ func constructProbes(gss *gameKruiseV1alpha1.GameServerSet) []kruiseV1alpha1.Pod
},
PodConditionType: util.AddPrefixGameKruise(sq.Name),
}
if probe.Probe.TimeoutSeconds == 0 {
probe.Probe.TimeoutSeconds = DefaultTimeoutSeconds
}
if probe.Probe.InitialDelaySeconds == 0 {
probe.Probe.InitialDelaySeconds = DefaultInitialDelaySeconds
}
if probe.Probe.PeriodSeconds == 0 {
probe.Probe.PeriodSeconds = DefaultPeriodSeconds
}
if probe.Probe.SuccessThreshold == 0 {
probe.Probe.SuccessThreshold = DefaultSuccessThreshold
}
if probe.Probe.FailureThreshold == 0 {
probe.Probe.FailureThreshold = DefaultFailureThreshold
}
probes = append(probes, probe)
}
return probes
@ -410,11 +467,13 @@ func (manager *GameServerSetManager) SyncStatus() error {
maintainingGs := 0
waitToBeDeletedGs := 0
preDeleteGs := 0
for _, pod := range podList {
podLabels := pod.GetLabels()
opsState := podLabels[gameKruiseV1alpha1.GameServerOpsStateKey]
state := podLabels[gameKruiseV1alpha1.GameServerStateKey]
// ops state
switch opsState {
@ -423,6 +482,12 @@ func (manager *GameServerSetManager) SyncStatus() error {
case string(gameKruiseV1alpha1.Maintaining):
maintainingGs++
}
// state
switch state {
case string(gameKruiseV1alpha1.PreDelete):
preDeleteGs++
}
}
status := gameKruiseV1alpha1.GameServerSetStatus{
@ -434,6 +499,7 @@ func (manager *GameServerSetManager) SyncStatus() error {
UpdatedReadyReplicas: asts.Status.UpdatedReadyReplicas,
MaintainingReplicas: ptr.To[int32](int32(maintainingGs)),
WaitToBeDeletedReplicas: ptr.To[int32](int32(waitToBeDeletedGs)),
PreDeleteReplicas: ptr.To[int32](int32(preDeleteGs)),
LabelSelector: asts.Status.LabelSelector,
ObservedGeneration: gss.GetGeneration(),
}


@ -2,10 +2,7 @@ package gameserverset
import (
"context"
"reflect"
"strconv"
"testing"
"fmt"
appspub "github.com/openkruise/kruise-api/apps/pub"
kruiseV1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1"
kruiseV1beta1 "github.com/openkruise/kruise-api/apps/v1beta1"
@ -14,11 +11,16 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr"
"reflect"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"strconv"
"testing"
gameKruiseV1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/pkg/util"
@ -36,20 +38,22 @@ func init() {
}
func TestComputeToScaleGs(t *testing.T) {
timeNow := metav1.Now()
tests := []struct {
newGssReserveIds []int
oldGssreserveIds []int
notExistIds []int
newGssReserveIds sets.Set[int]
oldGssreserveIds sets.Set[int]
notExistIds sets.Set[int]
expectedReplicas int
pods []corev1.Pod
newReserveIds []int
newManageIds []int
newReserveIds sets.Set[int]
newManageIds sets.Set[int]
}{
// case 0
{
newGssReserveIds: []int{2, 3, 4},
oldGssreserveIds: []int{2, 3},
notExistIds: []int{5},
newGssReserveIds: sets.New(2, 3, 4),
oldGssreserveIds: sets.New(2, 3),
notExistIds: sets.New(5),
expectedReplicas: 3,
pods: []corev1.Pod{
{
@ -89,14 +93,14 @@ func TestComputeToScaleGs(t *testing.T) {
},
},
},
newReserveIds: []int{2, 3, 4, 5},
newManageIds: []int{0, 1, 6},
newReserveIds: sets.New(2, 3, 4, 5),
newManageIds: sets.New(0, 1, 6),
},
// case 1
{
newGssReserveIds: []int{0, 2, 3},
oldGssreserveIds: []int{0, 4, 5},
notExistIds: []int{},
newGssReserveIds: sets.New(0, 2, 3),
oldGssreserveIds: sets.New(0, 4, 5),
notExistIds: sets.New[int](),
expectedReplicas: 3,
pods: []corev1.Pod{
{
@ -145,14 +149,14 @@ func TestComputeToScaleGs(t *testing.T) {
},
},
},
newReserveIds: []int{0, 2, 3, 4, 5},
newManageIds: []int{1, 6, 7},
newReserveIds: sets.New(0, 2, 3, 4, 5),
newManageIds: sets.New(1, 6, 7),
},
// case 2
{
newGssReserveIds: []int{0},
oldGssreserveIds: []int{0, 4, 5},
notExistIds: []int{},
newGssReserveIds: sets.New(0),
oldGssreserveIds: sets.New(0, 4, 5),
notExistIds: sets.New[int](),
expectedReplicas: 1,
pods: []corev1.Pod{
{
@ -201,14 +205,14 @@ func TestComputeToScaleGs(t *testing.T) {
},
},
},
newReserveIds: []int{0, 2, 3, 4, 5, 6, 7},
newManageIds: []int{1},
newReserveIds: sets.New(0, 2, 3, 4, 5, 6, 7),
newManageIds: sets.New(1),
},
// case 3
{
newGssReserveIds: []int{0, 2, 3},
oldGssreserveIds: []int{0, 4, 5},
notExistIds: []int{},
newGssReserveIds: sets.New(0, 2, 3),
oldGssreserveIds: sets.New(0, 4, 5),
notExistIds: sets.New[int](),
expectedReplicas: 4,
pods: []corev1.Pod{
{
@ -257,14 +261,14 @@ func TestComputeToScaleGs(t *testing.T) {
},
},
},
newReserveIds: []int{0, 2, 3, 5},
newManageIds: []int{1, 4, 6, 7},
newReserveIds: sets.New(0, 2, 3, 5),
newManageIds: sets.New(1, 4, 6, 7),
},
// case 4
{
newGssReserveIds: []int{0, 3, 5},
oldGssreserveIds: []int{0, 3, 5},
notExistIds: []int{},
newGssReserveIds: sets.New(0, 3, 5),
oldGssreserveIds: sets.New(0, 3, 5),
notExistIds: sets.New[int](),
expectedReplicas: 1,
pods: []corev1.Pod{
{
@ -304,14 +308,14 @@ func TestComputeToScaleGs(t *testing.T) {
},
},
},
newReserveIds: []int{0, 3, 5, 2, 4, 6},
newManageIds: []int{1},
newReserveIds: sets.New(0, 3, 5, 2, 4, 6),
newManageIds: sets.New(1),
},
// case 5
{
newGssReserveIds: []int{1, 2},
oldGssreserveIds: []int{},
notExistIds: []int{1, 2},
newGssReserveIds: sets.New(1, 2),
oldGssreserveIds: sets.New[int](),
notExistIds: sets.New(1, 2),
expectedReplicas: 2,
pods: []corev1.Pod{
{
@ -333,44 +337,44 @@ func TestComputeToScaleGs(t *testing.T) {
},
},
},
newReserveIds: []int{1, 2},
newManageIds: []int{0, 3},
newReserveIds: sets.New(1, 2),
newManageIds: sets.New(0, 3),
},
// case 6
{
newGssReserveIds: []int{},
oldGssreserveIds: []int{},
notExistIds: []int{},
newGssReserveIds: sets.New[int](),
oldGssreserveIds: sets.New[int](),
notExistIds: sets.New[int](),
expectedReplicas: 3,
pods: []corev1.Pod{},
newReserveIds: []int{},
newManageIds: []int{0, 1, 2},
newReserveIds: sets.New[int](),
newManageIds: sets.New(0, 1, 2),
},
// case 7
{
newGssReserveIds: []int{1, 2},
oldGssreserveIds: []int{},
notExistIds: []int{},
newGssReserveIds: sets.New(1, 2),
oldGssreserveIds: sets.New[int](),
notExistIds: sets.New[int](),
expectedReplicas: 3,
pods: []corev1.Pod{},
newReserveIds: []int{1, 2},
newManageIds: []int{0, 3, 4},
newReserveIds: sets.New(1, 2),
newManageIds: sets.New(0, 3, 4),
},
// case 8
{
newGssReserveIds: []int{0},
oldGssreserveIds: []int{},
notExistIds: []int{0},
newGssReserveIds: sets.New(0),
oldGssreserveIds: sets.New[int](),
notExistIds: sets.New(0),
expectedReplicas: 1,
pods: []corev1.Pod{},
newReserveIds: []int{0},
newManageIds: []int{1},
newReserveIds: sets.New(0),
newManageIds: sets.New(1),
},
// case 9
{
newGssReserveIds: []int{},
oldGssreserveIds: []int{1},
notExistIds: []int{},
newGssReserveIds: sets.New[int](),
oldGssreserveIds: sets.New(1),
notExistIds: sets.New[int](),
expectedReplicas: 2,
pods: []corev1.Pod{
{
@ -392,14 +396,14 @@ func TestComputeToScaleGs(t *testing.T) {
},
},
},
newReserveIds: []int{1},
newManageIds: []int{0, 2},
newReserveIds: sets.New(1),
newManageIds: sets.New(0, 2),
},
// case 10
{
newGssReserveIds: []int{0},
oldGssreserveIds: []int{},
notExistIds: []int{2, 3, 4},
newGssReserveIds: sets.New(0),
oldGssreserveIds: sets.New[int](),
notExistIds: sets.New(2, 3, 4),
expectedReplicas: 4,
pods: []corev1.Pod{
{
@ -412,31 +416,85 @@ func TestComputeToScaleGs(t *testing.T) {
},
},
},
newReserveIds: []int{0},
newManageIds: []int{1, 2, 3, 4},
newReserveIds: sets.New(0),
newManageIds: sets.New(1, 2, 3, 4),
},
// case 11
{
newGssReserveIds: sets.New[int](),
oldGssreserveIds: sets.New[int](),
notExistIds: sets.New[int](2, 3, 4),
expectedReplicas: 4,
pods: []corev1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "xxx-0",
Labels: map[string]string{
gameKruiseV1alpha1.GameServerOpsStateKey: string(gameKruiseV1alpha1.None),
gameKruiseV1alpha1.GameServerDeletePriorityKey: "0",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "xxx-1",
Labels: map[string]string{
gameKruiseV1alpha1.GameServerOpsStateKey: string(gameKruiseV1alpha1.None),
gameKruiseV1alpha1.GameServerDeletePriorityKey: "0",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "xxx-2",
Labels: map[string]string{
gameKruiseV1alpha1.GameServerOpsStateKey: string(gameKruiseV1alpha1.None),
gameKruiseV1alpha1.GameServerDeletePriorityKey: "0",
appspub.LifecycleStateKey: string(appspub.LifecycleStatePreparingDelete),
},
DeletionTimestamp: &timeNow,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "xxx-3",
Labels: map[string]string{
gameKruiseV1alpha1.GameServerOpsStateKey: string(gameKruiseV1alpha1.None),
gameKruiseV1alpha1.GameServerDeletePriorityKey: "0",
appspub.LifecycleStateKey: string(appspub.LifecycleStatePreparingDelete),
},
},
},
},
newReserveIds: sets.New(3),
newManageIds: sets.New(0, 1, 2, 4),
},
}
for i, test := range tests {
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
t.Logf("case %d : newGssReserveIds: %v ; oldGssreserveIds: %v ; notExistIds: %v ; expectedReplicas: %d; pods: %v", i, test.newGssReserveIds, test.oldGssreserveIds, test.notExistIds, test.expectedReplicas, test.pods)
newManageIds, newReserveIds := computeToScaleGs(test.newGssReserveIds, test.oldGssreserveIds, test.notExistIds, test.expectedReplicas, test.pods)
if !util.IsSliceEqual(newReserveIds, test.newReserveIds) {
t.Errorf("case %d: expect newNotExistIds %v but got %v", i, test.newReserveIds, newReserveIds)
if !newReserveIds.Equal(test.newReserveIds) {
t.Errorf("case %d: expect newReserveIds %v but got %v", i, test.newReserveIds, newReserveIds)
}
if !util.IsSliceEqual(newManageIds, test.newManageIds) {
if !newManageIds.Equal(test.newManageIds) {
t.Errorf("case %d: expect newManageIds %v but got %v", i, test.newManageIds, newManageIds)
}
t.Logf("case %d : newManageIds: %v ; newReserveIds: %v", i, newManageIds, newReserveIds)
})
}
}
func TestIsNeedToScale(t *testing.T) {
tests := []struct {
name string
gss *gameKruiseV1alpha1.GameServerSet
asts *kruiseV1beta1.StatefulSet
result bool
}{
{
name: "case 0",
gss: &gameKruiseV1alpha1.GameServerSet{
Spec: gameKruiseV1alpha1.GameServerSetSpec{
Replicas: ptr.To[int32](5),
@ -453,13 +511,14 @@ func TestIsNeedToScale(t *testing.T) {
result: false,
},
{
name: "case 1",
gss: &gameKruiseV1alpha1.GameServerSet{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{gameKruiseV1alpha1.GameServerSetReserveIdsKey: "1,5"},
},
Spec: gameKruiseV1alpha1.GameServerSetSpec{
Replicas: ptr.To[int32](5),
ReserveGameServerIds: []int{1, 5},
ReserveGameServerIds: []intstr.IntOrString{intstr.FromInt(1), intstr.FromInt(5)},
},
},
asts: &kruiseV1beta1.StatefulSet{
@ -472,8 +531,27 @@ func TestIsNeedToScale(t *testing.T) {
},
result: false,
},
{
name: "case 2",
gss: &gameKruiseV1alpha1.GameServerSet{
Spec: gameKruiseV1alpha1.GameServerSetSpec{
Replicas: ptr.To[int32](5),
ReserveGameServerIds: []intstr.IntOrString{intstr.FromInt(1), intstr.FromInt(5)},
},
},
asts: &kruiseV1beta1.StatefulSet{
Spec: kruiseV1beta1.StatefulSetSpec{
Replicas: ptr.To[int32](5),
},
Status: kruiseV1beta1.StatefulSetStatus{
Replicas: int32(5),
},
},
result: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
manager := &GameServerSetManager{
gameServerSet: test.gss,
asts: test.asts,
@ -482,21 +560,24 @@ func TestIsNeedToScale(t *testing.T) {
if actual != test.result {
t.Errorf("expect spec %v but got %v", test.result, actual)
}
})
}
}
func TestGameServerScale(t *testing.T) {
recorder := record.NewFakeRecorder(100)
//timeNow := metav1.Now()
tests := []struct {
name string
gss *gameKruiseV1alpha1.GameServerSet
asts *kruiseV1beta1.StatefulSet
podList []corev1.Pod
astsReserveIds []int
astsReserveIds sets.Set[int]
gssReserveIds string
}{
// case0: scale down without reserveIds
{
name: "case0: scale down without reserveIds",
gss: &gameKruiseV1alpha1.GameServerSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
@ -505,7 +586,7 @@ func TestGameServerScale(t *testing.T) {
},
Spec: gameKruiseV1alpha1.GameServerSetSpec{
Replicas: ptr.To[int32](3),
ReserveGameServerIds: []int{1},
ReserveGameServerIds: []intstr.IntOrString{intstr.FromInt(1)},
},
},
asts: &kruiseV1beta1.StatefulSet{
@ -515,7 +596,7 @@ func TestGameServerScale(t *testing.T) {
},
Spec: kruiseV1beta1.StatefulSetSpec{
Replicas: ptr.To[int32](4),
ReserveOrdinals: []int{1},
ReserveOrdinals: []intstr.IntOrString{intstr.FromInt(1)},
},
Status: kruiseV1beta1.StatefulSetStatus{
Replicas: int32(4),
@ -559,11 +640,11 @@ func TestGameServerScale(t *testing.T) {
},
},
},
astsReserveIds: []int{1, 2},
astsReserveIds: sets.New(1, 2),
gssReserveIds: "1",
},
// case1: scale down with reserveIds
{
name: "case1: scale down with reserveIds",
gss: &gameKruiseV1alpha1.GameServerSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
@ -572,7 +653,7 @@ func TestGameServerScale(t *testing.T) {
},
Spec: gameKruiseV1alpha1.GameServerSetSpec{
Replicas: ptr.To[int32](3),
ReserveGameServerIds: []int{1, 0},
ReserveGameServerIds: []intstr.IntOrString{intstr.FromInt(1), intstr.FromInt(0)},
},
},
asts: &kruiseV1beta1.StatefulSet{
@ -582,7 +663,7 @@ func TestGameServerScale(t *testing.T) {
},
Spec: kruiseV1beta1.StatefulSetSpec{
Replicas: ptr.To[int32](4),
ReserveOrdinals: []int{1},
ReserveOrdinals: []intstr.IntOrString{intstr.FromInt(1)},
},
Status: kruiseV1beta1.StatefulSetStatus{
Replicas: int32(4),
@ -626,11 +707,11 @@ func TestGameServerScale(t *testing.T) {
},
},
},
astsReserveIds: []int{1, 0},
gssReserveIds: "1,0",
astsReserveIds: sets.New(0, 1),
gssReserveIds: "0,1",
},
// case2: scale up with reserveIds
{
name: "case2: scale up with reserveIds",
gss: &gameKruiseV1alpha1.GameServerSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
@ -639,7 +720,7 @@ func TestGameServerScale(t *testing.T) {
},
Spec: gameKruiseV1alpha1.GameServerSetSpec{
Replicas: ptr.To[int32](5),
ReserveGameServerIds: []int{},
ReserveGameServerIds: []intstr.IntOrString{},
},
},
asts: &kruiseV1beta1.StatefulSet{
@ -649,7 +730,7 @@ func TestGameServerScale(t *testing.T) {
},
Spec: kruiseV1beta1.StatefulSetSpec{
Replicas: ptr.To[int32](4),
ReserveOrdinals: []int{1},
ReserveOrdinals: []intstr.IntOrString{intstr.FromInt(1)},
},
Status: kruiseV1beta1.StatefulSetStatus{
Replicas: int32(4),
@ -696,8 +777,8 @@ func TestGameServerScale(t *testing.T) {
astsReserveIds: nil,
gssReserveIds: "",
},
// case3: scale up with both reserveIds and others
{
name: "case3: scale up with both reserveIds and others",
gss: &gameKruiseV1alpha1.GameServerSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
@ -706,7 +787,7 @@ func TestGameServerScale(t *testing.T) {
},
Spec: gameKruiseV1alpha1.GameServerSetSpec{
Replicas: ptr.To[int32](5),
ReserveGameServerIds: []int{},
ReserveGameServerIds: []intstr.IntOrString{},
},
},
asts: &kruiseV1beta1.StatefulSet{
@ -716,7 +797,7 @@ func TestGameServerScale(t *testing.T) {
},
Spec: kruiseV1beta1.StatefulSetSpec{
Replicas: ptr.To[int32](3),
ReserveOrdinals: []int{1, 3},
ReserveOrdinals: []intstr.IntOrString{intstr.FromInt(1), intstr.FromInt(3)},
},
Status: kruiseV1beta1.StatefulSetStatus{
Replicas: int32(3),
@ -754,9 +835,74 @@ func TestGameServerScale(t *testing.T) {
astsReserveIds: nil,
gssReserveIds: "",
},
{
name: "case4: scale up when pods with PreDelete state",
gss: &gameKruiseV1alpha1.GameServerSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
Name: "case4",
},
Spec: gameKruiseV1alpha1.GameServerSetSpec{
Replicas: ptr.To[int32](5),
ReserveGameServerIds: []intstr.IntOrString{},
},
},
asts: &kruiseV1beta1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
Name: "case4",
},
Spec: kruiseV1beta1.StatefulSetSpec{
Replicas: ptr.To[int32](3),
ReserveOrdinals: []intstr.IntOrString{intstr.FromInt(3), intstr.FromInt(4), intstr.FromInt(5)},
},
},
podList: []corev1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "case4-0",
Labels: map[string]string{
gameKruiseV1alpha1.GameServerOpsStateKey: string(gameKruiseV1alpha1.None),
gameKruiseV1alpha1.GameServerDeletePriorityKey: "0",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "case4-1",
Labels: map[string]string{
gameKruiseV1alpha1.GameServerOpsStateKey: string(gameKruiseV1alpha1.None),
gameKruiseV1alpha1.GameServerDeletePriorityKey: "0",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "case4-2",
Labels: map[string]string{
gameKruiseV1alpha1.GameServerOpsStateKey: string(gameKruiseV1alpha1.None),
gameKruiseV1alpha1.GameServerDeletePriorityKey: "0",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "case4-3",
Labels: map[string]string{
gameKruiseV1alpha1.GameServerOpsStateKey: string(gameKruiseV1alpha1.None),
gameKruiseV1alpha1.GameServerDeletePriorityKey: "0",
appspub.LifecycleStateKey: string(appspub.LifecycleStatePreparingDelete),
},
},
},
},
astsReserveIds: sets.New(3),
gssReserveIds: "",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
objs := []client.Object{test.asts, test.gss}
c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
manager := &GameServerSetManager{
@ -778,8 +924,9 @@ func TestGameServerScale(t *testing.T) {
}, updateAsts); err != nil {
t.Error(err)
}
if !util.IsSliceEqual(updateAsts.Spec.ReserveOrdinals, test.astsReserveIds) {
t.Errorf("expect asts ReserveOrdinals %v but got %v", test.astsReserveIds, updateAsts.Spec.ReserveOrdinals)
gotIds := util.GetReserveOrdinalIntSet(updateAsts.Spec.ReserveOrdinals)
if !gotIds.Equal(test.astsReserveIds) {
t.Errorf("expect asts ReserveOrdinals %v but got %v", test.astsReserveIds, gotIds)
}
updateGss := &gameKruiseV1alpha1.GameServerSet{}
@ -792,6 +939,7 @@ func TestGameServerScale(t *testing.T) {
if updateGss.GetAnnotations()[gameKruiseV1alpha1.GameServerSetReserveIdsKey] != test.gssReserveIds {
t.Errorf("expect asts ReserveOrdinals %v but got %v", test.gssReserveIds, updateGss.GetAnnotations()[gameKruiseV1alpha1.GameServerSetReserveIdsKey])
}
})
}
}
@ -799,8 +947,8 @@ func TestSyncGameServer(t *testing.T) {
tests := []struct {
gss *gameKruiseV1alpha1.GameServerSet
gsList []*gameKruiseV1alpha1.GameServer
newManageIds []int
oldManageIds []int
newManageIds sets.Set[int]
oldManageIds sets.Set[int]
IdsLabelTure []int
IdsLabelFalse []int
}{
@ -850,8 +998,8 @@ func TestSyncGameServer(t *testing.T) {
},
},
},
oldManageIds: []int{0, 2, 3, 4},
newManageIds: []int{0, 1},
oldManageIds: sets.New(0, 2, 3, 4),
newManageIds: sets.New(0, 1),
IdsLabelTure: []int{2, 3, 4},
IdsLabelFalse: []int{},
},
@ -865,8 +1013,8 @@ func TestSyncGameServer(t *testing.T) {
},
},
gsList: []*gameKruiseV1alpha1.GameServer{},
oldManageIds: []int{},
newManageIds: []int{0, 1, 3},
oldManageIds: sets.New[int](),
newManageIds: sets.New(0, 1, 3),
IdsLabelTure: []int{},
IdsLabelFalse: []int{},
},
@ -891,8 +1039,8 @@ func TestSyncGameServer(t *testing.T) {
},
},
},
oldManageIds: []int{},
newManageIds: []int{0},
oldManageIds: sets.New[int](),
newManageIds: sets.New(0),
IdsLabelTure: []int{},
IdsLabelFalse: []int{0},
},
@ -1011,6 +1159,7 @@ func TestNumberToKill(t *testing.T) {
Name: "xxx-2",
Namespace: "xxx",
DeletionTimestamp: &now,
Finalizers: []string{"test"},
},
},
{
@ -1052,6 +1201,7 @@ func TestNumberToKill(t *testing.T) {
Name: "xxx-2",
Namespace: "xxx",
DeletionTimestamp: &now,
Finalizers: []string{"test"},
},
},
{
@ -1093,6 +1243,7 @@ func TestNumberToKill(t *testing.T) {
Name: "xxx-2",
Namespace: "xxx",
DeletionTimestamp: &now,
Finalizers: []string{"test"},
},
},
{
@ -1229,3 +1380,317 @@ func TestGameServerSetManager_UpdateWorkload(t *testing.T) {
}
}
}
func TestGameServerSetManager_SyncPodProbeMarker(t *testing.T) {
tests := []struct {
name string
getGss func() *gameKruiseV1alpha1.GameServerSet
getPPM func() *kruiseV1alpha1.PodProbeMarker
newPPM func() *kruiseV1alpha1.PodProbeMarker
expectedDone bool
}{
{
name: "first create PPM",
getGss: func() *gameKruiseV1alpha1.GameServerSet {
obj := &gameKruiseV1alpha1.GameServerSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
Name: "case0",
},
Spec: gameKruiseV1alpha1.GameServerSetSpec{
ServiceQualities: []gameKruiseV1alpha1.ServiceQuality{
{
Name: "healthy",
ContainerName: "main",
Probe: corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", "/healthy.sh"},
},
},
},
},
},
},
}
return obj
},
getPPM: func() *kruiseV1alpha1.PodProbeMarker {
return nil
},
newPPM: func() *kruiseV1alpha1.PodProbeMarker {
obj := &kruiseV1alpha1.PodProbeMarker{
Spec: kruiseV1alpha1.PodProbeMarkerSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"game.kruise.io/owner-gss": "case0",
},
},
Probes: []kruiseV1alpha1.PodContainerProbe{
{
Name: "healthy",
ContainerName: "main",
PodConditionType: "game.kruise.io/healthy",
Probe: kruiseV1alpha1.ContainerProbeSpec{
Probe: corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", "/healthy.sh"},
},
},
InitialDelaySeconds: DefaultInitialDelaySeconds,
TimeoutSeconds: DefaultTimeoutSeconds,
PeriodSeconds: DefaultPeriodSeconds,
SuccessThreshold: DefaultSuccessThreshold,
FailureThreshold: DefaultFailureThreshold,
},
},
},
},
},
}
return obj
},
expectedDone: false,
},
{
name: "second check PPM status, and false",
getGss: func() *gameKruiseV1alpha1.GameServerSet {
obj := &gameKruiseV1alpha1.GameServerSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
Name: "case0",
},
Spec: gameKruiseV1alpha1.GameServerSetSpec{
ServiceQualities: []gameKruiseV1alpha1.ServiceQuality{
{
Name: "healthy",
ContainerName: "main",
Probe: corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", "/healthy.sh"},
},
},
},
},
},
},
}
return obj
},
getPPM: func() *kruiseV1alpha1.PodProbeMarker {
obj := &kruiseV1alpha1.PodProbeMarker{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
Name: "case0",
Generation: 1,
Annotations: map[string]string{
"game.kruise.io/ppm-hash": "3716291985",
},
},
Spec: kruiseV1alpha1.PodProbeMarkerSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"game.kruise.io/owner-gss": "case0",
},
},
Probes: []kruiseV1alpha1.PodContainerProbe{
{
Name: "healthy",
ContainerName: "main",
PodConditionType: "game.kruise.io/healthy",
Probe: kruiseV1alpha1.ContainerProbeSpec{
Probe: corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", "/healthy.sh"},
},
},
InitialDelaySeconds: DefaultInitialDelaySeconds,
TimeoutSeconds: DefaultTimeoutSeconds,
PeriodSeconds: DefaultPeriodSeconds,
SuccessThreshold: DefaultSuccessThreshold,
FailureThreshold: DefaultFailureThreshold,
},
},
},
},
},
}
return obj
},
newPPM: func() *kruiseV1alpha1.PodProbeMarker {
obj := &kruiseV1alpha1.PodProbeMarker{
Spec: kruiseV1alpha1.PodProbeMarkerSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"game.kruise.io/owner-gss": "case0",
},
},
Probes: []kruiseV1alpha1.PodContainerProbe{
{
Name: "healthy",
ContainerName: "main",
PodConditionType: "game.kruise.io/healthy",
Probe: kruiseV1alpha1.ContainerProbeSpec{
Probe: corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", "/healthy.sh"},
},
},
InitialDelaySeconds: DefaultInitialDelaySeconds,
TimeoutSeconds: DefaultTimeoutSeconds,
PeriodSeconds: DefaultPeriodSeconds,
SuccessThreshold: DefaultSuccessThreshold,
FailureThreshold: DefaultFailureThreshold,
},
},
},
},
},
}
return obj
},
expectedDone: false,
},
{
name: "third check PPM status, and true",
getGss: func() *gameKruiseV1alpha1.GameServerSet {
obj := &gameKruiseV1alpha1.GameServerSet{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
Name: "case0",
},
Spec: gameKruiseV1alpha1.GameServerSetSpec{
ServiceQualities: []gameKruiseV1alpha1.ServiceQuality{
{
Name: "healthy",
ContainerName: "main",
Probe: corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", "/healthy.sh"},
},
},
},
},
},
},
}
return obj
},
getPPM: func() *kruiseV1alpha1.PodProbeMarker {
obj := &kruiseV1alpha1.PodProbeMarker{
ObjectMeta: metav1.ObjectMeta{
Namespace: "xxx",
Name: "case0",
Generation: 1,
Annotations: map[string]string{
"game.kruise.io/ppm-hash": "3716291985",
},
},
Spec: kruiseV1alpha1.PodProbeMarkerSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"game.kruise.io/owner-gss": "case0",
},
},
Probes: []kruiseV1alpha1.PodContainerProbe{
{
Name: "healthy",
ContainerName: "main",
PodConditionType: "game.kruise.io/healthy",
Probe: kruiseV1alpha1.ContainerProbeSpec{
Probe: corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", "/healthy.sh"},
},
},
InitialDelaySeconds: DefaultInitialDelaySeconds,
TimeoutSeconds: DefaultTimeoutSeconds,
PeriodSeconds: DefaultPeriodSeconds,
SuccessThreshold: DefaultSuccessThreshold,
FailureThreshold: DefaultFailureThreshold,
},
},
},
},
},
Status: kruiseV1alpha1.PodProbeMarkerStatus{
ObservedGeneration: 1,
},
}
return obj
},
newPPM: func() *kruiseV1alpha1.PodProbeMarker {
obj := &kruiseV1alpha1.PodProbeMarker{
Spec: kruiseV1alpha1.PodProbeMarkerSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"game.kruise.io/owner-gss": "case0",
},
},
Probes: []kruiseV1alpha1.PodContainerProbe{
{
Name: "healthy",
ContainerName: "main",
PodConditionType: "game.kruise.io/healthy",
Probe: kruiseV1alpha1.ContainerProbeSpec{
Probe: corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{
Command: []string{"/bin/sh", "-c", "/healthy.sh"},
},
},
InitialDelaySeconds: DefaultInitialDelaySeconds,
TimeoutSeconds: DefaultTimeoutSeconds,
PeriodSeconds: DefaultPeriodSeconds,
SuccessThreshold: DefaultSuccessThreshold,
FailureThreshold: DefaultFailureThreshold,
},
},
},
},
},
}
return obj
},
expectedDone: true,
},
}
recorder := record.NewFakeRecorder(100)
for _, test := range tests {
gss := test.getGss()
objs := []client.Object{gss}
ppm := test.getPPM()
if ppm != nil {
objs = append(objs, ppm)
}
c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
manager := &GameServerSetManager{
gameServerSet: gss,
client: c,
eventRecorder: recorder,
}
err, done := manager.SyncPodProbeMarker()
if err != nil {
t.Errorf("SyncPodProbeMarker failed: %s", err.Error())
} else if done != test.expectedDone {
t.Errorf("expected(%v), but get(%v)", test.expectedDone, done)
}
newObj := &kruiseV1alpha1.PodProbeMarker{}
if err = manager.client.Get(context.TODO(), types.NamespacedName{
Namespace: gss.Namespace,
Name: gss.Name,
}, newObj); err != nil {
t.Error(err)
}
if !reflect.DeepEqual(newObj.Spec, test.newPPM().Spec) {
t.Errorf("expect new asts spec %v but got %v", test.newPPM().Spec, newObj.Spec)
}
}
}


@ -3,14 +3,17 @@ package externalscaler
import (
"context"
"fmt"
"math"
"strconv"
gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
"strconv"
)
const (
@ -39,6 +42,21 @@ func (e *ExternalScaler) IsActive(ctx context.Context, scaledObjectRef *ScaledOb
if numWaitToBeDeleted == nil {
return nil, fmt.Errorf("GameServerSet %s/%s has not inited", ns, name)
}
minNum := 0.0
minNumStr := scaledObjectRef.GetScalerMetadata()[NoneGameServerMinNumberKey]
if minNumStr != "" {
minNum, err = strconv.ParseFloat(minNumStr, 32)
if err != nil {
return nil, err
}
}
if minNum > 0.0 {
return &IsActiveResponse{
Result: true,
}, nil
}
desireReplicas := currentReplicas - *numWaitToBeDeleted
return &IsActiveResponse{
Result: desireReplicas > 0,
@ -75,7 +93,6 @@ func (e *ExternalScaler) GetMetrics(ctx context.Context, metricRequest *GetMetri
err = e.client.List(ctx, podList, &client.ListOptions{
Namespace: ns,
LabelSelector: labels.NewSelector().Add(
*isNone,
*isGssOwner,
),
})
@ -84,12 +101,36 @@ func (e *ExternalScaler) GetMetrics(ctx context.Context, metricRequest *GetMetri
return nil, err
}
noneNum := len(podList.Items)
minNum, err := strconv.ParseInt(metricRequest.ScaledObjectRef.GetScalerMetadata()[NoneGameServerMinNumberKey], 10, 32)
if err != nil {
klog.Errorf("minAvailable should be integer type, err: %s", err.Error())
totalNum := len(podList.Items)
noneNum := 0
for _, pod := range podList.Items {
if isNone.Matches(labels.Set(pod.Labels)) {
noneNum++
}
if err == nil && noneNum < int(minNum) {
}
maxNumStr := metricRequest.ScaledObjectRef.GetScalerMetadata()[NoneGameServerMaxNumberKey]
var maxNumP *int
if maxNumStr != "" {
mn, err := strconv.ParseInt(maxNumStr, 10, 32)
if err != nil {
klog.Errorf("maxAvailable should be integer type, err: %s", err.Error())
} else {
maxNumP = ptr.To(int(mn))
}
}
minNum, err := handleMinNum(totalNum, noneNum, metricRequest.ScaledObjectRef.GetScalerMetadata()[NoneGameServerMinNumberKey])
if err != nil {
klog.Error(err)
return nil, err
}
if maxNumP != nil && minNum > *maxNumP {
minNum = *maxNumP
}
if noneNum < minNum {
desireReplicas := *gss.Spec.Replicas + int32(minNum) - int32(noneNum)
klog.Infof("GameServerSet %s/%s desire replicas is %d", ns, name, desireReplicas)
return &GetMetricsResponse{
@ -123,12 +164,8 @@ func (e *ExternalScaler) GetMetrics(ctx context.Context, metricRequest *GetMetri
desireReplicas = desireReplicas - numWaitToBeDeleted
} else {
// scale down when number of GameServers with None opsState more than maxAvailable defined by user
maxNum, err := strconv.ParseInt(metricRequest.ScaledObjectRef.GetScalerMetadata()[NoneGameServerMaxNumberKey], 10, 32)
if err != nil {
klog.Errorf("maxAvailable should be integer type, err: %s", err.Error())
}
if err == nil && noneNum > int(maxNum) {
desireReplicas = (desireReplicas) + int(maxNum) - (noneNum)
if maxNumP != nil && noneNum > *maxNumP {
desireReplicas = (desireReplicas) + *maxNumP - (noneNum)
}
}
@ -146,3 +183,39 @@ func NewExternalScaler(client client.Client) *ExternalScaler {
client: client,
}
}
// handleMinNum calculates the expected minimum number of GameServers from the given minNumStr.
// Supported formats:
// - integer: minNum >= 1,
// returns the fixed minimum number of GameServers with the None opsState.
// - float: 0 < minNum < 1,
// returns the minimum number of GameServers with the None opsState,
// calculated as a percentage of the total number of GameServers after scaling.
func handleMinNum(totalNum, noneNum int, minNumStr string) (int, error) {
if minNumStr == "" {
return 0, nil
}
n, err := strconv.ParseFloat(minNumStr, 32)
if err != nil {
return 0, err
}
switch {
case n > 0 && n < 1:
// for (noneNum + delta) / (totalNum + delta) >= n
// => delta >= (totalNum * n - noneNum) / (1 - n)
delta := (float64(totalNum)*n - float64(noneNum)) / (1 - n)
if delta <= 0 {
// no need to scale up
return 0, nil
}
// round to two decimals first to absorb float error, then take the ceiling
delta = math.Round(delta*100) / 100
minNum := int(math.Ceil(delta)) + noneNum
return minNum, nil
case n >= 1 || n == 0:
n = math.Ceil(n)
return int(n), nil
}
return 0, fmt.Errorf("invalid min number: must be greater than 0 or a valid percentage between 0 and 1")
}
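Note: the percentage branch above is easiest to check with concrete numbers. A minimal standalone sketch (not part of this change) reproducing the delta formula for the totalNum=100, noneNum=10, minAvailable="0.2" case covered by the tests below:

package main

import (
	"fmt"
	"math"
)

// To keep at least 20% of GameServers in the None opsState, scale up by delta
// such that (noneNum+delta)/(totalNum+delta) >= 0.2.
func main() {
	totalNum, noneNum, n := 100.0, 10.0, 0.2
	delta := (totalNum*n - noneNum) / (1 - n) // (100*0.2 - 10) / 0.8 = 12.5
	minNum := int(math.Ceil(delta)) + int(noneNum)
	fmt.Println(minNum) // 23, matching the "totalNum 100, noneNum 10, minNumStr 0.2" test case
}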


@ -0,0 +1,187 @@
package externalscaler
import (
"fmt"
"math"
"strconv"
"testing"
)
func TestHandleMinNum(t *testing.T) {
tests := []struct {
name string
totalNum int
noneNum int
minNumStr string
wantMin int
wantErr bool
}{
{
name: "invalid minNumStr - not a number",
totalNum: 10,
noneNum: 2,
minNumStr: "abc",
wantMin: 0,
wantErr: true,
},
{
name: "empty minNumStr - no scale up needed",
totalNum: 10,
noneNum: 2,
minNumStr: "",
wantMin: 0,
wantErr: false,
},
{
name: "percentage - delta <= 0, no scale up needed",
totalNum: 10,
noneNum: 5,
minNumStr: "0.5",
wantMin: 0,
wantErr: false,
},
{
name: "percentage - delta > 0, scale up needed",
totalNum: 10,
noneNum: 2,
minNumStr: "0.5",
wantMin: 8,
wantErr: false,
},
{
name: "percentage - delta > 0, minNum > totalNum",
totalNum: 5,
noneNum: 1,
minNumStr: "0.8",
wantMin: 16,
wantErr: false,
},
{
name: "percentage - exact match, no scale up",
totalNum: 20,
noneNum: 10,
minNumStr: "0.5",
wantMin: 0,
wantErr: false,
},
{
name: "percentage - slightly below, scale up by 1",
totalNum: 19,
noneNum: 9,
minNumStr: "0.5",
wantMin: 10,
wantErr: false,
},
{
name: "integer - minNum >= 1",
totalNum: 10,
noneNum: 2,
minNumStr: "3",
wantMin: 3,
wantErr: false,
},
{
name: "integer - minNum >= 1, float string",
totalNum: 10,
noneNum: 2,
minNumStr: "3.1",
wantMin: 4,
wantErr: false,
},
{
name: "integer - minNum is 1",
totalNum: 10,
noneNum: 0,
minNumStr: "1",
wantMin: 1,
wantErr: false,
},
{
name: "invalid n - zero",
totalNum: 10,
noneNum: 2,
minNumStr: "0",
wantMin: 0,
wantErr: false,
},
{
name: "invalid n - negative",
totalNum: 10,
noneNum: 2,
minNumStr: "-1",
wantMin: 0,
wantErr: true,
},
{
name: "invalid n - percentage >= 1 (e.g. 1.0 treated as integer 1)",
totalNum: 10,
noneNum: 2,
minNumStr: "1.0",
wantMin: 1,
wantErr: false,
},
{
name: "percentage - totalNum is 0, noneNum is 0",
totalNum: 0,
noneNum: 0,
minNumStr: "0.5",
wantMin: 0,
wantErr: false,
},
{
name: "integer - totalNum is 0, noneNum is 0",
totalNum: 0,
noneNum: 0,
minNumStr: "5",
wantMin: 5,
wantErr: false,
},
{
name: "percentage - totalNum is 1, noneNum is 0, minNumStr 0.5",
totalNum: 1,
noneNum: 0,
minNumStr: "0.5",
wantMin: 1,
wantErr: false,
},
{
name: "percentage - totalNum is 2, noneNum is 0, minNumStr 0.5",
totalNum: 2,
noneNum: 0,
minNumStr: "0.5",
wantMin: 2,
wantErr: false,
},
{
name: "percentage - totalNum 100, noneNum 10, minNumStr 0.2",
totalNum: 100,
noneNum: 10,
minNumStr: "0.2",
wantMin: 23,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotMin, err := handleMinNum(tt.totalNum, tt.noneNum, tt.minNumStr)
if (err != nil) != tt.wantErr {
t.Errorf("handleMinNum() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.wantErr && err != nil {
// If wantErr is true, we don't need to check gotMin
return
}
if gotMin != tt.wantMin {
// For debugging float calculations
if n, parseErr := strconv.ParseFloat(tt.minNumStr, 32); parseErr == nil && n > 0 && n < 1 {
delta := (float64(tt.totalNum)*n - float64(tt.noneNum)) / (1 - n)
fmt.Printf("Debug for %s: totalNum=%d, noneNum=%d, minNumStr=%s, n=%f, delta=%f, ceil(delta)=%f, calculatedMinNum=%d\n",
tt.name, tt.totalNum, tt.noneNum, tt.minNumStr, n, delta, math.Ceil(delta), int(math.Ceil(delta))+tt.noneNum)
}
t.Errorf("handleMinNum() = %v, want %v", gotMin, tt.wantMin)
}
})
}
}


@ -19,12 +19,13 @@ package metrics
import (
"context"
"errors"
"sync"
gamekruisev1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
kruisegamevisions "github.com/openkruise/kruise-game/pkg/client/informers/externalversions"
kruisegamelister "github.com/openkruise/kruise-game/pkg/client/listers/apis/v1alpha1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"sync"
)
type Controller struct {
@ -56,18 +57,22 @@ func NewController(kruisegameInformerFactory kruisegamevisions.SharedInformerFac
gameServerOpsStateLastChange: make(map[string]float64),
}
gsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
if _, err := gsInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.recordGsWhenAdd,
UpdateFunc: c.recordGsWhenUpdate,
DeleteFunc: c.recordGsWhenDelete,
})
}); err != nil {
return nil, err
}
gssInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
if _, err := gssInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
c.recordGssWhenChange(newObj)
},
DeleteFunc: c.recordGssWhenDelete,
})
}); err != nil {
return nil, err
}
return c, nil
}
@ -82,8 +87,10 @@ func (c *Controller) recordGsWhenAdd(obj interface{}) {
state := string(gs.Status.CurrentState)
opsState := string(gs.Spec.OpsState)
gssName := gs.Labels["game.kruise.io/owner-gss"]
GameServersStateCount.WithLabelValues(state).Inc()
GameServersOpsStateCount.WithLabelValues(opsState).Inc()
GameServersOpsStateCount.WithLabelValues(opsState, gssName, gs.Namespace).Inc()
dp := 0
up := 0
@ -112,13 +119,16 @@ func (c *Controller) recordGsWhenUpdate(oldObj, newObj interface{}) {
oldOpsState := string(oldGs.Spec.OpsState)
newState := string(newGs.Status.CurrentState)
newOpsState := string(newGs.Spec.OpsState)
gssName := newGs.Labels["game.kruise.io/owner-gss"]
if oldState != newState {
GameServersStateCount.WithLabelValues(newState).Inc()
GameServersStateCount.WithLabelValues(oldState).Dec()
}
if oldOpsState != newOpsState {
GameServersOpsStateCount.WithLabelValues(newOpsState).Inc()
GameServersOpsStateCount.WithLabelValues(oldOpsState).Dec()
GameServersOpsStateCount.WithLabelValues(newOpsState, gssName, newGs.Namespace).Inc()
GameServersOpsStateCount.WithLabelValues(oldOpsState, gssName, newGs.Namespace).Dec()
}
newDp := 0
@ -141,9 +151,10 @@ func (c *Controller) recordGsWhenDelete(obj interface{}) {
state := string(gs.Status.CurrentState)
opsState := string(gs.Spec.OpsState)
gssName := gs.Labels["game.kruise.io/owner-gss"]
GameServersStateCount.WithLabelValues(state).Dec()
GameServersOpsStateCount.WithLabelValues(opsState).Dec()
GameServersOpsStateCount.WithLabelValues(opsState, gssName, gs.Namespace).Dec()
GameServerDeletionPriority.DeleteLabelValues(gs.Name, gs.Namespace)
GameServerUpdatePriority.DeleteLabelValues(gs.Name, gs.Namespace)
}
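Note: the controller changes above only make sense together with the label-set change in the next hunk, where okg_gameservers_opsState_count gains gssName and namespace labels. A hedged sketch of the three-label metric and the Inc/Dec pairing on opsState transitions (a GaugeVec is assumed, matching the Inc/Dec usage; the GameServerSet name and namespace are made-up examples):

package main

import "github.com/prometheus/client_golang/prometheus"

var gameServersOpsStateCount = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "okg_gameservers_opsState_count",
		Help: "The number of gameservers per opsState",
	},
	[]string{"opsState", "gssName", "namespace"}, // new label set
)

func main() {
	prometheus.MustRegister(gameServersOpsStateCount)
	// On a transition the old series is decremented and the new one incremented,
	// so per-GameServerSet totals stay consistent.
	gameServersOpsStateCount.WithLabelValues("None", "minecraft", "game").Dec()
	gameServersOpsStateCount.WithLabelValues("WaitToBeDeleted", "minecraft", "game").Inc()
}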


@ -43,7 +43,7 @@ var (
Name: "okg_gameservers_opsState_count",
Help: "The number of gameservers per opsState",
},
[]string{"opsState"},
[]string{"opsState", "gssName", "namespace"},
)
GameServersTotal = prometheus.NewCounterVec(
prometheus.CounterOpts{

pkg/util/client/cache.go

@ -0,0 +1,77 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"flag"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
)
var (
disableNoDeepCopy bool
)
func init() {
flag.BoolVar(&disableNoDeepCopy, "disable-no-deepcopy", false, "If you are going to disable NoDeepCopy List in some controllers and webhooks.")
}
type internalCache struct {
cache.Cache
noDeepCopyLister *noDeepCopyLister
}
func NewCache(config *rest.Config, opts cache.Options) (cache.Cache, error) {
if opts.Scheme == nil {
opts.Scheme = clientgoscheme.Scheme
}
c, err := cache.New(config, opts)
if err != nil {
return nil, err
}
return &internalCache{
Cache: c,
noDeepCopyLister: &noDeepCopyLister{cache: c, scheme: opts.Scheme},
}, nil
}
func (ic *internalCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
if !disableNoDeepCopy && isDisableDeepCopy(opts) {
return ic.noDeepCopyLister.List(ctx, list, opts...)
}
return ic.Cache.List(ctx, list, opts...)
}
var DisableDeepCopy = disableDeepCopy{}
type disableDeepCopy struct{}
func (disableDeepCopy) ApplyToList(*client.ListOptions) {
}
func isDisableDeepCopy(opts []client.ListOption) bool {
for _, opt := range opts {
if opt == DisableDeepCopy {
return true
}
}
return false
}
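Note: the wrapper above only takes the no-deep-copy path when the caller opts in with the DisableDeepCopy list option (and -disable-no-deepcopy is unset), and it requires the manager cache to be built with this NewCache. A minimal caller-side sketch, assuming the package is imported as utilclient; objects listed this way are shared with the informer cache and must be treated as read-only:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	utilclient "github.com/openkruise/kruise-game/pkg/util/client"
)

// listPodsReadOnly lists pods without deep-copying them out of the cache.
// The returned objects must not be mutated.
func listPodsReadOnly(ctx context.Context, c client.Client, ns string) (*corev1.PodList, error) {
	podList := &corev1.PodList{}
	err := c.List(ctx, podList, client.InNamespace(ns), utilclient.DisableDeepCopy)
	return podList, err
}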


@ -1,167 +0,0 @@
/*
Copyright 2022 The Kruise Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"context"
"flag"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)
var (
disableNoDeepCopy bool
)
func init() {
flag.BoolVar(&disableNoDeepCopy, "disable-no-deepcopy", false, "If you are going to disable NoDeepCopy List in some controllers and webhooks.")
}
// NewClient creates the default caching client with disable deepcopy list from cache.
func NewClient(cache cache.Cache, config *rest.Config, options client.Options, uncachedObjects ...client.Object) (client.Client, error) {
c, err := client.New(config, options)
if err != nil {
return nil, err
}
uncachedGVKs := map[schema.GroupVersionKind]struct{}{}
for _, obj := range uncachedObjects {
gvk, err := apiutil.GVKForObject(obj, c.Scheme())
if err != nil {
return nil, err
}
uncachedGVKs[gvk] = struct{}{}
}
return &delegatingClient{
scheme: c.Scheme(),
mapper: c.RESTMapper(),
Reader: &delegatingReader{
CacheReader: cache,
ClientReader: c,
noDeepCopyLister: &noDeepCopyLister{cache: cache, scheme: c.Scheme()},
scheme: c.Scheme(),
uncachedGVKs: uncachedGVKs,
},
Writer: c,
StatusClient: c,
}, nil
}
type delegatingClient struct {
client.Reader
client.Writer
client.StatusClient
scheme *runtime.Scheme
mapper meta.RESTMapper
}
// Scheme returns the scheme this client is using.
func (d *delegatingClient) Scheme() *runtime.Scheme {
return d.scheme
}
// RESTMapper returns the rest mapper this client is using.
func (d *delegatingClient) RESTMapper() meta.RESTMapper {
return d.mapper
}
// delegatingReader forms a Reader that will cause Get and List requests for
// unstructured types to use the ClientReader while requests for any other type
// of object with use the CacheReader. This avoids accidentally caching the
// entire cluster in the common case of loading arbitrary unstructured objects
// (e.g. from OwnerReferences).
type delegatingReader struct {
CacheReader client.Reader
ClientReader client.Reader
noDeepCopyLister *noDeepCopyLister
uncachedGVKs map[schema.GroupVersionKind]struct{}
scheme *runtime.Scheme
cacheUnstructured bool
}
func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) {
gvk, err := apiutil.GVKForObject(obj, d.scheme)
if err != nil {
return false, err
}
// TODO: this is producing unsafe guesses that don't actually work,
// but it matches ~99% of the cases out there.
if meta.IsListType(obj) {
gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
}
if _, isUncached := d.uncachedGVKs[gvk]; isUncached {
return true, nil
}
if !d.cacheUnstructured {
_, isUnstructured := obj.(*unstructured.Unstructured)
_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
return isUnstructured || isUnstructuredList, nil
}
return false, nil
}
// Get retrieves an obj for a given object key from the Kubernetes Cluster.
func (d *delegatingReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
if isUncached, err := d.shouldBypassCache(obj); err != nil {
return err
} else if isUncached {
return d.ClientReader.Get(ctx, key, obj)
}
return d.CacheReader.Get(ctx, key, obj)
}
// List retrieves list of objects for a given namespace and list options.
func (d *delegatingReader) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
if isUncached, err := d.shouldBypassCache(list); err != nil {
return err
} else if isUncached {
return d.ClientReader.List(ctx, list, opts...)
}
if !disableNoDeepCopy && isDisableDeepCopy(opts) {
return d.noDeepCopyLister.List(ctx, list, opts...)
}
return d.CacheReader.List(ctx, list, opts...)
}
var DisableDeepCopy = disableDeepCopy{}
type disableDeepCopy struct{}
func (_ disableDeepCopy) ApplyToList(_ *client.ListOptions) {
}
func isDisableDeepCopy(opts []client.ListOption) bool {
for _, opt := range opts {
if opt == DisableDeepCopy {
return true
}
}
return false
}


@ -27,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
@ -97,6 +98,10 @@ func GetIndexListFromPodList(podList []corev1.Pod) []int {
return indexList
}
func GetIndexSetFromPodList(podList []corev1.Pod) sets.Set[int] {
return sets.New[int](GetIndexListFromPodList(podList)...)
}
func GetIndexListFromGsList(gsList []gameKruiseV1alpha1.GameServer) []int {
var indexList []int
for i := 0; i < len(gsList); i++ {
@ -185,6 +190,9 @@ func GetNewAstsFromGss(gss *gameKruiseV1alpha1.GameServerSet, asts *kruiseV1beta
}
asts.Spec.UpdateStrategy.RollingUpdate = rollingUpdateStatefulSetStrategy
// set PersistentVolumeClaimRetentionPolicy
asts.Spec.PersistentVolumeClaimRetentionPolicy = gss.Spec.PersistentVolumeClaimRetentionPolicy
return asts
}
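Note: GetIndexSetFromPodList is a thin wrapper that exposes the existing index list as a sets.Set[int], which pairs naturally with the reserve-ordinal helpers added in pkg/util/set.go below. An illustrative sketch (not taken from this change) of how a caller could combine them:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"

	gameKruiseV1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
	"github.com/openkruise/kruise-game/pkg/util"
)

// reservedButRunning returns indexes that are still backed by a Pod even
// though they appear in spec.reserveGameServerIds.
func reservedButRunning(gss *gameKruiseV1alpha1.GameServerSet, pods []corev1.Pod) sets.Set[int] {
	podIndexes := util.GetIndexSetFromPodList(pods)
	reserved := util.GetReserveOrdinalIntSet(gss.Spec.ReserveGameServerIds)
	return podIndexes.Intersection(reserved)
}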

pkg/util/set.go

@ -0,0 +1,199 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"slices"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"golang.org/x/exp/constraints"
)
// see github.com/openkruise/kruise/pkg/util/api/asts.go
// ParseRange parses the start and end values from a string like "1-3"
func ParseRange(s string) (start int, end int, err error) {
split := strings.Split(s, "-")
if len(split) != 2 {
return 0, 0, fmt.Errorf("invalid range %s", s)
}
start, err = strconv.Atoi(strings.TrimSpace(split[0]))
if err != nil {
return
}
end, err = strconv.Atoi(strings.TrimSpace(split[1]))
if err != nil {
return
}
if start > end {
return 0, 0, fmt.Errorf("invalid range %s", s)
}
return
}
// GetReserveOrdinalIntSet returns a set of ints from parsed reserveOrdinal
func GetReserveOrdinalIntSet(r []intstr.IntOrString) sets.Set[int] {
values := sets.New[int]()
for _, elem := range r {
if elem.Type == intstr.Int {
values.Insert(int(elem.IntVal))
} else {
start, end, err := ParseRange(elem.StrVal)
if err != nil {
klog.ErrorS(err, "invalid range reserveOrdinal found, an empty slice will be returned", "reserveOrdinal", elem.StrVal)
return nil
}
for i := start; i <= end; i++ {
values.Insert(i)
}
}
}
return values
}
// StringToOrdinalIntSet converts a string to a set of ordinals,
// supporting ranged ordinals like "1-3,5-7,10",
// e.g. "1, 2-5, 7" -> {1, 2, 3, 4, 5, 7}
func StringToOrdinalIntSet(str string, delimiter string) sets.Set[int] {
ret := sets.New[int]()
if str == "" {
return ret
}
strList := strings.Split(str, delimiter)
if len(strList) == 0 {
return ret
}
for _, s := range strList {
if strings.Contains(s, "-") {
start, end, err := ParseRange(s)
if err != nil {
klog.ErrorS(err, "invalid range found, skip", "range", s)
continue
}
for i := start; i <= end; i++ {
ret.Insert(i)
}
} else {
num, err := strconv.Atoi(strings.TrimSpace(s))
if err != nil {
klog.ErrorS(err, "invalid number found, skip", "number", s)
continue
}
ret.Insert(num)
}
}
return ret
}
// OrdinalSetToIntStrSlice converts a set of ordinals to a ranged intstr slice,
// e.g. {1, 2, 5, 6, 7, 10} -> [1, 2, "5-7", 10]
func OrdinalSetToIntStrSlice[T constraints.Integer](s sets.Set[T]) []intstr.IntOrString {
if s.Len() == 0 {
return nil
}
// get all ordinals and sort them
ordinals := s.UnsortedList()
slices.Sort(ordinals)
var ret []intstr.IntOrString
if len(ordinals) == 0 {
return ret
}
// Initialize sequence tracking
start := ordinals[0]
end := start
// Process all ordinals
for i := 1; i < len(ordinals); i++ {
curr := ordinals[i]
if curr == end+1 {
// Continue the current sequence
end = curr
} else {
// Add the completed sequence to results
appendSequence(&ret, start, end)
// Start a new sequence
start = curr
end = curr
}
}
// Handle the final sequence
appendSequence(&ret, start, end)
return ret
}
// Helper function to append a sequence to the result slice
func appendSequence[T constraints.Integer](ret *[]intstr.IntOrString, start, end T) {
if end < start {
start, end = end, start
}
switch {
case start == end:
*ret = append(*ret, intstr.FromInt(int(start)))
case end-start == 1:
*ret = append(*ret, intstr.FromInt(int(start)), intstr.FromInt(int(end)))
default:
*ret = append(*ret, intstr.FromString(fmt.Sprintf("%d-%d", start, end)))
}
}
// OrdinalSetToString converts a set of ordinals to a string with default delimiter ",",
// e.g. {1, 2, 5, 6, 7, 10} -> "1,2,5-7,10"
func OrdinalSetToString(s sets.Set[int]) string {
return intSetToString(s, ",")
}
func intSetToString(s sets.Set[int], delimiter string) string {
if s.Len() == 0 {
return ""
}
// get all ordinals and sort them
ss := OrdinalSetToIntStrSlice(s)
ret := make([]string, 0, len(ss))
for _, elem := range ss {
if elem.Type == intstr.Int {
ret = append(ret, strconv.Itoa(int(elem.IntVal)))
} else {
ret = append(ret, elem.StrVal)
}
}
return strings.Join(ret, delimiter)
}
// GetSetInANotInB returns a set of elements that are in set a but not in set b
func GetSetInANotInB[T comparable](a, b sets.Set[T]) sets.Set[T] {
ret := sets.New[T]()
for elem := range a {
if !b.Has(elem) {
ret.Insert(elem)
}
}
return ret
}
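Note: these helpers round-trip between the three representations of reserved ordinals: the annotation string, a sets.Set[int], and the []intstr.IntOrString used by the CRD. A small sketch using only the functions defined in this file:

package main

import (
	"fmt"

	"github.com/openkruise/kruise-game/pkg/util"
)

func main() {
	// "1, 2-5, 7" -> {1, 2, 3, 4, 5, 7}
	ordinals := util.StringToOrdinalIntSet("1, 2-5, 7", ",")

	// back to the compact string form: "1-5,7"
	fmt.Println(util.OrdinalSetToString(ordinals))

	// and to the CRD representation: ["1-5", 7]
	fmt.Println(util.OrdinalSetToIntStrSlice(ordinals))
}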

pkg/util/set_test.go

@ -0,0 +1,272 @@
/*
Copyright 2022 The Kruise Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"reflect"
"testing"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
)
func TestOrdinalSetToIntStrSlice(t *testing.T) {
tests := []struct {
name string
input sets.Set[int]
expected []intstr.IntOrString
}{
{
name: "single element",
input: sets.New(5),
expected: []intstr.IntOrString{intstr.FromInt(5)},
},
{
name: "continuous elements",
input: sets.New(1, 2, 3, 4, 5),
expected: []intstr.IntOrString{intstr.FromString("1-5")},
},
{
name: "multiple continuous elements",
input: sets.New(1, 2, 3, 5, 6, 7),
expected: []intstr.IntOrString{
intstr.FromString("1-3"),
intstr.FromString("5-7"),
},
},
{
name: "multiple continuous elements with single element",
input: sets.New(1, 2, 3, 5, 7, 8, 9, 11),
expected: []intstr.IntOrString{
intstr.FromString("1-3"),
intstr.FromInt(5),
intstr.FromString("7-9"),
intstr.FromInt(11),
},
},
{
name: "unsorted continuous elements",
input: sets.New(3, 1, 2, 4, 8, 6, 7),
expected: []intstr.IntOrString{
intstr.FromString("1-4"),
intstr.FromString("6-8"),
},
},
{
name: "non-continuous elements",
input: sets.New(1, 2, 5, 7, 9),
expected: []intstr.IntOrString{
intstr.FromInt(1),
intstr.FromInt(2),
intstr.FromInt(5),
intstr.FromInt(7),
intstr.FromInt(9),
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
result := OrdinalSetToIntStrSlice(tc.input)
if !reflect.DeepEqual(result, tc.expected) {
t.Errorf("OrdinalSetToIntStrSlice(%v) = %v, expected %v", tc.input.UnsortedList(), result, tc.expected)
}
})
}
}
// test OrdinalSetToIntStrSlice with different types
func TestOrdinalSetToIntStrSliceWithDifferentTypes(t *testing.T) {
int32Set := sets.New[int32](1, 2, 3, 5)
expected := []intstr.IntOrString{
intstr.FromString("1-3"),
intstr.FromInt(5),
}
result := OrdinalSetToIntStrSlice(int32Set)
if !reflect.DeepEqual(result, expected) {
t.Errorf("use int32 type test failed: got %v, expected %v", result, expected)
}
// test the uint type
uintSet := sets.New[uint](10, 11, 12, 15)
expected = []intstr.IntOrString{
intstr.FromString("10-12"),
intstr.FromInt(15),
}
result = OrdinalSetToIntStrSlice(uintSet)
if !reflect.DeepEqual(result, expected) {
t.Errorf("use uint type test failed: got %v, expected %v", result, expected)
}
}
func TestStringToOrdinalIntSet(t *testing.T) {
tests := []struct {
name string
str string
delimiter string
expected sets.Set[int]
}{
{
name: "empty string",
str: "",
delimiter: ",",
expected: sets.New[int](),
},
{
name: "single number",
str: "5",
delimiter: ",",
expected: sets.New(5),
},
{
name: "multiple numbers",
str: "1,3,5,7",
delimiter: ",",
expected: sets.New(1, 3, 5, 7),
},
{
name: "single range",
str: "1-5",
delimiter: ",",
expected: sets.New(1, 2, 3, 4, 5),
},
{
name: "multiple ranges",
str: "1-3,7-9",
delimiter: ",",
expected: sets.New(1, 2, 3, 7, 8, 9),
},
{
name: "mixed numbers and ranges",
str: "1-3,5,7-9,11",
delimiter: ",",
expected: sets.New(1, 2, 3, 5, 7, 8, 9, 11),
},
{
name: "with spaces",
str: "1-3, 5, 7-9, 11",
delimiter: ",",
expected: sets.New(1, 2, 3, 5, 7, 8, 9, 11),
},
{
name: "different delimiter",
str: "1-3;5;7-9;11",
delimiter: ";",
expected: sets.New(1, 2, 3, 5, 7, 8, 9, 11),
},
{
name: "invalid number",
str: "1,abc,3",
delimiter: ",",
expected: sets.New(1, 3),
},
{
name: "invalid range",
str: "1-3,5-abc,7-9",
delimiter: ",",
expected: sets.New(1, 2, 3, 7, 8, 9),
},
{
name: "inverted range",
str: "1-3,5-2,7-9",
delimiter: ",",
expected: sets.New(1, 2, 3, 7, 8, 9),
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
result := StringToOrdinalIntSet(tc.str, tc.delimiter)
if !result.Equal(tc.expected) {
t.Errorf("StringToOrdinalIntSet(%q, %q) = %v, expected %v",
tc.str, tc.delimiter, result.UnsortedList(), tc.expected.UnsortedList())
}
})
}
}
func TestIntSetToString(t *testing.T) {
tests := []struct {
name string
set sets.Set[int]
delimiter string
expected string
}{
{
name: "empty set",
set: sets.New[int](),
delimiter: ",",
expected: "",
},
{
name: "single element",
set: sets.New(5),
delimiter: ",",
expected: "5",
},
{
name: "multiple elements",
set: sets.New(1, 3, 5, 7),
delimiter: ",",
expected: "1,3,5,7",
},
{
name: "continuous elements",
set: sets.New(1, 2, 3, 4, 5),
delimiter: ",",
expected: "1-5",
},
{
name: "mixed continuous and single elements",
set: sets.New(1, 2, 3, 5, 7, 8, 9, 11),
delimiter: ",",
expected: "1-3,5,7-9,11",
},
{
name: "unsorted elements",
set: sets.New(5, 3, 1, 2, 4),
delimiter: ",",
expected: "1-5",
},
{
name: "different delimiter",
set: sets.New(1, 2, 3, 5, 7),
delimiter: ";",
expected: "1-3;5;7",
},
{
name: "non-continuous elements",
set: sets.New(1, 2, 5),
delimiter: ",",
expected: "1,2,5",
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
result := intSetToString(tc.set, tc.delimiter)
if result != tc.expected {
t.Errorf("intSetToString(%v, %q) = %q, expected %q",
tc.set.UnsortedList(), tc.delimiter, result, tc.expected)
}
})
}
}


@ -21,6 +21,8 @@ import (
"sort"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/util/intstr"
)
func IsNumInList(num int, list []int) bool {
@ -32,6 +34,15 @@ func IsNumInList(num int, list []int) bool {
return false
}
func IsNumInListInt32(num int32, list []int32) bool {
for _, n := range list {
if num == n {
return true
}
}
return false
}
func IsStringInList(str string, list []string) bool {
for _, s := range list {
if s == str {
@ -91,6 +102,29 @@ func StringToIntSlice(str string, delimiter string) []int {
return retSlice
}
func StringToIntStrSlice(str string, delimiter string) []intstr.IntOrString {
if str == "" || delimiter == "" {
return nil
}
strList := strings.Split(str, delimiter)
if len(strList) == 0 {
return nil
}
var retSlice []intstr.IntOrString
for _, item := range strList {
if item == "" {
continue
}
val, err := strconv.Atoi(strings.TrimSpace(item))
if err != nil {
retSlice = append(retSlice, intstr.FromString(strings.TrimSpace(item)))
} else {
retSlice = append(retSlice, intstr.FromInt(val))
}
}
return retSlice
}
func StringToInt32Slice(str string, delimiter string) []int32 {
if str == "" {
return nil


@ -16,7 +16,11 @@ limitations under the License.
package util
import "testing"
import (
"testing"
"k8s.io/apimachinery/pkg/util/intstr"
)
func TestIsNumInList(t *testing.T) {
tests := []struct {
@ -331,3 +335,110 @@ func TestIsHasNegativeNum(t *testing.T) {
}
}
}
func TestStringToIntStrSlice(t *testing.T) {
tests := []struct {
name string
str string
delimiter string
result []intstr.IntOrString
}{
{
name: "mixed int and string values",
str: "4,test,1",
delimiter: ",",
result: []intstr.IntOrString{
intstr.FromInt(4),
intstr.FromString("test"),
intstr.FromInt(1),
},
},
{
name: "only int values",
str: "4,5,1",
delimiter: ",",
result: []intstr.IntOrString{
intstr.FromInt(4),
intstr.FromInt(5),
intstr.FromInt(1),
},
},
{
name: "only string values",
str: "a,b,c",
delimiter: ",",
result: []intstr.IntOrString{
intstr.FromString("a"),
intstr.FromString("b"),
intstr.FromString("c"),
},
},
{
name: "empty string",
str: "",
delimiter: ",",
result: nil,
},
{
name: "empty delimiter",
str: "1,2,3",
delimiter: "",
result: nil,
},
{
name: "empty parts",
str: "1,,3",
delimiter: ",",
result: []intstr.IntOrString{
intstr.FromInt(1),
intstr.FromInt(3),
},
},
{
name: "different delimiter",
str: "1:test:3",
delimiter: ":",
result: []intstr.IntOrString{
intstr.FromInt(1),
intstr.FromString("test"),
intstr.FromInt(3),
},
},
{
name: "reversed ids slice",
str: "1,2-5,6,7-10",
delimiter: ",",
result: []intstr.IntOrString{
intstr.FromInt(1),
intstr.FromString("2-5"),
intstr.FromInt(6),
intstr.FromString("7-10"),
},
},
{
name: "has space in the string",
str: "1, 2-3, 4",
delimiter: ",",
result: []intstr.IntOrString{
intstr.FromInt(1),
intstr.FromString("2-3"),
intstr.FromInt(4),
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
actual := StringToIntStrSlice(test.str, test.delimiter)
if len(actual) != len(test.result) {
t.Errorf("expect length %v but got %v", len(test.result), len(actual))
return
}
for i := range len(actual) {
if test.result[i].String() != actual[i].String() {
t.Errorf("index %d: expect %v but got %v", i, test.result[i], actual[i])
}
}
})
}
}


@ -20,6 +20,9 @@ import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
gameKruiseV1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/cloudprovider/errors"
"github.com/openkruise/kruise-game/cloudprovider/manager"
@ -30,10 +33,8 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"net/http"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"time"
)
const (
@ -48,7 +49,7 @@ type patchResult struct {
type PodMutatingHandler struct {
Client client.Client
decoder *admission.Decoder
decoder admission.Decoder
CloudProviderManager *manager.ProviderManager
eventRecorder record.EventRecorder
}
@ -117,7 +118,7 @@ func (pmh *PodMutatingHandler) Handle(ctx context.Context, req admission.Request
}
}
func getPodFromRequest(req admission.Request, decoder *admission.Decoder) (*corev1.Pod, error) {
func getPodFromRequest(req admission.Request, decoder admission.Decoder) (*corev1.Pod, error) {
pod := &corev1.Pod{}
if req.Operation == admissionv1.Delete {
err := decoder.DecodeRaw(req.OldObject, pod)
@ -141,7 +142,7 @@ func getAdmissionResponse(req admission.Request, result patchResult) admission.R
return admission.PatchResponseFromRaw(req.Object.Raw, marshaledPod)
}
func NewPodMutatingHandler(client client.Client, decoder *admission.Decoder, cpm *manager.ProviderManager, recorder record.EventRecorder) *PodMutatingHandler {
func NewPodMutatingHandler(client client.Client, decoder admission.Decoder, cpm *manager.ProviderManager, recorder record.EventRecorder) *PodMutatingHandler {
return &PodMutatingHandler{
Client: client,
decoder: decoder,


@ -2,6 +2,9 @@ package webhook
import (
"context"
"reflect"
"testing"
gameKruiseV1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
admissionv1 "k8s.io/api/admission/v1"
corev1 "k8s.io/api/core/v1"
@ -9,11 +12,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"reflect"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"testing"
)
var (
@ -309,10 +310,7 @@ func TestGetPodFromRequest(t *testing.T) {
},
}
decoder, err := admission.NewDecoder(runtime.NewScheme())
if err != nil {
t.Error(err)
}
decoder := admission.NewDecoder(runtime.NewScheme())
for i, test := range tests {
actual, err := getPodFromRequest(test.req, decoder)


@ -140,7 +140,7 @@ func (w *Writer) Write(payload map[string]FileProjection) error {
}
oldTsPath := path.Join(w.targetDir, oldTsDir)
var pathsToRemove sets.String
var pathsToRemove sets.Set[string]
// if there was no old version, there's nothing to remove
if len(oldTsDir) != 0 {
// (3)
@ -311,8 +311,8 @@ func shouldWriteFile(path string, content []byte) (bool, error) {
// pathsToRemove walks the current version of the data directory and
// determines which paths should be removed (if any) after the payload is
// written to the target directory.
func (w *Writer) pathsToRemove(payload map[string]FileProjection, oldTsDir string) (sets.String, error) {
paths := sets.NewString()
func (w *Writer) pathsToRemove(payload map[string]FileProjection, oldTsDir string) (sets.Set[string], error) {
paths := sets.New[string]()
visitor := func(path string, info os.FileInfo, err error) error {
relativePath := strings.TrimPrefix(path, oldTsDir)
relativePath = strings.TrimPrefix(relativePath, string(os.PathSeparator))
@ -331,7 +331,7 @@ func (w *Writer) pathsToRemove(payload map[string]FileProjection, oldTsDir strin
return nil, err
}
newPaths := sets.NewString()
newPaths := sets.New[string]()
for file := range payload {
// add all subpaths for the payload to the set of new paths
// to avoid attempting to remove non-empty dirs
@ -437,7 +437,7 @@ func (w *Writer) createUserVisibleFiles(payload map[string]FileProjection) error
// removeUserVisiblePaths removes the set of paths from the user-visible
// portion of the writer's target directory.
func (w *Writer) removeUserVisiblePaths(paths sets.String) error {
func (w *Writer) removeUserVisiblePaths(paths sets.Set[string]) error {
ps := string(os.PathSeparator)
var lasterr error
for p := range paths {


@ -19,18 +19,21 @@ package webhook
import (
"context"
"fmt"
"net/http"
gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
"github.com/openkruise/kruise-game/cloudprovider/manager"
"github.com/openkruise/kruise-game/pkg/util"
admissionv1 "k8s.io/api/admission/v1"
"net/http"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)
type GssValidaatingHandler struct {
Client client.Client
decoder *admission.Decoder
decoder admission.Decoder
CloudProviderManager *manager.ProviderManager
}
@ -64,12 +67,50 @@ func (gvh *GssValidaatingHandler) Handle(ctx context.Context, req admission.Requ
func validatingGss(gss *gamekruiseiov1alpha1.GameServerSet, client client.Client) (bool, string) {
// validate reserveGameServerIds
rgsIds := gss.Spec.ReserveGameServerIds
if util.IsRepeat(rgsIds) {
return false, fmt.Sprintf("reserveGameServerIds should not be repeat. Now it is %v", rgsIds)
vset := sets.Set[int]{}
validate := func(ids intstr.IntOrString) (bool, string) {
switch ids.Type {
case intstr.Int:
id := ids.IntVal
if id < 0 {
return false, fmt.Sprintf("reserveGameServerIds should be greater or equal to 0. Now it is %d", id)
}
if vset.Has(int(id)) {
return false, fmt.Sprintf("reserveGameServerIds should not be repeat. Now it is %d", id)
}
vset.Insert(int(id))
case intstr.String:
start, end, err := util.ParseRange(ids.StrVal)
if err != nil {
return false, fmt.Sprintf("invalid range reserveGameServerIds found, an empty slice will be returned: %s", ids.StrVal)
}
if start < 0 {
return false, fmt.Sprintf("reserveGameServerIds should be greater or equal to 0. Now it is %d", start)
}
if end < 0 {
return false, fmt.Sprintf("reserveGameServerIds should be greater or equal to 0. Now it is %d", end)
}
if start > end {
return false, fmt.Sprintf("invalid range reserveGameServerIds found, an empty slice will be returned: %s", ids.StrVal)
}
if vset.Has(int(start)) || vset.Has(int(end)) {
return false, fmt.Sprintf("reserveGameServerIds should not be repeat. Now it is %d-%d", start, end)
}
for i := start; i <= end; i++ {
if vset.Has(int(i)) {
return false, fmt.Sprintf("reserveGameServerIds should not be repeat. Now it is %d-%d", start, end)
}
vset.Insert(int(i))
}
}
return true, ""
}
for _, id := range gss.Spec.ReserveGameServerIds {
if ok, reason := validate(id); !ok {
return false, reason
}
if util.IsHasNegativeNum(rgsIds) {
return false, fmt.Sprintf("reserveGameServerIds should be greater or equal to 0. Now it is %v", rgsIds)
}
return true, "general validating success"


@ -20,7 +20,6 @@ import (
"context"
"flag"
"fmt"
"log"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -50,6 +49,7 @@ var (
webhookCertDir string
webhookServiceNamespace string
webhookServiceName string
enableCertGeneration bool
)
func init() {
@ -57,6 +57,7 @@ func init() {
flag.StringVar(&webhookCertDir, "webhook-server-certs-dir", "/tmp/webhook-certs/", "Path to the X.509-formatted webhook certificate.")
flag.StringVar(&webhookServiceNamespace, "webhook-service-namespace", "kruise-game-system", "kruise game webhook service namespace.")
flag.StringVar(&webhookServiceName, "webhook-service-name", "kruise-game-webhook-service", "kruise game wehook service name.")
flag.BoolVar(&enableCertGeneration, "enable-cert-generation", true, "Whether to enable self-generated certs for webhook server. If set to false, you need to provide the certs in the specified directory.")
}
// +kubebuilder:rbac:groups=apps.kruise.io,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
@ -85,6 +86,8 @@ func init() {
// +kubebuilder:rbac:groups=elbv2.k8s.aws,resources=targetgroupbindings,verbs=create;get;list;patch;update;watch
// +kubebuilder:rbac:groups=elbv2.services.k8s.aws,resources=listeners,verbs=create;get;list;patch;update;watch
// +kubebuilder:rbac:groups=elbv2.services.k8s.aws,resources=targetgroups,verbs=create;get;list;patch;update;watch
// +kubebuilder:rbac:groups=networking.cloud.tencent.com,resources=dedicatedclblisteners,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=networking.cloud.tencent.com,resources=dedicatedclblisteners/status,verbs=get
type Webhook struct {
mgr manager.Manager
@ -100,13 +103,9 @@ func NewWebhookServer(mgr manager.Manager, cpm *manager2.ProviderManager) *Webho
func (ws *Webhook) SetupWithManager(mgr manager.Manager) *Webhook {
server := mgr.GetWebhookServer()
server.Host = "0.0.0.0"
server.Port = webhookPort
server.CertDir = webhookCertDir
decoder, err := admission.NewDecoder(runtime.NewScheme())
if err != nil {
log.Fatalln(err)
}
decoder := admission.NewDecoder(runtime.NewScheme())
// Register the webhook server
recorder := mgr.GetEventRecorderFor("kruise-game-webhook")
server.Register(mutatePodPath, &webhook.Admission{Handler: NewPodMutatingHandler(mgr.GetClient(), decoder, ws.cpm, recorder)})
server.Register(validateGssPath, &webhook.Admission{Handler: &GssValidaatingHandler{Client: mgr.GetClient(), decoder: decoder, CloudProviderManager: ws.cpm}})
@ -115,11 +114,14 @@ func (ws *Webhook) SetupWithManager(mgr manager.Manager) *Webhook {
// Initialize create MutatingWebhookConfiguration before start
func (ws *Webhook) Initialize(cfg *rest.Config) error {
if enableCertGeneration {
dnsName := generator.ServiceToCommonName(webhookServiceNamespace, webhookServiceName)
var certWriter writer.CertWriter
// if self-generated certs are enabled, ensure they are generated and written to the specified directory
if webhookCertDir == "" {
return fmt.Errorf("webhook cert dir is not set")
}
var err error
var certWriter writer.CertWriter
certWriter, err = writer.NewFSCertWriter(writer.FSCertWriterOptions{Path: webhookCertDir})
if err != nil {
return fmt.Errorf("failed to constructs FSCertWriter: %v", err)
@ -135,53 +137,54 @@ func (ws *Webhook) Initialize(cfg *rest.Config) error {
}
clientSet, err := clientset.NewForConfig(cfg)
if err != nil {
return err
}
if err := checkValidatingConfiguration(dnsName, clientSet, certs.CACert); err != nil {
if err := checkValidatingConfiguration(clientSet, certs.CACert); err != nil {
return fmt.Errorf("failed to check mutating webhook,because of %s", err.Error())
}
if err := checkMutatingConfiguration(dnsName, clientSet, certs.CACert); err != nil {
if err := checkMutatingConfiguration(clientSet, certs.CACert); err != nil {
return fmt.Errorf("failed to check mutating webhook,because of %s", err.Error())
}
}
return nil
}
func checkValidatingConfiguration(dnsName string, kubeClient clientset.Interface, caBundle []byte) error {
func checkValidatingConfiguration(kubeClient clientset.Interface, caBundle []byte) error {
vwc, err := kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Get(context.TODO(), validatingWebhookConfigurationName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
// create new webhook
return createValidatingWebhook(dnsName, kubeClient, caBundle)
return createValidatingWebhook(kubeClient, caBundle)
} else {
return err
}
}
return updateValidatingWebhook(vwc, dnsName, kubeClient, caBundle)
return updateValidatingWebhook(vwc, kubeClient, caBundle)
}
func checkMutatingConfiguration(dnsName string, kubeClient clientset.Interface, caBundle []byte) error {
func checkMutatingConfiguration(kubeClient clientset.Interface, caBundle []byte) error {
mwc, err := kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(context.TODO(), mutatingWebhookConfigurationName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
// create new webhook
return createMutatingWebhook(dnsName, kubeClient, caBundle)
return createMutatingWebhook(kubeClient, caBundle)
} else {
return err
}
}
return updateMutatingWebhook(mwc, dnsName, kubeClient, caBundle)
return updateMutatingWebhook(mwc, kubeClient, caBundle)
}
func createValidatingWebhook(dnsName string, kubeClient clientset.Interface, caBundle []byte) error {
func createValidatingWebhook(kubeClient clientset.Interface, caBundle []byte) error {
webhookConfig := &admissionregistrationv1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: validatingWebhookConfigurationName,
},
Webhooks: getValidatingWebhookConf(dnsName, caBundle),
Webhooks: getValidatingWebhookConf(caBundle),
}
if _, err := kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.TODO(), webhookConfig, metav1.CreateOptions{}); err != nil {
@ -190,12 +193,12 @@ func createValidatingWebhook(dnsName string, kubeClient clientset.Interface, caB
return nil
}
func createMutatingWebhook(dnsName string, kubeClient clientset.Interface, caBundle []byte) error {
func createMutatingWebhook(kubeClient clientset.Interface, caBundle []byte) error {
webhookConfig := &admissionregistrationv1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: mutatingWebhookConfigurationName,
},
Webhooks: getMutatingWebhookConf(dnsName, caBundle),
Webhooks: getMutatingWebhookConf(caBundle),
}
if _, err := kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), webhookConfig, metav1.CreateOptions{}); err != nil {
@ -204,28 +207,28 @@ func createMutatingWebhook(dnsName string, kubeClient clientset.Interface, caBun
return nil
}
func updateValidatingWebhook(vwc *admissionregistrationv1.ValidatingWebhookConfiguration, dnsName string, kubeClient clientset.Interface, caBundle []byte) error {
vwc.Webhooks = getValidatingWebhookConf(dnsName, caBundle)
func updateValidatingWebhook(vwc *admissionregistrationv1.ValidatingWebhookConfiguration, kubeClient clientset.Interface, caBundle []byte) error {
vwc.Webhooks = getValidatingWebhookConf(caBundle)
if _, err := kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations().Update(context.TODO(), vwc, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("failed to update %s: %v", validatingWebhookConfigurationName, err)
}
return nil
}
func updateMutatingWebhook(mwc *admissionregistrationv1.MutatingWebhookConfiguration, dnsName string, kubeClient clientset.Interface, caBundle []byte) error {
mwc.Webhooks = getMutatingWebhookConf(dnsName, caBundle)
func updateMutatingWebhook(mwc *admissionregistrationv1.MutatingWebhookConfiguration, kubeClient clientset.Interface, caBundle []byte) error {
mwc.Webhooks = getMutatingWebhookConf(caBundle)
if _, err := kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations().Update(context.TODO(), mwc, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("failed to update %s: %v", mutatingWebhookConfigurationName, err)
}
return nil
}
func getValidatingWebhookConf(dnsName string, caBundle []byte) []admissionregistrationv1.ValidatingWebhook {
func getValidatingWebhookConf(caBundle []byte) []admissionregistrationv1.ValidatingWebhook {
sideEffectClassNone := admissionregistrationv1.SideEffectClassNone
fail := admissionregistrationv1.Fail
return []admissionregistrationv1.ValidatingWebhook{
{
Name: dnsName,
Name: "vgameserverset.kb.io",
SideEffects: &sideEffectClassNone,
FailurePolicy: &fail,
AdmissionReviewVersions: []string{"v1", "v1beta1"},
@ -251,12 +254,12 @@ func getValidatingWebhookConf(dnsName string, caBundle []byte) []admissionregist
}
}
func getMutatingWebhookConf(dnsName string, caBundle []byte) []admissionregistrationv1.MutatingWebhook {
func getMutatingWebhookConf(caBundle []byte) []admissionregistrationv1.MutatingWebhook {
sideEffectClassNone := admissionregistrationv1.SideEffectClassNone
fail := admissionregistrationv1.Fail
return []admissionregistrationv1.MutatingWebhook{
{
Name: dnsName,
Name: "mgameserverset.kb.io",
SideEffects: &sideEffectClassNone,
FailurePolicy: &fail,
AdmissionReviewVersions: []string{"v1", "v1beta1"},
@ -290,3 +293,11 @@ func getMutatingWebhookConf(dnsName string, caBundle []byte) []admissionregistra
},
}
}
func GetPort() int {
return webhookPort
}
func GetCertDir() string {
return webhookCertDir
}


@ -2,29 +2,28 @@ package webhook
import (
"context"
"reflect"
"testing"
v1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"reflect"
"testing"
)
func TestCheckValidatingConfiguration(t *testing.T) {
tests := []struct {
vwcNow *v1.ValidatingWebhookConfiguration
dnsName string
caBundle []byte
vwcNew *v1.ValidatingWebhookConfiguration
}{
{
vwcNow: nil,
dnsName: "dnsName",
caBundle: []byte(`xxx`),
vwcNew: &v1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: validatingWebhookConfigurationName,
},
Webhooks: getValidatingWebhookConf("dnsName", []byte(`xxx`)),
Webhooks: getValidatingWebhookConf([]byte(`xxx`)),
},
},
{
@ -32,15 +31,14 @@ func TestCheckValidatingConfiguration(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: validatingWebhookConfigurationName,
},
Webhooks: getValidatingWebhookConf("dnsName", []byte(`old`)),
Webhooks: getValidatingWebhookConf([]byte(`old`)),
},
dnsName: "dnsName",
caBundle: []byte(`new`),
vwcNew: &v1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: validatingWebhookConfigurationName,
},
Webhooks: getValidatingWebhookConf("dnsName", []byte(`new`)),
Webhooks: getValidatingWebhookConf([]byte(`new`)),
},
},
{
@ -49,13 +47,12 @@ func TestCheckValidatingConfiguration(t *testing.T) {
Name: validatingWebhookConfigurationName,
},
},
dnsName: "dnsName",
caBundle: []byte(`new`),
vwcNew: &v1.ValidatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: validatingWebhookConfigurationName,
},
Webhooks: getValidatingWebhookConf("dnsName", []byte(`new`)),
Webhooks: getValidatingWebhookConf([]byte(`new`)),
},
},
}
@ -69,7 +66,7 @@ func TestCheckValidatingConfiguration(t *testing.T) {
}
}
if err := checkValidatingConfiguration(test.dnsName, clientSet, test.caBundle); err != nil {
if err := checkValidatingConfiguration(clientSet, test.caBundle); err != nil {
t.Error(err)
}
@ -87,19 +84,17 @@ func TestCheckValidatingConfiguration(t *testing.T) {
func TestCheckMutatingConfiguration(t *testing.T) {
tests := []struct {
mwcNow *v1.MutatingWebhookConfiguration
dnsName string
caBundle []byte
mwcNew *v1.MutatingWebhookConfiguration
}{
{
mwcNow: nil,
dnsName: "dnsName",
caBundle: []byte(`xxx`),
mwcNew: &v1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: mutatingWebhookConfigurationName,
},
Webhooks: getMutatingWebhookConf("dnsName", []byte(`xxx`)),
Webhooks: getMutatingWebhookConf([]byte(`xxx`)),
},
},
{
@ -107,15 +102,14 @@ func TestCheckMutatingConfiguration(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{
Name: mutatingWebhookConfigurationName,
},
Webhooks: getMutatingWebhookConf("dnsName", []byte(`old`)),
Webhooks: getMutatingWebhookConf([]byte(`old`)),
},
dnsName: "dnsName",
caBundle: []byte(`new`),
mwcNew: &v1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: mutatingWebhookConfigurationName,
},
Webhooks: getMutatingWebhookConf("dnsName", []byte(`new`)),
Webhooks: getMutatingWebhookConf([]byte(`new`)),
},
},
{
@ -124,13 +118,12 @@ func TestCheckMutatingConfiguration(t *testing.T) {
Name: mutatingWebhookConfigurationName,
},
},
dnsName: "dnsName",
caBundle: []byte(`new`),
mwcNew: &v1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{
Name: mutatingWebhookConfigurationName,
},
Webhooks: getMutatingWebhookConf("dnsName", []byte(`new`)),
Webhooks: getMutatingWebhookConf([]byte(`new`)),
},
},
}
@ -144,7 +137,7 @@ func TestCheckMutatingConfiguration(t *testing.T) {
}
}
if err := checkMutatingConfiguration(test.dnsName, clientSet, test.caBundle); err != nil {
if err := checkMutatingConfiguration(clientSet, test.caBundle); err != nil {
t.Error(err)
}


@ -49,8 +49,8 @@ func (client *Client) CreateNamespace() error {
}
func (client *Client) DeleteNamespace() error {
return wait.PollImmediate(5*time.Second, 3*time.Minute,
func() (done bool, err error) {
return wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
err = client.kubeClint.CoreV1().Namespaces().Delete(context.TODO(), Namespace, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
@ -103,8 +103,8 @@ func (client *Client) UpdateGameServerSet(gss *gameKruiseV1alpha1.GameServerSet)
}
func (client *Client) DeleteGameServerSet() error {
return wait.PollImmediate(3*time.Second, time.Minute, func() (done bool, err error) {
err = client.kruisegameClient.GameV1alpha1().GameServerSets(Namespace).Delete(context.TODO(), GameServerSet, metav1.DeleteOptions{})
return wait.PollUntilContextTimeout(context.TODO(), 3*time.Second, time.Minute, true, func(ctx context.Context) (done bool, err error) {
err = client.kruisegameClient.GameV1alpha1().GameServerSets(Namespace).Delete(ctx, GameServerSet, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return true, nil
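Note: wait.PollImmediate is deprecated in current k8s.io/apimachinery releases; the e2e helpers now use wait.PollUntilContextTimeout, which takes a context, an explicit immediate flag, and a condition that receives the context. A minimal sketch of the migrated call shape (the condition body is a placeholder):

package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForSomething(ctx context.Context) error {
	// old: wait.PollImmediate(5*time.Second, 3*time.Minute, func() (bool, error) { ... })
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, true,
		func(ctx context.Context) (done bool, err error) {
			// placeholder: poll until the resource reaches the desired state
			return true, nil
		})
}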


@ -1,8 +1,13 @@
package framework
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
gamekruiseiov1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
kruisegameclientset "github.com/openkruise/kruise-game/pkg/client/clientset/versioned"
"github.com/openkruise/kruise-game/pkg/util"
@ -15,9 +20,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"strconv"
"strings"
"time"
)
type Framework struct {
@ -52,8 +54,8 @@ func (f *Framework) AfterSuit() error {
}
func (f *Framework) AfterEach() error {
return wait.PollImmediate(5*time.Second, 3*time.Minute,
func() (done bool, err error) {
return wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
err = f.client.DeleteGameServerSet()
if err != nil && !apierrors.IsNotFound(err) {
{
@ -122,7 +124,7 @@ func (f *Framework) DeployGssWithServiceQualities() (*gamekruiseiov1alpha1.GameS
return f.client.CreateGameServerSet(gss)
}
func (f *Framework) GameServerScale(gss *gamekruiseiov1alpha1.GameServerSet, desireNum int, reserveGsId *int) (*gamekruiseiov1alpha1.GameServerSet, error) {
func (f *Framework) GameServerScale(gss *gamekruiseiov1alpha1.GameServerSet, desireNum int, reserveGsId *intstr.IntOrString) (*gamekruiseiov1alpha1.GameServerSet, error) {
// TODO: change patch type
newReserves := gss.Spec.ReserveGameServerIds
if reserveGsId != nil {
@ -174,8 +176,8 @@ func (f *Framework) ChangeGameServerDeletionPriority(gsName string, deletionPrio
}
func (f *Framework) WaitForGsCreated(gss *gamekruiseiov1alpha1.GameServerSet) error {
return wait.PollImmediate(5*time.Second, 3*time.Minute,
func() (done bool, err error) {
return wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
gssName := gss.GetName()
labelSelector := labels.SelectorFromSet(map[string]string{
gamekruiseiov1alpha1.GameServerOwnerGssKey: gssName,
@ -200,8 +202,8 @@ func (f *Framework) WaitForGsCreated(gss *gamekruiseiov1alpha1.GameServerSet) er
}
func (f *Framework) WaitForUpdated(gss *gamekruiseiov1alpha1.GameServerSet, name, image string) error {
return wait.PollImmediate(10*time.Second, 10*time.Minute,
func() (done bool, err error) {
return wait.PollUntilContextTimeout(context.TODO(), 10*time.Second, 10*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
gssName := gss.GetName()
labelSelector := labels.SelectorFromSet(map[string]string{
gamekruiseiov1alpha1.GameServerOwnerGssKey: gssName,
@ -260,8 +262,8 @@ func (f *Framework) ExpectGssCorrect(gss *gamekruiseiov1alpha1.GameServerSet, ex
}
func (f *Framework) WaitForGsOpsStateUpdate(gsName string, opsState string) error {
return wait.PollImmediate(5*time.Second, 1*time.Minute,
func() (done bool, err error) {
return wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
pod, err := f.client.GetPod(gsName)
if err != nil {
return false, err
@ -275,8 +277,8 @@ func (f *Framework) WaitForGsOpsStateUpdate(gsName string, opsState string) erro
}
func (f *Framework) WaitForGsDeletionPriorityUpdated(gsName string, deletionPriority string) error {
return wait.PollImmediate(5*time.Second, 1*time.Minute,
func() (done bool, err error) {
return wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
pod, err := f.client.GetPod(gsName)
if err != nil {
return false, err
@@ -294,8 +296,8 @@ func (f *Framework) DeletePodDirectly(index int) error {
podName := client.GameServerSet + "-" + strconv.Itoa(index)
// get
if err := wait.PollImmediate(5*time.Second, 3*time.Minute,
func() (done bool, err error) {
if err := wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
pod, err := f.client.GetPod(podName)
if err != nil {
@@ -313,8 +315,8 @@ func (f *Framework) DeletePodDirectly(index int) error {
}
// check
return wait.PollImmediate(5*time.Second, 3*time.Minute,
func() (done bool, err error) {
return wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
pod, err := f.client.GetPod(podName)
if err != nil {
return false, err
@@ -327,8 +329,8 @@ func (f *Framework) DeletePodDirectly(index int) error {
}
func (f *Framework) WaitForPodDeleted(podName string) error {
return wait.PollImmediate(5*time.Second, 3*time.Minute,
func() (done bool, err error) {
return wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
_, err = f.client.GetPod(podName)
if apierrors.IsNotFound(err) {
return true, nil
@@ -338,8 +340,8 @@ func (f *Framework) WaitForPodDeleted(podName string) error {
}
func (f *Framework) ExpectGsCorrect(gsName, opsState, dp, up string) error {
return wait.PollImmediate(5*time.Second, 3*time.Minute,
func() (done bool, err error) {
return wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 5*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
gs, err := f.client.GetGameServer(gsName)
if err != nil {
return false, nil
@@ -353,8 +355,8 @@ func (f *Framework) ExpectGsCorrect(gsName, opsState, dp, up string) error {
}
func (f *Framework) WaitForGsUpdatePriorityUpdated(gsName string, updatePriority string) error {
return wait.PollImmediate(5*time.Second, 1*time.Minute,
func() (done bool, err error) {
return wait.PollUntilContextTimeout(context.TODO(), 5*time.Second, 3*time.Minute, true,
func(ctx context.Context) (done bool, err error) {
pod, err := f.client.GetPod(gsName)
if err != nil {
return false, err

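One more change in this file worth calling out: GameServerScale now takes the reserved GameServer id as a *intstr.IntOrString rather than a *int, matching the type used by gss.Spec.ReserveGameServerIds. A hedged caller-side sketch; the helper name and index value are illustrative:

package sketch

import (
	"k8s.io/apimachinery/pkg/util/intstr"
)

// buildReserveID wraps a numeric GameServer index into the IntOrString value
// that GameServerScale now expects (intstr.FromString would cover string ids).
func buildReserveID(index int32) *intstr.IntOrString {
	id := intstr.FromInt32(index)
	return &id
}

A call site would then look roughly like f.GameServerScale(gss, 3, buildReserveID(2)), assuming the suite's f and gss, with GameServer id 2 being the one to reserve.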

@@ -1,6 +1,8 @@
package testcase
import (
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
gameKruiseV1alpha1 "github.com/openkruise/kruise-game/apis/v1alpha1"
@@ -36,6 +38,9 @@ func RunTestCases(f *framework.Framework) {
_, err = f.MarkGameServerOpsState(gss.GetName()+"-2", string(gameKruiseV1alpha1.WaitToDelete))
gomega.Expect(err).To(gomega.BeNil())
// sleep for a while to wait for the status update
time.Sleep(5 * time.Second)
err = f.WaitForGsOpsStateUpdate(gss.GetName()+"-2", string(gameKruiseV1alpha1.WaitToDelete))
gomega.Expect(err).To(gomega.BeNil())
@@ -104,6 +109,9 @@ func RunTestCases(f *framework.Framework) {
_, err = f.ChangeGameServerDeletionPriority(gss.GetName()+"-1", "100")
gomega.Expect(err).To(gomega.BeNil())
// sleep for a while to wait for the status update
time.Sleep(5 * time.Second)
err = f.WaitForGsDeletionPriorityUpdated(gss.GetName()+"-1", "100")
gomega.Expect(err).To(gomega.BeNil())
@@ -126,6 +134,9 @@ func RunTestCases(f *framework.Framework) {
_, err = f.ChangeGameServerDeletionPriority(gss.GetName()+"-1", "100")
gomega.Expect(err).To(gomega.BeNil())
// sleep for a while to wait for the status update
time.Sleep(5 * time.Second)
err = f.WaitForGsDeletionPriorityUpdated(gss.GetName()+"-1", "100")
gomega.Expect(err).To(gomega.BeNil())

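The time.Sleep(5 * time.Second) calls added above presumably give the controller time to write updated labels and status back before the WaitFor... polls begin. Where a fixed sleep feels brittle, a gomega.Eventually wrapper that retries the lookup itself is a common alternative; the sketch below shows only that alternative, not what this suite does, and kubeClient, namespace, podName, and labelKey are all placeholders:

package sketch

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForLabel retries the pod lookup until the label reaches the expected
// value or the timeout expires, so no fixed sleep is needed before asserting.
func waitForLabel(kubeClient kubernetes.Interface, namespace, podName, labelKey, expected string) {
	gomega.Eventually(func() (string, error) {
		pod, err := kubeClient.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			return "", err
		}
		return pod.Labels[labelKey], nil
	}, 3*time.Minute, 5*time.Second).Should(gomega.Equal(expected))
}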

@@ -5,5 +5,3 @@ nodes:
- role: worker
- role: worker
- role: worker
featureGates:
EphemeralContainers: true