Update Golang version from v1.18.3 to v1.19.3
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
commit f8646f5d91 (parent 00403af749)
@@ -14,7 +14,7 @@ jobs:
       - name: install Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.18.3
+          go-version: 1.19.3
       - name: vendor
         run: hack/verify-vendor.sh
       - name: lint
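Note: `actions/setup-go@v3` accepts a full patch version in `go-version`; pinning `1.19.3` rather than a floating `1.19` keeps CI runs reproducible across all the workflow hunks below.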
@@ -37,7 +37,7 @@ jobs:
       - name: install Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.18.3
+          go-version: 1.19.3
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
       - name: verify codegen
@@ -64,7 +64,7 @@ jobs:
       - name: install Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.18.3
+          go-version: 1.19.3
       - name: compile
         run: make all
   test:
@@ -77,7 +77,7 @@ jobs:
       - name: install Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.18.3
+          go-version: 1.19.3
       - name: make test
         run: make test
       - name: Upload coverage to Codecov
@@ -111,7 +111,7 @@ jobs:
       - name: install Go
         uses: actions/setup-go@v3
         with:
-          go-version: 1.18.3
+          go-version: 1.19.3
       - name: setup e2e test environment
         run: |
           export CLUSTER_VERSION=kindest/node:${{ matrix.k8s }}

@@ -34,7 +34,7 @@ jobs:
      - name: install Go
        uses: actions/setup-go@v3
        with:
-          go-version: 1.18.3
+          go-version: 1.19.3
      - name: install QEMU
        uses: docker/setup-qemu-action@v2
      - name: install Buildx

@@ -30,7 +30,7 @@ jobs:
      - name: install Go
        uses: actions/setup-go@v3
        with:
-          go-version: 1.18.3
+          go-version: 1.19.3
      - name: install QEMU
        uses: docker/setup-qemu-action@v2
      - name: install Buildx

@@ -23,7 +23,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
-          go-version: 1.18.3
+          go-version: 1.19.3
      - name: Making and packaging
        env:
          GOOS: ${{ matrix.os }}

@@ -22,7 +22,7 @@ jobs:
      - name: install Go
        uses: actions/setup-go@v3
        with:
-          go-version: 1.18.3
+          go-version: 1.19.3
      - name: build images
        env:
          REGISTRY: ${{secrets.SWR_REGISTRY}}

@@ -19,7 +19,7 @@ jobs:
      - name: install Go
        uses: actions/setup-go@v3
        with:
-          go-version: 1.18.3
+          go-version: 1.19.3
      - name: build images
        env:
          REGISTRY: ${{secrets.SWR_REGISTRY}}

@@ -98,7 +98,7 @@ This guide will cover:
 - Propagate an application by using `karmada`.
 
 ### Prerequisites
-- [Go](https://golang.org/) version v1.18+
+- [Go](https://golang.org/) version v1.19+
 - [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) version v1.19+
 - [kind](https://kind.sigs.k8s.io/) version v0.14.0+
 
go.mod
@@ -1,6 +1,6 @@
 module github.com/karmada-io/karmada
 
-go 1.18
+go 1.19
 
 require (
 	github.com/distribution/distribution/v3 v3.0.0-20210507173845-9329f6a62b67

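The `go` directive selects the language version for every package in the module. As a hedged aside: after editing it, running `go mod tidy -go=1.19` with a local Go 1.19 toolchain keeps the module graph consistent with the new version.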
@@ -22,7 +22,7 @@ KARMADA_OPENSEARCH_DASHBOARDS_LABEL="karmada-opensearch-dashboards"
 
 KARMADA_GO_PACKAGE="github.com/karmada-io/karmada"
 
-MIN_Go_VERSION=go1.18.0
+MIN_Go_VERSION=go1.19.0
 
 KARMADA_TARGET_SOURCE=(
   karmada-aggregated-apiserver=cmd/aggregated-apiserver

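`MIN_Go_VERSION` is the floor enforced by the repo's build scripts (presumably via a `go version` check in the hack tooling) and moves in lockstep with CI. Every remaining hunk in this commit is comment-only churn from the toolchain bump: Go 1.19's gofmt canonicalizes doc comments, so list items get a leading `-` instead of `*`, preformatted blocks move behind a blank `//` line with a tab indent, and directive comments such as `//nolint:gocyclo` are separated from the doc text by a blank `//` line.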
@@ -20,10 +20,11 @@ const clusterNameMaxLength int = 48
 // If the cluster name is not valid, a list of error strings is returned. Otherwise an empty list (or nil) is returned.
 // Rules of a valid cluster name:
 // - Must be a valid label value as per RFC1123.
-//   * An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters,
+//   - An alphanumeric (a-z, and 0-9) string, with a maximum length of 63 characters,
 //     with the '-' character allowed anywhere except the first or last character.
+//
 // - Length must be less than 48 characters.
-//   * Since cluster name used to generate execution namespace by adding a prefix, so reserve 15 characters for the prefix.
+//   - Since cluster name used to generate execution namespace by adding a prefix, so reserve 15 characters for the prefix.
 func ValidateClusterName(name string) []string {
 	if len(name) == 0 {
 		return []string{"must be not empty"}

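For context, a minimal sketch of the validation those rules describe, assuming apimachinery's RFC 1123 helpers (the real function body is mostly outside this hunk and may differ):

```go
package validation

import (
	"fmt"

	utilvalidation "k8s.io/apimachinery/pkg/util/validation"
)

const clusterNameMaxLength int = 48

// ValidateClusterName enforces the documented rules: a non-empty RFC 1123
// label no longer than 48 characters, reserving room for the
// execution-namespace prefix.
func ValidateClusterName(name string) []string {
	if len(name) == 0 {
		return []string{"must be not empty"}
	}
	if len(name) > clusterNameMaxLength {
		return []string{fmt.Sprintf("must be no more than %d characters", clusterNameMaxLength)}
	}
	// RFC 1123 label: alphanumerics and '-', not at the start or end.
	return utilvalidation.IsDNS1123Label(name)
}
```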
@@ -447,6 +447,7 @@ func (c *Controller) monitorClusterHealth(ctx context.Context) (err error) {
 }
 
 // tryUpdateClusterHealth checks a given cluster's conditions and tries to update it.
+//
 //nolint:gocyclo
 func (c *Controller) tryUpdateClusterHealth(ctx context.Context, cluster *clusterv1alpha1.Cluster) (*metav1.Condition, *metav1.Condition, error) {
 	// Step 1: Get the last cluster heath from `clusterHealthMap`.

@@ -265,8 +265,9 @@ func (c *Controller) newClusterRoleBindingMapFunc() handler.MapFunc {
 }
 
 // found out which clusters need to sync impersonation config from rules like:
-// resources: ["cluster/proxy"]
-// resourceNames: ["cluster1", "cluster2"]
+//
+//	resources: ["cluster/proxy"]
+//	resourceNames: ["cluster1", "cluster2"]
 func (c *Controller) generateRequestsFromClusterRole(clusterRole *rbacv1.ClusterRole) []reconcile.Request {
 	var requests []reconcile.Request
 	for i := range clusterRole.Rules {

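A hypothetical sketch of the loop body that comment describes (assuming `sigs.k8s.io/controller-runtime/pkg/reconcile` and `k8s.io/apimachinery/pkg/types`; the actual body lies outside this hunk):

```go
for i := range clusterRole.Rules {
	rule := clusterRole.Rules[i]
	for _, resource := range rule.Resources {
		if resource != "cluster/proxy" {
			continue
		}
		// Every cluster named by a "cluster/proxy" rule gets a reconcile
		// request so its impersonation config is re-synced.
		for _, clusterName := range rule.ResourceNames {
			requests = append(requests,
				reconcile.Request{NamespacedName: types.NamespacedName{Name: clusterName}})
		}
	}
}
```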
@@ -33,14 +33,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
 // of clientsets, like in:
 //
-//   import (
-//     "k8s.io/client-go/kubernetes"
-//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-//   )
+//	import (
+//		"k8s.io/client-go/kubernetes"
+//		clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+//		aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+//	)
 //
-//   kclientset, _ := kubernetes.NewForConfig(c)
-//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//	kclientset, _ := kubernetes.NewForConfig(c)
+//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
 //
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.

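This generated comment appears in two clientset packages, which is why the identical hunk repeats below. The comment is itself the usage example; restated as a runnable sketch (illustrative, not code from this diff):

```go
package main

import (
	"k8s.io/client-go/kubernetes"
	clientsetscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
)

func newClient(c *rest.Config) (*kubernetes.Clientset, error) {
	// Register kube-aggregator types into client-go's scheme so that
	// RawExtensions carrying those types serialize correctly.
	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
	return kubernetes.NewForConfig(c)
}
```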
@@ -33,14 +33,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
 // of clientsets, like in:
 //
-//   import (
-//     "k8s.io/client-go/kubernetes"
-//     clientsetscheme "k8s.io/client-go/kubernetes/scheme"
-//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
-//   )
+//	import (
+//		"k8s.io/client-go/kubernetes"
+//		clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+//		aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+//	)
 //
-//   kclientset, _ := kubernetes.NewForConfig(c)
-//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//	kclientset, _ := kubernetes.NewForConfig(c)
+//	_ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
 //
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.

@@ -146,7 +146,9 @@ func (h *HumanReadableGenerator) TableHandler(columnDefinitions []metav1.TableCo
 // ValidateRowPrintHandlerFunc validates print handler signature.
 // printFunc is the function that will be called to print an object.
 // It must be of the following type:
-// func printFunc(object ObjectType, options GenerateOptions) ([]metav1.TableRow, error)
+//
+//	func printFunc(object ObjectType, options GenerateOptions) ([]metav1.TableRow, error)
+//
 // where ObjectType is the type of the object that will be printed, and the first
 // return value is an array of rows, with each row containing a number of cells that
 // match the number of columns defined for that printer function.

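A hypothetical handler matching that signature (`printCluster` and the columns it fills are illustrative, not taken from this diff):

```go
// printCluster emits one table row per Cluster; the cells must line up
// with the column definitions registered for this handler.
func printCluster(cluster *clusterv1alpha1.Cluster, options GenerateOptions) ([]metav1.TableRow, error) {
	row := metav1.TableRow{
		Object: runtime.RawExtension{Object: cluster},
	}
	row.Cells = append(row.Cells, cluster.Name, string(cluster.Spec.SyncMode))
	return []metav1.TableRow{row}, nil
}
```

It would be registered through `TableHandler` together with a matching `[]metav1.TableColumnDefinition`.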
@@ -16,10 +16,10 @@ const (
 // is usually part of a help message. E.g.:
 //
 // Available Commands:
-//   karmada-controller-manager completion generate the autocompletion script for the specified shell
-//   karmada-controller-manager help Help about any command
-//   karmada-controller-manager version Print the version information.
+//
+//	karmada-controller-manager completion generate the autocompletion script for the specified shell
+//	karmada-controller-manager help Help about any command
+//	karmada-controller-manager version Print the version information.
 func generatesAvailableSubCommands(cmd *cobra.Command) []string {
 	if !cmd.HasAvailableSubCommands() {
 		return nil

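A sketch of how such a helper can walk Cobra's command tree (standard `cobra` API calls; the body shown here is illustrative, as the hunk only exposes the first lines):

```go
func generatesAvailableSubCommands(cmd *cobra.Command) []string {
	if !cmd.HasAvailableSubCommands() {
		return nil
	}
	info := []string{"Available Commands:"}
	for _, sub := range cmd.Commands() {
		// Skip hidden or unavailable commands, mirroring Cobra's own help output.
		if sub.IsAvailableCommand() {
			info = append(info, fmt.Sprintf("  %s  %s", sub.Name(), sub.Short))
		}
	}
	return info
}
```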
@@ -14,6 +14,7 @@ import (
 )
 
 // ParsingJobStatus generates new status of given 'AggregatedStatusItem'.
+//
 //nolint:gocyclo
 func ParsingJobStatus(obj *batchv1.Job, status []workv1alpha2.AggregatedStatusItem) (*batchv1.JobStatus, error) {
 	var jobFailed []string

@@ -124,9 +124,9 @@ func ListPodsByRS(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet, f
 // EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
 // We ignore pod-template-hash because:
-// 1. The hash result would be different upon podTemplateSpec API changes
-//    (e.g. the addition of a new field will cause the hash code to change)
-// 2. The deployment template won't have hash labels
+//  1. The hash result would be different upon podTemplateSpec API changes
+//     (e.g. the addition of a new field will cause the hash code to change)
+//  2. The deployment template won't have hash labels
 func EqualIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool {
 	t1Copy := template1.DeepCopy()
 	t2Copy := template2.DeepCopy()

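A minimal sketch of the comparison that comment describes, assuming `appsv1.DefaultDeploymentUniqueLabelKey` ("pod-template-hash") and apimachinery's semantic equality; it mirrors the upstream Kubernetes helper, not necessarily this file's exact body:

```go
import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
)

func EqualIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool {
	t1Copy := template1.DeepCopy()
	t2Copy := template2.DeepCopy()
	// Drop the hash label on both sides before comparing.
	delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
	delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
	return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
}
```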
@@ -66,6 +66,7 @@ var grouplessAPIPrefixes = sets.NewString("api")
 // /api
 // /healthz
 // /
+//
 //nolint:gocyclo
 func NewRequestInfo(req *http.Request) *apirequest.RequestInfo {
 	// start with a non-resource request until proven otherwise

@@ -103,6 +103,7 @@ func VisitContainers(podSpec *corev1.PodSpec, mask ContainerType, visitor Conta
 // referenced by the pod spec. If visitor returns false, visiting is short-circuited.
 // Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
 // Returns true if visiting completed, false if visiting was short-circuited.
+//
 //nolint:gocyclo
 func VisitPodSecretNames(pod *corev1.Pod, visitor Visitor) bool {
 	visitor = skipEmptyNames(visitor)

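Usage sketch, assuming the `Visitor` signature is `func(name string) bool` as in the upstream helper this mirrors (return false to short-circuit):

```go
secretNames := sets.NewString()
completed := VisitPodSecretNames(pod, func(name string) bool {
	secretNames.Insert(name)
	return true // keep visiting
})
```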
@@ -336,21 +336,22 @@ var _ = ginkgo.Describe("propagation with label and group constraints testing",
 })
 
 /*
-	ReplicaScheduling focus on dealing with the number of replicas testing when propagating resources that have replicas
-	in spec (e.g. deployments, statefulsets) to member clusters with ReplicaSchedulingStrategy.
-	Test Case Overview:
-		Case 1:
-			`ReplicaSchedulingType` value is `Duplicated`.
-		Case 2:
-			`ReplicaSchedulingType` value is `Duplicated`, trigger rescheduling when replicas have changed.
-		Case 3:
-			`ReplicaSchedulingType` value is `Divided`, `ReplicaDivisionPreference` value is `Weighted`, `WeightPreference` is nil.
-		Case 4:
-			`ReplicaSchedulingType` value is `Divided`, `ReplicaDivisionPreference` value is `Weighted`, `WeightPreference` is nil, trigger rescheduling when replicas have changed.
-		Case 5:
-			`ReplicaSchedulingType` value is `Divided`, `ReplicaDivisionPreference` value is `Weighted`, `WeightPreference` isn't nil.
-		Case 6:
-			`ReplicaSchedulingType` value is `Divided`, `ReplicaDivisionPreference` value is `Weighted`, `WeightPreference` isn't nil, trigger rescheduling when replicas have changed.
+ReplicaScheduling focus on dealing with the number of replicas testing when propagating resources that have replicas
+in spec (e.g. deployments, statefulsets) to member clusters with ReplicaSchedulingStrategy.
+Test Case Overview:
+
+Case 1:
+`ReplicaSchedulingType` value is `Duplicated`.
+Case 2:
+`ReplicaSchedulingType` value is `Duplicated`, trigger rescheduling when replicas have changed.
+Case 3:
+`ReplicaSchedulingType` value is `Divided`, `ReplicaDivisionPreference` value is `Weighted`, `WeightPreference` is nil.
+Case 4:
+`ReplicaSchedulingType` value is `Divided`, `ReplicaDivisionPreference` value is `Weighted`, `WeightPreference` is nil, trigger rescheduling when replicas have changed.
+Case 5:
+`ReplicaSchedulingType` value is `Divided`, `ReplicaDivisionPreference` value is `Weighted`, `WeightPreference` isn't nil.
+Case 6:
+`ReplicaSchedulingType` value is `Divided`, `ReplicaDivisionPreference` value is `Weighted`, `WeightPreference` isn't nil, trigger rescheduling when replicas have changed.
 */
 var _ = ginkgo.Describe("[ReplicaScheduling] ReplicaSchedulingStrategy testing", func() {
 	var policyNamespace, policyName string