upgrade default karmada-apiserver from v1.23.8 to v1.24.2

Signed-off-by: changzhen <changzhen5@huawei.com>
changzhen 2022-07-07 15:06:17 +08:00
parent 18515424c2
commit a703560296
10 changed files with 26 additions and 38 deletions

@@ -43,7 +43,6 @@ spec:
- --etcd-keyfile=/etc/kubernetes/pki/karmada.key
- --etcd-servers=https://etcd-client.karmada-system.svc.cluster.local:2379
- --bind-address=0.0.0.0
- --insecure-port=0
- --kubelet-client-certificate=/etc/kubernetes/pki/karmada.crt
- --kubelet-client-key=/etc/kubernetes/pki/karmada.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
@@ -64,7 +63,7 @@ spec:
- --tls-cert-file=/etc/kubernetes/pki/karmada.crt
- --tls-private-key-file=/etc/kubernetes/pki/karmada.key
name: karmada-apiserver
image: k8s.gcr.io/kube-apiserver:v1.23.8
image: k8s.gcr.io/kube-apiserver:v1.24.2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 8

@@ -46,13 +46,12 @@ spec:
- --kubeconfig=/etc/kubeconfig
- --leader-elect=true
- --node-cidr-mask-size=24
- --port=0
- --root-ca-file=/etc/karmada/pki/server-ca.crt
- --service-account-private-key-file=/etc/karmada/pki/karmada.key
- --service-cluster-ip-range=10.96.0.0/12
- --use-service-account-credentials=true
- --v=4
image: k8s.gcr.io/kube-controller-manager:v1.23.8
image: k8s.gcr.io/kube-controller-manager:v1.24.2
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 8

@@ -257,7 +257,7 @@ helm install karmada-scheduler-estimator -n karmada-system ./charts/karmada
| `apiServer.podAnnotations` | Annotations of the karmada-apiserver pods | `{}` |
| `apiServer.imagePullSecrets` | Image pull secret of the karmada-apiserver | `[]` |
| `apiServer.image.repository` | Image of the karmada-apiserver | `"k8s.gcr.io/kube-apiserver"` |
| `apiServer.image.tag` | Image tag of the karmada-apiserver | `"v1.23.8"` |
| `apiServer.image.tag` | Image tag of the karmada-apiserver | `"v1.24.2"` |
| `apiServer.image.pullPolicy` | Image pull policy of the karmada-apiserver | `"IfNotPresent"` |
| `apiServer.resources` | Resource quota of the karmada-apiserver | `{}` |
| `apiServer.hostNetwork` | Deploy karmada-apiserver with hostNetwork. If there are multiple Karmada instances in one cluster, set it to "false" | `"true"` |
@@ -286,7 +286,7 @@ helm install karmada-scheduler-estimator -n karmada-system ./charts/karmada
| `kubeControllerManager.podAnnotations` | Annotations of the kube-controller-manager pods | `{}` |
| `kubeControllerManager.imagePullSecrets` | Image pull secret of the kube-controller-manager | `[]` |
| `kubeControllerManager.image.repository` | Image of the kube-controller-manager | `"k8s.gcr.io/kube-controller-manager"` |
| `kubeControllerManager.image.tag` | Image tag of the kube-controller-manager | `"v1.23.8"` |
| `kubeControllerManager.image.tag` | Image tag of the kube-controller-manager | `"v1.24.2"` |
| `kubeControllerManager.image.pullPolicy` | Image pull policy of the kube-controller-manager | `"IfNotPresent"` |
| `kubeControllerManager.resources` | Resource quota of the kube-controller-manager | `{}` |
| `kubeControllerManager.nodeSelector` | Node selector of the kube-controller-manager | `{}` |

@@ -53,7 +53,6 @@ spec:
- --etcd-servers=https://etcd-client.{{ include "karmada.namespace" . }}.svc.{{ .Values.clusterDomain }}:2379
{{- end }}
- --bind-address=0.0.0.0
- --insecure-port=0
- --kubelet-client-certificate=/etc/kubernetes/pki/karmada.crt
- --kubelet-client-key=/etc/kubernetes/pki/karmada.key
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname

@@ -63,7 +63,6 @@ spec:
- --kubeconfig=/etc/kubeconfig
- --leader-elect=true
- --node-cidr-mask-size=24
- --port=0
- --root-ca-file=/etc/karmada/pki/server-ca.crt
- --service-account-private-key-file=/etc/karmada/pki/karmada.key
- --service-cluster-ip-range=10.96.0.0/12

@@ -286,7 +286,7 @@ apiServer:
image:
registry: k8s.gcr.io
repository: kube-apiserver
tag: "v1.23.8"
tag: "v1.24.2"
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
##
@@ -426,7 +426,7 @@ kubeControllerManager:
image:
registry: k8s.gcr.io
repository: kube-controller-manager
tag: "v1.23.8"
tag: "v1.24.2"
## Specify an imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
##

@@ -43,8 +43,8 @@ var (
defaultKubeConfig = filepath.Join(homeDir(), ".kube", "config")
defaultEtcdImage = "etcd:3.5.3-0"
defaultKubeAPIServerImage = "kube-apiserver:v1.23.8"
defaultKubeControllerManagerImage = "kube-controller-manager:v1.23.8"
defaultKubeAPIServerImage = "kube-apiserver:v1.24.2"
defaultKubeControllerManagerImage = "kube-controller-manager:v1.24.2"
)
const (

@@ -70,7 +70,6 @@ func (i *CommandInitOption) karmadaAPIServerContainerCommand() []string {
fmt.Sprintf("--etcd-keyfile=%s/%s.key", karmadaCertsVolumeMountPath, options.EtcdClientCertAndKeyName),
fmt.Sprintf("--etcd-servers=%s", strings.TrimRight(i.etcdServers(), ",")),
"--bind-address=0.0.0.0",
"--insecure-port=0",
fmt.Sprintf("--kubelet-client-certificate=%s/%s.crt", karmadaCertsVolumeMountPath, options.KarmadaCertAndKeyName),
fmt.Sprintf("--kubelet-client-key=%s/%s.key", karmadaCertsVolumeMountPath, options.KarmadaCertAndKeyName),
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
@@ -272,7 +271,6 @@ func (i *CommandInitOption) makeKarmadaKubeControllerManagerDeployment() *appsv1
"--leader-elect=true",
fmt.Sprintf("--leader-elect-resource-namespace=%s", i.Namespace),
"--node-cidr-mask-size=24",
"--port=0",
fmt.Sprintf("--root-ca-file=%s/%s.crt", karmadaCertsVolumeMountPath, options.CaCertAndKeyName),
fmt.Sprintf("--service-account-private-key-file=%s/%s.key", karmadaCertsVolumeMountPath, options.KarmadaCertAndKeyName),
fmt.Sprintf("--service-cluster-ip-range=%s", serviceClusterIP),

@@ -8,6 +8,7 @@ import (
"github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
@@ -24,6 +25,7 @@ var _ = ginkgo.Describe("Aggregated Kubernetes API Endpoint testing", func() {
var member1, member2 string
var saName, saNamespace string
var tomServiceAccount *corev1.ServiceAccount
var tomSecret *corev1.Secret
var tomClusterRole *rbacv1.ClusterRole
var tomClusterRoleBinding *rbacv1.ClusterRoleBinding
var tomClusterRoleOnMember *rbacv1.ClusterRole
@@ -35,6 +37,16 @@ var _ = ginkgo.Describe("Aggregated Kubernetes API Endpoint testing", func() {
saName = fmt.Sprintf("tom-%s", rand.String(RandomStrLength))
saNamespace = testNamespace
tomServiceAccount = helper.NewServiceaccount(saNamespace, saName)
tomSecret = &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: saNamespace,
Name: saName,
Annotations: map[string]string{
corev1.ServiceAccountNameKey: saName,
},
},
Type: corev1.SecretTypeServiceAccountToken,
}
tomClusterRole = helper.NewClusterRole(tomServiceAccount.Name, []rbacv1.PolicyRule{
{
APIGroups: []string{"cluster.karmada.io"},
@@ -66,6 +78,7 @@ var _ = ginkgo.Describe("Aggregated Kubernetes API Endpoint testing", func() {
ginkgo.BeforeEach(func() {
framework.CreateServiceAccount(kubeClient, tomServiceAccount)
framework.CreateSecret(kubeClient, tomSecret)
framework.CreateClusterRole(kubeClient, tomClusterRole)
framework.CreateClusterRoleBinding(kubeClient, tomClusterRoleBinding)
ginkgo.DeferCleanup(func() {

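The explicit tomSecret above is needed because, starting with Kubernetes 1.24 (LegacyServiceAccountTokenNoAutoGeneration enabled by default), the control plane no longer auto-creates a token Secret for every ServiceAccount, so the e2e suite now provisions a kubernetes.io/service-account-token Secret itself and lets the token controller fill it in. Below is a minimal self-contained sketch of that pattern with client-go; the kubeconfig source, namespace, and the name "tom" are assumptions for illustration, not the suite's actual setup.

// sketch: provision a ServiceAccount together with an explicit token Secret,
// mirroring what the e2e test above now does for Kubernetes >= 1.24.
package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"
)

func main() {
	// Assumed: a kubeconfig at the default location; the test suite wires up its client differently.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		klog.Fatal(err)
	}
	kubeClient := kubernetes.NewForConfigOrDie(config)

	sa := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "tom"},
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: sa.Namespace,
			Name:      sa.Name,
			// corev1.ServiceAccountNameKey is "kubernetes.io/service-account.name";
			// the token controller only populates Secrets carrying this annotation.
			Annotations: map[string]string{corev1.ServiceAccountNameKey: sa.Name},
		},
		Type: corev1.SecretTypeServiceAccountToken,
	}

	if _, err := kubeClient.CoreV1().ServiceAccounts(sa.Namespace).Create(context.TODO(), sa, metav1.CreateOptions{}); err != nil {
		klog.Fatal(err)
	}
	if _, err := kubeClient.CoreV1().Secrets(secret.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
		klog.Fatal(err)
	}
}
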
@@ -24,37 +24,19 @@ const (
// GetTokenFromServiceAccount gets the token from the serviceAccount's related secret.
func GetTokenFromServiceAccount(client kubernetes.Interface, saNamespace, saName string) (string, error) {
var saRefSecret string
klog.Infof("Get serviceAccount(%s/%s)'s refer secret", saNamespace, saName)
var token string
err := wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
sa, err := client.CoreV1().ServiceAccounts(saNamespace).Get(context.TODO(), saName, metav1.GetOptions{})
saRefSecret, err := client.CoreV1().Secrets(saNamespace).Get(context.TODO(), saName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return false, nil
}
return false, err
}
if sa.Secrets == nil || len(sa.Secrets) == 0 {
klog.Errorf("Failed to get serviceAccount(%s/%s)'s refer secret, error: %v", saNamespace, saName, err)
return false, nil
}
saRefSecret = sa.Secrets[0].Name
return true, nil
})
if err != nil {
return "", err
}
klog.Infof("Get serviceAccount(%s/%s)'s refer secret(%s)", saNamespace, saName, saRefSecret)
var token string
err = wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) {
secret, err := client.CoreV1().Secrets(saNamespace).Get(context.TODO(), saRefSecret, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return false, nil
}
return false, err
}
tokenByte, ok := secret.Data["token"]
tokenByte, ok := saRefSecret.Data["token"]
if !ok {
return false, nil
}
@@ -64,7 +46,6 @@ GetTokenFromServiceAccount(client kubernetes.Interface, saNamespace, saName
if err != nil {
return "", err
}
return token, nil
}