1. Support deploying karmada-scheduler-estimator on a physical machine.
2. Remove unnecessary sudo.

Signed-off-by: raymondmiaochaoyue <raymondmiaochaoyue@didiglobal.com>
parent da070e68f3
commit f4fc3d1480
@@ -503,7 +503,7 @@ function util::fill_cabundle() {
   local ca_file=$1
   local conf=$2
 
-  local ca_string=$(sudo cat ${ca_file} | base64 | tr "\n" " "|sed s/[[:space:]]//g)
+  local ca_string=$(cat "${ca_file}" | base64 | tr "\n" " "|sed s/[[:space:]]//g)
   sed -i'' -e "s/{{caBundle}}/${ca_string}/g" "${conf}"
 }
 
@@ -250,7 +250,7 @@ func (d *Descheduler) establishEstimatorConnections() {
 		return
 	}
 	for i := range clusterList.Items {
-		if err = estimatorclient.EstablishConnection(clusterList.Items[i].Name, d.schedulerEstimatorCache, d.schedulerEstimatorPort); err != nil {
+		if err = estimatorclient.EstablishConnection(d.KubeClient, clusterList.Items[i].Name, d.schedulerEstimatorCache, d.schedulerEstimatorPort); err != nil {
 			klog.Error(err)
 		}
 	}
@@ -270,7 +270,7 @@ func (d *Descheduler) reconcileEstimatorConnection(key util.QueueKey) error {
 		}
 		return err
 	}
-	return estimatorclient.EstablishConnection(name, d.schedulerEstimatorCache, d.schedulerEstimatorPort)
+	return estimatorclient.EstablishConnection(d.KubeClient, name, d.schedulerEstimatorCache, d.schedulerEstimatorPort)
 }
 
 func (d *Descheduler) recordDescheduleResultEventForResourceBinding(rb *workv1alpha2.ResourceBinding, message string, err error) {
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"google.golang.org/grpc"
+	"k8s.io/client-go/kubernetes"
 	"k8s.io/klog/v2"
 
 	estimatorservice "github.com/karmada-io/karmada/pkg/estimator/service"
@@ -79,11 +80,17 @@ func (c *SchedulerEstimatorCache) GetClient(name string) (estimatorservice.Estim
 }
 
 // EstablishConnection establishes a new gRPC connection with the specified cluster scheduler estimator.
-func EstablishConnection(name string, estimatorCache *SchedulerEstimatorCache, port int) error {
+func EstablishConnection(kubeClient kubernetes.Interface, name string, estimatorCache *SchedulerEstimatorCache, port int) error {
 	if estimatorCache.IsEstimatorExist(name) {
 		return nil
 	}
-	serverAddr := fmt.Sprintf("%s:%d", names.GenerateEstimatorServiceName(name), port)
+	serverAddr, err := resolveCluster(kubeClient, util.NamespaceKarmadaSystem,
+		names.GenerateEstimatorServiceName(name), int32(port))
+	if err != nil {
+		return err
+	}
+
 	klog.Infof("Start dialing estimator server(%s) of cluster(%s).", serverAddr, name)
 	cc, err := util.Dial(serverAddr, 5*time.Second)
 	if err != nil {
@@ -0,0 +1,59 @@
+package client
+
+import (
+	"context"
+	"fmt"
+	"net"
+
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/client-go/kubernetes"
+)
+
+// resolveCluster parses the Service resource content by itself.
+// Fixes issue https://github.com/karmada-io/karmada/issues/2487
+// Modified from "k8s.io/apiserver/pkg/util/proxy/proxy.go:92 => func ResolveCluster"
+func resolveCluster(kubeClient kubernetes.Interface, namespace, id string, port int32) (string, error) {
+	svc, err := kubeClient.CoreV1().Services(namespace).Get(context.TODO(), id, metav1.GetOptions{})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			/*
+			 * When deploying Karmada in a host Kubernetes cluster, the kubeClient connects to the
+			 * kube-apiserver of the Karmada control plane, rather than that of the host cluster.
+			 * But the Service resource is defined in the host cluster, so we cannot get its content here.
+			 * The best we can do is glue host:port together and try to connect to it.
+			 */
+			return net.JoinHostPort(id, fmt.Sprintf("%d", port)), nil
+		}
+		return "", err
+	}
+
+	if svc.Spec.Type != corev1.ServiceTypeExternalName {
+		// We only support the ExternalName type here.
+		// See discussions in PR: https://github.com/karmada-io/karmada/pull/2574#discussion_r979539389
+		return "", fmt.Errorf("unsupported service type %q", svc.Spec.Type)
+	}
+
+	svcPort, err := findServicePort(svc, port)
+	if err != nil {
+		return "", err
+	}
+	if svcPort.TargetPort.Type != intstr.Int {
+		return "", fmt.Errorf("ExternalName service type should have int target port, "+
+			"current target port: %v", svcPort.TargetPort)
+	}
+	return net.JoinHostPort(svc.Spec.ExternalName, fmt.Sprintf("%d", svcPort.TargetPort.IntVal)), nil
+}
+
+// findServicePort finds the service port by name or numerically.
+func findServicePort(svc *corev1.Service, port int32) (*corev1.ServicePort, error) {
+	for _, svcPort := range svc.Spec.Ports {
+		if svcPort.Port == port {
+			return &svcPort, nil
+		}
+	}
+	return nil, apierrors.NewServiceUnavailable(fmt.Sprintf("no service port %d found for service %q", port, svc.Name))
+}
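
For a concrete picture of the two resolution paths above, here is a minimal, hypothetical test sketch that is not part of this commit. It assumes client-go's fake clientset and the resolveCluster signature from the new file; the namespace, service name, and port (karmada-system, karmada-scheduler-estimator-member1, 10352) are illustrative values, not taken from the diff.

package client

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes/fake"
)

func TestResolveClusterSketch(t *testing.T) {
	// Case 1: no Service exists. resolveCluster falls back to gluing
	// "id:port" together, matching the old fmt.Sprintf behavior.
	addr, err := resolveCluster(fake.NewSimpleClientset(), "karmada-system",
		"karmada-scheduler-estimator-member1", 10352)
	if err != nil || addr != "karmada-scheduler-estimator-member1:10352" {
		t.Fatalf("fallback path: got %q, %v", addr, err)
	}

	// Case 2: an ExternalName Service exists. resolveCluster returns the
	// externalName host joined with the integer target port.
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: "karmada-system", Name: "karmada-scheduler-estimator-member1"},
		Spec: corev1.ServiceSpec{
			Type:         corev1.ServiceTypeExternalName,
			ExternalName: "estimator-member1.example.com",
			Ports:        []corev1.ServicePort{{Port: 10352, TargetPort: intstr.FromInt(10352)}},
		},
	}
	addr, err = resolveCluster(fake.NewSimpleClientset(svc), "karmada-system",
		"karmada-scheduler-estimator-member1", 10352)
	if err != nil || addr != "estimator-member1.example.com:10352" {
		t.Fatalf("ExternalName path: got %q, %v", addr, err)
	}
}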
@@ -577,7 +577,7 @@ func (s *Scheduler) reconcileEstimatorConnection(key util.QueueKey) error {
 		return nil
 	}
 
-	return estimatorclient.EstablishConnection(name, s.schedulerEstimatorCache, s.schedulerEstimatorPort)
+	return estimatorclient.EstablishConnection(s.KubeClient, name, s.schedulerEstimatorCache, s.schedulerEstimatorPort)
 }
 
 func (s *Scheduler) establishEstimatorConnections() {
@@ -590,7 +590,7 @@ func (s *Scheduler) establishEstimatorConnections() {
 		if clusterList.Items[i].Spec.SyncMode == clusterv1alpha1.Pull && s.disableSchedulerEstimatorInPullMode {
 			continue
 		}
-		if err = estimatorclient.EstablishConnection(clusterList.Items[i].Name, s.schedulerEstimatorCache, s.schedulerEstimatorPort); err != nil {
+		if err = estimatorclient.EstablishConnection(s.KubeClient, clusterList.Items[i].Name, s.schedulerEstimatorCache, s.schedulerEstimatorPort); err != nil {
 			klog.Error(err)
 		}
 	}
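
Because the commit's goal is an estimator running on a physical machine, the remaining operational step is creating the ExternalName Service that resolveCluster looks up. The helper below is a hypothetical sketch, not part of this commit; the namespace, the karmada-scheduler-estimator-<cluster> naming scheme, and the host are assumptions an operator would adapt.

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes"
)

// createEstimatorService is a hypothetical helper: it registers an
// ExternalName Service pointing at an estimator that runs outside the
// cluster, so the scheduler can resolve it through resolveCluster.
func createEstimatorService(client kubernetes.Interface, cluster, host string, port int32) error {
	svc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "karmada-system",                         // assumed namespace
			Name:      "karmada-scheduler-estimator-" + cluster, // assumed naming scheme
		},
		Spec: corev1.ServiceSpec{
			Type:         corev1.ServiceTypeExternalName,
			ExternalName: host, // e.g. the physical machine's DNS name
			Ports:        []corev1.ServicePort{{Port: port, TargetPort: intstr.FromInt(int(port))}},
		},
	}
	_, err := client.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{})
	return err
}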