commit 93817a9a58
|
@@ -35,7 +35,7 @@ func newCacheProxy(store store.RESTReader, restMapper meta.RESTMapper, minReques
 	}
 }
 
-func (c *cacheProxy) connect(ctx context.Context) (http.Handler, error) {
+func (c *cacheProxy) connect(ctx context.Context, _ schema.GroupVersionResource, _ string, _ rest.Responder) (http.Handler, error) {
 	requestInfo, _ := request.RequestInfoFrom(ctx)
 	gvr := schema.GroupVersionResource{
 		Group: requestInfo.APIGroup,

@@ -21,13 +21,16 @@ import (
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/apiserver/pkg/endpoints/request"
 
+	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 	"github.com/karmada-io/karmada/pkg/search/proxy/store"
 	"github.com/karmada-io/karmada/pkg/util/lifted"
 )
 
 var (
-	podGVR  = corev1.SchemeGroupVersion.WithResource("pods")
-	nodeGVR = corev1.SchemeGroupVersion.WithResource("nodes")
+	podGVR     = corev1.SchemeGroupVersion.WithResource("pods")
+	nodeGVR    = corev1.SchemeGroupVersion.WithResource("nodes")
+	secretGVR  = corev1.SchemeGroupVersion.WithResource("secret")
+	clusterGVR = clusterv1alpha1.SchemeGroupVersion.WithResource("cluster")
 )
 
 func TestCacheProxy_connect(t *testing.T) {
@@ -216,7 +219,7 @@ func TestCacheProxy_connect(t *testing.T) {
 				req = req.WithContext(request.WithNamespace(req.Context(), requestInfo.Namespace))
 			}
 
-			h, err := p.connect(req.Context())
+			h, err := p.connect(req.Context(), podGVR, "", nil)
 			if err != nil {
 				t.Error(err)
 				return

@@ -37,7 +37,12 @@ func newClusterProxy(store store.Cache, clusterLister clusterlisters.ClusterList
 	}
 }
 
-func (c *clusterProxy) connect(ctx context.Context, requestInfo *request.RequestInfo, gvr schema.GroupVersionResource, proxyPath string, responder rest.Responder) (http.Handler, error) {
+func (c *clusterProxy) connect(ctx context.Context, gvr schema.GroupVersionResource, proxyPath string, responder rest.Responder) (http.Handler, error) {
+	requestInfo, ok := request.RequestInfoFrom(ctx)
+	if !ok {
+		return nil, fmt.Errorf("missing requestInfo")
+	}
+
 	if requestInfo.Verb == "create" {
 		return nil, apierrors.NewMethodNotSupported(gvr.GroupResource(), requestInfo.Verb)
 	}

@@ -2,18 +2,34 @@ package proxy
 
 import (
 	"bytes"
 	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
 	"reflect"
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apiserver/pkg/authentication/user"
 	"k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 
 	clusterapis "github.com/karmada-io/karmada/pkg/apis/cluster"
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 	karmadafake "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/fake"
 	karmadainformers "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
 	"github.com/karmada-io/karmada/pkg/search/proxy/store"
 )
 
@@ -142,3 +158,288 @@ func TestModifyRequest(t *testing.T) {
 		})
 	}
 }
 
 func Test_clusterProxy_connect(t *testing.T) {
 	s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
 		fmt.Fprint(rw, "ok")
 	}))
 
 	reqCtx := request.WithUser(context.TODO(), &user.DefaultInfo{})
 
 	type fields struct {
 		store    store.Cache
 		secrets  []runtime.Object
 		clusters []runtime.Object
 	}
 	type args struct {
 		ctx     context.Context
 		request *http.Request
 	}
 	type want struct {
 		err  error
 		body string
 	}
 	tests := []struct {
 		name   string
 		fields fields
 		args   args
 		want   want
 	}{
 		{
 			name:   "missing requestInfo",
 			fields: fields{},
 			args: args{
 				ctx: context.TODO(),
 			},
 			want: want{
 				err: errors.New("missing requestInfo"),
 			},
 		},
 		{
 			name:   "create not supported",
 			fields: fields{},
 			args: args{
 				ctx: request.WithRequestInfo(context.TODO(), &request.RequestInfo{Verb: "create"}),
 			},
 			want: want{
 				err: apierrors.NewMethodNotSupported(podGVR.GroupResource(), "create"),
 			},
 		},
 		{
 			name: "get cache error",
 			fields: fields{
 				store: &cacheFuncs{
 					GetResourceFromCacheFunc: func(ctx context.Context, gvr schema.GroupVersionResource, namespace, name string) (runtime.Object, string, error) {
 						return nil, "", errors.New("test error")
 					},
 				},
 			},
 			args: args{
 				ctx: request.WithRequestInfo(context.TODO(), &request.RequestInfo{Verb: "get"}),
 			},
 			want: want{
 				err: errors.New("test error"),
 			},
 		},
 		{
 			name: "cluster not found",
 			fields: fields{
 				store: &cacheFuncs{
 					GetResourceFromCacheFunc: func(ctx context.Context, gvr schema.GroupVersionResource, namespace, name string) (runtime.Object, string, error) {
 						return nil, "cluster1", nil
 					},
 				},
 			},
 			args: args{
 				ctx: request.WithRequestInfo(context.TODO(), &request.RequestInfo{Verb: "get"}),
 			},
 			want: want{
 				err: apierrors.NewNotFound(clusterGVR.GroupResource(), "cluster1"),
 			},
 		},
 		{
 			name: "API endpoint of cluster cluster1 should not be empty",
 			fields: fields{
 				store: &cacheFuncs{
 					GetResourceFromCacheFunc: func(ctx context.Context, gvr schema.GroupVersionResource, namespace, name string) (runtime.Object, string, error) {
 						return nil, "cluster1", nil
 					},
 				},
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{Name: "cluster1"},
 					Spec:       clusterv1alpha1.ClusterSpec{},
 				}},
 			},
 			args: args{
 				ctx: request.WithRequestInfo(context.TODO(), &request.RequestInfo{Verb: "get"}),
 			},
 			want: want{
 				err: errors.New("API endpoint of cluster cluster1 should not be empty"),
 			},
 		},
 		{
 			name: "impersonatorSecretRef is nil",
 			fields: fields{
 				store: &cacheFuncs{
 					GetResourceFromCacheFunc: func(ctx context.Context, gvr schema.GroupVersionResource, namespace, name string) (runtime.Object, string, error) {
 						return nil, "cluster1", nil
 					},
 				},
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{Name: "cluster1"},
 					Spec: clusterv1alpha1.ClusterSpec{
 						APIEndpoint: s.URL,
 					},
 				}},
 			},
 			args: args{
 				ctx: request.WithRequestInfo(context.TODO(), &request.RequestInfo{Verb: "get"}),
 			},
 			want: want{
 				err: errors.New("the impersonatorSecretRef of cluster cluster1 is nil"),
 			},
 		},
 		{
 			name: "secret not found",
 			fields: fields{
 				store: &cacheFuncs{
 					GetResourceFromCacheFunc: func(ctx context.Context, gvr schema.GroupVersionResource, namespace, name string) (runtime.Object, string, error) {
 						return nil, "cluster1", nil
 					},
 				},
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{Name: "cluster1"},
 					Spec: clusterv1alpha1.ClusterSpec{
 						APIEndpoint: s.URL,
 						ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{
 							Namespace: "default",
 							Name:      "secret",
 						},
 					},
 				}},
 			},
 			args: args{
 				ctx: request.WithRequestInfo(context.TODO(), &request.RequestInfo{Verb: "get"}),
 			},
 			want: want{
 				err: apierrors.NewNotFound(secretGVR.GroupResource(), "secret"),
 			},
 		},
 		{
 			name: "response ok",
 			fields: fields{
 				store: &cacheFuncs{
 					GetResourceFromCacheFunc: func(ctx context.Context, gvr schema.GroupVersionResource, namespace, name string) (runtime.Object, string, error) {
 						return nil, "cluster1", nil
 					},
 				},
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{Name: "cluster1"},
 					Spec: clusterv1alpha1.ClusterSpec{
 						APIEndpoint: s.URL,
 						ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{
 							Namespace: "default",
 							Name:      "secret",
 						},
 					},
 				}},
 				secrets: []runtime.Object{&corev1.Secret{
 					ObjectMeta: metav1.ObjectMeta{
 						Namespace: "default",
 						Name:      "secret",
 					},
 					Data: map[string][]byte{
 						clusterapis.SecretTokenKey: []byte("token"),
 					},
 				}},
 			},
 			args: args{
 				ctx:     request.WithRequestInfo(context.TODO(), &request.RequestInfo{Verb: "get"}),
 				request: makeRequest(reqCtx, "GET", "/test", nil),
 			},
 			want: want{
 				err:  nil,
 				body: "ok",
 			},
 		},
 		{
 			name: "update error",
 			fields: fields{
 				store: &cacheFuncs{
 					GetResourceFromCacheFunc: func(ctx context.Context, gvr schema.GroupVersionResource, namespace, name string) (runtime.Object, string, error) {
 						return nil, "cluster1", nil
 					},
 				},
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{Name: "cluster1"},
 					Spec: clusterv1alpha1.ClusterSpec{
 						APIEndpoint: s.URL,
 						ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{
 							Namespace: "default",
 							Name:      "secret",
 						},
 					},
 				}},
 				secrets: []runtime.Object{&corev1.Secret{
 					ObjectMeta: metav1.ObjectMeta{
 						Namespace: "default",
 						Name:      "secret",
 					},
 					Data: map[string][]byte{
 						clusterapis.SecretTokenKey: []byte("token"),
 					},
 				}},
 			},
 			args: args{
 				ctx: request.WithRequestInfo(context.TODO(), &request.RequestInfo{Verb: "update"}),
 				request: (&http.Request{
 					Method:        "PUT",
 					URL:           &url.URL{Scheme: "https", Host: "localhost", Path: "/test"},
 					Body:          ioutil.NopCloser(&alwaysErrorReader{}),
 					ContentLength: 10,
 					Header:        make(http.Header),
 				}).WithContext(reqCtx),
 			},
 			want: want{
 				err:  nil,
 				body: io.ErrUnexpectedEOF.Error(),
 			},
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			stopCh := make(chan struct{})
 			defer close(stopCh)
 			kubeFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.fields.secrets...), 0)
 			karmadaFactory := karmadainformers.NewSharedInformerFactory(karmadafake.NewSimpleClientset(tt.fields.clusters...), 0)
 
 			c := &clusterProxy{
 				store:         tt.fields.store,
 				clusterLister: karmadaFactory.Cluster().V1alpha1().Clusters().Lister(),
 				secretLister:  kubeFactory.Core().V1().Secrets().Lister(),
 			}
 
 			kubeFactory.Start(stopCh)
 			karmadaFactory.Start(stopCh)
 			kubeFactory.WaitForCacheSync(stopCh)
 			karmadaFactory.WaitForCacheSync(stopCh)
 
 			response := httptest.NewRecorder()
 
 			h, err := c.connect(tt.args.ctx, podGVR, "/proxy", newTestResponder(response))
 			if !errorEquals(err, tt.want.err) {
 				t.Errorf("connect() error = %v, want %v", err, tt.want.err)
 				return
 			}
 			if err != nil {
 				return
 			}
 			if h == nil {
 				t.Error("got handler nil")
 			}
 
 			h.ServeHTTP(response, tt.args.request)
 			body := response.Body.String()
 			if body != tt.want.body {
 				t.Errorf("got body = %v, want %v", body, tt.want.body)
 			}
 		})
 	}
 }
 
 func makeRequest(ctx context.Context, method, url string, body io.Reader) *http.Request {
 	if ctx == nil {
 		ctx = context.TODO()
 	}
 	r, err := http.NewRequestWithContext(ctx, method, url, body)
 	if err != nil {
 		panic(err)
 	}
 	return r
 }
 
 type alwaysErrorReader struct{}
 
 func (alwaysErrorReader) Read([]byte) (int, error) {
 	return 0, io.ErrUnexpectedEOF
 }

@@ -36,6 +36,10 @@ import (
 
 const workKey = "key"
 
+type connector interface {
+	connect(ctx context.Context, gvr schema.GroupVersionResource, proxyPath string, responder rest.Responder) (http.Handler, error)
+}
+
 // Controller syncs Cluster and GlobalResource.
 type Controller struct {
 	restMapper meta.RESTMapper
@@ -48,9 +52,9 @@ type Controller struct {
 	store store.Cache
 
 	// proxy
-	karmadaProxy *karmadaProxy
-	clusterProxy *clusterProxy
-	cacheProxy   *cacheProxy
+	karmadaProxy connector
+	clusterProxy connector
+	cacheProxy   connector
 }
 
 // NewController create a controller for proxy
@@ -167,7 +171,14 @@ func (ctl *Controller) Connect(ctx context.Context, proxyPath string, responder
 	newCtx = request.WithNamespace(newCtx, requestInfo.Namespace)
 	newReq = newReq.WithContext(newCtx)
 
-	h, err := ctl.connect(newCtx, requestInfo, proxyPath, responder)
+	gvr := schema.GroupVersionResource{
+		Group:    requestInfo.APIGroup,
+		Version:  requestInfo.APIVersion,
+		Resource: requestInfo.Resource,
+	}
+
+	conn := ctl.connect(requestInfo)
+	h, err := conn.connect(newCtx, gvr, proxyPath, responder)
 	if err != nil {
 		h = http.HandlerFunc(func(delegate http.ResponseWriter, req *http.Request) {
 			// Write error into delegate ResponseWriter, wrapped in metrics.InstrumentHandlerFunc, so metrics can record this error.
@@ -184,7 +195,7 @@ func (ctl *Controller) Connect(ctx context.Context, proxyPath string, responder
 	}), nil
 }
 
-func (ctl *Controller) connect(ctx context.Context, requestInfo *request.RequestInfo, path string, responder rest.Responder) (http.Handler, error) {
+func (ctl *Controller) connect(requestInfo *request.RequestInfo) connector {
 	gvr := schema.GroupVersionResource{
 		Group:   requestInfo.APIGroup,
 		Version: requestInfo.APIVersion,
@@ -203,20 +214,20 @@ func (ctl *Controller) connect(requestInfo *request.RequestInfo) connector {
 	// - api index, e.g.: `/api`, `/apis`
 	// - to workload created in karmada controller panel, such as deployments and services.
 	if !requestInfo.IsResourceRequest || !ctl.store.HasResource(gvr) {
-		return ctl.karmadaProxy.connect(path, responder)
+		return ctl.karmadaProxy
 	}
 
 	// 2. For reading requests, we redirect them to cache.
 	// Users call these requests to read resources in member clusters, such as pods and nodes.
 	if requestInfo.Subresource == "" && (requestInfo.Verb == "get" || requestInfo.Verb == "list" || requestInfo.Verb == "watch") {
-		return ctl.cacheProxy.connect(ctx)
+		return ctl.cacheProxy
 	}
 
 	// 3. The remaining requests are:
 	// - writing resources.
 	// - or subresource requests, e.g. `pods/log`
 	// We firstly find the resource from cache, and get the located cluster. Then redirect the request to the cluster.
-	return ctl.clusterProxy.connect(ctx, requestInfo, gvr, path, responder)
+	return ctl.clusterProxy
 }
 
 // TODO: reuse with karmada/pkg/util/membercluster_client.go

@@ -2,18 +2,29 @@ package proxy
 
 import (
 	"context"
 	"errors"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
 	"reflect"
 	"sort"
 	"strings"
 	"testing"
 	"time"
 
 	"github.com/google/go-cmp/cmp"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/diff"
 	"k8s.io/apiserver/pkg/registry/rest"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/kubernetes/scheme"
 	restclient "k8s.io/client-go/rest"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
@@ -38,6 +49,59 @@ func init() {
 	restMapper.Add(nodeGVK, meta.RESTScopeRoot)
 }
 
 func TestController(t *testing.T) {
 	restConfig := &restclient.Config{
 		Host: "https://localhost:6443",
 	}
 
 	cluster1 := newCluster("cluster1")
 	rr := &searchv1alpha1.ResourceRegistry{
 		ObjectMeta: metav1.ObjectMeta{Name: "rr"},
 		Spec: searchv1alpha1.ResourceRegistrySpec{
 			ResourceSelectors: []searchv1alpha1.ResourceSelector{
 				podSelector,
 			},
 		},
 	}
 
 	kubeFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)
 	karmadaFactory := karmadainformers.NewSharedInformerFactory(karmadafake.NewSimpleClientset(cluster1, rr), 0)
 
 	ctrl, err := NewController(
 		restConfig,
 		restMapper,
 		kubeFactory,
 		karmadaFactory,
 		0,
 	)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 	if ctrl == nil {
 		t.Error("ctrl is nil")
 		return
 	}
 
 	stopCh := make(chan struct{})
 	defer close(stopCh)
 	kubeFactory.Start(stopCh)
 	karmadaFactory.Start(stopCh)
 	ctrl.Start(stopCh)
 	defer ctrl.Stop()
 
 	kubeFactory.WaitForCacheSync(stopCh)
 	karmadaFactory.WaitForCacheSync(stopCh)
 	// wait for controller synced
 	time.Sleep(time.Second)
 
 	hasPod := ctrl.store.HasResource(podGVR)
 	if !hasPod {
 		t.Error("has no pod resource")
 		return
 	}
 }
 
 func TestController_reconcile(t *testing.T) {
 	echoStrings := func(ss ...string) string {
 		sort.Strings(ss)
@@ -197,6 +261,24 @@ func TestController_reconcile(t *testing.T) {
 				"cluster1": echoStrings("pods"),
 			},
 		},
 		{
 			name: "GetGroupVersionResource error shall be ignored",
 			input: []runtime.Object{
 				newCluster("cluster1"),
 				&searchv1alpha1.ResourceRegistry{
 					ObjectMeta: metav1.ObjectMeta{Name: "rr1"},
 					Spec: searchv1alpha1.ResourceRegistrySpec{
 						TargetCluster: policyv1alpha1.ClusterAffinity{
 							ClusterNames: []string{"cluster1"},
 						},
 						ResourceSelectors: []searchv1alpha1.ResourceSelector{
 							{APIVersion: "test.nonexist.group", Kind: "nonexist"},
 						},
 					},
 				},
 			},
 			want: map[string]string{},
 		},
 	}
 
 	for _, tt := range tests {
@@ -242,6 +324,416 @@ func TestController_reconcile(t *testing.T) {
 	}
 }
 
 func TestController_Connect(t *testing.T) {
 	var karmadaProxying, clusterProxying, cacheProxying bool
 	ctl := &Controller{
 		karmadaProxy: connectFunc(func(context.Context, schema.GroupVersionResource, string, rest.Responder) (http.Handler, error) {
 			return http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
 				karmadaProxying = true
 			}), nil
 		}),
 		cacheProxy: connectFunc(func(context.Context, schema.GroupVersionResource, string, rest.Responder) (http.Handler, error) {
 			return http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
 				cacheProxying = true
 			}), nil
 		}),
 		clusterProxy: connectFunc(func(context.Context, schema.GroupVersionResource, string, rest.Responder) (http.Handler, error) {
 			return http.HandlerFunc(func(http.ResponseWriter, *http.Request) {
 				clusterProxying = true
 			}), nil
 		}),
 		store: &cacheFuncs{
 			HasResourceFunc: func(gvr schema.GroupVersionResource) bool { return gvr == podGVR },
 		},
 	}
 
 	type args struct {
 		path string
 	}
 	type want struct {
 		karmadaProxying, clusterProxying, cacheProxying bool
 	}
 
 	tests := []struct {
 		name string
 		args args
 		want want
 	}{
 		{
 			name: "get api from karmada",
 			args: args{
 				path: "/api",
 			},
 			want: want{
 				karmadaProxying: true,
 			},
 		},
 		{
 			name: "get event api karmada",
 			args: args{
 				path: "/apis/events.k8s.io/v1",
 			},
 			want: want{
 				karmadaProxying: true,
 			},
 		},
 		{
 			name: "list nodes from karmada",
 			args: args{
 				path: "/api/v1/nodes",
 			},
 			want: want{
 				karmadaProxying: true,
 			},
 		},
 		{
 			name: "get node from karmada",
 			args: args{
 				path: "/api/v1/nodes",
 			},
 			want: want{
 				karmadaProxying: true,
 			},
 		},
 		{
 			name: "list pod from cache",
 			args: args{
 				path: "/api/v1/pods",
 			},
 			want: want{
 				cacheProxying: true,
 			},
 		},
 		{
 			name: "list pod from cache with namespace",
 			args: args{
 				path: "/api/v1/namespaces/default/pods",
 			},
 			want: want{
 				cacheProxying: true,
 			},
 		},
 		{
 			name: "get pod from cache",
 			args: args{
 				path: "/api/v1/namespaces/default/pods/foo",
 			},
 			want: want{
 				cacheProxying: true,
 			},
 		},
 		{
 			name: "get pod log from cluster",
 			args: args{
 				path: "/api/v1/namespaces/default/pods/foo/log",
 			},
 			want: want{
 				clusterProxying: true,
 			},
 		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			karmadaProxying, clusterProxying, cacheProxying = false, false, false
 			conn, err := ctl.Connect(context.TODO(), tt.args.path, nil)
 			if err != nil {
 				t.Error(err)
 				return
 			}
 			req, err := http.NewRequest("GET", "/prefix"+tt.args.path, nil)
 			if err != nil {
 				t.Error(err)
 				return
 			}
 			conn.ServeHTTP(httptest.NewRecorder(), req)
 
 			if karmadaProxying != tt.want.karmadaProxying {
 				t.Errorf("karmadaProxying get = %v, want = %v", karmadaProxying, tt.want.karmadaProxying)
 			}
 			if cacheProxying != tt.want.cacheProxying {
 				t.Errorf("cacheProxying get = %v, want = %v", cacheProxying, tt.want.cacheProxying)
 			}
 			if clusterProxying != tt.want.clusterProxying {
 				t.Errorf("clusterProxying get = %v, want = %v", clusterProxying, tt.want.clusterProxying)
 			}
 		})
 	}
 }
 
 func TestController_Connect_Error(t *testing.T) {
 	ctl := &Controller{
 		karmadaProxy: connectFunc(func(context.Context, schema.GroupVersionResource, string, rest.Responder) (http.Handler, error) {
 			return nil, fmt.Errorf("test")
 		}),
 		negotiatedSerializer: scheme.Codecs.WithoutConversion(),
 	}
 
 	h, err := ctl.Connect(context.TODO(), "/api", nil)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
 	response := httptest.NewRecorder()
 	req, err := http.NewRequest("GET", "/api", nil)
 	if err != nil {
 		t.Error(err)
 		return
 	}
 	req.Header = make(http.Header)
 	req.Header.Add("Accept", "application/json")
 	h.ServeHTTP(response, req)
 	wantBody := `{"kind":"Status","apiVersion":"get","metadata":{},"status":"Failure","message":"test","code":500}` + "\n"
 	gotBody := response.Body.String()
 	if wantBody != gotBody {
 		t.Errorf("got body: %v", diff.StringDiff(gotBody, wantBody))
 	}
 }
 
 func TestController_dynamicClientForCluster(t *testing.T) {
 	// copy from go/src/net/http/internal/testcert/testcert.go
 	testCA := []byte(`-----BEGIN CERTIFICATE-----
 MIIDOTCCAiGgAwIBAgIQSRJrEpBGFc7tNb1fb5pKFzANBgkqhkiG9w0BAQsFADAS
 MRAwDgYDVQQKEwdBY21lIENvMCAXDTcwMDEwMTAwMDAwMFoYDzIwODQwMTI5MTYw
 MDAwWjASMRAwDgYDVQQKEwdBY21lIENvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
 MIIBCgKCAQEA6Gba5tHV1dAKouAaXO3/ebDUU4rvwCUg/CNaJ2PT5xLD4N1Vcb8r
 bFSW2HXKq+MPfVdwIKR/1DczEoAGf/JWQTW7EgzlXrCd3rlajEX2D73faWJekD0U
 aUgz5vtrTXZ90BQL7WvRICd7FlEZ6FPOcPlumiyNmzUqtwGhO+9ad1W5BqJaRI6P
 YfouNkwR6Na4TzSj5BrqUfP0FwDizKSJ0XXmh8g8G9mtwxOSN3Ru1QFc61Xyeluk
 POGKBV/q6RBNklTNe0gI8usUMlYyoC7ytppNMW7X2vodAelSu25jgx2anj9fDVZu
 h7AXF5+4nJS4AAt0n1lNY7nGSsdZas8PbQIDAQABo4GIMIGFMA4GA1UdDwEB/wQE
 AwICpDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
 DgQWBBStsdjh3/JCXXYlQryOrL4Sh7BW5TAuBgNVHREEJzAlggtleGFtcGxlLmNv
 bYcEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATANBgkqhkiG9w0BAQsFAAOCAQEAxWGI
 5NhpF3nwwy/4yB4i/CwwSpLrWUa70NyhvprUBC50PxiXav1TeDzwzLx/o5HyNwsv
 cxv3HdkLW59i/0SlJSrNnWdfZ19oTcS+6PtLoVyISgtyN6DpkKpdG1cOkW3Cy2P2
 +tK/tKHRP1Y/Ra0RiDpOAmqn0gCOFGz8+lqDIor/T7MTpibL3IxqWfPrvfVRHL3B
 grw/ZQTTIVjjh4JBSW3WyWgNo/ikC1lrVxzl4iPUGptxT36Cr7Zk2Bsg0XqwbOvK
 5d+NTDREkSnUbie4GeutujmX3Dsx88UiV6UY/4lHJa6I5leHUNOHahRbpbWeOfs/
 WkBKOclmOV2xlTVuPw==
 -----END CERTIFICATE-----`)
 
 	type args struct {
 		clusters []runtime.Object
 		secrets  []runtime.Object
 	}
 
 	type want struct {
 		err error
 	}
 
 	tests := []struct {
 		name string
 		args args
 		want want
 	}{
 		{
 			name: "cluster not found",
 			args: args{
 				clusters: nil,
 				secrets:  nil,
 			},
 			want: want{
 				err: apierrors.NewNotFound(schema.GroupResource{Resource: "cluster", Group: "cluster.karmada.io"}, "test"),
 			},
 		},
 		{
 			name: "api endpoint is empty",
 			args: args{
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "test",
 					},
 					Spec: clusterv1alpha1.ClusterSpec{},
 				}},
 				secrets: nil,
 			},
 			want: want{
 				err: errors.New("the api endpoint of cluster test is empty"),
 			},
 		},
 		{
 			name: "secret is empty",
 			args: args{
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "test",
 					},
 					Spec: clusterv1alpha1.ClusterSpec{
 						APIEndpoint: "https://localhost",
 					},
 				}},
 				secrets: nil,
 			},
 			want: want{
 				err: errors.New("cluster test does not have a secret"),
 			},
 		},
 		{
 			name: "secret not found",
 			args: args{
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "test",
 					},
 					Spec: clusterv1alpha1.ClusterSpec{
 						APIEndpoint: "https://localhost",
 						SecretRef: &clusterv1alpha1.LocalSecretReference{
 							Namespace: "default",
 							Name:      "test_secret",
 						},
 					},
 				}},
 				secrets: nil,
 			},
 			want: want{
 				err: errors.New(`secret "test_secret" not found`),
 			},
 		},
 		{
 			name: "token not found",
 			args: args{
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "test",
 					},
 					Spec: clusterv1alpha1.ClusterSpec{
 						APIEndpoint: "https://localhost",
 						SecretRef: &clusterv1alpha1.LocalSecretReference{
 							Namespace: "default",
 							Name:      "test_secret",
 						},
 					},
 				}},
 				secrets: []runtime.Object{&corev1.Secret{
 					ObjectMeta: metav1.ObjectMeta{
 						Namespace: "default",
 						Name:      "test_secret",
 					},
 					Data: map[string][]byte{},
 				}},
 			},
 			want: want{
 				err: errors.New(`the secret for cluster test is missing a non-empty value for "token"`),
 			},
 		},
 		{
 			name: "success",
 			args: args{
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "test",
 					},
 					Spec: clusterv1alpha1.ClusterSpec{
 						APIEndpoint: "https://localhost",
 						SecretRef: &clusterv1alpha1.LocalSecretReference{
 							Namespace: "default",
 							Name:      "test_secret",
 						},
 					},
 				}},
 				secrets: []runtime.Object{&corev1.Secret{
 					ObjectMeta: metav1.ObjectMeta{
 						Namespace: "default",
 						Name:      "test_secret",
 					},
 					Data: map[string][]byte{
 						clusterv1alpha1.SecretTokenKey:  []byte("test_token"),
 						clusterv1alpha1.SecretCADataKey: testCA,
 					},
 				}},
 			},
 			want: want{
 				err: nil,
 			},
 		},
 		{
 			name: "has proxy",
 			args: args{
 				clusters: []runtime.Object{&clusterv1alpha1.Cluster{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: "test",
 					},
 					Spec: clusterv1alpha1.ClusterSpec{
 						APIEndpoint: "https://localhost",
 						SecretRef: &clusterv1alpha1.LocalSecretReference{
 							Namespace: "default",
 							Name:      "test_secret",
 						},
 						ProxyURL: "https://localhost",
 					},
 				}},
 				secrets: []runtime.Object{&corev1.Secret{
 					ObjectMeta: metav1.ObjectMeta{
 						Namespace: "default",
 						Name:      "test_secret",
 					},
 					Data: map[string][]byte{
 						clusterv1alpha1.SecretTokenKey:  []byte("test_token"),
 						clusterv1alpha1.SecretCADataKey: testCA,
 					},
 				}},
 			},
 			want: want{
 				err: nil,
 			},
 		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			kubeClient := fake.NewSimpleClientset(tt.args.secrets...)
 			kubeFactory := informers.NewSharedInformerFactory(kubeClient, 0)
 
 			karmadaClient := karmadafake.NewSimpleClientset(tt.args.clusters...)
 			karmadaFactory := karmadainformers.NewSharedInformerFactory(karmadaClient, 0)
 
 			ctrl := &Controller{
 				clusterLister: karmadaFactory.Cluster().V1alpha1().Clusters().Lister(),
 				secretLister:  kubeFactory.Core().V1().Secrets().Lister(),
 			}
 
 			stopCh := make(chan struct{})
 			defer close(stopCh)
 			karmadaFactory.Start(stopCh)
 			karmadaFactory.WaitForCacheSync(stopCh)
 			kubeFactory.Start(stopCh)
 			kubeFactory.WaitForCacheSync(stopCh)
 
 			client, err := ctrl.dynamicClientForCluster("test")
 
 			if !errorEquals(err, tt.want.err) {
 				t.Errorf("got error %v, want %v", err, tt.want.err)
 				return
 			}
 
 			if err != nil {
 				return
 			}
 			if client == nil {
 				t.Error("got client nil")
 			}
 		})
 	}
 }
 
 func errorEquals(a, b error) bool {
 	if a == b {
 		return true
 	}
 	if a == nil || b == nil {
 		return false
 	}
 	return a.Error() == b.Error()
 }
 
 type connectFunc func(ctx context.Context, gvr schema.GroupVersionResource, proxyPath string, responder rest.Responder) (http.Handler, error)
 
 func (c connectFunc) connect(ctx context.Context, gvr schema.GroupVersionResource, proxyPath string, responder rest.Responder) (http.Handler, error) {
 	return c(ctx, gvr, proxyPath, responder)
 }
 
 func newCluster(name string) *clusterv1alpha1.Cluster {
 	c := &clusterv1alpha1.Cluster{}
 	c.Name = name

@@ -1,10 +1,12 @@
 package proxy
 
 import (
 	"context"
 	"net/http"
 	"net/url"
 	"path"
 
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apiserver/pkg/registry/rest"
 	restclient "k8s.io/client-go/rest"
 
@@ -34,7 +36,7 @@ func newKarmadaProxy(restConfig *restclient.Config) (*karmadaProxy, error) {
 	}
 }
 
 // connect to Karmada-ApiServer directly
-func (p *karmadaProxy) connect(proxyPath string, responder rest.Responder) (http.Handler, error) {
+func (p *karmadaProxy) connect(_ context.Context, _ schema.GroupVersionResource, proxyPath string, responder rest.Responder) (http.Handler, error) {
 	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
 		location, transport := p.resourceLocation()
 		location.Path = path.Join(location.Path, proxyPath)

@@ -0,0 +1,128 @@
 package proxy
 
 import (
 	"context"
 	"encoding/json"
 	"net/http"
 	"net/http/httptest"
 	"testing"
 	"time"
 
 	"k8s.io/apimachinery/pkg/runtime"
 	restclient "k8s.io/client-go/rest"
 )
 
 func Test_karmadaProxy(t *testing.T) {
 	var gotRequest *http.Request
 	s := httptest.NewTLSServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
 		gotRequest = req
 	}))
 	defer s.Close()
 
 	type args struct {
 		host string
 		path string
 	}
 
 	type want struct {
 		path string
 	}
 
 	tests := []struct {
 		name string
 		args args
 		want want
 	}{
 		{
 			name: "proxy to /proxy",
 			args: args{
 				host: s.URL,
 				path: "proxy",
 			},
 			want: want{
 				path: "/proxy",
 			},
 		},
 		{
 			name: "proxy to /api/proxy",
 			args: args{
 				host: s.URL + "/api",
 				path: "proxy",
 			},
 			want: want{
 				path: "/api/proxy",
 			},
 		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			gotRequest = nil
 			restConfig := &restclient.Config{
 				Host: tt.args.host,
 				TLSClientConfig: restclient.TLSClientConfig{
 					Insecure: true,
 				},
 				Timeout: time.Second * 1,
 			}
 			p, err := newKarmadaProxy(restConfig)
 			if err != nil {
 				t.Error(err)
 				return
 			}
 
 			response := httptest.NewRecorder()
 			h, err := p.connect(context.TODO(), podGVR, tt.args.path, newTestResponder(response))
 			if err != nil {
 				t.Error(err)
 				return
 			}
 
 			request, err := http.NewRequest("GET", "http://localhost", nil)
 			if err != nil {
 				t.Error(err)
 				return
 			}
 			h.ServeHTTP(response, request)
 
 			if t.Failed() {
 				return
 			}
 
 			if gotRequest == nil {
 				t.Error("got request nil")
 				return
 			}
 
 			if gotRequest.URL.Path != tt.want.path {
 				t.Errorf("path got = %v, want = %v", gotRequest.URL.Path, tt.want.path)
 				return
 			}
 		})
 	}
 }
 
 type testResponder struct {
 	resp *httptest.ResponseRecorder
 }
 
 func newTestResponder(response *httptest.ResponseRecorder) *testResponder {
 	return &testResponder{
 		resp: response,
 	}
 }
 
 func (f *testResponder) Object(statusCode int, obj runtime.Object) {
 	f.resp.Code = statusCode
 
 	if obj != nil {
 		err := json.NewEncoder(f.resp).Encode(obj)
 		if err != nil {
 			f.Error(err)
 		}
 	}
 }
 
 func (f *testResponder) Error(err error) {
 	_, _ = f.resp.WriteString(err.Error())
 }
@@ -5,7 +5,9 @@ import (
 	"fmt"
 	"reflect"
 	"strings"
 	"sync"
 	"testing"
 	"time"
 
 	"github.com/google/go-cmp/cmp"
 	corev1 "k8s.io/api/core/v1"
@@ -21,6 +23,7 @@ import (
 	"k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/client-go/dynamic"
 	fakedynamic "k8s.io/client-go/dynamic/fake"
 	kubetesting "k8s.io/client-go/testing"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 )
@@ -53,6 +56,8 @@ func TestMultiClusterCache_UpdateCache(t *testing.T) {
 		return fakedynamic.NewSimpleDynamicClient(scheme), nil
 	}
 	cache := NewMultiClusterCache(newClientFunc, restMapper)
 	defer cache.Stop()
 
 	cluster1 := newCluster("cluster1")
 	cluster2 := newCluster("cluster2")
 	resources := map[string]map[schema.GroupVersionResource]struct{}{
@@ -68,6 +73,17 @@ func TestMultiClusterCache_UpdateCache(t *testing.T) {
 	if len(cache.cache) != 2 {
 		t.Errorf("cache len expect %v, actual %v", 2, len(cache.cache))
 	}
 
 	// Then test removing cluster2 and remove node cache for cluster1
 	err = cache.UpdateCache(map[string]map[schema.GroupVersionResource]struct{}{
 		cluster1.Name: resourceSet(podGVR),
 	})
 	if err != nil {
 		t.Error(err)
 	}
 	if len(cache.cache) != 1 {
 		t.Errorf("cache len expect %v, actual %v", 1, len(cache.cache))
 	}
 }
 
 func TestMultiClusterCache_HasResource(t *testing.T) {
@@ -76,6 +92,7 @@ func TestMultiClusterCache_HasResource(t *testing.T) {
 		return fakeClient, nil
 	}
 	cache := NewMultiClusterCache(newClientFunc, restMapper)
 	defer cache.Stop()
 	cluster1 := newCluster("cluster1")
 	cluster2 := newCluster("cluster2")
 	resources := map[string]map[schema.GroupVersionResource]struct{}{
@@ -94,7 +111,7 @@ func TestMultiClusterCache_HasResource(t *testing.T) {
 		want bool
 	}{
 		{
-			"has pods",
+			"has pods",
 			podGVR,
 			true,
 		},
@@ -146,6 +163,7 @@ func TestMultiClusterCache_GetResourceFromCache(t *testing.T) {
 		return fakedynamic.NewSimpleDynamicClient(scheme), nil
 	}
 	cache := NewMultiClusterCache(newClientFunc, restMapper)
 	defer cache.Stop()
 	err := cache.UpdateCache(resources)
 	if err != nil {
 		t.Error(err)
@@ -266,6 +284,7 @@ func TestMultiClusterCache_Get(t *testing.T) {
 		return fakedynamic.NewSimpleDynamicClient(scheme), nil
 	}
 	cache := NewMultiClusterCache(newClientFunc, restMapper)
 	defer cache.Stop()
 	err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]struct{}{
 		cluster1.Name: resourceSet(podGVR, nodeGVR),
 		cluster2.Name: resourceSet(podGVR),
@@ -411,15 +430,6 @@ func TestMultiClusterCache_List(t *testing.T) {
 		}
 		return fakedynamic.NewSimpleDynamicClient(scheme), nil
 	}
-	cache := NewMultiClusterCache(newClientFunc, restMapper)
-	err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]struct{}{
-		cluster1.Name: resourceSet(podGVR),
-		cluster2.Name: resourceSet(podGVR),
-	})
-	if err != nil {
-		t.Error(err)
-		return
-	}
 
 	type args struct {
 		ctx context.Context
@@ -432,12 +442,34 @@ func TestMultiClusterCache_List(t *testing.T) {
 		errAssert func(error) bool
 	}
 	tests := []struct {
-		name string
-		args args
-		want want
+		name      string
+		resources map[string]map[schema.GroupVersionResource]struct{}
+		args      args
+		want      want
 	}{
 		{
-			name: "list pods",
+			name:      "list pods with labelSelector",
+			resources: map[string]map[schema.GroupVersionResource]struct{}{},
 			args: args{
 				ctx: request.WithNamespace(context.TODO(), metav1.NamespaceDefault),
 				gvr: podGVR,
 				options: &metainternalversion.ListOptions{
 					LabelSelector: asLabelSelector("app=foo"),
 				},
 			},
 			want: want{
 				// fakeDynamic returns list with resourceVersion=""
 				resourceVersion: "",
 				names:           sets.NewString(),
 				errAssert:       noError,
 			},
 		},
 		{
 			name: "list pods",
 			resources: map[string]map[schema.GroupVersionResource]struct{}{
 				cluster1.Name: resourceSet(podGVR),
 				cluster2.Name: resourceSet(podGVR),
 			},
 			args: args{
 				ctx: request.WithNamespace(context.TODO(), metav1.NamespaceDefault),
 				gvr: podGVR,
@@ -451,7 +483,11 @@ func TestMultiClusterCache_List(t *testing.T) {
 			},
 		},
 		{
-			name: "list pods with labelSelector",
+			name: "list pods with labelSelector",
+			resources: map[string]map[schema.GroupVersionResource]struct{}{
+				cluster1.Name: resourceSet(podGVR),
+				cluster2.Name: resourceSet(podGVR),
+			},
 			args: args{
 				ctx: request.WithNamespace(context.TODO(), metav1.NamespaceDefault),
 				gvr: podGVR,
@@ -470,6 +506,14 @@ func TestMultiClusterCache_List(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			cache := NewMultiClusterCache(newClientFunc, restMapper)
 			defer cache.Stop()
 			err := cache.UpdateCache(tt.resources)
 			if err != nil {
 				t.Error(err)
 				return
 			}
 
 			obj, err := cache.List(tt.args.ctx, tt.args.gvr, tt.args.options)
 			if !tt.want.errAssert(err) {
 				t.Errorf("Unexpected error: %v", err)
@@ -479,14 +523,29 @@ func TestMultiClusterCache_List(t *testing.T) {
 				return
 			}
 
-			object := obj.(*unstructured.UnstructuredList)
+			object, err := meta.ListAccessor(obj)
+			if err != nil {
+				t.Error(err)
+				return
+			}
 			if tt.want.resourceVersion != object.GetResourceVersion() {
 				t.Errorf("ResourceVersion want=%v, actual=%v", tt.want.resourceVersion, object.GetResourceVersion())
 			}
 			names := sets.NewString()
-			for _, item := range object.Items {
-				names.Insert(item.GetName())
+			err = meta.EachListItem(obj, func(o runtime.Object) error {
+				a, err := meta.Accessor(o)
+				if err != nil {
+					return err
+				}
+				names.Insert(a.GetName())
+				return nil
+			})
+			if err != nil {
+				t.Error(err)
+				return
+			}
 
 			if !tt.want.names.Equal(names) {
 				t.Errorf("List items want=%v, actual=%v", strings.Join(tt.want.names.List(), ","), strings.Join(names.List(), ","))
@@ -494,7 +553,7 @@ func TestMultiClusterCache_List(t *testing.T) {
 	}
 }
 
-func TestMultiClusterCache_List_CachSourceAnnotation(t *testing.T) {
+func TestMultiClusterCache_List_CacheSourceAnnotation(t *testing.T) {
 	cluster1 := newCluster("cluster1")
 	cluster2 := newCluster("cluster2")
 	cluster1Client := fakedynamic.NewSimpleDynamicClient(scheme,
@@ -516,6 +575,7 @@ func TestMultiClusterCache_List_CachSourceAnnotation(t *testing.T) {
 		return fakedynamic.NewSimpleDynamicClient(scheme), nil
 	}
 	cache := NewMultiClusterCache(newClientFunc, restMapper)
 	defer cache.Stop()
 	err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]struct{}{
 		cluster1.Name: resourceSet(podGVR),
 		cluster2.Name: resourceSet(podGVR),
@@ -547,6 +607,130 @@ func TestMultiClusterCache_List_CachSourceAnnotation(t *testing.T) {
 	}
 }
 
 func TestMultiClusterCache_Watch(t *testing.T) {
 	cluster1 := newCluster("cluster1")
 	cluster2 := newCluster("cluster2")
 	cluster1Client := NewEnhancedFakeDynamicClientWithResourceVersion(scheme, "1002",
 		newUnstructuredObject(podGVK, "pod11", withDefaultNamespace(), withResourceVersion("1001")),
 		newUnstructuredObject(podGVK, "pod12", withDefaultNamespace(), withResourceVersion("1002")),
 	)
 	cluster2Client := NewEnhancedFakeDynamicClientWithResourceVersion(scheme, "2002",
 		newUnstructuredObject(podGVK, "pod21", withDefaultNamespace(), withResourceVersion("2001")),
 		newUnstructuredObject(podGVK, "pod22", withDefaultNamespace(), withResourceVersion("2002")),
 	)
 
 	newClientFunc := func(cluster string) (dynamic.Interface, error) {
 		switch cluster {
 		case cluster1.Name:
 			return cluster1Client, nil
 		case cluster2.Name:
 			return cluster2Client, nil
 		}
 		return fakedynamic.NewSimpleDynamicClient(scheme), nil
 	}
 	cache := NewMultiClusterCache(newClientFunc, restMapper)
 	defer cache.Stop()
 	err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]struct{}{
 		cluster1.Name: resourceSet(podGVR),
 		cluster2.Name: resourceSet(podGVR),
 	})
 	if err != nil {
 		t.Error(err)
 		return
 	}
 
 	// wait cache synced
 	time.Sleep(time.Second)
 
 	// put pods into Cacher.incoming chan
 	_ = cluster1Client.Tracker().Add(newUnstructuredObject(podGVK, "pod13", withDefaultNamespace(), withResourceVersion("1003")))
 	_ = cluster2Client.Tracker().Add(newUnstructuredObject(podGVK, "pod23", withDefaultNamespace(), withResourceVersion("2003")))
 	cluster1Client.versionTracker.Set("1003")
 	cluster2Client.versionTracker.Set("2003")
 
 	type args struct {
 		options *metainternalversion.ListOptions
 	}
 	type want struct {
 		pods sets.String
 	}
 	tests := []struct {
 		name string
 		args args
 		want want
 	}{
 		{
 			name: "resource version is empty",
 			args: args{
 				options: &metainternalversion.ListOptions{
 					ResourceVersion: "",
 				},
 			},
 			want: want{
 				pods: sets.NewString("pod11", "pod12", "pod13", "pod21", "pod22", "pod23"),
 			},
 		},
 		{
 			name: "resource version of cluster2 is empty",
 			args: args{
 				options: &metainternalversion.ListOptions{
 					ResourceVersion: buildMultiClusterRV(cluster1.Name, "1002"),
 				},
 			},
 			want: want{
 				pods: sets.NewString("pod13", "pod21", "pod22", "pod23"),
 			},
 		},
 		{
 			name: "resource versions are not empty",
 			args: args{
 				options: &metainternalversion.ListOptions{
 					ResourceVersion: buildMultiClusterRV(cluster1.Name, "1002", cluster2.Name, "2002"),
 				},
 			},
 			want: want{
 				pods: sets.NewString("pod13", "pod23"),
 			},
 		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			ctx, cancel := context.WithTimeout(request.WithNamespace(context.TODO(), metav1.NamespaceDefault), time.Second)
 			defer cancel()
 			watcher, err := cache.Watch(ctx, podGVR, tt.args.options)
 			if err != nil {
 				t.Error(err)
 				return
 			}
 			defer watcher.Stop()
 			timeout := time.After(time.Second * 5)
 
 			pods := sets.NewString()
 		LOOP:
 			for {
 				select {
 				case event, ok := <-watcher.ResultChan():
 					if !ok {
 						break LOOP
 					}
 					accessor, err := meta.Accessor(event.Object)
 					if err == nil {
 						pods.Insert(accessor.GetName())
 					}
 				case <-timeout:
 					t.Error("timeout")
 					return
 				}
 			}
 
 			if !tt.want.pods.Equal(pods) {
 				t.Errorf("Watch() got = %v, but want = %v", pods, tt.want.pods)
 			}
 		})
 	}
 }
 
 func newCluster(name string) *clusterv1alpha1.Cluster {
 	o := &clusterv1alpha1.Cluster{}
 	o.Name = name

@@ -618,3 +802,74 @@ func resourceSet(rs ...schema.GroupVersionResource) map[schema.GroupVersionResou
 	}
 	return m
 }
 
 type VersionTracker interface {
 	Set(string)
 	Get() string
 }
 
 type versionTracker struct {
 	lock sync.RWMutex
 	rv   string
 }
 
 func NewVersionTracker(rv string) VersionTracker {
 	return &versionTracker{
 		rv: rv,
 	}
 }
 
 func (t *versionTracker) Set(rv string) {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 	t.rv = rv
 }
 
 func (t *versionTracker) Get() string {
 	t.lock.RLock()
 	defer t.lock.RUnlock()
 	return t.rv
 }
 
 // EnhancedFakeDynamicClient enhances FakeDynamicClient. It will return resourceVersion for list request.
 type EnhancedFakeDynamicClient struct {
 	*fakedynamic.FakeDynamicClient
 	ObjectReaction kubetesting.ReactionFunc
 	versionTracker VersionTracker
 }
 
 // NewEnhancedFakeDynamicClientWithResourceVersion returns instance of EnhancedFakeDynamicClient.
 func NewEnhancedFakeDynamicClientWithResourceVersion(scheme *runtime.Scheme, rv string, objects ...runtime.Object) *EnhancedFakeDynamicClient {
 	v := NewVersionTracker(rv)
 
 	c := fakedynamic.NewSimpleDynamicClient(scheme, objects...)
 	c.PrependReactor("list", "*", enhancedListReaction(c.Tracker(), v))
 
 	return &EnhancedFakeDynamicClient{
 		FakeDynamicClient: c,
 		ObjectReaction:    kubetesting.ObjectReaction(c.Tracker()),
 		versionTracker:    v,
 	}
 }
 
 func enhancedListReaction(o kubetesting.ObjectTracker, v VersionTracker) kubetesting.ReactionFunc {
 	return func(act kubetesting.Action) (bool, runtime.Object, error) {
 		action, ok := act.(kubetesting.ListActionImpl)
 		if !ok {
 			return false, nil, nil
 		}
 
 		ret, err := o.List(action.GetResource(), action.GetKind(), action.GetNamespace())
 		if err != nil {
 			return true, ret, err
 		}
 
 		accessor, err := meta.ListAccessor(ret)
 		if err != nil {
 			// object is not a list object, don't change it. Don't return this error.
 			return true, ret, nil
 		}
 		accessor.SetResourceVersion(v.Get())
 		return true, ret, nil
 	}
 }

@@ -2,13 +2,19 @@ package store
 
 import (
 	"context"
 	"encoding/base64"
 	"reflect"
 	"sync"
 	"testing"
 	"time"
 
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 )
 
 func Test_watchMux_StopBySelf(t *testing.T) {
@@ -147,3 +153,509 @@ func (f *fakeWatcher) TryAdd(obj runtime.Object) (added bool, stopped bool) {
 		return false, false
 	}
 }
 
 func Test_newMultiClusterResourceVersionFromString(t *testing.T) {
 	type args struct {
 		s string
 	}
 	tests := []struct {
 		name string
 		args args
 		want *multiClusterResourceVersion
 	}{
 		{
 			name: "empty",
 			args: args{
 				s: "",
 			},
 			want: &multiClusterResourceVersion{
 				rvs: map[string]string{},
 			},
 		},
 		{
 			name: "zero",
 			args: args{
 				s: "0",
 			},
 			want: &multiClusterResourceVersion{
 				rvs:    map[string]string{},
 				isZero: true,
 			},
 		},
 		{
 			name: "decode error",
 			args: args{
 				s: "`not encoded`",
 			},
 			want: &multiClusterResourceVersion{
 				rvs: map[string]string{},
 			},
 		},
 		{
 			name: "not a json",
 			args: args{
 				s: base64.RawURLEncoding.EncodeToString([]byte(`not a json`)),
 			},
 			want: &multiClusterResourceVersion{
 				rvs: map[string]string{},
 			},
 		},
 		{
 			name: "success",
 			args: args{
 				s: base64.RawURLEncoding.EncodeToString([]byte(`{"cluster1":"1","cluster2":"2"}`)),
 			},
 			want: &multiClusterResourceVersion{
 				rvs: map[string]string{
 					"cluster1": "1",
 					"cluster2": "2",
 				},
 			},
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			got := newMultiClusterResourceVersionFromString(tt.args.s)
 			if !reflect.DeepEqual(got, tt.want) {
 				t.Errorf("newMultiClusterResourceVersionFromString() = %#v, want %#v", got, tt.want)
 			}
 		})
 	}
 }
 
 func Test_multiClusterResourceVersion_get(t *testing.T) {
 	type fields struct {
 		rvs    map[string]string
 		isZero bool
 	}
 	type args struct {
 		cluster string
 	}
 	tests := []struct {
 		name   string
 		fields fields
 		args   args
 		want   string
 	}{
 		{
 			name: "zero",
 			fields: fields{
 				isZero: true,
 				rvs:    map[string]string{},
 			},
 			args: args{
 				cluster: "cluster1",
 			},
 			want: "0",
 		},
 		{
 			name: "not exist",
 			fields: fields{
 				rvs: map[string]string{},
 			},
 			args: args{
 				cluster: "cluster1",
 			},
 			want: "",
 		},
 		{
 			name: "get success",
 			fields: fields{
 				rvs: map[string]string{
 					"cluster1": "1",
 				},
 			},
 			args: args{
 				cluster: "cluster1",
 			},
 			want: "1",
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			m := &multiClusterResourceVersion{
 				rvs:    tt.fields.rvs,
 				isZero: tt.fields.isZero,
 			}
 			if got := m.get(tt.args.cluster); got != tt.want {
 				t.Errorf("get() = %v, want %v", got, tt.want)
 			}
 		})
 	}
 }
 
 func Test_multiClusterResourceVersion_String(t *testing.T) {
 	type fields struct {
 		rvs    map[string]string
 		isZero bool
 	}
 	tests := []struct {
 		name   string
 		fields fields
 		want   string
 	}{
 		{
 			name: "zero",
 			fields: fields{
 				isZero: true,
 				rvs:    map[string]string{},
 			},
 			want: "0",
 		},
 		{
 			name: "empty",
 			fields: fields{
 				rvs: map[string]string{},
 			},
 			want: "",
 		},
 		{
 			name: "get success",
 			fields: fields{
 				rvs: map[string]string{
 					"cluster1": "1",
 					"cluster2": "2",
 				},
 			},
 			want: base64.RawURLEncoding.EncodeToString([]byte(`{"cluster1":"1","cluster2":"2"}`)),
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			m := &multiClusterResourceVersion{
 				rvs:    tt.fields.rvs,
 				isZero: tt.fields.isZero,
 			}
 			if got := m.String(); got != tt.want {
 				t.Errorf("String() = %v, want %v", got, tt.want)
 			}
 		})
 	}
 }
 
 func Test_newMultiClusterContinueFromString(t *testing.T) {
 	type args struct {
 		s string
 	}
 	tests := []struct {
 		name string
 		args args
 		want multiClusterContinue
 	}{
 		{
 			name: "empty",
 			args: args{
 				s: "",
 			},
 			want: multiClusterContinue{},
 		},
 		{
 			name: "not encoded",
 			args: args{
 				s: "not encoded",
 			},
 			want: multiClusterContinue{},
 		},
 		{
 			name: "not json",
 			args: args{
 				s: base64.RawURLEncoding.EncodeToString([]byte("not json")),
 			},
 			want: multiClusterContinue{},
 		},
 		{
 			name: "success",
 			args: args{
 				s: base64.RawURLEncoding.EncodeToString([]byte(`{"cluster":"cluster1","continue":"1"}`)),
 			},
 			want: multiClusterContinue{
 				Cluster:  "cluster1",
 				Continue: "1",
 			},
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			if got := newMultiClusterContinueFromString(tt.args.s); !reflect.DeepEqual(got, tt.want) {
 				t.Errorf("newMultiClusterContinueFromString() = %v, want %v", got, tt.want)
 			}
 		})
 	}
 }
 
 func Test_multiClusterContinue_String(t *testing.T) {
 	type fields struct {
 		Cluster  string
 		Continue string
 	}
 	tests := []struct {
 		name   string
 		fields fields
 		want   string
 	}{
 		{
 			name: "empty",
 			fields: fields{
 				Cluster:  "",
 				Continue: "",
 			},
 			want: "",
 		},
 		{
 			name: "success",
 			fields: fields{
 				Cluster:  "cluster1",
 				Continue: "1",
 			},
 			want: base64.RawURLEncoding.EncodeToString([]byte(`{"cluster":"cluster1","continue":"1"}`)),
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			c := &multiClusterContinue{
 				Cluster:  tt.fields.Cluster,
 				Continue: tt.fields.Continue,
 			}
 			if got := c.String(); got != tt.want {
 				t.Errorf("String() = %v, want %v", got, tt.want)
 			}
 		})
 	}
 }
 
 func TestRemoveCacheSourceAnnotation(t *testing.T) {
 	type args struct {
 		obj runtime.Object
 	}
 	type want struct {
 		obj     runtime.Object
 		changed bool
 	}
 	tests := []struct {
 		name string
 		args args
 		want want
 	}{
 		{
 			name: "not a meta",
 			args: args{
 				obj: &metav1.Status{},
 			},
 			want: want{
 				changed: false,
 				obj:     &metav1.Status{},
 			},
 		},
 		{
 			name: "annotation not exist",
 			args: args{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{},
 				},
 			},
 			want: want{
 				changed: false,
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{},
 				},
 			},
 		},
 		{
 			name: "remove annotation",
 			args: args{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						Annotations: map[string]string{
 							clusterv1alpha1.CacheSourceAnnotationKey: "cluster1",
 						},
 					},
 				},
 			},
 			want: want{
 				changed: true,
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						Annotations: map[string]string{},
 					},
 				},
 			},
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			got := RemoveCacheSourceAnnotation(tt.args.obj)
 			if got != tt.want.changed {
 				t.Errorf("RemoveCacheSourceAnnotation() = %v, want %v", got, tt.want)
 			}
 			if !reflect.DeepEqual(tt.args.obj, tt.want.obj) {
 				t.Errorf("RemoveCacheSourceAnnotation() got obj = %#v, want %#v", tt.args.obj, tt.want.obj)
 			}
 		})
 	}
 }
 
 func TestRecoverClusterResourceVersion(t *testing.T) {
 	type args struct {
 		obj     runtime.Object
 		cluster string
 	}
 	type want struct {
 		obj     runtime.Object
 		changed bool
 	}
 	tests := []struct {
 		name string
 		args args
 		want want
 	}{
 		{
 			name: "not a meta",
 			args: args{
 				obj:     &metav1.Status{},
 				cluster: "cluster1",
 			},
 			want: want{
 				changed: false,
 				obj:     &metav1.Status{},
 			},
 		},
 		{
 			name: "rv is empty",
 			args: args{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						ResourceVersion: "",
 					},
 				},
 				cluster: "cluster1",
 			},
 			want: want{
 				changed: false,
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{},
 				},
 			},
 		},
 		{
 			name: "rv is 0",
 			args: args{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						ResourceVersion: "0",
 					},
 				},
 				cluster: "cluster1",
 			},
 			want: want{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						ResourceVersion: "0",
 					},
 				},
 				changed: false,
 			},
 		},
 		{
 			name: "single cluster rv",
 			args: args{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						ResourceVersion: "1000",
 					},
 				},
 				cluster: "cluster1",
 			},
 			want: want{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						ResourceVersion: "1000",
 					},
 				},
 				changed: false,
 			},
 		},
 		{
 			name: "cluster rv not set",
 			args: args{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						ResourceVersion: base64.RawURLEncoding.EncodeToString([]byte(`{}`)),
 					},
 				},
 				cluster: "cluster1",
 			},
 			want: want{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{},
 				},
 				changed: true,
 			},
 		},
 		{
 			name: "recover cluster rv",
 			args: args{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						ResourceVersion: base64.RawURLEncoding.EncodeToString([]byte(`{"cluster1":"1","cluster2":"2"}`)),
 					},
 				},
 				cluster: "cluster1",
 			},
 			want: want{
 				obj: &corev1.Pod{
 					ObjectMeta: metav1.ObjectMeta{
 						ResourceVersion: "1",
 					},
 				},
 				changed: true,
 			},
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			got := RecoverClusterResourceVersion(tt.args.obj, tt.args.cluster)
 			if got != tt.want.changed {
 				t.Errorf("RecoverClusterResourceVersion() changed = %v, want %v", got, tt.want.changed)
 			}
 
 			if !reflect.DeepEqual(tt.args.obj, tt.want.obj) {
 				t.Errorf("RecoverClusterResourceVersion() got obj = %#v, want %#v", tt.args.obj, tt.want.obj)
 			}
 		})
 	}
 }
 
 func TestBuildMultiClusterResourceVersion(t *testing.T) {
 	type args struct {
 		clusterResourceMap map[string]string
 	}
 	tests := []struct {
 		name string
 		args args
 		want string
 	}{
 		{
 			name: "empty",
 			args: args{
 				clusterResourceMap: nil,
 			},
 			want: "",
 		},
 		{
 			name: "success",
 			args: args{
 				clusterResourceMap: map[string]string{
 					"cluster1": "1",
 				},
 			},
 			want: base64.RawURLEncoding.EncodeToString([]byte(`{"cluster1":"1"}`)),
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			if got := BuildMultiClusterResourceVersion(tt.args.clusterResourceMap); got != tt.want {
 				t.Errorf("BuildMultiClusterResourceVersion() = %v, want %v", got, tt.want)
 			}
 		})
 	}
 }