Merge pull request #2516 from XiShanYongYe-Chang/fix-2495
Use the Cluster secret ref namespace in unified-auth-controller when generating the ClusterRoleBinding
Commit 636817ae42
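The substance of the change, condensed from the diff below (a sketch of the ClusterRoleBinding subject only, not the full builder function):

    // Before: the subject namespace was hard-coded to the default
    // "karmada-cluster" namespace.
    Subjects: []rbacv1.Subject{
        {
            Kind:      rbacv1.ServiceAccountKind,
            Namespace: names.NamespaceKarmadaCluster,
            Name:      names.GenerateServiceAccountName("impersonator"),
        },
    },

    // After: the subject namespace follows the cluster's impersonator secret
    // reference, so clusters registered under a non-default namespace work too.
    Subjects: []rbacv1.Subject{
        {
            Kind:      rbacv1.ServiceAccountKind,
            Namespace: cluster.Spec.ImpersonatorSecretRef.Namespace,
            Name:      names.GenerateServiceAccountName("impersonator"),
        },
    },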
@@ -29,11 +29,12 @@ import (
 
 const (
 	// ControllerName is the controller name that will be used when reporting events.
-	ControllerName         = "unified-auth-controller"
-	rbacAPIVersion         = "rbac.authorization.k8s.io/v1"
-	clusterProxyResource   = "clusters/proxy"
-	clusterProxyAPIGroup   = "cluster.karmada.io"
-	karmadaImpersontorName = "karmada-impersonator"
+	ControllerName = "unified-auth-controller"
+
+	rbacAPIVersion          = "rbac.authorization.k8s.io/v1"
+	clusterProxyResource    = "clusters/proxy"
+	clusterProxyAPIGroup    = "cluster.karmada.io"
+	karmadaImpersonatorName = "karmada-impersonator"
 )
 
 // Controller is to sync impersonation config to member clusters for unified authentication.
@@ -87,6 +88,7 @@ func (c *Controller) syncImpersonationConfig(cluster *clusterv1alpha1.Cluster) e
 				util.PolicyRuleResourceMatches(&clusterRole.Rules[i], clusterProxyResource) &&
 				util.PolicyRuleResourceNameMatches(&clusterRole.Rules[i], cluster.Name) {
 				allMatchedClusterRoles.Insert(clusterRole.Name)
+				break
 			}
 		}
 	}
@@ -132,7 +134,7 @@ func (c *Controller) buildImpersonationClusterRole(cluster *clusterv1alpha1.Clus
 			Kind: util.ClusterRoleKind,
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name: karmadaImpersontorName,
+			Name: karmadaImpersonatorName,
 		},
 		Rules: rules,
 	}
@@ -140,7 +142,7 @@ func (c *Controller) buildImpersonationClusterRole(cluster *clusterv1alpha1.Clus
 	clusterRoleObj, err := helper.ToUnstructured(impersonationClusterRole)
 	if err != nil {
 		klog.Errorf("Failed to transform clusterrole %s. Error: %v", impersonationClusterRole.GetName(), err)
-		return nil
+		return err
 	}
 
 	return c.buildWorks(cluster, clusterRoleObj)
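Note the error-handling fix bundled into this hunk and the next one: a failed ToUnstructured conversion used to be logged and then swallowed with return nil, so the reconcile looked successful and was never retried; returning err lets controller-runtime requeue the cluster.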
@@ -153,26 +155,26 @@ func (c *Controller) buildImpersonationClusterRoleBinding(cluster *clusterv1alph
 			Kind: util.ClusterRoleBindingKind,
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name: karmadaImpersontorName,
+			Name: karmadaImpersonatorName,
 		},
 		Subjects: []rbacv1.Subject{
 			{
 				Kind:      rbacv1.ServiceAccountKind,
-				Namespace: names.NamespaceKarmadaCluster,
+				Namespace: cluster.Spec.ImpersonatorSecretRef.Namespace,
 				Name:      names.GenerateServiceAccountName("impersonator"),
 			},
 		},
 		RoleRef: rbacv1.RoleRef{
 			APIGroup: rbacv1.GroupName,
 			Kind:     util.ClusterRoleKind,
-			Name:     karmadaImpersontorName,
+			Name:     karmadaImpersonatorName,
 		},
 	}
 
 	clusterRoleBindingObj, err := helper.ToUnstructured(impersonatorClusterRoleBinding)
 	if err != nil {
 		klog.Errorf("Failed to transform clusterrolebinding %s. Error: %v", impersonatorClusterRoleBinding.GetName(), err)
-		return nil
+		return err
 	}
 
 	return c.buildWorks(cluster, clusterRoleBindingObj)
@@ -213,19 +215,10 @@ func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
 			return true
 		},
 		UpdateFunc: func(e event.UpdateEvent) bool {
-			if _, ok := e.ObjectNew.(*clusterv1alpha1.Cluster); ok {
-				return false
-			}
-			if _, ok := e.ObjectOld.(*clusterv1alpha1.Cluster); ok {
-				return false
-			}
-			return true
+			return false
 		},
 		DeleteFunc: func(e event.DeleteEvent) bool {
-			if _, ok := e.Object.(*clusterv1alpha1.Cluster); ok {
-				return false
-			}
-			return true
+			return false
 		},
 		GenericFunc: func(event.GenericEvent) bool {
 			return false
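Since this predicate is attached to the watch for Cluster objects, the type assertions in the old Update/Delete funcs always succeeded, which made the trailing return true dead code; the rewrite states the actual behavior directly. A condensed view of the resulting predicate (a sketch: the variable name and the assumption that the leading return true belongs to CreateFunc are mine, not part of the hunk):

    // Only cluster creation triggers a sync of impersonation config;
    // updates, deletions, and generic events are ignored.
    clusterPredicateFunc := predicate.Funcs{
        CreateFunc:  func(event.CreateEvent) bool { return true },
        UpdateFunc:  func(event.UpdateEvent) bool { return false },
        DeleteFunc:  func(event.DeleteEvent) bool { return false },
        GenericFunc: func(event.GenericEvent) bool { return false },
    }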
@@ -264,14 +257,14 @@ func (c *Controller) newClusterRoleBindingMapFunc() handler.MapFunc {
 
 // found out which clusters need to sync impersonation config from rules like:
 // resources: ["cluster/proxy"]
-// resourceNmaes: ["cluster1", "cluster2"]
+// resourceNames: ["cluster1", "cluster2"]
 func (c *Controller) generateRequestsFromClusterRole(clusterRole *rbacv1.ClusterRole) []reconcile.Request {
 	var requests []reconcile.Request
 	for i := range clusterRole.Rules {
 		if util.PolicyRuleAPIGroupMatches(&clusterRole.Rules[i], clusterProxyAPIGroup) &&
 			util.PolicyRuleResourceMatches(&clusterRole.Rules[i], clusterProxyResource) {
 			if len(clusterRole.Rules[i].ResourceNames) == 0 {
-				// if rule.ResourceNames == 0, means to match all clusters
+				// if the length of rule[i].ResourceNames is 0, means to match all clusters
 				clusterList := &clusterv1alpha1.ClusterList{}
 				if err := c.Client.List(context.TODO(), clusterList); err != nil {
 					klog.Errorf("Failed to list clusters, error: %v", err)
@@ -283,9 +276,9 @@ func (c *Controller) generateRequestsFromClusterRole(clusterRole *rbacv1.Cluster
 				}})
 			}
 		} else {
-			for _, ruleName := range clusterRole.Rules[i].ResourceNames {
+			for _, resourceName := range clusterRole.Rules[i].ResourceNames {
 				requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{
-					Name: ruleName,
+					Name: resourceName,
 				}})
 			}
 		}
@@ -86,8 +86,8 @@ func DeleteClusterRoleBinding(client kubeclient.Interface, name string) error {
 }
 
 // PolicyRuleAPIGroupMatches determines if the given policy rule is applied for requested group.
-func PolicyRuleAPIGroupMatches(rules *rbacv1.PolicyRule, requestedGroup string) bool {
-	for _, ruleGroup := range rules.APIGroups {
+func PolicyRuleAPIGroupMatches(rule *rbacv1.PolicyRule, requestedGroup string) bool {
+	for _, ruleGroup := range rule.APIGroups {
 		if ruleGroup == rbacv1.APIGroupAll {
 			return true
 		}
@@ -100,8 +100,8 @@ func PolicyRuleAPIGroupMatches(rules *rbacv1.PolicyRule, requestedGroup string)
 }
 
 // PolicyRuleResourceMatches determines if the given policy rule is applied for requested resource.
-func PolicyRuleResourceMatches(rules *rbacv1.PolicyRule, requestedResource string) bool {
-	for _, ruleResource := range rules.Resources {
+func PolicyRuleResourceMatches(rule *rbacv1.PolicyRule, requestedResource string) bool {
+	for _, ruleResource := range rule.Resources {
 		if ruleResource == rbacv1.ResourceAll {
 			return true
 		}
@@ -119,8 +119,8 @@ func PolicyRuleResourceNameMatches(rule *rbacv1.PolicyRule, requestedName string
 		return true
 	}
 
-	for _, ruleName := range rule.ResourceNames {
-		if ruleName == requestedName {
+	for _, resourceName := range rule.ResourceNames {
+		if resourceName == requestedName {
 			return true
 		}
 	}
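Taken together, the three matchers decide whether a ClusterRole rule grants clusters/proxy access to a particular member cluster. An illustrative call sequence (the rule values are borrowed from the e2e fixture below; the results follow from the function bodies above):

    rule := rbacv1.PolicyRule{
        APIGroups:     []string{"cluster.karmada.io"},
        Verbs:         []string{"*"},
        Resources:     []string{"clusters/proxy"},
        ResourceNames: []string{"member1"},
    }
    util.PolicyRuleAPIGroupMatches(&rule, "cluster.karmada.io") // true
    util.PolicyRuleResourceMatches(&rule, "clusters/proxy")     // true
    util.PolicyRuleResourceNameMatches(&rule, "member1")        // true
    util.PolicyRuleResourceNameMatches(&rule, "member2")        // false
    // An empty ResourceNames list matches any requested name (the early
    // return above), which is how a single rule can cover all clusters.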
@@ -3,6 +3,7 @@ package e2e
 import (
 	"fmt"
 	"net/http"
+	"os"
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
@@ -11,8 +12,11 @@
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/klog/v2"
 
 	"github.com/karmada-io/karmada/pkg/karmadactl"
+	"github.com/karmada-io/karmada/pkg/karmadactl/options"
 	"github.com/karmada-io/karmada/test/e2e/framework"
 	"github.com/karmada-io/karmada/test/helper"
 )
@@ -21,7 +25,7 @@
 	clusterProxy = "/apis/cluster.karmada.io/v1alpha1/clusters/%s/proxy/"
 )
 
-var _ = ginkgo.Describe("Aggregated Kubernetes API Endpoint testing", func() {
+var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", func() {
 	var member1, member2 string
 	var saName, saNamespace string
 	var tomServiceAccount *corev1.ServiceAccount
@@ -31,6 +35,78 @@ var _ = ginkgo.Describe("Aggregated Kubernetes API Endpoint testing", func() {
 	var tomClusterRoleOnMember *rbacv1.ClusterRole
 	var tomClusterRoleBindingOnMember *rbacv1.ClusterRoleBinding
 
+	var (
+		clusterName    string
+		homeDir        string
+		kubeConfigPath string
+		clusterContext string
+		controlPlane   string
+		karmadaConfig  karmadactl.KarmadaConfig
+
+		secretStoreNamespace string
+	)
+
+	ginkgo.BeforeEach(func() {
+		clusterName = "member-e2e-" + rand.String(RandomStrLength)
+		homeDir = os.Getenv("HOME")
+		kubeConfigPath = fmt.Sprintf("%s/.kube/%s.config", homeDir, clusterName)
+		clusterContext = fmt.Sprintf("kind-%s", clusterName)
+		controlPlane = fmt.Sprintf("%s-control-plane", clusterName)
+		karmadaConfig = karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions())
+
+		secretStoreNamespace = "test-" + rand.String(RandomStrLength)
+	})
+
+	ginkgo.BeforeEach(func() {
+		ginkgo.By(fmt.Sprintf("Create cluster: %s", clusterName), func() {
+			err := createCluster(clusterName, kubeConfigPath, controlPlane, clusterContext)
+			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		})
+		ginkgo.DeferCleanup(func() {
+			ginkgo.By(fmt.Sprintf("Deleting clusters: %s", clusterName), func() {
+				err := deleteCluster(clusterName, kubeConfigPath)
+				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+				_ = os.Remove(kubeConfigPath)
+			})
+		})
+	})
+
+	ginkgo.BeforeEach(func() {
+		ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
+			opts := karmadactl.CommandJoinOption{
+				GlobalCommandOptions: options.GlobalCommandOptions{
+					KarmadaContext: karmadaContext,
+				},
+				DryRun:            false,
+				ClusterNamespace:  secretStoreNamespace,
+				ClusterName:       clusterName,
+				ClusterContext:    clusterContext,
+				ClusterKubeConfig: kubeConfigPath,
+			}
+			err := karmadactl.RunJoin(karmadaConfig, opts)
+			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		})
+	})
+
+	ginkgo.AfterEach(func() {
+		ginkgo.By(fmt.Sprintf("Unjoinning cluster: %s", clusterName), func() {
+			karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions())
+			opts := karmadactl.CommandUnjoinOption{
+				GlobalCommandOptions: options.GlobalCommandOptions{
+					KarmadaContext: karmadaContext,
+				},
+				DryRun:            false,
+				ClusterNamespace:  secretStoreNamespace,
+				ClusterName:       clusterName,
+				ClusterContext:    clusterContext,
+				ClusterKubeConfig: kubeConfigPath,
+				Wait:              5 * options.DefaultKarmadactlCommandDuration,
+			}
+			err := karmadactl.RunUnjoin(karmadaConfig, opts)
+			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+		})
+	})
+
 	ginkgo.BeforeEach(func() {
 		member1, member2 = "member1", "member2"
 
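This fixture reproduces the setup behind issue #2495: a fresh kind cluster is joined with ClusterNamespace set to a randomly named secretStoreNamespace instead of the default, so the cluster's ImpersonatorSecretRef points at that namespace. Before this fix, the generated ClusterRoleBinding would still reference the default namespace, which is exactly the mismatch the controller change above corrects.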
@@ -52,7 +128,7 @@ var _ = ginkgo.Describe("Aggregated Kubernetes API Endpoint testing", func() {
 			APIGroups:     []string{"cluster.karmada.io"},
 			Verbs:         []string{"*"},
 			Resources:     []string{"clusters/proxy"},
-			ResourceNames: []string{member1},
+			ResourceNames: []string{member1, clusterName},
 		},
 	})
 	tomClusterRoleBinding = helper.NewClusterRoleBinding(tomServiceAccount.Name, tomClusterRole.Name, []rbacv1.Subject{
@@ -156,5 +232,61 @@ var _ = ginkgo.Describe("Aggregated Kubernetes API Endpoint testing", func() {
 			})
 		})
 	})
 
+	ginkgo.When(fmt.Sprintf("Serviceaccount(tom) access the %s cluster", clusterName), func() {
+		var clusterClient kubernetes.Interface
+
+		ginkgo.BeforeEach(func() {
+			clusterConfig, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
+			gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+			clusterClient = kubernetes.NewForConfigOrDie(clusterConfig)
+		})
+
+		ginkgo.BeforeEach(func() {
+			klog.Infof("Waiting for namespace present on cluster(%s)", clusterName)
+			framework.WaitNamespacePresentOnClusterByClient(clusterClient, tomServiceAccount.Namespace)
+
+			klog.Infof("Create ServiceAccount(%s) in the cluster(%s)", klog.KObj(tomServiceAccount).String(), clusterName)
+			framework.CreateServiceAccount(clusterClient, tomServiceAccount)
+			ginkgo.DeferCleanup(func() {
+				klog.Infof("Delete ServiceAccount(%s) in the cluster(%s)", klog.KObj(tomServiceAccount).String(), clusterName)
+				framework.RemoveServiceAccount(clusterClient, tomServiceAccount.Namespace, tomServiceAccount.Name)
+			})
+		})
+
+		ginkgo.AfterEach(func() {
+			framework.RemoveClusterRole(clusterClient, tomClusterRoleOnMember.Name)
+			framework.RemoveClusterRoleBinding(clusterClient, tomClusterRoleBindingOnMember.Name)
+		})
+
+		ginkgo.It("tom access the member cluster", func() {
+			ginkgo.By("access the cluster `/api` path with right", func() {
+				gomega.Eventually(func(g gomega.Gomega) (int, error) {
+					code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api", clusterName), tomToken)
+					g.Expect(err).ShouldNot(gomega.HaveOccurred())
+					return code, nil
+				}, pollTimeout, pollInterval).Should(gomega.Equal(http.StatusOK))
+			})
+
+			ginkgo.By("access the cluster /api/v1/nodes path without right", func() {
+				code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api/v1/nodes", clusterName), tomToken)
+				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+				gomega.Expect(code).Should(gomega.Equal(http.StatusForbidden))
+			})
+
+			ginkgo.By(fmt.Sprintf("create rbac in the %s cluster", clusterName), func() {
+				framework.CreateClusterRole(clusterClient, tomClusterRoleOnMember)
+				framework.CreateClusterRoleBinding(clusterClient, tomClusterRoleBindingOnMember)
+			})
+
+			ginkgo.By(fmt.Sprintf("access the %s /api/v1/nodes path with right", clusterName), func() {
+				gomega.Eventually(func(g gomega.Gomega) (int, error) {
+					code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api/v1/nodes", clusterName), tomToken)
+					g.Expect(err).ShouldNot(gomega.HaveOccurred())
+					return code, nil
+				}, pollTimeout, pollInterval).Should(gomega.Equal(http.StatusOK))
+			})
+		})
+	})
 })