mirror of https://github.com/linkerd/linkerd2.git
Require cluster-wide k8s API access (#2428)
linkerd/linkerd2#2349 removed the `--single-namespace` flag in favor of runtime detection of cluster vs. namespace access, and of ServiceProfile availability. This maintained control-plane support for running in either of these two states.

This change requires that control-plane components have cluster-wide Kubernetes API access and ServiceProfile availability, and errors out at startup if they do not. Once #2349 merges, stage 1 install will be a requirement for a successful stage 2 install.

Part of #2337

Signed-off-by: Andrew Seigner <siggy@buoyant.io>
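In practice this means each control-plane `main` now fails fast at startup rather than silently degrading to namespace-scoped informers. A minimal sketch of the new startup contract, assembled from the hunks below (the `kubeConfigPath` flag and the exact resource list vary per component; the error strings are the ones introduced in this diff):

k8sAPI, err := k8s.InitializeAPI(*kubeConfigPath, k8s.Pod, k8s.RS, k8s.SP)
if err != nil {
	// surfaces "not authorized for cluster-wide access" or
	// "not authorized for ServiceProfile access"
	log.Fatalf("Failed to initialize K8s API: %s", err)
}
k8sAPI.Sync() // block until the requested informer caches have synced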
This commit is contained in:
parent 8f6c63d5ea
commit 8da2cd3fd4

@@ -470,7 +470,7 @@ spec:
 	} {
 		tt := tt // pin
 		t.Run("subscribes listener to "+tt.serviceType, func(t *testing.T) {
-			k8sAPI, err := k8s.NewFakeAPI("", tt.k8sConfigs...)
+			k8sAPI, err := k8s.NewFakeAPI(tt.k8sConfigs...)
 			if err != nil {
 				t.Fatalf("NewFakeAPI returned an error: %s", err)
 			}

@@ -15,23 +15,20 @@ var containsAlphaRegexp = regexp.MustCompile("[a-zA-Z]")

 // implements the streamingDestinationResolver interface
 type k8sResolver struct {
-	k8sDNSZoneLabels    []string
-	controllerNamespace string
-	endpointsWatcher    *endpointsWatcher
-	profileWatcher      *profileWatcher
+	k8sDNSZoneLabels []string
+	endpointsWatcher *endpointsWatcher
+	profileWatcher   *profileWatcher
 }

 func newK8sResolver(
 	k8sDNSZoneLabels []string,
-	controllerNamespace string,
 	ew *endpointsWatcher,
 	pw *profileWatcher,
 ) *k8sResolver {
 	return &k8sResolver{
-		k8sDNSZoneLabels:    k8sDNSZoneLabels,
-		controllerNamespace: controllerNamespace,
-		endpointsWatcher:    ew,
-		profileWatcher:      pw,
+		k8sDNSZoneLabels: k8sDNSZoneLabels,
+		endpointsWatcher: ew,
+		profileWatcher:   pw,
 	}
 }

@@ -55,7 +55,7 @@ func TestK8sResolver(t *testing.T) {
 }

 func TestGetState(t *testing.T) {
-	k8sAPI, err := k8s.NewFakeAPI("")
+	k8sAPI, err := k8s.NewFakeAPI()
 	if err != nil {
 		t.Fatalf("NewFakeAPI returned an error: %s", err)
 	}

@@ -104,7 +104,6 @@ func TestGetState(t *testing.T) {
 		endpointsWatcher.servicePorts = tt.servicePorts
 		resolver := newK8sResolver(
 			[]string{"some", "namespace"},
-			"controller-ns",
 			endpointsWatcher,
 			newProfileWatcher(k8sAPI),
 		)

@@ -67,7 +67,7 @@ spec:
 	} {
 		tt := tt // pin
 		t.Run(tt.name, func(t *testing.T) {
-			k8sAPI, err := k8s.NewFakeAPI("", tt.k8sConfigs...)
+			k8sAPI, err := k8s.NewFakeAPI(tt.k8sConfigs...)
 			if err != nil {
 				t.Fatalf("NewFakeAPI returned an error: %s", err)
 			}

@@ -11,7 +11,6 @@ import (
 	discoveryPb "github.com/linkerd/linkerd2/controller/gen/controller/discovery"
 	"github.com/linkerd/linkerd2/controller/k8s"
 	"github.com/linkerd/linkerd2/pkg/addr"
-	pkgK8s "github.com/linkerd/linkerd2/pkg/k8s"
 	"github.com/linkerd/linkerd2/pkg/prometheus"
 	log "github.com/sirupsen/logrus"
 	"google.golang.org/grpc"

@@ -45,7 +44,7 @@ func NewServer(
 	k8sAPI *k8s.API,
 	done chan struct{},
 ) (*grpc.Server, error) {
-	resolver, err := buildResolver(k8sDNSZone, controllerNS, k8sAPI)
+	resolver, err := buildResolver(k8sDNSZone, k8sAPI)
 	if err != nil {
 		return nil, err
 	}

@@ -192,13 +191,11 @@ func getHostAndPort(dest *pb.GetDestination) (string, int, error) {
 }

 func buildResolver(
-	k8sDNSZone, controllerNS string,
+	k8sDNSZone string,
 	k8sAPI *k8s.API,
 ) (streamingDestinationResolver, error) {
-	var k8sDNSZoneLabels []string
-	if k8sDNSZone == "" {
-		k8sDNSZoneLabels = []string{}
-	} else {
+	k8sDNSZoneLabels := []string{}
+	if k8sDNSZone != "" {
 		var err error
 		k8sDNSZoneLabels, err = splitDNSName(k8sDNSZone)
 		if err != nil {

@@ -206,16 +203,7 @@ func buildResolver(
 		}
 	}

-	var pw *profileWatcher
-	serviceProfiles, err := pkgK8s.ServiceProfilesAccess(k8sAPI.Client)
-	if err != nil {
-		return nil, err
-	}
-	if serviceProfiles {
-		pw = newProfileWatcher(k8sAPI)
-	}
-
-	k8sResolver := newK8sResolver(k8sDNSZoneLabels, controllerNS, newEndpointsWatcher(k8sAPI), pw)
+	k8sResolver := newK8sResolver(k8sDNSZoneLabels, newEndpointsWatcher(k8sAPI), newProfileWatcher(k8sAPI))

 	log.Infof("Built k8s name resolver")

@@ -49,7 +49,7 @@ func (m *mockDestinationServer) SendMsg(x interface{}) error { return m.errorToReturn }
 func (m *mockDestinationServer) RecvMsg(x interface{}) error { return m.errorToReturn }

 func TestBuildResolver(t *testing.T) {
-	k8sAPI, err := k8s.NewFakeAPI("")
+	k8sAPI, err := k8s.NewFakeAPI()
 	if err != nil {
 		t.Fatalf("NewFakeAPI returned an error: %s", err)
 	}

@@ -57,7 +57,7 @@ func TestBuildResolver(t *testing.T) {
 	t.Run("Doesn't build a resolver if Kubernetes DNS zone isnt valid", func(t *testing.T) {
 		invalidK8sDNSZones := []string{"1", "-a", "a-", "-"}
 		for _, dsnZone := range invalidK8sDNSZones {
-			resolver, err := buildResolver(dsnZone, "linkerd", k8sAPI)
+			resolver, err := buildResolver(dsnZone, k8sAPI)
 			if err == nil {
 				t.Fatalf("Expecting error when k8s zone is [%s], got nothing. Resolver: %v", dsnZone, resolver)
 			}

@@ -100,7 +100,7 @@ func TestStreamResolutionUsingCorrectResolverFor(t *testing.T) {
 	stream := &mockDestinationGetServer{}
 	host := "something"
 	port := 666
-	k8sAPI, err := k8s.NewFakeAPI("")
+	k8sAPI, err := k8s.NewFakeAPI()
 	if err != nil {
 		t.Fatalf("NewFakeAPI returned an error: %s", err)
 	}

@@ -149,7 +149,7 @@ func TestStreamResolutionUsingCorrectResolverFor(t *testing.T) {
 }

 func TestEndpoints(t *testing.T) {
-	k8sAPI, err := k8s.NewFakeAPI("")
+	k8sAPI, err := k8s.NewFakeAPI()
 	if err != nil {
 		t.Fatalf("NewFakeAPI returned an error: %s", err)
 	}

@@ -392,7 +392,7 @@ status:
 	}

 	for _, exp := range expectations {
-		k8sAPI, err := k8s.NewFakeAPI("", exp.k8sRes...)
+		k8sAPI, err := k8s.NewFakeAPI(exp.k8sRes...)
 		if err != nil {
 			t.Fatalf("NewFakeAPI returned an error: %s", err)
 		}

@@ -496,7 +496,7 @@ metadata:
 	}

 	for _, exp := range expectations {
-		k8sAPI, err := k8s.NewFakeAPI("", exp.k8sRes...)
+		k8sAPI, err := k8s.NewFakeAPI(exp.k8sRes...)
 		if err != nil {
 			t.Fatalf("NewFakeAPI returned an error: %s", err)
 		}

@@ -541,7 +541,7 @@ func TestEndpoints(t *testing.T) {
 	}

 	for _, exp := range expectations {
-		k8sAPI, err := k8s.NewFakeAPI("")
+		k8sAPI, err := k8s.NewFakeAPI()
 		if err != nil {
 			t.Fatalf("NewFakeAPI returned an error: %s", err)
 		}

@@ -1043,7 +1043,7 @@ status:
 	})

 	t.Run("Given an invalid resource type, returns error", func(t *testing.T) {
-		k8sAPI, err := k8s.NewFakeAPI("")
+		k8sAPI, err := k8s.NewFakeAPI()
 		if err != nil {
 			t.Fatalf("NewFakeAPI returned an error: %s", err)
 		}

@@ -1109,7 +1109,7 @@ status:
 	})

 	t.Run("Validates service stat requests", func(t *testing.T) {
-		k8sAPI, err := k8s.NewFakeAPI("")
+		k8sAPI, err := k8s.NewFakeAPI()
 		if err != nil {
 			t.Fatalf("NewFakeAPI returned an error: %s", err)
 		}

@@ -278,7 +278,7 @@ type expectedStatRPC struct {
 }

 func newMockGrpcServer(exp expectedStatRPC) (*mockProm, *grpcServer, error) {
-	k8sAPI, err := k8s.NewFakeAPI("", exp.k8sConfigs...)
+	k8sAPI, err := k8s.NewFakeAPI(exp.k8sConfigs...)
 	if err != nil {
 		return nil, nil, err
 	}

@@ -61,7 +61,7 @@ func TestCertificateController(t *testing.T) {
 }

 func newController(fixtures ...string) (*CertificateController, chan bool, chan struct{}, error) {
-	k8sAPI, err := k8s.NewFakeAPI("", fixtures...)
+	k8sAPI, err := k8s.NewFakeAPI(fixtures...)
 	if err != nil {
 		return nil, nil, nil, fmt.Errorf("NewFakeAPI returned an error: %s", err)
 	}

@@ -22,7 +22,7 @@ func main() {
 	stop := make(chan os.Signal, 1)
 	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)

-	k8sAPI, err := k8s.InitializeAPI(*kubeConfigPath, *controllerNamespace, k8s.Pod, k8s.RS)
+	k8sAPI, err := k8s.InitializeAPI(*kubeConfigPath, k8s.Pod, k8s.RS)
 	if err != nil {
 		log.Fatalf("Failed to initialize K8s API: %s", err)
 	}

@@ -28,7 +28,7 @@ func main() {
 	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)

 	k8sAPI, err := k8s.InitializeAPI(
-		*kubeConfigPath, *controllerNamespace,
+		*kubeConfigPath,
 		k8s.Endpoint, k8s.Pod, k8s.RS, k8s.Svc, k8s.SP,
 	)
 	if err != nil {

@@ -45,8 +45,8 @@ func main() {
 	defer discoveryConn.Close()

 	k8sAPI, err := k8s.InitializeAPI(
-		*kubeConfigPath, *controllerNamespace,
-		k8s.DS, k8s.Deploy, k8s.Job, k8s.Pod, k8s.RC, k8s.RS, k8s.Svc, k8s.SS, k8s.SP,
+		*kubeConfigPath,
+		k8s.DS, k8s.Deploy, k8s.Job, k8s.NS, k8s.Pod, k8s.RC, k8s.RS, k8s.Svc, k8s.SS, k8s.SP,
 	)
 	if err != nil {
 		log.Fatalf("Failed to initialize K8s API: %s", err)

@@ -25,11 +25,12 @@ func main() {
 	signal.Notify(stop, os.Interrupt, syscall.SIGTERM)

 	k8sAPI, err := k8s.InitializeAPI(
-		*kubeConfigPath, *controllerNamespace,
+		*kubeConfigPath,
 		k8s.DS,
 		k8s.SS,
 		k8s.Deploy,
 		k8s.Job,
+		k8s.NS,
 		k8s.Pod,
 		k8s.RC,
 		k8s.Svc,

@@ -2,6 +2,7 @@ package k8s

 import (
 	"context"
+	"errors"
 	"fmt"
 	"strings"
 	"time"

@@ -44,6 +45,7 @@ const (
 	Endpoint
 	Job
 	MWC // mutating webhook configuration
+	NS
 	Pod
 	RC
 	RS

@@ -62,6 +64,7 @@ type API struct {
 	endpoint coreinformers.EndpointsInformer
 	job      batchv1informers.JobInformer
 	mwc      arinformers.MutatingWebhookConfigurationInformer
+	ns       coreinformers.NamespaceInformer
 	pod      coreinformers.PodInformer
 	rc       coreinformers.ReplicationControllerInformer
 	rs       appv1beta2informers.ReplicaSetInformer

@@ -72,82 +75,54 @@ type API struct {
 	syncChecks        []cache.InformerSynced
 	sharedInformers   informers.SharedInformerFactory
 	spSharedInformers sp.SharedInformerFactory
-	namespace         string
 }

 // InitializeAPI creates Kubernetes clients and returns an initialized API wrapper.
-func InitializeAPI(kubeConfig string, namespace string, resources ...APIResource) (*API, error) {
+func InitializeAPI(kubeConfig string, resources ...APIResource) (*API, error) {
 	k8sClient, err := NewClientSet(kubeConfig)
 	if err != nil {
 		return nil, err
 	}

-	// check for cluster-wide vs. namespace-wide access
-	clusterAccess, err := k8s.ClusterAccess(k8sClient, namespace)
+	// check for cluster-wide access
+	clusterAccess, err := k8s.ClusterAccess(k8sClient)
 	if err != nil {
 		return nil, err
 	}
-	restrictToNamespace := ""
 	if !clusterAccess {
-		log.Warnf("Not authorized for cluster-wide access, limiting access to \"%s\" namespace", namespace)
-		restrictToNamespace = namespace
+		return nil, fmt.Errorf("not authorized for cluster-wide access")
 	}

 	// check for need and access to ServiceProfiles
 	var spClient *spclient.Clientset
-	idxSP := 0
-	needSP := false
-	for i := range resources {
-		if resources[i] == SP {
-			needSP = true
-			idxSP = i
-			break
-		}
-	}
-	if needSP {
-		serviceProfiles, err := k8s.ServiceProfilesAccess(k8sClient)
-		if err != nil {
-			return nil, err
-		}
-		if serviceProfiles {
+	for _, res := range resources {
+		if res == SP {
+			serviceProfiles, err := k8s.ServiceProfilesAccess(k8sClient)
+			if err != nil {
+				return nil, err
+			}
+			if !serviceProfiles {
+				return nil, errors.New("not authorized for ServiceProfile access")
+			}
+
 			spClient, err = NewSpClientSet(kubeConfig)
 			if err != nil {
 				return nil, err
 			}
-		} else {
-			log.Warn("ServiceProfiles not available")
-			// remove SP from resources list
-			resources = append(resources[:idxSP], resources[idxSP+1:]...)
+
+			break
 		}
 	}

-	return NewAPI(k8sClient, spClient, restrictToNamespace, resources...), nil
+	return NewAPI(k8sClient, spClient, resources...), nil
 }

 // NewAPI takes a Kubernetes client and returns an initialized API.
-func NewAPI(k8sClient kubernetes.Interface, spClient spclient.Interface, namespace string, resources ...APIResource) *API {
-	var sharedInformers informers.SharedInformerFactory
+func NewAPI(k8sClient kubernetes.Interface, spClient spclient.Interface, resources ...APIResource) *API {
+	sharedInformers := informers.NewSharedInformerFactory(k8sClient, 10*time.Minute)
+
 	var spSharedInformers sp.SharedInformerFactory
-	if namespace == "" {
-		sharedInformers = informers.NewSharedInformerFactory(k8sClient, 10*time.Minute)
-		if spClient != nil {
-			spSharedInformers = sp.NewSharedInformerFactory(spClient, 10*time.Minute)
-		}
-	} else {
-		sharedInformers = informers.NewFilteredSharedInformerFactory(
-			k8sClient,
-			10*time.Minute,
-			namespace,
-			nil,
-		)
-		if spClient != nil {
-			spSharedInformers = sp.NewFilteredSharedInformerFactory(
-				spClient,
-				10*time.Minute,
-				namespace,
-				nil,
-			)
-		}
+	if spClient != nil {
+		spSharedInformers = sp.NewSharedInformerFactory(spClient, 10*time.Minute)
 	}

 	api := &API{

@@ -155,7 +130,6 @@ func NewAPI(k8sClient kubernetes.Interface, spClient spclient.Interface, namespace string, resources ...APIResource) *API {
 		syncChecks:        make([]cache.InformerSynced, 0),
 		sharedInformers:   sharedInformers,
 		spSharedInformers: spSharedInformers,
-		namespace:         namespace,
 	}

 	for _, resource := range resources {

@@ -178,6 +152,9 @@ func NewAPI(k8sClient kubernetes.Interface, spClient spclient.Interface, namespace string, resources ...APIResource) *API {
 		case MWC:
 			api.mwc = sharedInformers.Admissionregistration().V1beta1().MutatingWebhookConfigurations()
 			api.syncChecks = append(api.syncChecks, api.mwc.Informer().HasSynced)
+		case NS:
+			api.ns = sharedInformers.Core().V1().Namespaces()
+			api.syncChecks = append(api.syncChecks, api.ns.Informer().HasSynced)
 		case Pod:
 			api.pod = sharedInformers.Core().V1().Pods()
 			api.syncChecks = append(api.syncChecks, api.pod.Informer().HasSynced)

@@ -217,6 +194,14 @@ func (api *API) Sync() {
 	log.Infof("caches synced")
 }

+// NS provides access to a shared informer and lister for Namespaces.
+func (api *API) NS() coreinformers.NamespaceInformer {
+	if api.ns == nil {
+		panic("NS informer not configured")
+	}
+	return api.ns
+}
+
 // Deploy provides access to a shared informer and lister for Deployments.
 func (api *API) Deploy() appv1beta2informers.DeploymentInformer {
 	if api.deploy == nil {

@@ -499,27 +484,18 @@ func GetNamespaceOf(obj runtime.Object) (string, error) {
 }

 // getNamespaces returns the namespace matching the specified name. If no name
-// is given, it returns all namespaces, unless the API was configured to only
-// work with a single namespace, in which case it returns that namespace. Note
-// that namespace reads are not cached.
+// is given, it returns all namespaces.
 func (api *API) getNamespaces(name string) ([]runtime.Object, error) {
-	namespaces := make([]*corev1.Namespace, 0)
-
-	if name == "" && api.namespace != "" {
-		name = api.namespace
-	}
+	var namespaces []*corev1.Namespace

 	if name == "" {
-		namespaceList, err := api.Client.CoreV1().Namespaces().List(metav1.ListOptions{})
+		var err error
+		namespaces, err = api.NS().Lister().List(labels.Everything())
 		if err != nil {
 			return nil, err
 		}
-		for _, item := range namespaceList.Items {
-			ns := item // must create separate var in order to get unique pointers
-			namespaces = append(namespaces, &ns)
-		}
 	} else {
-		namespace, err := api.Client.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
+		namespace, err := api.NS().Lister().Get(name)
 		if err != nil {
 			return nil, err
 		}

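Two things are worth noting about the namespace hunks above. First, the old getNamespaces issued uncached List/Get calls against the API server on every invocation (as its removed comment warned), while the new version reads from the namespace informer's lister cache; this is why NS now has to be wired into every API instance that resolves namespaces, including the fake API used by the tests. Second, with the namespace restriction gone, NewAPI collapses to a single cluster-wide informer factory. A minimal sketch of that client-go wiring (10-minute resync as in the diff; variable names are illustrative):

factory := informers.NewSharedInformerFactory(k8sClient, 10*time.Minute)
nsInformer := factory.Core().V1().Namespaces() // backs the new api.NS() accessor
nsLister := nsInformer.Lister()                // cached reads, as used by getNamespaces
namespaces, err := nsLister.List(labels.Everything())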
@@ -29,7 +29,7 @@ func newAPI(resourceConfigs []string, extraConfigs ...string) (*API, []runtime.Object, error) {
 	k8sConfigs = append(k8sConfigs, extraConfigs...)

-	api, err := NewFakeAPI("", k8sConfigs...)
+	api, err := NewFakeAPI(k8sConfigs...)
 	if err != nil {
 		return nil, nil, fmt.Errorf("NewFakeAPI returned an error: %s", err)
 	}

@@ -253,41 +253,6 @@ metadata:
 			}
 		})

-		t.Run("In single-namespace mode", func(t *testing.T) {
-			t.Run("Returns only the configured namespace", func(t *testing.T) {
-
-				ns1 := `
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: namespace1`
-
-				ns2 := `
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: namespace2`
-
-				api, err := NewFakeAPI("namespace1", ns1, ns2)
-				if err != nil {
-					t.Fatalf("NewFakeAPI returned an error: %s", err)
-				}
-
-				namespaces, err := api.GetObjects("", k8s.Namespace, "")
-				if err != nil {
-					t.Fatalf("unexpected error: %s", err)
-				}
-
-				if len(namespaces) != 1 {
-					t.Fatalf("expected 1 namespace, got %d", len(namespaces))
-				}
-
-				if namespaces[0].(*corev1.Namespace).Name != "namespace1" {
-					t.Fatalf("expected namespace1, got %v", namespaces[0])
-				}
-			})
-		})
-
 	t.Run("If objects are pods", func(t *testing.T) {
 		t.Run("Return running or pending pods", func(t *testing.T) {
 			expectations := []getObjectsExpected{

@@ -5,7 +5,7 @@ import (
 )

 // NewFakeAPI provides a mock Kubernetes API for testing.
-func NewFakeAPI(namespace string, configs ...string) (*API, error) {
+func NewFakeAPI(configs ...string) (*API, error) {
 	clientSet, spClientSet, err := k8s.NewFakeClientSets(configs...)
 	if err != nil {
 		return nil, err

@@ -14,18 +14,18 @@ func NewFakeAPI(namespace string, configs ...string) (*API, error) {
 	return NewAPI(
 		clientSet,
 		spClientSet,
-		namespace,
 		CM,
 		Deploy,
 		DS,
-		SS,
 		Endpoint,
 		Job,
+		MWC,
+		NS,
 		Pod,
 		RC,
 		RS,
-		Svc,
 		SP,
-		MWC,
+		SS,
+		Svc,
 	), nil
 }

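Call sites shrink accordingly. A representative test setup after this change (the Namespace manifest here is illustrative, not taken from the diff):

k8sAPI, err := k8s.NewFakeAPI(`
apiVersion: v1
kind: Namespace
metadata:
  name: linkerd`,
)
if err != nil {
	t.Fatalf("NewFakeAPI returned an error: %s", err)
}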
@@ -184,7 +184,7 @@ status:
 	}

 	for _, exp := range expectations {
-		k8sAPI, err := k8s.NewFakeAPI("", exp.k8sRes...)
+		k8sAPI, err := k8s.NewFakeAPI(exp.k8sRes...)
 		if err != nil {
 			t.Fatalf("NewFakeAPI returned an error: %s", err)
 		}

@@ -1,9 +1,11 @@
 package k8s

 import (
+	"errors"
 	"fmt"

 	authV1 "k8s.io/api/authorization/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes"
 )

@@ -49,67 +51,45 @@ func ServiceProfilesAccess(k8sClient kubernetes.Interface) (bool, error) {
 		if r.GroupVersion == ServiceProfileAPIVersion {
 			for _, apiRes := range r.APIResources {
 				if apiRes.Kind == ServiceProfileKind {
-					// TODO: Modify this to honor the error returned, once we give the
-					// control-plane namespace-wide access to ServiceProfiles.
-					access, _ := resourceAccess(k8sClient, "", "linkerd.io", "serviceprofiles")
-					return access, nil
+					return resourceAccess(k8sClient, schema.GroupKind{
+						Group: "linkerd.io",
+						Kind:  "serviceprofiles",
+					})
 				}
 			}
 		}
 	}

-	return false, nil
+	return false, errors.New("ServiceProfiles not available")
 }

-// ClusterAccess verifies whether k8sClient is authorized to access all
-// namespaces in the cluster, or only the given namespace. If k8sClient does not
-// have at least namespace-wide access, it returns an error.
-func ClusterAccess(k8sClient kubernetes.Interface, namespace string) (bool, error) {
-	return resourceAccess(k8sClient, namespace, "", "pods")
+// ClusterAccess verifies whether k8sClient is authorized to access all pods in
+// all namespaces in the cluster.
+func ClusterAccess(k8sClient kubernetes.Interface) (bool, error) {
+	return resourceAccess(k8sClient, schema.GroupKind{Kind: "pods"})
 }

 // resourceAccess verifies whether k8sClient is authorized to access a resource
-// in all namespaces in the cluster, or only the given namespace. If k8sClient
-// does not have at least namespace-wide access, it returns an error.
-func resourceAccess(k8sClient kubernetes.Interface, namespace, group, resource string) (bool, error) {
-	// first check for cluster-wide access
-	allowed, _, err := ResourceAuthz(
+// in all namespaces in the cluster.
+func resourceAccess(k8sClient kubernetes.Interface, gk schema.GroupKind) (bool, error) {
+	allowed, reason, err := ResourceAuthz(
 		k8sClient,
 		"",
 		"list",
-		group,
+		gk.Group,
 		"",
-		resource,
+		gk.Kind,
 		"",
 	)
 	if err != nil {
 		return false, err
 	}
 	if allowed {
-		// authorized for cluster-wide access
 		return true, nil
 	}

-	// next check for namespace-wide access
-	allowed, reason, err := ResourceAuthz(
-		k8sClient,
-		namespace,
-		"list",
-		group,
-		"",
-		resource,
-		"",
-	)
-	if err != nil {
-		return false, err
-	}
-	if allowed {
-		// authorized for namespace-wide access
-		return false, nil
-	}
-
 	if len(reason) > 0 {
-		return false, fmt.Errorf("not authorized to access \"%s\" namespace: %s", namespace, reason)
+		return false, fmt.Errorf("not authorized to access %s: %s", gk, reason)
 	}
-	return false, fmt.Errorf("not authorized to access \"%s\" namespace", namespace)
+	return false, fmt.Errorf("not authorized to access %s", gk)
 }

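For context on the simplified resourceAccess: a cluster-wide authorization probe of this shape boils down to a single SelfSubjectAccessReview with an empty namespace. A hedged sketch of what ResourceAuthz presumably issues under the hood (pre-context client-go Create signature assumed; not the literal implementation):

sar := &authV1.SelfSubjectAccessReview{
	Spec: authV1.SelfSubjectAccessReviewSpec{
		ResourceAttributes: &authV1.ResourceAttributes{
			Namespace: "", // empty namespace asks about all namespaces
			Verb:      "list",
			Group:     gk.Group,
			Resource:  gk.Kind,
		},
	},
}
resp, err := k8sClient.AuthorizationV1().SelfSubjectAccessReviews().Create(sar)
if err != nil {
	return false, err
}
// resp.Status.Allowed and resp.Status.Reason map to (allowed, reason) above.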