Remove single namespace functionality (#2474)

linkerd/linkerd2#1721 introduced a `--single-namespace` install flag,
enabling the control-plane to function within a single namespace. With
the introduction of ServiceProfiles, and upcoming identity changes, this
single namespace mode of operation is becoming less viable.

This change removes the `--single-namespace` install flag, and all
underlying support. The control-plane must have cluster-wide access to
operate.

A few related changes:
- Remove `--single-namespace` from `linkerd check`, this motivates
  combining some check categories, as we can always assume cluster-wide
  requirements.
- Simplify the `k8s.ResourceAuthz` API, as callers no longer need to
  make a decision based on cluster-wide vs. namespace-wide access.
  Components either have access, or they error out.
- Modify the web dashboard to always assume ServiceProfiles are enabled.

Reverts #1721
Part of #2337

Signed-off-by: Andrew Seigner <siggy@buoyant.io>
This commit is contained in:
Andrew Seigner 2019-03-12 00:17:22 -07:00 committed by GitHub
parent 52d44b6d4d
commit e5d2460792
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
36 changed files with 83 additions and 1589 deletions

View File

@ -226,7 +226,7 @@ When kubernetes templates change, several test fixtures usually need to be updat
regenerated with the command:
```sh
go test ./... -update
go test ./cli/cmd/... --update
```
##### Pretty-printed diffs for templated text
@ -235,7 +235,7 @@ go test ./... -update
When running `go test`, mismatched text is usually displayed as a compact
diff. If you prefer to see the full text of the mismatch with colorized
output, you can set the `LINKERD_TEST_PRETTY_DIFF` environment variable or
run `go test ./... -pretty-diff`.
run `go test ./cli/cmd/... --pretty-diff`.
### Web

View File

@ -10,13 +10,10 @@ metadata:
name: linkerd-ca
namespace: {{.Values.Namespace}}
---
kind: {{if not .Values.SingleNamespace}}Cluster{{end}}Role
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-{{.Values.Namespace}}-ca
{{- if .Values.SingleNamespace}}
namespace: {{.Values.Namespace}}
{{- end}}
rules:
- apiGroups: [""]
resources: ["configmaps"]
@ -35,16 +32,13 @@ rules:
resources: ["secrets"]
verbs: ["create", "update"]
---
kind: {{if not .Values.SingleNamespace}}Cluster{{end}}RoleBinding
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-{{.Values.Namespace}}-ca
{{- if .Values.SingleNamespace}}
namespace: {{.Values.Namespace}}
{{- end}}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: {{if not .Values.SingleNamespace}}Cluster{{end}}Role
kind: ClusterRole
name: linkerd-{{.Values.Namespace}}-ca
subjects:
- kind: ServiceAccount

View File

@ -9,13 +9,10 @@ metadata:
name: linkerd-controller
namespace: {{.Values.Namespace}}
---
kind: {{if not .Values.SingleNamespace}}Cluster{{end}}Role
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-{{.Values.Namespace}}-controller
{{- if .Values.SingleNamespace}}
namespace: {{.Values.Namespace}}
{{- end}}
rules:
- apiGroups: ["extensions", "apps"]
resources: ["daemonsets", "deployments", "replicasets", "statefulsets"]
@ -24,29 +21,19 @@ rules:
resources: ["jobs"]
verbs: ["list" , "get", "watch"]
- apiGroups: [""]
resources: ["pods", "endpoints", "services", "replicationcontrollers"{{if not .Values.SingleNamespace}}, "namespaces"{{end}}]
resources: ["pods", "endpoints", "services", "replicationcontrollers", "namespaces"]
verbs: ["list", "get", "watch"]
{{- if .Values.SingleNamespace }}
- apiGroups: [""]
resources: ["namespaces"]
resourceNames: ["{{.Values.Namespace}}"]
verbs: ["list", "get", "watch"]
{{- else }}
- apiGroups: ["linkerd.io"]
resources: ["serviceprofiles"]
verbs: ["list", "get", "watch"]
{{- end }}
---
kind: {{if not .Values.SingleNamespace}}Cluster{{end}}RoleBinding
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-{{.Values.Namespace}}-controller
{{- if .Values.SingleNamespace}}
namespace: {{.Values.Namespace}}
{{- end}}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: {{if not .Values.SingleNamespace}}Cluster{{end}}Role
kind: ClusterRole
name: linkerd-{{.Values.Namespace}}-controller
subjects:
- kind: ServiceAccount

View File

@ -1,4 +1,3 @@
{{ if not .Values.SingleNamespace -}}
---
kind: Namespace
apiVersion: v1
@ -8,5 +7,3 @@ metadata:
annotations:
{{.Values.ProxyInjectAnnotation}}: {{.Values.ProxyInjectDisabled}}
{{- end }}
{{ end -}}

View File

@ -9,28 +9,22 @@ metadata:
name: linkerd-prometheus
namespace: {{.Values.Namespace}}
---
kind: {{if not .Values.SingleNamespace}}Cluster{{end}}Role
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-{{.Values.Namespace}}-prometheus
{{- if .Values.SingleNamespace}}
namespace: {{.Values.Namespace}}
{{- end}}
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
---
kind: {{if not .Values.SingleNamespace}}Cluster{{end}}RoleBinding
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: linkerd-{{.Values.Namespace}}-prometheus
{{- if .Values.SingleNamespace}}
namespace: {{.Values.Namespace}}
{{- end}}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: {{if not .Values.SingleNamespace}}Cluster{{end}}Role
kind: ClusterRole
name: linkerd-{{.Values.Namespace}}-prometheus
subjects:
- kind: ServiceAccount
@ -171,10 +165,6 @@ data:
- job_name: 'linkerd-proxy'
kubernetes_sd_configs:
- role: pod
{{- if .Values.SingleNamespace}}
namespaces:
names: ['{{.Values.Namespace}}']
{{- end}}
relabel_configs:
- source_labels:
- __meta_kubernetes_pod_container_name

View File

@ -1,4 +1,3 @@
{{- if not .Values.SingleNamespace }}
---
###
### Service Profile CRD
@ -104,4 +103,3 @@ spec:
type: object
not:
type: object
{{- end }}

View File

@ -19,7 +19,6 @@ type checkOptions struct {
dataPlaneOnly bool
wait time.Duration
namespace string
singleNamespace bool
cniEnabled bool
}
@ -30,7 +29,6 @@ func newCheckOptions() *checkOptions {
dataPlaneOnly: false,
wait: 300 * time.Second,
namespace: "",
singleNamespace: false,
cniEnabled: false,
}
}
@ -67,7 +65,6 @@ non-zero exit code.`,
cmd.PersistentFlags().BoolVar(&options.dataPlaneOnly, "proxy", options.dataPlaneOnly, "Only run data-plane checks, to determine if the data plane is healthy")
cmd.PersistentFlags().DurationVar(&options.wait, "wait", options.wait, "Maximum allowed time for all tests to pass")
cmd.PersistentFlags().StringVarP(&options.namespace, "namespace", "n", options.namespace, "Namespace to use for --proxy checks (default: all namespaces)")
cmd.PersistentFlags().BoolVar(&options.singleNamespace, "single-namespace", options.singleNamespace, "When running pre-installation checks (--pre), only check the permissions required to operate the control plane in a single namespace")
cmd.PersistentFlags().BoolVar(&options.cniEnabled, "linkerd-cni-enabled", options.cniEnabled, "When running pre-installation checks (--pre), assume the linkerd-cni plugin is already installed, and a NET_ADMIN check is not needed")
return cmd
@ -85,23 +82,14 @@ func configureAndRunChecks(w io.Writer, options *checkOptions) error {
}
if options.preInstallOnly {
if options.singleNamespace {
checks = append(checks, healthcheck.LinkerdPreInstallSingleNamespaceChecks)
} else {
checks = append(checks, healthcheck.LinkerdPreInstallClusterChecks)
}
checks = append(checks, healthcheck.LinkerdPreInstallChecks)
if !options.cniEnabled {
checks = append(checks, healthcheck.LinkerdPreInstallCapabilityChecks)
}
checks = append(checks, healthcheck.LinkerdPreInstallChecks)
} else {
checks = append(checks, healthcheck.LinkerdControlPlaneExistenceChecks)
checks = append(checks, healthcheck.LinkerdAPIChecks)
if !options.singleNamespace {
checks = append(checks, healthcheck.LinkerdServiceProfileChecks)
}
if options.dataPlaneOnly {
checks = append(checks, healthcheck.LinkerdDataPlaneChecks)
} else {

View File

@ -44,7 +44,6 @@ type installConfig struct {
ProxyAutoInjectEnabled bool
ProxyInjectAnnotation string
ProxyInjectDisabled string
SingleNamespace bool
EnableHA bool
ControllerUID int64
EnableH2Upgrade bool
@ -62,7 +61,6 @@ type installOptions struct {
controllerReplicas uint
controllerLogLevel string
proxyAutoInject bool
singleNamespace bool
highAvailability bool
controllerUID int64
disableH2Upgrade bool
@ -89,7 +87,6 @@ func newInstallOptions() *installOptions {
controllerReplicas: defaultControllerReplicas,
controllerLogLevel: "info",
proxyAutoInject: false,
singleNamespace: false,
highAvailability: false,
controllerUID: 2103,
disableH2Upgrade: false,
@ -118,7 +115,6 @@ func newCmdInstall() *cobra.Command {
cmd.PersistentFlags().UintVar(&options.controllerReplicas, "controller-replicas", options.controllerReplicas, "Replicas of the controller to deploy")
cmd.PersistentFlags().StringVar(&options.controllerLogLevel, "controller-log-level", options.controllerLogLevel, "Log level for the controller and web components")
cmd.PersistentFlags().BoolVar(&options.proxyAutoInject, "proxy-auto-inject", options.proxyAutoInject, "Enable proxy sidecar auto-injection via a webhook (default false)")
cmd.PersistentFlags().BoolVar(&options.singleNamespace, "single-namespace", options.singleNamespace, "Experimental: Configure the control plane to only operate in the installed namespace (default false)")
cmd.PersistentFlags().BoolVar(&options.highAvailability, "ha", options.highAvailability, "Experimental: Enable HA deployment config for the control plane (default false)")
cmd.PersistentFlags().Int64Var(&options.controllerUID, "controller-uid", options.controllerUID, "Run the control plane components under this user ID")
cmd.PersistentFlags().BoolVar(&options.disableH2Upgrade, "disable-h2-upgrade", options.disableH2Upgrade, "Prevents the controller from instructing proxies to perform transparent HTTP/2 upgrading (default false)")
@ -176,7 +172,6 @@ func validateAndBuildConfig(options *installOptions) (*installConfig, error) {
ProxyAutoInjectEnabled: options.proxyAutoInject,
ProxyInjectAnnotation: k8s.ProxyInjectAnnotation,
ProxyInjectDisabled: k8s.ProxyInjectDisabled,
SingleNamespace: options.singleNamespace,
EnableHA: options.highAvailability,
EnableH2Upgrade: !options.disableH2Upgrade,
NoInitContainer: options.noInitContainer,
@ -270,10 +265,6 @@ func (options *installOptions) validate() error {
return fmt.Errorf("--controller-log-level must be one of: panic, fatal, error, warn, info, debug")
}
if options.proxyAutoInject && options.singleNamespace {
return fmt.Errorf("The --proxy-auto-inject and --single-namespace flags cannot both be specified together")
}
return options.proxyConfigOptions.validate()
}

View File

@ -19,8 +19,7 @@ func TestRender(t *testing.T) {
defaultConfig.UUID = "deaab91a-f4ab-448a-b7d1-c832a2fa0a60"
// A configuration that shows that all config setting strings are honored
// by `render()`. Note that `SingleNamespace` is tested in a separate
// configuration, since it's incompatible with `ProxyAutoInjectEnabled`.
// by `render()`.
metaConfig := installConfig{
Namespace: "Namespace",
ControllerImage: "ControllerImage",
@ -50,33 +49,6 @@ func TestRender(t *testing.T) {
ProxyConfig: "ProxyConfig",
}
singleNamespaceConfig := installConfig{
Namespace: "Namespace",
ControllerImage: "ControllerImage",
WebImage: "WebImage",
PrometheusImage: "PrometheusImage",
PrometheusVolumeName: "data",
GrafanaImage: "GrafanaImage",
GrafanaVolumeName: "data",
ControllerReplicas: 1,
ImagePullPolicy: "ImagePullPolicy",
UUID: "UUID",
CliVersion: "CliVersion",
ControllerLogLevel: "ControllerLogLevel",
ControllerComponentLabel: "ControllerComponentLabel",
CreatedByAnnotation: "CreatedByAnnotation",
ControllerUID: 2103,
EnableTLS: true,
TLSTrustAnchorConfigMapName: "TLSTrustAnchorConfigMapName",
ProxyContainerName: "ProxyContainerName",
TLSTrustAnchorFileName: "TLSTrustAnchorFileName",
SingleNamespace: true,
EnableH2Upgrade: true,
NoInitContainer: false,
GlobalConfig: "GlobalConfig",
ProxyConfig: "ProxyConfig",
}
haOptions := newInstallOptions()
haOptions.highAvailability = true
haConfig, _ := validateAndBuildConfig(haOptions)
@ -110,7 +82,6 @@ func TestRender(t *testing.T) {
}{
{*defaultConfig, defaultOptions, defaultControlPlaneNamespace, "install_default.golden"},
{metaConfig, defaultOptions, metaConfig.Namespace, "install_output.golden"},
{singleNamespaceConfig, defaultOptions, singleNamespaceConfig.Namespace, "install_single_namespace_output.golden"},
{*haConfig, haOptions, haConfig.Namespace, "install_ha_output.golden"},
{*haWithOverridesConfig, haWithOverridesOptions, haWithOverridesConfig.Namespace, "install_ha_with_overrides_output.golden"},
{*noInitContainerConfig, noInitContainerOptions, noInitContainerConfig.Namespace, "install_no_init_container.golden"},
@ -185,19 +156,4 @@ func TestValidate(t *testing.T) {
}
}
})
t.Run("Rejects single namespace install with auto inject", func(t *testing.T) {
options := newInstallOptions()
options.proxyAutoInject = true
options.singleNamespace = true
expected := "The --proxy-auto-inject and --single-namespace flags cannot both be specified together"
err := options.validate()
if err == nil {
t.Fatalf("Expected error, got nothing")
}
if err.Error() != expected {
t.Fatalf("Expected error string\"%s\", got \"%s\"", expected, err)
}
})
}

View File

@ -281,7 +281,6 @@ data:
{"linkerdNamespace":"linkerd","cniEnabled":false,"version":"dev-undefined","identityContext":null}
proxy: |
{"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"metricsPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd2_proxy=info"},"disableExternalProfiles":false}
---
###
### Service Profile CRD

View File

@ -293,7 +293,6 @@ data:
{"linkerdNamespace":"linkerd","cniEnabled":false,"version":"dev-undefined","identityContext":null}
proxy: |
{"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"metricsPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"10m","requestMemory":"20Mi","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd2_proxy=info"},"disableExternalProfiles":false}
---
###
### Service Profile CRD

View File

@ -293,7 +293,6 @@ data:
{"linkerdNamespace":"linkerd","cniEnabled":false,"version":"dev-undefined","identityContext":null}
proxy: |
{"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"metricsPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"400m","requestMemory":"300Mi","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd2_proxy=info"},"disableExternalProfiles":false}
---
###
### Service Profile CRD

View File

@ -257,7 +257,6 @@ data:
{"linkerdNamespace":"linkerd","cniEnabled":true,"version":"dev-undefined","identityContext":null}
proxy: |
{"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"metricsPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd2_proxy=info"},"disableExternalProfiles":false}
---
###
### Service Profile CRD

View File

@ -286,7 +286,6 @@ data:
{"linkerdNamespace":"linkerd","cniEnabled":true,"version":"dev-undefined","identityContext":{}}
proxy: |
{"proxyImage":{"imageName":"gcr.io/linkerd-io/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"gcr.io/linkerd-io/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"metricsPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","logLevel":{"level":"warn,linkerd2_proxy=info"},"disableExternalProfiles":false}
---
###
### Service Profile CRD

View File

@ -284,7 +284,6 @@ data:
GlobalConfig
proxy: |
ProxyConfig
---
###
### Service Profile CRD

File diff suppressed because it is too large Load Diff

View File

@ -69,15 +69,6 @@ func (k *k8sResolver) streamResolution(host string, port int, listener endpointU
}
func (k *k8sResolver) streamProfiles(host string, clientNs string, listener profileUpdateListener) error {
// In single namespace mode, we'll close the stream immediately and the proxy
// will reissue the request after 3 seconds. If we wanted to be more
// sophisticated about this in the future, we could leave the stream open
// indefinitely, or we could update the API to support a ProfilesDisabled
// message. For now, however, this works.
if k.profileWatcher == nil {
return nil
}
subscriptions := map[profileID]profileUpdateListener{}
primaryListener, secondaryListener := newFallbackProfileListener(listener)
@ -131,9 +122,7 @@ func (k *k8sResolver) getState() servicePorts {
func (k *k8sResolver) stop() {
k.endpointsWatcher.stop()
if k.profileWatcher != nil {
k.profileWatcher.stop()
}
k.profileWatcher.stop()
}
func (k *k8sResolver) resolveKubernetesService(id *serviceID, port int, listener endpointUpdateListener) error {

View File

@ -2,7 +2,6 @@ package k8s
import (
"context"
"errors"
"fmt"
"strings"
"time"
@ -85,25 +84,19 @@ func InitializeAPI(kubeConfig string, resources ...APIResource) (*API, error) {
}
// check for cluster-wide access
clusterAccess, err := k8s.ClusterAccess(k8sClient)
err = k8s.ClusterAccess(k8sClient)
if err != nil {
return nil, err
}
if !clusterAccess {
return nil, fmt.Errorf("not authorized for cluster-wide access")
}
// check for need and access to ServiceProfiles
var spClient *spclient.Clientset
for _, res := range resources {
if res == SP {
serviceProfiles, err := k8s.ServiceProfilesAccess(k8sClient)
err := k8s.ServiceProfilesAccess(k8sClient)
if err != nil {
return nil, err
}
if !serviceProfiles {
return nil, errors.New("not authorized for ServiceProfile access")
}
spClient, err = NewSpClientSet(kubeConfig)
if err != nil {

View File

@ -36,22 +36,15 @@ const (
// LinkerdPreInstall* checks enabled by `linkerd check --pre`
// LinkerdPreInstallClusterChecks adds checks to validate that the control
// plane namespace does not already exist, and that the user can create
// cluster-wide resources, including ClusterRole, ClusterRoleBinding, and
// CustomResourceDefinition. This check only runs as part of the set
// LinkerdPreInstallChecks adds checks to validate that the control plane
// namespace does not already exist, and that the user can create cluster-wide
// resources, including ClusterRole, ClusterRoleBinding, and
// CustomResourceDefinition, as well as namespace-wide resources, including
// Service, Deployment, and ConfigMap. This check only runs as part of the set
// of pre-install checks.
// This check is dependent on the output of KubernetesAPIChecks, so those
// checks must be added first.
LinkerdPreInstallClusterChecks CategoryID = "pre-kubernetes-cluster-setup"
// LinkerdPreInstallSingleNamespaceChecks adds a check to validate that the
// control plane namespace already exists, and that the user can create
// namespace-scoped resources, including Role and RoleBinding. This check only
// runs as part of the set of pre-install checks.
// This check is dependent on the output of KubernetesAPIChecks, so those
// checks must be added first.
LinkerdPreInstallSingleNamespaceChecks CategoryID = "pre-kubernetes-single-namespace-setup"
LinkerdPreInstallChecks CategoryID = "pre-kubernetes-setup"
// LinkerdPreInstallCapabilityChecks adds a check to validate the user has the
// capabilities necessary to deploy Linkerd. For example, the NET_ADMIN
@ -59,14 +52,6 @@ const (
// These checks are not run when the `--linkerd-cni-enabled` flag is set.
LinkerdPreInstallCapabilityChecks CategoryID = "pre-kubernetes-capability"
// LinkerdPreInstallChecks adds checks to validate that the user can create
// Kubernetes objects necessary to install the control plane, including
// Service, Deployment, and ConfigMap. This check only runs as part of the set
// of pre-install checks.
// This check is dependent on the output of KubernetesAPIChecks, so those
// checks must be added first.
LinkerdPreInstallChecks CategoryID = "pre-kubernetes-setup"
// LinkerdControlPlaneExistenceChecks adds a series of checks to validate that
// the control plane namespace and controller pod exist.
// These checks are dependent on the output of KubernetesAPIChecks, so those
@ -79,12 +64,6 @@ const (
// checks must be added first.
LinkerdAPIChecks CategoryID = "linkerd-api"
// LinkerdServiceProfileChecks adds a check to validate any ServiceProfiles that
// may already be installed.
// These checks are dependent on the output of KubernetesAPIChecks, so those
// checks must be added first.
LinkerdServiceProfileChecks CategoryID = "linkerd-service-profile"
// LinkerdVersionChecks adds a series of checks to query for the latest
// version, and validate that the CLI is up to date.
LinkerdVersionChecks CategoryID = "linkerd-version"
@ -283,7 +262,7 @@ func (hc *HealthChecker) allCategories() []category {
},
},
{
id: LinkerdPreInstallClusterChecks,
id: LinkerdPreInstallChecks,
checkers: []checker{
{
description: "control plane namespace does not already exist",
@ -320,49 +299,6 @@ func (hc *HealthChecker) allCategories() []category {
return hc.checkCanCreate("", "apiextensions.k8s.io", "v1beta1", "CustomResourceDefinition")
},
},
},
},
{
id: LinkerdPreInstallSingleNamespaceChecks,
checkers: []checker{
{
description: "control plane namespace exists",
hintAnchor: "pre-single-ns",
check: func(ctx context.Context) error {
return hc.checkNamespace(ctx, hc.ControlPlaneNamespace, true)
},
},
{
description: "can create Roles",
hintAnchor: "pre-k8s-cluster-k8s",
check: func(context.Context) error {
return hc.checkCanCreate(hc.ControlPlaneNamespace, "rbac.authorization.k8s.io", "v1beta1", "Role")
},
},
{
description: "can create RoleBindings",
hintAnchor: "pre-k8s-cluster-k8s",
check: func(context.Context) error {
return hc.checkCanCreate(hc.ControlPlaneNamespace, "rbac.authorization.k8s.io", "v1beta1", "RoleBinding")
},
},
},
},
{
id: LinkerdPreInstallCapabilityChecks,
checkers: []checker{
{
description: "has NET_ADMIN capability",
hintAnchor: "pre-k8s-cluster-net-admin",
check: func(context.Context) error {
return hc.checkNetAdmin()
},
},
},
},
{
id: LinkerdPreInstallChecks,
checkers: []checker{
{
description: "can create ServiceAccounts",
hintAnchor: "pre-k8s",
@ -393,6 +329,18 @@ func (hc *HealthChecker) allCategories() []category {
},
},
},
{
id: LinkerdPreInstallCapabilityChecks,
checkers: []checker{
{
description: "has NET_ADMIN capability",
hintAnchor: "pre-k8s-cluster-net-admin",
check: func(context.Context) error {
return hc.checkNetAdmin()
},
},
},
},
{
id: LinkerdControlPlaneExistenceChecks,
checkers: []checker{
@ -469,11 +417,6 @@ func (hc *HealthChecker) allCategories() []category {
return hc.apiClient.SelfCheck(ctx, &healthcheckPb.SelfCheckRequest{})
},
},
},
},
{
id: LinkerdServiceProfileChecks,
checkers: []checker{
{
description: "no invalid service profiles",
hintAnchor: "l5d-sp",
@ -808,7 +751,7 @@ func (hc *HealthChecker) checkCanCreate(namespace, group, version, resource stri
return fmt.Errorf("unexpected error: Kubernetes ClientSet not initialized")
}
allowed, reason, err := k8s.ResourceAuthz(
return k8s.ResourceAuthz(
hc.clientset,
namespace,
"create",
@ -817,17 +760,6 @@ func (hc *HealthChecker) checkCanCreate(namespace, group, version, resource stri
resource,
"",
)
if err != nil {
return err
}
if !allowed {
if len(reason) > 0 {
return fmt.Errorf("missing permissions to create %s: %v", resource, reason)
}
return fmt.Errorf("missing permissions to create %s", resource)
}
return nil
}
func (hc *HealthChecker) checkNetAdmin() error {
@ -851,7 +783,7 @@ func (hc *HealthChecker) checkNetAdmin() error {
// AND
// 2) provides NET_ADMIN
for _, psp := range pspList.Items {
allowed, _, err := k8s.ResourceAuthz(
err := k8s.ResourceAuthz(
hc.clientset,
"",
"use",
@ -860,11 +792,7 @@ func (hc *HealthChecker) checkNetAdmin() error {
"PodSecurityPolicy",
psp.GetName(),
)
if err != nil {
return err
}
if allowed {
if err == nil {
for _, capability := range psp.Spec.AllowedCapabilities {
if capability == "*" || capability == "NET_ADMIN" {
return nil

View File

@ -292,7 +292,7 @@ func TestHealthChecker(t *testing.T) {
}
func TestCheckCanCreate(t *testing.T) {
exp := fmt.Errorf("missing permissions to create deployments")
exp := fmt.Errorf("not authorized to access deployments.extensions")
hc := NewHealthChecker(
[]CategoryID{},

View File

@ -14,7 +14,7 @@ import (
func ResourceAuthz(
k8sClient kubernetes.Interface,
namespace, verb, group, version, resource, name string,
) (bool, string, error) {
) error {
ssar := &authV1.SelfSubjectAccessReview{
Spec: authV1.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authV1.ResourceAttributes{
@ -33,63 +33,46 @@ func ResourceAuthz(
SelfSubjectAccessReviews().
Create(ssar)
if err != nil {
return false, "", err
return err
}
return result.Status.Allowed, result.Status.Reason, nil
if result.Status.Allowed {
return nil
}
gk := schema.GroupKind{
Group: group,
Kind: resource,
}
if len(result.Status.Reason) > 0 {
return fmt.Errorf("not authorized to access %s: %s", gk, result.Status.Reason)
}
return fmt.Errorf("not authorized to access %s", gk)
}
// ServiceProfilesAccess checks whether the ServiceProfile CRD is installed
// on the cluster and the client is authorized to access ServiceProfiles.
func ServiceProfilesAccess(k8sClient kubernetes.Interface) (bool, error) {
func ServiceProfilesAccess(k8sClient kubernetes.Interface) error {
res, err := k8sClient.Discovery().ServerResources()
if err != nil {
return false, err
return err
}
for _, r := range res {
if r.GroupVersion == ServiceProfileAPIVersion {
for _, apiRes := range r.APIResources {
if apiRes.Kind == ServiceProfileKind {
return resourceAccess(k8sClient, schema.GroupKind{
Group: "linkerd.io",
Kind: "serviceprofiles",
})
return ResourceAuthz(k8sClient, "", "list", "linkerd.io", "", "serviceprofiles", "")
}
}
}
}
return false, errors.New("ServiceProfiles not available")
return errors.New("ServiceProfile CRD not found")
}
// ClusterAccess verifies whether k8sClient is authorized to access all pods in
// all namespaces in the cluster.
func ClusterAccess(k8sClient kubernetes.Interface) (bool, error) {
return resourceAccess(k8sClient, schema.GroupKind{Kind: "pods"})
}
// resourceAccess verifies whether k8sClient is authorized to access a resource
// in all namespaces in the cluster.
func resourceAccess(k8sClient kubernetes.Interface, gk schema.GroupKind) (bool, error) {
allowed, reason, err := ResourceAuthz(
k8sClient,
"",
"list",
gk.Group,
"",
gk.Kind,
"",
)
if err != nil {
return false, err
}
if allowed {
return true, nil
}
if len(reason) > 0 {
return false, fmt.Errorf("not authorized to access %s: %s", gk, reason)
}
return false, fmt.Errorf("not authorized to access %s", gk)
func ClusterAccess(k8sClient kubernetes.Interface) error {
return ResourceAuthz(k8sClient, "", "list", "", "", "pods", "")
}

View File

@ -1,6 +1,7 @@
package k8s
import (
"errors"
"fmt"
"testing"
)
@ -8,8 +9,6 @@ import (
func TestResourceAuthz(t *testing.T) {
tests := []struct {
k8sConfigs []string
allowed bool
reason string
err error
}{
{
@ -37,9 +36,7 @@ subjects:
name: system:unauthenticated
apiGroup: rbac.authorization.k8s.io`,
},
false,
"",
nil,
errors.New("not authorized to access deployments.extensions"),
},
}
@ -50,7 +47,7 @@ subjects:
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
allowed, reason, err := ResourceAuthz(k8sClient, "", "list", "extensions", "v1beta1", "deployments", "")
err = ResourceAuthz(k8sClient, "", "list", "extensions", "v1beta1", "deployments", "")
if err != nil || test.err != nil {
if (err == nil && test.err != nil) ||
(err != nil && test.err == nil) ||
@ -58,12 +55,6 @@ subjects:
t.Fatalf("Unexpected error (Expected: %s, Got: %s)", test.err, err)
}
}
if allowed != test.allowed {
t.Errorf("Allowed mismatch. Expected %v, but got %v", test.allowed, allowed)
}
if reason != test.reason {
t.Errorf("Reason mismatch. Expected %v, but got %v", test.reason, reason)
}
})
}
}

View File

@ -58,13 +58,14 @@ var (
`.*-tls linkerd-(ca|controller|grafana|prometheus|web)-.*-.* linkerd-proxy WARN admin={bg=tls-config} linkerd2_proxy::transport::tls::config error reloading TLS config: Io\("/var/linkerd-io/identity/certificate\.crt", Some\(2\)\), falling back`,
`.*-tls linkerd-(ca|controller|grafana|prometheus|web)-.*-.* linkerd-proxy WARN admin={bg=tls-config} linkerd2_proxy::transport::tls::config error reloading TLS config: Io\("/var/linkerd-io/trust-anchors/trust-anchors\.pem", Some\(2\)\), falling back`,
`.*-tls linkerd-(ca|controller|grafana|prometheus|web)-.*-.* linkerd-proxy WARN proxy={server=in listen=0.0.0.0:4143} rustls::session Sending fatal alert AccessDenied`,
`.*-tls linkerd-(ca|controller|grafana|prometheus|web)-.*-.* linkerd-proxy WARN proxy={server=in listen=0\.0\.0\.0:4143} rustls::session Sending fatal alert AccessDenied`,
`.*-tls linkerd-(ca|controller|grafana|prometheus|web)-.*-.* linkerd-proxy ERR! proxy={server=in listen=0\.0\.0\.0:4143 remote=.*} linkerd2_proxy::proxy::http::router service error: an IO error occurred: Connection reset by peer (os error 104)`,
// k8s hitting readiness endpoints before components are ready
`.* linkerd-(ca|controller|grafana|prometheus|web)-.*-.* linkerd-proxy ERR! proxy={server=in listen=0\.0\.0\.0:4143 remote=.*} linkerd2_proxy::proxy::http::router service error: an error occurred trying to connect: Connection refused \(os error 111\) \(address: 127\.0\.0\.1:.*\)`,
`.* linkerd-(ca|controller|grafana|prometheus|web)-.*-.* linkerd-proxy ERR! proxy={server=out listen=127\.0\.0\.1:4140 remote=.*} linkerd2_proxy::proxy::http::router service error: an error occurred trying to connect: Connection refused \(os error 111\) \(address: .*:4191\)`,
`.* linkerd-(ca|controller|grafana|prometheus|web)-.*-.* linkerd-proxy ERR! admin={server=metrics listen=0.0.0.0:4191 remote=.*} linkerd2_proxy::control::serve_http error serving metrics: Error { kind: Shutdown, cause: Os { code: 107, kind: NotConnected, message: "Transport endpoint is not connected" } }`,
`.* linkerd-(ca|controller|grafana|prometheus|web)-.*-.* linkerd-proxy ERR! admin={server=metrics listen=0\.0\.0\.0:4191 remote=.*} linkerd2_proxy::control::serve_http error serving metrics: Error { kind: Shutdown, cause: Os { code: 107, kind: NotConnected, message: "Transport endpoint is not connected" } }`,
`.* linkerd-controller-.*-.* tap time=".*" level=error msg="\[.*\] encountered an error: rpc error: code = Canceled desc = context canceled"`,
`.* linkerd-web-.*-.* linkerd-proxy WARN trust_dns_proto::xfer::dns_exchange failed to associate send_message response to the sender`,
@ -73,10 +74,6 @@ var (
`.* linkerd-prometheus-.*-.* linkerd-proxy ERR! proxy={server=out listen=127\.0\.0\.1:4140 remote=.*} linkerd2_proxy::proxy::http::router service error: an error occurred trying to connect: Connection refused \(os error 111\) \(address: .*:(3000|999(4|5|6|7|8))\)`,
`.* linkerd-prometheus-.*-.* linkerd-proxy ERR! proxy={server=out listen=127\.0\.0\.1:4140 remote=.*} linkerd2_proxy::proxy::http::router service error: an error occurred trying to connect: operation timed out after 300ms`,
// single namespace warnings
`.*-single-namespace linkerd-controller-.*-.* (destination|public-api|tap) time=".*" level=warning msg="Not authorized for cluster-wide access, limiting access to \\".*-single-namespace\\" namespace"`,
`.*-single-namespace linkerd-controller-.*-.* (destination|public-api|tap) time=".*" level=warning msg="ServiceProfiles not available"`,
`.* linkerd-web-.*-.* web time=".*" level=error msg="Post http://linkerd-controller-api\..*\.svc\.cluster\.local:8085/api/v1/Version: context canceled"`,
`.*-tls linkerd-(ca|controller|grafana|prometheus|web)-.*-.* linkerd-proxy ERR! linkerd-destination\..*-tls\.svc\.cluster\.local:8086 rustls::session TLS alert received: Message {`,
`.*-tls linkerd-controller-.*-.* linkerd-proxy ERR! .*:9090 rustls::session TLS alert received: Message {`,
@ -102,14 +99,6 @@ func TestVersionPreInstall(t *testing.T) {
func TestCheckPreInstall(t *testing.T) {
cmd := []string{"check", "--pre", "--expected-version", TestHelper.GetVersion()}
golden := "check.pre.golden"
if TestHelper.SingleNamespace() {
cmd = append(cmd, "--single-namespace")
golden = "check.pre.single_namespace.golden"
err := TestHelper.CreateNamespaceIfNotExists(TestHelper.GetLinkerdNamespace())
if err != nil {
t.Fatalf("Namespace creation failed\n%s", err.Error())
}
}
out, _, err := TestHelper.LinkerdRun(cmd...)
if err != nil {
t.Fatalf("Check command failed\n%s", out)
@ -131,9 +120,6 @@ func TestInstall(t *testing.T) {
cmd = append(cmd, []string{"--tls", "optional"}...)
linkerdDeployReplicas["linkerd-ca"] = deploySpec{1, []string{"ca"}}
}
if TestHelper.SingleNamespace() {
cmd = append(cmd, "--single-namespace")
}
out, _, err := TestHelper.LinkerdRun(cmd...)
if err != nil {
@ -179,10 +165,6 @@ func TestVersionPostInstall(t *testing.T) {
func TestCheckPostInstall(t *testing.T) {
cmd := []string{"check", "--expected-version", TestHelper.GetVersion(), "--wait=0"}
golden := "check.golden"
if TestHelper.SingleNamespace() {
cmd = append(cmd, "--single-namespace")
golden = "check.single_namespace.golden"
}
err := TestHelper.RetryFor(time.Minute, func() error {
out, _, err := TestHelper.LinkerdRun(cmd...)
@ -285,10 +267,6 @@ func TestCheckProxy(t *testing.T) {
prefixedNs := TestHelper.GetTestNamespace("smoke-test")
cmd := []string{"check", "--proxy", "--expected-version", TestHelper.GetVersion(), "--namespace", prefixedNs, "--wait=0"}
golden := "check.proxy.golden"
if TestHelper.SingleNamespace() {
cmd = append(cmd, "--single-namespace")
golden = "check.proxy.single_namespace.golden"
}
err := TestHelper.RetryFor(time.Minute, func() error {
out, _, err := TestHelper.LinkerdRun(cmd...)

View File

@ -21,9 +21,6 @@ linkerd-api
√ control plane self-check
√ [kubernetes] control plane can talk to Kubernetes
√ [prometheus] control plane can talk to Prometheus
linkerd-service-profile
-----------------------
√ no invalid service profiles
linkerd-version

View File

@ -8,25 +8,22 @@ kubernetes-version
√ is running the minimum Kubernetes API version
√ is running the minimum kubectl version
pre-kubernetes-cluster-setup
----------------------------
pre-kubernetes-setup
--------------------
√ control plane namespace does not already exist
√ can create Namespaces
√ can create ClusterRoles
√ can create ClusterRoleBindings
√ can create CustomResourceDefinitions
pre-kubernetes-capability
-------------------------
√ has NET_ADMIN capability
pre-kubernetes-setup
--------------------
√ can create ServiceAccounts
√ can create Services
√ can create Deployments
√ can create ConfigMaps
pre-kubernetes-capability
-------------------------
√ has NET_ADMIN capability
linkerd-version
---------------
√ can determine the latest version

View File

@ -1,33 +0,0 @@
kubernetes-api
--------------
√ can initialize the client
√ can query the Kubernetes API
kubernetes-version
------------------
√ is running the minimum Kubernetes API version
√ is running the minimum kubectl version
pre-kubernetes-single-namespace-setup
-------------------------------------
√ control plane namespace exists
√ can create Roles
√ can create RoleBindings
pre-kubernetes-capability
-------------------------
√ has NET_ADMIN capability
pre-kubernetes-setup
--------------------
√ can create ServiceAccounts
√ can create Services
√ can create Deployments
√ can create ConfigMaps
linkerd-version
---------------
√ can determine the latest version
√ cli is up-to-date
Status check results are √

View File

@ -21,9 +21,6 @@ linkerd-api
√ control plane self-check
√ [kubernetes] control plane can talk to Kubernetes
√ [prometheus] control plane can talk to Prometheus
linkerd-service-profile
-----------------------
√ no invalid service profiles
linkerd-version

View File

@ -1,38 +0,0 @@
kubernetes-api
--------------
√ can initialize the client
√ can query the Kubernetes API
kubernetes-version
------------------
√ is running the minimum Kubernetes API version
√ is running the minimum kubectl version
linkerd-existence
-----------------
√ control plane namespace exists
√ controller pod is running
√ can initialize the client
√ can query the control plane API
linkerd-api
-----------
√ control plane pods are ready
√ control plane self-check
√ [kubernetes] control plane can talk to Kubernetes
√ [prometheus] control plane can talk to Prometheus
linkerd-version
---------------
√ can determine the latest version
√ cli is up-to-date
linkerd-data-plane
------------------
√ data plane namespace exists
√ data plane proxies are ready
√ data plane proxy metrics are present in Prometheus
√ data plane is up-to-date
√ data plane and cli versions match
Status check results are √

View File

@ -1,35 +0,0 @@
kubernetes-api
--------------
√ can initialize the client
√ can query the Kubernetes API
kubernetes-version
------------------
√ is running the minimum Kubernetes API version
√ is running the minimum kubectl version
linkerd-existence
-----------------
√ control plane namespace exists
√ controller pod is running
√ can initialize the client
√ can query the control plane API
linkerd-api
-----------
√ control plane pods are ready
√ control plane self-check
√ [kubernetes] control plane can talk to Kubernetes
√ [prometheus] control plane can talk to Prometheus
linkerd-version
---------------
√ can determine the latest version
√ cli is up-to-date
control-plane-version
---------------------
√ control plane is up-to-date
√ control plane and cli versions match
Status check results are √

View File

@ -17,12 +17,11 @@ import (
// TestHelper provides helpers for running the linkerd integration tests.
type TestHelper struct {
linkerd string
version string
namespace string
singleNamespace bool
tls bool
httpClient http.Client
linkerd string
version string
namespace string
tls bool
httpClient http.Client
KubernetesHelper
}
@ -36,7 +35,6 @@ func NewTestHelper() *TestHelper {
linkerd := flag.String("linkerd", "", "path to the linkerd binary to test")
namespace := flag.String("linkerd-namespace", "l5d-integration", "the namespace where linkerd is installed")
singleNamespace := flag.Bool("single-namespace", false, "configure the control plane to only operate in the installed namespace")
tls := flag.Bool("enable-tls", false, "enable TLS in tests")
runTests := flag.Bool("integration-tests", false, "must be provided to run the integration tests")
verbose := flag.Bool("verbose", false, "turn on debug logging")
@ -69,15 +67,11 @@ func NewTestHelper() *TestHelper {
if *tls {
ns += "-tls"
}
if *singleNamespace {
ns += "-single-namespace"
}
testHelper := &TestHelper{
linkerd: *linkerd,
namespace: ns,
singleNamespace: *singleNamespace,
tls: *tls,
linkerd: *linkerd,
namespace: ns,
tls: *tls,
}
version, _, err := testHelper.LinkerdRun("version", "--client", "--short")
@ -115,9 +109,6 @@ func (h *TestHelper) GetLinkerdNamespace() string {
// GetTestNamespace returns the namespace for the given test. The test namespace
// is prefixed with the linkerd namespace.
func (h *TestHelper) GetTestNamespace(testName string) string {
if h.SingleNamespace() {
return h.namespace
}
return h.namespace + "-" + testName
}
@ -126,11 +117,6 @@ func (h *TestHelper) TLS() bool {
return h.tls
}
// SingleNamespace returns whether --single-namespace is enabled for the given test or not.
func (h *TestHelper) SingleNamespace() bool {
return h.singleNamespace
}
// CombinedOutput executes a shell command and returns the output.
func (h *TestHelper) CombinedOutput(name string, arg ...string) (string, string, error) {
command := exec.Command(name, arg...)

View File

@ -139,9 +139,7 @@ class ConfigureProfilesMsg extends React.Component {
render() {
const { showAsIcon } = this.props;
if (this.props.serviceProfiles === "false") {
return null;
} else if (showAsIcon) {
if (showAsIcon) {
return this.renderDownloadProfileForm();
} else {
return (
@ -162,7 +160,6 @@ ConfigureProfilesMsg.propTypes = {
prefixedUrl: PropTypes.func.isRequired,
}).isRequired,
classes: PropTypes.shape({}).isRequired,
serviceProfiles: PropTypes.string.isRequired,
showAsIcon: PropTypes.bool,
};

View File

@ -63,7 +63,6 @@ class TopRoutes extends React.Component {
}).isRequired,
classes: PropTypes.shape({}).isRequired,
query: topRoutesQueryPropType,
serviceProfiles: PropTypes.string.isRequired,
}
static defaultProps = {
query: {
@ -237,8 +236,7 @@ class TopRoutes extends React.Component {
</Grid>
</Grid>
<Divider light className={classes.root} />
{this.props.serviceProfiles === "false" ? null :
<Typography variant="caption">You can also create a new profile <ConfigureProfilesMsg showAsIcon={true} /></Typography>}
<Typography variant="caption">You can also create a new profile <ConfigureProfilesMsg showAsIcon={true} /></Typography>
</CardContent>
);
}

View File

@ -40,8 +40,7 @@ func main() {
stop := make(chan os.Signal, 1)
signal.Notify(stop, os.Interrupt, syscall.SIGTERM)
// TODO: modify this API to always assume serviceProfiles are available
server := srv.NewServer(*addr, *grafanaAddr, *templateDir, *staticDir, *uuid, *controllerNamespace, true, *reload, client)
server := srv.NewServer(*addr, *grafanaAddr, *templateDir, *staticDir, *uuid, *controllerNamespace, *reload, client)
go func() {
log.Infof("starting HTTP server on %+v", *addr)

View File

@ -23,7 +23,6 @@ type (
apiClient public.APIClient
uuid string
controllerNamespace string
serviceProfiles bool
grafanaProxy *grafanaProxy
}
)
@ -38,7 +37,6 @@ func (h *handler) handleIndex(w http.ResponseWriter, req *http.Request, p httpro
params := appParams{
UUID: h.uuid,
ControllerNamespace: h.controllerNamespace,
ServiceProfiles: h.serviceProfiles,
PathPrefix: pathPfx,
}

View File

@ -35,7 +35,6 @@ type (
Data pb.VersionInfo
UUID string
ControllerNamespace string
ServiceProfiles bool
Error bool
ErrorMessage string
PathPrefix string
@ -57,7 +56,6 @@ func NewServer(
staticDir string,
uuid string,
controllerNamespace string,
serviceProfiles bool,
reload bool,
apiClient public.APIClient,
) *http.Server {
@ -78,7 +76,6 @@ func NewServer(
render: server.RenderTemplate,
uuid: uuid,
controllerNamespace: controllerNamespace,
serviceProfiles: serviceProfiles,
grafanaProxy: newGrafanaProxy(grafanaAddr),
}

View File

@ -3,7 +3,6 @@
data-release-version="{{.Data.ReleaseVersion}}"
data-go-version="{{.Data.GoVersion}}"
data-controller-namespace="{{.ControllerNamespace}}"
data-service-profiles="{{.ServiceProfiles}}"
data-uuid="{{.UUID}}">
{{ if .Error }}
<p>Failed to call public API: {{ .ErrorMessage }}</p>