mirror of https://github.com/linkerd/linkerd2.git
go: Enable `errorlint` checking (#7885)
Since Go 1.13, errors may "wrap" other errors. [`errorlint`][el] checks that error formatting and inspection is wrapping-aware.

This change enables `errorlint` in golangci-lint and updates all error handling code to pass the lint. Some comparisons in tests have been left unchanged (using `//nolint:errorlint` comments).

[el]: https://github.com/polyfloyd/go-errorlint

Signed-off-by: Oliver Gould <ver@buoyant.io>
parent 942abe9d4b
commit f5876c2a98
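The change below is mechanical but worth illustrating. A minimal, self-contained sketch of the pattern it applies throughout (the `loadFile` helper and the path are invented for this example and are not taken from the Linkerd code): wrapping with `%w` keeps the underlying error reachable through `errors.Is` and `errors.As`, while `%v`/`%s` flatten it to text.

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
        "os"
    )

    // loadFile is a hypothetical helper used only for this sketch.
    func loadFile(path string) ([]byte, error) {
        b, err := os.ReadFile(path)
        if err != nil {
            // %w wraps err; callers can still inspect the original error.
            return nil, fmt.Errorf("failed to read config file %s: %w", path, err)
        }
        return b, nil
    }

    func main() {
        _, err := loadFile("/does/not/exist")
        // errors.Is walks the wrap chain; with %v instead of %w this would print false.
        fmt.Println(errors.Is(err, fs.ErrNotExist))
    }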
@@ -16,6 +16,7 @@ linters:
   - bodyclose
   - deadcode
   - depguard
+  - errorlint
   - exportloopref
   - goconst
   - gosimple

@@ -149,7 +149,7 @@ non-zero exit code.`,
 func configureAndRunChecks(cmd *cobra.Command, wout io.Writer, werr io.Writer, stage string, options *checkOptions) error {
     err := options.validate()
     if err != nil {
-        return fmt.Errorf("Validation error when executing check command: %v", err)
+        return fmt.Errorf("Validation error when executing check command: %w", err)
     }

     if options.cliVersionOverride != "" {

@@ -171,7 +171,7 @@ func configureAndRunChecks(cmd *cobra.Command, wout io.Writer, werr io.Writer, s
     }
     values, installManifest, err = renderInstallManifest(cmd.Context())
     if err != nil {
-        fmt.Fprint(os.Stderr, fmt.Errorf("Error rendering install manifest: %v", err))
+        fmt.Fprintf(os.Stderr, "Error rendering install manifest: %s\n", err)
         os.Exit(1)
     }
 } else {

@@ -219,8 +219,7 @@ func configureAndRunChecks(cmd *cobra.Command, wout io.Writer, werr io.Writer, s

     extensionSuccess, extensionWarning, err := runExtensionChecks(cmd, wout, werr, options)
     if err != nil {
-        err = fmt.Errorf("failed to run extensions checks: %s", err)
-        fmt.Fprintln(werr, err)
+        fmt.Fprintf(werr, "Failed to run extensions checks: %s\n", err)
         os.Exit(1)
     }

@@ -97,14 +97,14 @@ destination.`,
     client, conn, err := destination.NewExternalClient(cmd.Context(), controlPlaneNamespace, k8sAPI)
     if err != nil {
-        fmt.Fprint(os.Stderr, fmt.Errorf("Error creating destination client: %s", err))
+        fmt.Fprintf(os.Stderr, "Error creating destination client: %s\n", err)
         os.Exit(1)
     }
     defer conn.Close()

     endpoints, err := requestEndpointsFromAPI(client, args)
     if err != nil {
-        fmt.Fprint(os.Stderr, fmt.Errorf("Destination API error: %s", err))
+        fmt.Fprintf(os.Stderr, "Destination API error: %s\n", err)
         os.Exit(1)
     }

@@ -191,7 +191,7 @@ func (rt resourceTransformerInject) transform(bytes []byte) ([]byte, []inject.Re

     if ok, _ := report.Injectable(); !ok {
         if errs := report.ThrowInjectError(); len(errs) > 0 {
-            return bytes, reports, fmt.Errorf("failed to inject %s%s%s: %v", report.Kind, slash, report.Name, concatErrors(errs, ", "))
+            return bytes, reports, fmt.Errorf("failed to inject %s%s%s: %w", report.Kind, slash, report.Name, concatErrors(errs, ", "))
         }
         return bytes, reports, nil
     }

@@ -61,10 +61,10 @@ func processYAML(in io.Reader, out io.Writer, report io.Writer, rt resourceTrans
     for {
         // Read a single YAML object
         bytes, err := reader.Read()
-        if err == io.EOF {
-            break
-        }
         if err != nil {
+            if errors.Is(err, io.EOF) {
+                break
+            }
             return []error{err}
         }

@@ -80,7 +80,7 @@ func (options *cniPluginOptions) pluginImage() string {
 func newCmdInstallCNIPlugin() *cobra.Command {
     options, err := newCNIInstallOptionsWithDefaults()
     if err != nil {
-        fmt.Fprintf(os.Stderr, "%s", err)
+        fmt.Fprintln(os.Stderr, err)
         os.Exit(1)
     }

@@ -174,7 +174,7 @@ func newCmdInstallControlPlane(values *l5dcharts.Values) *cobra.Command {
     installOnlyFlags, installOnlyFlagSet := makeInstallFlags(values)
     installUpgradeFlags, installUpgradeFlagSet, err := makeInstallUpgradeFlags(values)
     if err != nil {
-        fmt.Fprint(os.Stderr, err.Error())
+        fmt.Fprintln(os.Stderr, err.Error())
         os.Exit(1)
     }
     proxyFlags, proxyFlagSet := makeProxyFlags(values)

@@ -450,7 +450,7 @@ func render(w io.Writer, values *l5dcharts.Values, stage string, valuesOverrides
     // Attach the final values into the `Values` field for rendering to work
     renderedTemplates, err := engine.Render(chart, fullValues)
     if err != nil {
-        return fmt.Errorf("failed to render the template: %s", err)
+        return fmt.Errorf("failed to render the template: %w", err)
     }

     // Merge templates and inject

@@ -551,17 +551,21 @@ func errAfterRunningChecks(cniEnabled bool) error {
     errMsgs := []string{}
     hc.RunChecks(func(result *healthcheck.CheckResult) {
         if result.Err != nil {
-            if ce, ok := result.Err.(*healthcheck.CategoryError); ok {
+            var ce healthcheck.CategoryError
+            if errors.As(result.Err, &ce) {
                 if ce.Category == healthcheck.KubernetesAPIChecks {
                     k8sAPIError = ce
-                } else if re, ok := ce.Err.(*healthcheck.ResourceError); ok {
-                    // resource error, print in kind.group/name format
-                    for _, res := range re.Resources {
-                        errMsgs = append(errMsgs, res.String())
-                    }
-                } else {
-                    // unknown category error, just print it
-                    errMsgs = append(errMsgs, result.Err.Error())
+                } else {
+                    var re healthcheck.ResourceError
+                    if errors.As(ce.Err, &re) {
+                        // resource error, print in kind.group/name format
+                        for _, res := range re.Resources {
+                            errMsgs = append(errMsgs, res.String())
+                        }
+                    } else {
+                        // unknown category error, just print it
+                        errMsgs = append(errMsgs, result.Err.Error())
+                    }
                 }
             } else {
                 // unknown error, just print it
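The hunk above swaps a type assertion on the error for `errors.As`, which also matches errors that have been wrapped with `%w`. A minimal sketch of why that matters (the `QuotaError` type is invented for this example and is not part of the Linkerd codebase):

    package main

    import (
        "errors"
        "fmt"
    )

    // QuotaError is a hypothetical typed error used only for this sketch.
    type QuotaError struct{ Limit int }

    func (e QuotaError) Error() string { return fmt.Sprintf("quota exceeded (limit %d)", e.Limit) }

    func main() {
        err := fmt.Errorf("reconcile failed: %w", QuotaError{Limit: 5})

        // Old style: a direct type assertion misses the wrapped value.
        if _, ok := err.(QuotaError); ok {
            fmt.Println("assertion matched") // never printed
        }

        // New style: errors.As unwraps until it finds a QuotaError.
        var qe QuotaError
        if errors.As(err, &qe) {
            fmt.Println("errors.As matched, limit =", qe.Limit)
        }
    }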
@@ -547,7 +547,7 @@ func validateValues(ctx context.Context, k *k8s.KubernetesAPI, values *l5dcharts
     }
     _, err = externalIssuerData.VerifyAndBuildCreds()
     if err != nil {
-        return fmt.Errorf("failed to validate issuer credentials: %s", err)
+        return fmt.Errorf("failed to validate issuer credentials: %w", err)
     }
 }

@@ -559,7 +559,7 @@ func validateValues(ctx context.Context, k *k8s.KubernetesAPI, values *l5dcharts
     }
     _, err := issuerData.VerifyAndBuildCreds()
     if err != nil {
-        return fmt.Errorf("failed to validate issuer credentials: %s", err)
+        return fmt.Errorf("failed to validate issuer credentials: %w", err)
     }
 }

@@ -570,7 +570,7 @@ func validateProxyValues(values *l5dcharts.Values) error {
     networks := strings.Split(values.ClusterNetworks, ",")
     for _, network := range networks {
         if _, _, err := net.ParseCIDR(network); err != nil {
-            return fmt.Errorf("cannot parse destination get networks: %s", err)
+            return fmt.Errorf("cannot parse destination get networks: %w", err)
         }
     }

@@ -672,7 +672,7 @@ func initializeIssuerCredentials(ctx context.Context, k *k8s.KubernetesAPI, valu
     // No credentials have been supplied so we will generate them.
     root, err := tls.GenerateRootCAWithDefaults(issuerName(values.IdentityTrustDomain))
     if err != nil {
-        return fmt.Errorf("failed to generate root certificate for identity: %s", err)
+        return fmt.Errorf("failed to generate root certificate for identity: %w", err)
     }
     values.Identity.Issuer.TLS.KeyPEM = root.Cred.EncodePrivateKeyPEM()
     values.Identity.Issuer.TLS.CrtPEM = root.Cred.Crt.EncodeCertificatePEM()

@@ -205,17 +205,17 @@ func k8sClient(manifestsFile string) (*k8s.KubernetesAPI, error) {
     if manifestsFile != "" {
         readers, err := read(manifestsFile)
         if err != nil {
-            return nil, fmt.Errorf("Failed to parse manifests from %s: %s", manifestsFile, err)
+            return nil, fmt.Errorf("failed to parse manifests from %s: %w", manifestsFile, err)
         }

         k, err = k8s.NewFakeAPIFromManifests(readers)
         if err != nil {
-            return nil, fmt.Errorf("Failed to parse Kubernetes objects from manifest %s: %s", manifestsFile, err)
+            return nil, fmt.Errorf("failed to parse Kubernetes objects from manifest %s: %w", manifestsFile, err)
         }
     } else {
         k, err = k8s.NewAPI(kubeconfigPath, kubeContext, impersonate, impersonateGroup, 0)
         if err != nil {
-            return nil, fmt.Errorf("Failed to create a kubernetes client: %s", err)
+            return nil, fmt.Errorf("failed to create a kubernetes client: %w", err)
         }
     }
     return k, nil

@@ -101,23 +101,23 @@ func parseConfig(stdin []byte) (*PluginConf, error) {

     logrus.Debugf("linkerd-cni: stdin to plugin: %v", string(stdin))
     if err := json.Unmarshal(stdin, &conf); err != nil {
-        return nil, fmt.Errorf("linkerd-cni: failed to parse network configuration: %v", err)
+        return nil, fmt.Errorf("linkerd-cni: failed to parse network configuration: %w", err)
     }

     if conf.RawPrevResult != nil {
         resultBytes, err := json.Marshal(conf.RawPrevResult)
         if err != nil {
-            return nil, fmt.Errorf("linkerd-cni: could not serialize prevResult: %v", err)
+            return nil, fmt.Errorf("linkerd-cni: could not serialize prevResult: %w", err)
         }

         res, err := version.NewResult(conf.CNIVersion, resultBytes)
         if err != nil {
-            return nil, fmt.Errorf("linkerd-cni: could not parse prevResult: %v", err)
+            return nil, fmt.Errorf("linkerd-cni: could not parse prevResult: %w", err)
         }
         conf.RawPrevResult = nil
         conf.PrevResult, err = cniv1.NewResultFromResult(res)
         if err != nil {
-            return nil, fmt.Errorf("linkerd-cni: could not convert result to version 1.0: %v", err)
+            return nil, fmt.Errorf("linkerd-cni: could not convert result to version 1.0: %w", err)
         }
         logrus.Debugf("linkerd-cni: prevResult: %v", conf.PrevResult)
     }

@@ -206,7 +206,7 @@ func cmdAdd(args *skel.CmdArgs) error {
     // Check if there are any overridden ports to be skipped
     outboundSkipOverride, err := getAnnotationOverride(ctx, client, pod, k8s.ProxyIgnoreOutboundPortsAnnotation)
     if err != nil {
-        logEntry.Errorf("linkerd-cni: could not retrieve overridden annotations: %v", err)
+        logEntry.Errorf("linkerd-cni: could not retrieve overridden annotations: %s", err)
         return err
     }

@@ -217,7 +217,7 @@ func cmdAdd(args *skel.CmdArgs) error {

     inboundSkipOverride, err := getAnnotationOverride(ctx, client, pod, k8s.ProxyIgnoreInboundPortsAnnotation)
     if err != nil {
-        logEntry.Errorf("linkerd-cni: could not retrieve overridden annotations: %v", err)
+        logEntry.Errorf("linkerd-cni: could not retrieve overridden annotations: %s", err)
         return err
     }

@@ -240,7 +240,7 @@ func cmdAdd(args *skel.CmdArgs) error {

     err = iptables.ConfigureFirewall(*firewallConfiguration)
     if err != nil {
-        logEntry.Errorf("linkerd-cni: could not configure firewall: %v", err)
+        logEntry.Errorf("linkerd-cni: could not configure firewall: %s", err)
         return err
     }
 } else {

@@ -423,7 +423,7 @@ func getInboundPort(podSpec *corev1.PodSpec) (uint32, error) {
     addr := strings.Split(envVar.Value, ":")
     port, err := strconv.ParseUint(addr[1], 10, 32)
     if err != nil {
-        return 0, fmt.Errorf("failed to parse inbound port for proxy container: %s", err)
+        return 0, fmt.Errorf("failed to parse inbound port for proxy container: %w", err)
     }
     return uint32(port), nil
 }

@@ -3,6 +3,7 @@ package destination
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "net"
     "strconv"

@@ -153,7 +154,8 @@ func (s *server) Get(dest *pb.GetDestination, stream pb.Destination_GetServer) e

     err = s.endpoints.Subscribe(service, port, instanceID, translator)
     if err != nil {
-        if _, ok := err.(watcher.InvalidService); ok {
+        var ise watcher.InvalidService
+        if errors.As(err, &ise) {
             log.Debugf("Invalid service %s", dest.GetPath())
             return status.Errorf(codes.InvalidArgument, "Invalid authority: %s", dest.GetPath())
         }

@@ -211,18 +213,18 @@ func (s *server) GetProfile(dest *pb.GetDestination, stream pb.Destination_GetPr

     opaquePorts, err := getAnnotatedOpaquePorts(pod, s.defaultOpaquePorts)
     if err != nil {
-        return fmt.Errorf("failed to get opaque ports for pod: %s", err)
+        return fmt.Errorf("failed to get opaque ports for pod: %w", err)
     }
     var address watcher.Address
     var endpoint *pb.WeightedAddr
     if pod != nil {
         address, err = s.createAddress(pod, port)
         if err != nil {
-            return fmt.Errorf("failed to create address: %s", err)
+            return fmt.Errorf("failed to create address: %w", err)
         }
         endpoint, err = s.createEndpoint(address, opaquePorts)
         if err != nil {
-            return fmt.Errorf("failed to create endpoint: %s", err)
+            return fmt.Errorf("failed to create endpoint: %w", err)
         }
     }
     translator := newEndpointProfileTranslator(pod, port, endpoint, stream, s.log)

@@ -261,16 +263,16 @@ func (s *server) GetProfile(dest *pb.GetDestination, stream pb.Destination_GetPr
     if hostname != "" {
         address, err := s.getEndpointByHostname(s.k8sAPI, hostname, service, port)
         if err != nil {
-            return fmt.Errorf("failed to get pod for hostname %s: %v", hostname, err)
+            return fmt.Errorf("failed to get pod for hostname %s: %w", hostname, err)
         }
         opaquePorts, err := getAnnotatedOpaquePorts(address.Pod, s.defaultOpaquePorts)
         if err != nil {
-            return fmt.Errorf("failed to get opaque ports for pod: %s", err)
+            return fmt.Errorf("failed to get opaque ports for pod: %w", err)
         }
         var endpoint *pb.WeightedAddr
         endpoint, err = s.createEndpoint(*address, opaquePorts)
         if err != nil {
-            return fmt.Errorf("failed to create endpoint: %s", err)
+            return fmt.Errorf("failed to create endpoint: %w", err)
         }
         translator := newEndpointProfileTranslator(address.Pod, port, endpoint, stream, s.log)

@@ -373,7 +375,7 @@ func (s *server) createAddress(pod *corev1.Pod, port uint32) (watcher.Address, e
     }
     err := watcher.SetToServerProtocol(s.k8sAPI, &address, port)
     if err != nil {
-        return watcher.Address{}, fmt.Errorf("failed to set address OpaqueProtocol: %s", err)
+        return watcher.Address{}, fmt.Errorf("failed to set address OpaqueProtocol: %w", err)
     }
     return address, nil
 }

@@ -512,7 +514,7 @@ func getPodByIP(k8sAPI *k8s.API, podIP string, port uint32, log *logging.Entry)
 func getIndexedPods(k8sAPI *k8s.API, indexName string, podIP string) ([]*corev1.Pod, error) {
     objs, err := k8sAPI.Pod().Informer().GetIndexer().ByIndex(indexName, podIP)
     if err != nil {
-        return nil, fmt.Errorf("failed getting %s indexed pods: %s", indexName, err)
+        return nil, fmt.Errorf("failed getting %s indexed pods: %w", indexName, err)
     }
     pods := make([]*corev1.Pod, 0)
     for _, obj := range objs {

@@ -562,11 +564,11 @@ func (s *server) parseContextToken(token string) contextToken {
 func profileID(authority string, ctxToken contextToken, clusterDomain string) (watcher.ProfileID, error) {
     host, _, err := getHostAndPort(authority)
     if err != nil {
-        return watcher.ProfileID{}, fmt.Errorf("invalid authority: %s", err)
+        return watcher.ProfileID{}, fmt.Errorf("invalid authority: %w", err)
     }
     service, _, err := parseK8sServiceName(host, clusterDomain)
     if err != nil {
-        return watcher.ProfileID{}, fmt.Errorf("invalid k8s service name: %s", err)
+        return watcher.ProfileID{}, fmt.Errorf("invalid k8s service name: %w", err)
     }
     id := watcher.ProfileID{
         Name: fmt.Sprintf("%s.%s.svc.%s", service.Name, service.Namespace, clusterDomain),

@@ -863,7 +863,7 @@ func (pp *portPublisher) newPodRefAddress(endpointPort Port, endpointIP, podName
     }
     pod, err := pp.k8sAPI.Pod().Lister().Pods(id.Namespace).Get(id.Name)
     if err != nil {
-        return Address{}, PodID{}, fmt.Errorf("unable to fetch pod %v:%v", id, err)
+        return Address{}, PodID{}, fmt.Errorf("unable to fetch pod %v: %w", id, err)
     }
     ownerKind, ownerName := pp.k8sAPI.GetOwnerKindAndName(context.Background(), pod, false)
     addr := Address{

@@ -1152,7 +1152,7 @@ func isValidSlice(es *discovery.EndpointSlice) bool {
 func SetToServerProtocol(k8sAPI *k8s.API, address *Address, port Port) error {
     servers, err := k8sAPI.Srv().Lister().Servers("").List(labels.Everything())
     if err != nil {
-        return fmt.Errorf("failed to list Servers: %s", err)
+        return fmt.Errorf("failed to list Servers: %w", err)
     }
     if address.Pod == nil {
         return fmt.Errorf("endpoint not backed by Pod: %s:%d", address.IP, address.Port)

@@ -1160,7 +1160,7 @@ func SetToServerProtocol(k8sAPI *k8s.API, address *Address, port Port) error {
     for _, server := range servers {
         selector, err := metav1.LabelSelectorAsSelector(server.Spec.PodSelector)
         if err != nil {
-            return fmt.Errorf("failed to create Selector: %s", err)
+            return fmt.Errorf("failed to create Selector: %w", err)
         }
         if server.Spec.ProxyProtocol == opaqueProtocol && selector.Matches(labels.Set(address.Pod.Labels)) {
             var portMatch bool
@@ -63,7 +63,7 @@ func InitializeIndexers(k8sAPI *k8s.API) error {
     }})

     if err != nil {
-        return fmt.Errorf("could not create an indexer for services: %s", err)
+        return fmt.Errorf("could not create an indexer for services: %w", err)
     }

     err = k8sAPI.Pod().Informer().AddIndexers(cache.Indexers{PodIPIndex: func(obj interface{}) ([]string, error) {

@@ -81,7 +81,7 @@ func InitializeIndexers(k8sAPI *k8s.API) error {
     }})

     if err != nil {
-        return fmt.Errorf("could not create an indexer for pods: %s", err)
+        return fmt.Errorf("could not create an indexer for pods: %w", err)
     }

     err = k8sAPI.Pod().Informer().AddIndexers(cache.Indexers{HostIPIndex: func(obj interface{}) ([]string, error) {

@@ -106,7 +106,7 @@ func InitializeIndexers(k8sAPI *k8s.API) error {
     }})

     if err != nil {
-        return fmt.Errorf("could not create an indexer for pods: %s", err)
+        return fmt.Errorf("could not create an indexer for pods: %w", err)
     }

     return nil

@@ -226,19 +226,19 @@ func Send(v url.Values) error {
 func send(client *http.Client, baseURL string, v url.Values) error {
     req, err := http.NewRequest("GET", baseURL, nil)
     if err != nil {
-        return fmt.Errorf("failed to create HTTP request for base URL [%s]: %s", baseURL, err)
+        return fmt.Errorf("failed to create HTTP request for base URL [%s]: %w", baseURL, err)
     }
     req.URL.RawQuery = v.Encode()

     log.Infof("Sending heartbeat: %s", req.URL.String())
     resp, err := client.Do(req)
     if err != nil {
-        return fmt.Errorf("Check URL [%s] request failed with: %s", req.URL.String(), err)
+        return fmt.Errorf("check URL [%s] request failed with: %w", req.URL.String(), err)
     }
     defer resp.Body.Close()
     body, err := ioutil.ReadAll(resp.Body)
     if err != nil {
-        return fmt.Errorf("failed to read response body: %s", err)
+        return fmt.Errorf("failed to read response body: %w", err)
     }
     if resp.StatusCode != http.StatusOK {
         return fmt.Errorf("request failed with code %d; response body: %s", resp.StatusCode, string(body))

@@ -101,7 +101,7 @@ type API struct {
 func InitializeAPI(ctx context.Context, kubeConfig string, ensureClusterWideAccess bool, resources ...APIResource) (*API, error) {
     config, err := k8s.GetConfig(kubeConfig, "")
     if err != nil {
-        return nil, fmt.Errorf("error configuring Kubernetes API client: %v", err)
+        return nil, fmt.Errorf("error configuring Kubernetes API client: %w", err)
     }

     dynamicClient, err := dynamic.NewForConfig(config)

@@ -38,7 +38,7 @@ func newAPI(retry bool, resourceConfigs []string, extraConfigs ...string) (*API,

     api, err := NewFakeAPI(k8sConfigs...)
     if err != nil {
-        return nil, nil, fmt.Errorf("NewFakeAPI returned an error: %s", err)
+        return nil, nil, fmt.Errorf("NewFakeAPI returned an error: %w", err)
     }

     if retry {

@@ -937,7 +937,7 @@ status:
     }

     pods, err := api.GetPodsFor(k8sInputObj, false)
-    if err != exp.err {
+    if !errors.Is(err, exp.err) {
         t.Fatalf("api.GetPodsFor() unexpected error, expected [%s] got: [%s]", exp.err, err)
     }

@@ -1293,7 +1293,7 @@ spec:
     }

     services, err := api.GetServicesFor(k8sInputObj, false)
-    if err != exp.err {
+    if !errors.Is(err, exp.err) {
         t.Fatalf("api.GetServicesFor() unexpected error, expected [%s] got: [%s]", exp.err, err)
     }
@@ -2,6 +2,7 @@ package main

 import (
     "context"
+    "errors"
     "flag"
     "io"

@@ -50,10 +51,10 @@ func get(client pb.DestinationClient, req *pb.GetDestination) {

     for {
         update, err := rsp.Recv()
-        if err == io.EOF {
-            break
-        }
         if err != nil {
+            if errors.Is(err, io.EOF) {
+                break
+            }
             log.Fatal(err.Error())
         }

@@ -99,10 +100,10 @@ func getProfile(client pb.DestinationClient, req *pb.GetDestination) {

     for {
         update, err := rsp.Recv()
-        if err == io.EOF {
-            break
-        }
         if err != nil {
+            if errors.Is(err, io.EOF) {
+                break
+            }
             log.Fatal(err.Error())
         }
         log.Printf("%+v", update)

@@ -4,6 +4,7 @@ import (
     "context"
     "crypto/tls"
     "encoding/json"
+    "errors"
     "fmt"
     "io/ioutil"
     "net/http"

@@ -106,7 +107,7 @@ func getConfiguredServer(
 func (s *Server) Start() {
     log.Infof("listening at %s", s.Server.Addr)
     if err := s.ListenAndServeTLS("", ""); err != nil {
-        if err == http.ErrServerClosed {
+        if errors.Is(err, http.ErrServerClosed) {
             return
         }
         log.Fatal(err)

@@ -4,6 +4,7 @@ import (
     "bytes"
     "context"
     "crypto/tls"
+    "errors"
     "net/http"
     "net/http/httptest"
     "reflect"

@@ -48,8 +49,8 @@ func TestShutdown(t *testing.T) {

     go func() {
         if err := testServer.ListenAndServe(); err != nil {
-            if err != http.ErrServerClosed {
-                t.Errorf("Expected server to be gracefully shutdown with error: %q", http.ErrServerClosed)
+            if !errors.Is(err, http.ErrServerClosed) {
+                t.Errorf("Unexpected error: %s", err)
             }
         }
     }()

@@ -57,6 +58,6 @@ func TestShutdown(t *testing.T) {
     ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
     defer cancel()
     if err := testServer.Shutdown(ctx); err != nil {
-        t.Fatal("Unexpected error: ", err)
+        t.Fatalf("Unexpected error: %s", err)
     }
 }
@@ -192,7 +192,7 @@ code.`,
 func configureAndRunChecks(wout io.Writer, werr io.Writer, options *checkOptions) error {
     err := options.validate()
     if err != nil {
-        return fmt.Errorf("Validation error when executing check command: %v", err)
+        return fmt.Errorf("Validation error when executing check command: %w", err)
     }

     hc := healthcheck.NewHealthChecker([]healthcheck.CategoryID{}, &healthcheck.Options{

@@ -208,8 +208,7 @@ func configureAndRunChecks(wout io.Writer, werr io.Writer, options *checkOptions

     err = hc.InitializeKubeAPIClient()
     if err != nil {
-        err = fmt.Errorf("Error initializing k8s API client: %s", err)
-        fmt.Fprintln(werr, err)
+        fmt.Fprintf(werr, "Error initializing k8s API client: %s\n", err)
         os.Exit(1)
     }

@@ -93,14 +93,13 @@ non-zero exit code.`,
     // Get the multicluster extension namespace
     kubeAPI, err := k8s.NewAPI(kubeconfigPath, kubeContext, impersonate, impersonateGroup, 0)
     if err != nil {
-        fmt.Fprintf(os.Stderr, "failed to run multicluster check: %v\n", err)
+        fmt.Fprintf(os.Stderr, "failed to run multicluster check: %s\n", err)
         os.Exit(1)
     }

     _, err = kubeAPI.GetNamespaceWithExtensionLabel(context.Background(), MulticlusterExtensionName)
     if err != nil {
-        err = fmt.Errorf("%w; install by running `linkerd multicluster install | kubectl apply -f -`", err)
-        fmt.Fprintln(os.Stderr, err.Error())
+        fmt.Fprintf(os.Stderr, "%s; install by running `linkerd multicluster install | kubectl apply -f -`\n", err)
         os.Exit(1)
     }
     return configureAndRunChecks(stdout, stderr, options)

@@ -124,7 +123,7 @@ non-zero exit code.`,
 func configureAndRunChecks(wout io.Writer, werr io.Writer, options *checkOptions) error {
     err := options.validate()
     if err != nil {
-        return fmt.Errorf("Validation error when executing check command: %v", err)
+        return fmt.Errorf("Validation error when executing check command: %w", err)
     }
     checks := []healthcheck.CategoryID{
         linkerdMulticlusterExtensionCheck,

@@ -141,15 +140,13 @@ func configureAndRunChecks(wout io.Writer, werr io.Writer, options *checkOptions

     err = linkerdHC.InitializeKubeAPIClient()
     if err != nil {
-        err = fmt.Errorf("Error initializing k8s API client: %s", err)
-        fmt.Fprintln(werr, err)
+        fmt.Fprintf(werr, "Error initializing k8s API client: %s\n", err)
         os.Exit(1)
     }

     err = linkerdHC.InitializeLinkerdGlobalConfig(context.Background())
     if err != nil {
-        err = fmt.Errorf("Failed to fetch linkerd config: %s", err)
-        fmt.Fprintln(werr, err)
+        fmt.Fprintf(werr, "Failed to fetch linkerd config: %s\n", err)
         os.Exit(1)
     }

@@ -186,7 +183,7 @@ func multiclusterCategory(hc *healthChecker) *healthcheck.Category {
     WithCheck(func(ctx context.Context) error {
         localAnchors, err := tls.DecodePEMCertificates(hc.LinkerdConfig().IdentityTrustAnchorsPEM)
         if err != nil {
-            return fmt.Errorf("Cannot parse source trust anchors: %s", err)
+            return fmt.Errorf("Cannot parse source trust anchors: %w", err)
         }
         return hc.checkRemoteClusterAnchors(ctx, localAnchors)
     }))

@@ -296,7 +293,7 @@ func multiclusterCategory(hc *healthChecker) *healthcheck.Category {
 func (hc *healthChecker) checkLinkCRD(ctx context.Context) error {
     err := hc.linkAccess(ctx)
     if err != nil {
-        return fmt.Errorf("multicluster.linkerd.io/Link CRD is missing: %s", err)
+        return fmt.Errorf("multicluster.linkerd.io/Link CRD is missing: %w", err)
     }
     return nil
 }

@@ -322,14 +319,14 @@ func (hc *healthChecker) checkLinks(ctx context.Context) error {
         return err
     }
     if len(links) == 0 {
-        return &healthcheck.SkipError{Reason: "no links detected"}
+        return healthcheck.SkipError{Reason: "no links detected"}
     }
     linkNames := []string{}
     for _, l := range links {
         linkNames = append(linkNames, fmt.Sprintf("\t* %s", l.TargetClusterName))
     }
     hc.links = links
-    return &healthcheck.VerboseSuccess{Message: strings.Join(linkNames, "\n")}
+    return healthcheck.VerboseSuccess{Message: strings.Join(linkNames, "\n")}
 }

 func (hc *healthChecker) checkRemoteClusterConnectivity(ctx context.Context) error {
@@ -339,34 +336,34 @@ func (hc *healthChecker) checkRemoteClusterConnectivity(ctx context.Context) err
     // Load the credentials secret
     secret, err := hc.KubeAPIClient().Interface.CoreV1().Secrets(link.Namespace).Get(ctx, link.ClusterCredentialsSecret, metav1.GetOptions{})
     if err != nil {
-        errors = append(errors, fmt.Errorf("* secret: [%s/%s]: %s", link.Namespace, link.ClusterCredentialsSecret, err))
+        errors = append(errors, fmt.Errorf("* secret: [%s/%s]: %w", link.Namespace, link.ClusterCredentialsSecret, err))
         continue
     }
     config, err := servicemirror.ParseRemoteClusterSecret(secret)
     if err != nil {
-        errors = append(errors, fmt.Errorf("* secret: [%s/%s]: could not parse config secret: %s", secret.Namespace, secret.Name, err))
+        errors = append(errors, fmt.Errorf("* secret: [%s/%s]: could not parse config secret: %w", secret.Namespace, secret.Name, err))
         continue
     }
     clientConfig, err := clientcmd.RESTConfigFromKubeConfig(config)
     if err != nil {
-        errors = append(errors, fmt.Errorf("* secret: [%s/%s] cluster: [%s]: unable to parse api config: %s", secret.Namespace, secret.Name, link.TargetClusterName, err))
+        errors = append(errors, fmt.Errorf("* secret: [%s/%s] cluster: [%s]: unable to parse api config: %w", secret.Namespace, secret.Name, link.TargetClusterName, err))
         continue
     }
     remoteAPI, err := k8s.NewAPIForConfig(clientConfig, "", []string{}, healthcheck.RequestTimeout)
     if err != nil {
-        errors = append(errors, fmt.Errorf("* secret: [%s/%s] cluster: [%s]: could not instantiate api for target cluster: %s", secret.Namespace, secret.Name, link.TargetClusterName, err))
+        errors = append(errors, fmt.Errorf("* secret: [%s/%s] cluster: [%s]: could not instantiate api for target cluster: %w", secret.Namespace, secret.Name, link.TargetClusterName, err))
         continue
     }
     // We use this call just to check connectivity.
     _, err = remoteAPI.Discovery().ServerVersion()
     if err != nil {
-        errors = append(errors, fmt.Errorf("* failed to connect to API for cluster: [%s]: %s", link.TargetClusterName, err))
+        errors = append(errors, fmt.Errorf("* failed to connect to API for cluster: [%s]: %w", link.TargetClusterName, err))
         continue
     }
     verbs := []string{"get", "list", "watch"}
     for _, verb := range verbs {
         if err := healthcheck.CheckCanPerformAction(ctx, remoteAPI, verb, corev1.NamespaceAll, "", "v1", "services"); err != nil {
-            errors = append(errors, fmt.Errorf("* missing service permission [%s] for cluster [%s]: %s", verb, link.TargetClusterName, err))
+            errors = append(errors, fmt.Errorf("* missing service permission [%s] for cluster [%s]: %w", verb, link.TargetClusterName, err))
         }
     }
     links = append(links, fmt.Sprintf("\t* %s", link.TargetClusterName))

@@ -375,9 +372,9 @@ func (hc *healthChecker) checkRemoteClusterConnectivity(ctx context.Context) err
     return joinErrors(errors, 2)
     }
     if len(links) == 0 {
-        return &healthcheck.SkipError{Reason: "no links"}
+        return healthcheck.SkipError{Reason: "no links"}
     }
-    return &healthcheck.VerboseSuccess{Message: strings.Join(links, "\n")}
+    return healthcheck.VerboseSuccess{Message: strings.Join(links, "\n")}
 }

 func (hc *healthChecker) checkRemoteClusterAnchors(ctx context.Context, localAnchors []*x509.Certificate) error {

@@ -439,9 +436,9 @@ func (hc *healthChecker) checkRemoteClusterAnchors(ctx context.Context, localAnc
     return fmt.Errorf("Problematic clusters:\n %s", strings.Join(errors, "\n "))
     }
     if len(links) == 0 {
-        return &healthcheck.SkipError{Reason: "no links"}
+        return healthcheck.SkipError{Reason: "no links"}
     }
-    return &healthcheck.VerboseSuccess{Message: strings.Join(links, "\n")}
+    return healthcheck.VerboseSuccess{Message: strings.Join(links, "\n")}
 }

 func (hc *healthChecker) checkServiceMirrorLocalRBAC(ctx context.Context) error {

@@ -506,9 +503,9 @@ func (hc *healthChecker) checkServiceMirrorLocalRBAC(ctx context.Context) error
     return fmt.Errorf(strings.Join(errors, "\n"))
     }
     if len(links) == 0 {
-        return &healthcheck.SkipError{Reason: "no links"}
+        return healthcheck.SkipError{Reason: "no links"}
     }
-    return &healthcheck.VerboseSuccess{Message: strings.Join(links, "\n")}
+    return healthcheck.VerboseSuccess{Message: strings.Join(links, "\n")}
 }

 func (hc *healthChecker) checkServiceMirrorController(ctx context.Context) error {

@@ -541,9 +538,9 @@ func (hc *healthChecker) checkServiceMirrorController(ctx context.Context) error
     return joinErrors(errors, 2)
     }
     if len(clusterNames) == 0 {
-        return &healthcheck.SkipError{Reason: "no links"}
+        return healthcheck.SkipError{Reason: "no links"}
     }
-    return &healthcheck.VerboseSuccess{Message: strings.Join(clusterNames, "\n")}
+    return healthcheck.VerboseSuccess{Message: strings.Join(clusterNames, "\n")}
 }

 func (hc *healthChecker) checkIfGatewayMirrorsHaveEndpoints(ctx context.Context) error {

@@ -570,13 +567,13 @@ func (hc *healthChecker) checkIfGatewayMirrorsHaveEndpoints(ctx context.Context)

     vizNs, err := hc.KubeAPIClient().GetNamespaceWithExtensionLabel(ctx, vizCmd.ExtensionName)
     if err != nil {
-        return &healthcheck.SkipError{Reason: "failed to fetch gateway metrics"}
+        return healthcheck.SkipError{Reason: "failed to fetch gateway metrics"}
     }

     // Check gateway liveness according to probes
     vizClient, err := client.NewExternalClient(ctx, vizNs.Name, hc.KubeAPIClient())
     if err != nil {
-        errors = append(errors, fmt.Errorf("failed to initialize viz client: %s", err))
+        errors = append(errors, fmt.Errorf("failed to initialize viz client: %w", err))
         break
     }
     req := vizPb.GatewaysRequest{

@@ -585,7 +582,7 @@ func (hc *healthChecker) checkIfGatewayMirrorsHaveEndpoints(ctx context.Context)
     }
     rsp, err := vizClient.Gateways(ctx, &req)
     if err != nil {
-        errors = append(errors, fmt.Errorf("failed to fetch gateway metrics for %s.%s: %s", svc.Name, svc.Namespace, err))
+        errors = append(errors, fmt.Errorf("failed to fetch gateway metrics for %s.%s: %w", svc.Name, svc.Namespace, err))
         continue
     }
     table := rsp.GetOk().GetGatewaysTable()

@@ -608,9 +605,9 @@ func (hc *healthChecker) checkIfGatewayMirrorsHaveEndpoints(ctx context.Context)
     return joinErrors(errors, 1)
     }
     if len(links) == 0 {
-        return &healthcheck.SkipError{Reason: "no links"}
+        return healthcheck.SkipError{Reason: "no links"}
     }
-    return &healthcheck.VerboseSuccess{Message: strings.Join(links, "\n")}
+    return healthcheck.VerboseSuccess{Message: strings.Join(links, "\n")}
 }

 func (hc *healthChecker) checkIfMirrorServicesHaveEndpoints(ctx context.Context) error {

@@ -631,7 +628,7 @@ func (hc *healthChecker) checkIfMirrorServicesHaveEndpoints(ctx context.Context)
     return fmt.Errorf("Some mirror services do not have endpoints:\n %s", strings.Join(servicesWithNoEndpoints, "\n "))
     }
     if len(mirrorServices.Items) == 0 {
-        return &healthcheck.SkipError{Reason: "no mirror services"}
+        return healthcheck.SkipError{Reason: "no mirror services"}
     }
     return nil
 }

@@ -661,7 +658,7 @@ func (hc *healthChecker) checkForOrphanedServices(ctx context.Context) error {
     }
     }
     if len(mirrorServices.Items) == 0 {
-        return &healthcheck.SkipError{Reason: "no mirror services"}
+        return healthcheck.SkipError{Reason: "no mirror services"}
     }
     if len(errors) > 0 {
         return joinErrors(errors, 1)

@@ -47,7 +47,7 @@ func newGatewaysCommand() *cobra.Command {

     vizNs, err := k8sAPI.GetNamespaceWithExtensionLabel(ctx, vizCmd.ExtensionName)
     if err != nil {
-        return fmt.Errorf("make sure the linkerd-viz extension is installed, using 'linkerd viz install' (%s)", err)
+        return fmt.Errorf("make sure the linkerd-viz extension is installed, using 'linkerd viz install' (%w)", err)
     }

     client, err := client.NewExternalClient(ctx, vizNs.Name, k8sAPI)

@@ -57,7 +57,7 @@ func newGatewaysCommand() *cobra.Command {

     resp, err := requestGatewaysFromAPI(client, req)
     if err != nil {
-        fmt.Fprint(os.Stderr, err.Error())
+        fmt.Fprintln(os.Stderr, err)
         os.Exit(1)
     }

@@ -76,7 +76,7 @@ func newGatewaysCommand() *cobra.Command {
 func requestGatewaysFromAPI(client pb.ApiClient, req *pb.GatewaysRequest) (*pb.GatewaysResponse, error) {
     resp, err := client.Gateways(context.Background(), req)
     if err != nil {
-        return nil, fmt.Errorf("Gateways API error: %v", err)
+        return nil, fmt.Errorf("Gateways API error: %w", err)
     }
     if e := resp.GetError(); e != nil {
         return nil, fmt.Errorf("Gateways API response error: %v", e.Error)
@@ -44,7 +44,7 @@ func newMulticlusterInstallCommand() *cobra.Command {
     var ignoreCluster bool

     if err != nil {
-        fmt.Fprintf(os.Stderr, "%s", err)
+        fmt.Fprintln(os.Stderr, err)
         os.Exit(1)
     }

@@ -54,7 +54,7 @@ func newLinkCommand() *cobra.Command {
     var valuesOptions valuespkg.Options

     if err != nil {
-        fmt.Fprintf(os.Stderr, "%s", err)
+        fmt.Fprintln(os.Stderr, err)
         os.Exit(1)
     }

@@ -155,7 +155,7 @@ func loadCredentials(ctx context.Context, link multicluster.Link, namespace stri
     // Load the credentials secret
     secret, err := k8sAPI.Interface.CoreV1().Secrets(namespace).Get(ctx, link.ClusterCredentialsSecret, metav1.GetOptions{})
     if err != nil {
-        return nil, fmt.Errorf("Failed to load credentials secret %s: %s", link.ClusterCredentialsSecret, err)
+        return nil, fmt.Errorf("failed to load credentials secret %s: %w", link.ClusterCredentialsSecret, err)
     }
     return sm.ParseRemoteClusterSecret(secret)
 }

@@ -180,7 +180,7 @@ func restartClusterWatcher(

     cfg, err := clientcmd.RESTConfigFromKubeConfig(creds)
     if err != nil {
-        return fmt.Errorf("Unable to parse kube config: %s", err)
+        return fmt.Errorf("unable to parse kube config: %w", err)
     }

     clusterWatcher, err = servicemirror.NewRemoteClusterServiceWatcher(

@@ -194,17 +194,17 @@ func restartClusterWatcher(
         enableHeadlessSvc,
     )
     if err != nil {
-        return fmt.Errorf("Unable to create cluster watcher: %s", err)
+        return fmt.Errorf("unable to create cluster watcher: %w", err)
     }

     err = clusterWatcher.Start(ctx)
     if err != nil {
-        return fmt.Errorf("Failed to start cluster watcher: %s", err)
+        return fmt.Errorf("failed to start cluster watcher: %w", err)
     }

     workerMetrics, err := metrics.NewWorkerMetrics(link.TargetClusterName)
     if err != nil {
-        return fmt.Errorf("Failed to create metrics for cluster watcher: %s", err)
+        return fmt.Errorf("failed to create metrics for cluster watcher: %w", err)
     }
     probeWorker = servicemirror.NewProbeWorker(fmt.Sprintf("probe-gateway-%s", link.TargetClusterName), &link.ProbeSpec, workerMetrics, link.TargetClusterName)
     probeWorker.Start()

@@ -21,7 +21,7 @@ import (
 func newUnlinkCommand() *cobra.Command {
     opts, err := newLinkOptionsWithDefault()
     if err != nil {
-        fmt.Fprintf(os.Stderr, "%s", err)
+        fmt.Fprintln(os.Stderr, err)
         os.Exit(1)
     }

@@ -2,6 +2,7 @@ package servicemirror

 import (
     "context"
+    "errors"
     "fmt"
     "net"
     "strings"
@@ -159,11 +160,11 @@ func NewRemoteClusterServiceWatcher(
 ) (*RemoteClusterServiceWatcher, error) {
     remoteAPI, err := k8s.InitializeAPIForConfig(ctx, cfg, false, k8s.Svc, k8s.Endpoint)
     if err != nil {
-        return nil, fmt.Errorf("cannot initialize api for target cluster %s: %s", clusterName, err)
+        return nil, fmt.Errorf("cannot initialize api for target cluster %s: %w", clusterName, err)
     }
     _, err = remoteAPI.Client.Discovery().ServerVersion()
     if err != nil {
-        return nil, fmt.Errorf("cannot connect to api for target cluster %s: %s", clusterName, err)
+        return nil, fmt.Errorf("cannot connect to api for target cluster %s: %w", clusterName, err)
     }

     // Create k8s event recorder

@@ -273,7 +274,7 @@ func (rcsw *RemoteClusterServiceWatcher) cleanupOrphanedServices(ctx context.Con

     servicesOnLocalCluster, err := rcsw.localAPIClient.Svc().Lister().List(labels.Set(matchLabels).AsSelector())
     if err != nil {
-        innerErr := fmt.Errorf("failed to list services while cleaning up mirror services: %s", err)
+        innerErr := fmt.Errorf("failed to list services while cleaning up mirror services: %w", err)
         if kerrors.IsNotFound(err) {
             return innerErr
         }

@@ -314,7 +315,7 @@ func (rcsw *RemoteClusterServiceWatcher) cleanupMirroredResources(ctx context.Co

     services, err := rcsw.localAPIClient.Svc().Lister().List(labels.Set(matchLabels).AsSelector())
     if err != nil {
-        innerErr := fmt.Errorf("could not retrieve mirrored services that need cleaning up: %s", err)
+        innerErr := fmt.Errorf("could not retrieve mirrored services that need cleaning up: %w", err)
         if kerrors.IsNotFound(err) {
             return innerErr
         }

@@ -328,7 +329,7 @@ func (rcsw *RemoteClusterServiceWatcher) cleanupMirroredResources(ctx context.Co
         if kerrors.IsNotFound(err) {
             continue
         }
-        errors = append(errors, fmt.Errorf("Could not delete service %s/%s: %s", svc.Namespace, svc.Name, err))
+        errors = append(errors, fmt.Errorf("Could not delete service %s/%s: %w", svc.Namespace, svc.Name, err))
     } else {
         rcsw.log.Infof("Deleted service %s/%s", svc.Namespace, svc.Name)
     }

@@ -336,7 +337,7 @@ func (rcsw *RemoteClusterServiceWatcher) cleanupMirroredResources(ctx context.Co

     endpoints, err := rcsw.localAPIClient.Endpoint().Lister().List(labels.Set(matchLabels).AsSelector())
     if err != nil {
-        innerErr := fmt.Errorf("could not retrieve endpoints that need cleaning up: %s", err)
+        innerErr := fmt.Errorf("could not retrieve endpoints that need cleaning up: %w", err)
         if kerrors.IsNotFound(err) {
             return innerErr
         }

@@ -348,7 +349,7 @@ func (rcsw *RemoteClusterServiceWatcher) cleanupMirroredResources(ctx context.Co
         if kerrors.IsNotFound(err) {
             continue
         }
-        errors = append(errors, fmt.Errorf("Could not delete endpoints %s/%s: %s", endpoint.Namespace, endpoint.Name, err))
+        errors = append(errors, fmt.Errorf("Could not delete endpoints %s/%s: %w", endpoint.Namespace, endpoint.Name, err))
     } else {
         rcsw.log.Infof("Deleted endpoints %s/%s", endpoint.Namespace, endpoint.Name)
     }

@@ -370,7 +371,7 @@ func (rcsw *RemoteClusterServiceWatcher) handleRemoteServiceDeleted(ctx context.
         rcsw.log.Debugf("Failed to delete mirror service %s/%s: %v", ev.Namespace, ev.Name, err)
         return nil
     }
-    errors = append(errors, fmt.Errorf("could not fetch service %s/%s: %s", ev.Namespace, localServiceName, err))
+    errors = append(errors, fmt.Errorf("could not fetch service %s/%s: %w", ev.Namespace, localServiceName, err))
 }

 // If the mirror service is headless, also delete its endpoint mirror

@@ -382,7 +383,7 @@ func (rcsw *RemoteClusterServiceWatcher) handleRemoteServiceDeleted(ctx context.
     endpointMirrorServices, err := rcsw.localAPIClient.Svc().Lister().List(labels.Set(matchLabels).AsSelector())
     if err != nil {
         if !kerrors.IsNotFound(err) {
-            errors = append(errors, fmt.Errorf("could not fetch endpoint mirrors for mirror service %s/%s: %s", ev.Namespace, localServiceName, err))
+            errors = append(errors, fmt.Errorf("could not fetch endpoint mirrors for mirror service %s/%s: %w", ev.Namespace, localServiceName, err))
         }
     }

@@ -390,7 +391,7 @@ func (rcsw *RemoteClusterServiceWatcher) handleRemoteServiceDeleted(ctx context.
     err = rcsw.localAPIClient.Client.CoreV1().Services(endpointMirror.Namespace).Delete(ctx, endpointMirror.Name, metav1.DeleteOptions{})
     if err != nil {
         if !kerrors.IsNotFound(err) {
-            errors = append(errors, fmt.Errorf("could not delete endpoint mirror %s/%s: %s", endpointMirror.Namespace, endpointMirror.Name, err))
+            errors = append(errors, fmt.Errorf("could not delete endpoint mirror %s/%s: %w", endpointMirror.Namespace, endpointMirror.Name, err))
         }
     }
 }

@@ -399,7 +400,7 @@ func (rcsw *RemoteClusterServiceWatcher) handleRemoteServiceDeleted(ctx context.
     rcsw.log.Infof("Deleting mirrored service %s/%s", ev.Namespace, localServiceName)
     if err := rcsw.localAPIClient.Client.CoreV1().Services(ev.Namespace).Delete(ctx, localServiceName, metav1.DeleteOptions{}); err != nil {
         if !kerrors.IsNotFound(err) {
-            errors = append(errors, fmt.Errorf("could not delete service: %s/%s: %s", ev.Namespace, localServiceName, err))
+            errors = append(errors, fmt.Errorf("could not delete service: %s/%s: %w", ev.Namespace, localServiceName, err))
         }
     }

@@ -753,21 +754,18 @@ func (rcsw *RemoteClusterServiceWatcher) processEvents(ctx context.Context) {
     if err == nil {
         rcsw.eventsQueue.Forget(event)
     } else {
-        switch e := err.(type) {
-        case RetryableError:
-            {
-                rcsw.log.Warnf("Requeues: %d, Limit: %d for event %s", rcsw.eventsQueue.NumRequeues(event), rcsw.requeueLimit, event)
-                if (rcsw.eventsQueue.NumRequeues(event) < rcsw.requeueLimit) && !done {
-                    rcsw.log.Errorf("Error processing %s (will retry): %s", event, e)
-                    rcsw.eventsQueue.AddRateLimited(event)
-                } else {
-                    rcsw.log.Errorf("Error processing %s (giving up): %s", event, e)
-                    rcsw.eventsQueue.Forget(event)
-                }
-            }
-        default:
-            rcsw.log.Errorf("Error processing %s (will not retry): %s", event, e)
-            rcsw.log.Error(e)
+        var re RetryableError
+        if errors.As(err, &re) {
+            rcsw.log.Warnf("Requeues: %d, Limit: %d for event %s", rcsw.eventsQueue.NumRequeues(event), rcsw.requeueLimit, event)
+            if (rcsw.eventsQueue.NumRequeues(event) < rcsw.requeueLimit) && !done {
+                rcsw.log.Errorf("Error processing %s (will retry): %s", event, re)
+                rcsw.eventsQueue.AddRateLimited(event)
+            } else {
+                rcsw.log.Errorf("Error processing %s (giving up): %s", event, re)
+                rcsw.eventsQueue.Forget(event)
+            }
+        } else {
+            rcsw.log.Errorf("Error processing %s (will not retry): %s", event, err)
         }
     }
     if done {
@@ -891,7 +889,7 @@ func (rcsw *RemoteClusterServiceWatcher) resolveGatewayAddress() ([]corev1.Endpo
         IP: ipAddr.String(),
     })
     } else {
-        err = fmt.Errorf("Error resolving '%s': %s", addr, err)
+        err = fmt.Errorf("Error resolving '%s': %w", addr, err)
         rcsw.log.Warn(err)
         errors = append(errors, err)
     }

@@ -1061,7 +1059,7 @@ func (rcsw *RemoteClusterServiceWatcher) handleCreateOrUpdateEndpoints(
     exportedService, err := rcsw.remoteAPIClient.Svc().Lister().Services(exportedEndpoints.Namespace).Get(exportedEndpoints.Name)
     if err != nil {
         return RetryableError{[]error{
-            fmt.Errorf("error retrieving exported service %s/%s: %v", exportedEndpoints.Namespace, exportedEndpoints.Name, err),
+            fmt.Errorf("error retrieving exported service %s/%s: %w", exportedEndpoints.Namespace, exportedEndpoints.Name, err),
         }}
     }
     gatewayAddresses, err := rcsw.resolveGatewayAddress()

@@ -38,7 +38,7 @@ func (rcsw *RemoteClusterServiceWatcher) createOrUpdateHeadlessEndpoints(ctx con
     exportedService, err := rcsw.remoteAPIClient.Svc().Lister().Services(exportedEndpoints.Namespace).Get(exportedEndpoints.Name)
     if err != nil {
         rcsw.log.Debugf("failed to retrieve exported service %s/%s when updating its headless mirror endpoints: %v", exportedEndpoints.Namespace, exportedEndpoints.Name, err)
-        return fmt.Errorf("error retrieving exported service %s/%s: %v", exportedEndpoints.Namespace, exportedEndpoints.Name, err)
+        return fmt.Errorf("error retrieving exported service %s/%s: %w", exportedEndpoints.Namespace, exportedEndpoints.Name, err)
     }

     // Check whether the endpoints should be processed for a headless exported

@@ -158,7 +158,7 @@ func (rcsw *RemoteClusterServiceWatcher) createOrUpdateHeadlessEndpoints(ctx con
     err := rcsw.localAPIClient.Client.CoreV1().Services(service.Namespace).Delete(ctx, service.Name, metav1.DeleteOptions{})
     if err != nil {
         if !kerrors.IsNotFound(err) {
-            errors = append(errors, fmt.Errorf("error deleting Endpoint Mirror service %s/%s: %v", service.Namespace, service.Name, err))
+            errors = append(errors, fmt.Errorf("error deleting Endpoint Mirror service %s/%s: %w", service.Namespace, service.Name, err))
         }
     }
 }

@@ -306,12 +306,12 @@ func (v *Values) ToMap() (map[string]interface{}, error) {
     var valuesMap map[string]interface{}
     rawValues, err := yaml.Marshal(v)
     if err != nil {
-        return nil, fmt.Errorf("Failed to marshal the values struct: %s", err)
+        return nil, fmt.Errorf("Failed to marshal the values struct: %w", err)
     }

     err = yaml.Unmarshal(rawValues, &valuesMap)
     if err != nil {
-        return nil, fmt.Errorf("Failed to Unmarshal Values into a map: %s", err)
+        return nil, fmt.Errorf("Failed to Unmarshal Values into a map: %w", err)
     }

     return valuesMap, nil

@@ -55,7 +55,7 @@ func Uninstall(ctx context.Context, k8sAPI *k8s.KubernetesAPI, selector string)
     }
     for _, r := range resources {
         if err := r.RenderResource(os.Stdout); err != nil {
-            return fmt.Errorf("error rendering Kubernetes resource: %v", err)
+            return fmt.Errorf("error rendering Kubernetes resource: %w", err)
         }
     }
     return nil

@@ -20,12 +20,12 @@ func Values(filepath string) (*l5dcharts.Values, error) {
     values := &l5dcharts.Values{}
     configYaml, err := ioutil.ReadFile(filepath)
     if err != nil {
-        return nil, fmt.Errorf("failed to read config file: %s", err)
+        return nil, fmt.Errorf("failed to read config file: %w", err)
     }

     log.Debugf("%s config YAML: %s", filepath, configYaml)
     if err = yaml.Unmarshal(configYaml, values); err != nil {
-        return nil, fmt.Errorf("failed to unmarshal JSON from: %s: %s", filepath, err)
+        return nil, fmt.Errorf("failed to unmarshal JSON from: %s: %w", filepath, err)
     }
     return values, err
 }
@@ -215,7 +215,7 @@ type ResourceError struct {

 // Error satisfies the error interface for ResourceError. The output is intended
 // for `linkerd check`.
-func (e *ResourceError) Error() string {
+func (e ResourceError) Error() string {
     names := []string{}
     for _, res := range e.Resources {
         names = append(names, res.name)

@@ -231,13 +231,14 @@ type CategoryError struct {
 }

 // Error satisfies the error interface for CategoryError.
-func (e *CategoryError) Error() string {
+func (e CategoryError) Error() string {
     return e.Err.Error()
 }

 // IsCategoryError returns true if passed in error is of type CategoryError and belong to the given category
 func IsCategoryError(err error, categoryID CategoryID) bool {
-    if ce, ok := err.(*CategoryError); ok {
+    var ce CategoryError
+    if errors.As(err, &ce) {
         return ce.Category == categoryID
     }
     return false

@@ -249,7 +250,7 @@ type SkipError struct {
 }

 // Error satisfies the error interface for SkipError.
-func (e *SkipError) Error() string {
+func (e SkipError) Error() string {
     return e.Reason
 }

@@ -261,7 +262,7 @@ type VerboseSuccess struct {

 // Error satisfies the error interface for VerboseSuccess. Since VerboseSuccess
 // does not actually represent a failure, this returns the empty string.
-func (e *VerboseSuccess) Error() string {
+func (e VerboseSuccess) Error() string {
     return ""
 }
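The receiver changes above (for example `func (e *CategoryError) Error()` becoming `func (e CategoryError) Error()`) let callers match these types by value with `errors.As`, as the updated checks now do. A small sketch of the mechanics (the `demoError` type is invented for this example and is not part of the Linkerd codebase):

    package main

    import (
        "errors"
        "fmt"
    )

    // demoError is a hypothetical error type used only for this sketch.
    // With a value receiver, a demoError value satisfies the error interface,
    // so `var e demoError; errors.As(err, &e)` can match it directly.
    type demoError struct{ reason string }

    func (e demoError) Error() string { return e.reason }

    func main() {
        err := fmt.Errorf("check failed: %w", demoError{reason: "no links"})
        var de demoError
        fmt.Println(errors.As(err, &de), de.reason) // true no links
    }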
@@ -822,7 +823,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     err := hc.InitializeLinkerdGlobalConfig(ctx)
     if err != nil {
         if kerrors.IsNotFound(err) {
-            return &SkipError{Reason: configMapDoesNotExistSkipReason}
+            return SkipError{Reason: configMapDoesNotExistSkipReason}
         }
         return err
     }

@@ -846,7 +847,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     fatal: true,
     check: func(ctx context.Context) error {
         if !hc.CNIEnabled {
-            return &SkipError{Reason: linkerdCNIDisabledSkipReason}
+            return SkipError{Reason: linkerdCNIDisabledSkipReason}
         }
         _, err := hc.kubeAPI.CoreV1().ConfigMaps(hc.CNINamespace).Get(ctx, linkerdCNIConfigMapName, metav1.GetOptions{})
         return err

@@ -858,7 +859,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     fatal: true,
     check: func(ctx context.Context) error {
         if !hc.CNIEnabled {
-            return &SkipError{Reason: linkerdCNIDisabledSkipReason}
+            return SkipError{Reason: linkerdCNIDisabledSkipReason}
         }
         _, err := hc.kubeAPI.RbacV1().ClusterRoles().Get(ctx, linkerdCNIResourceName, metav1.GetOptions{})
         if kerrors.IsNotFound(err) {

@@ -873,7 +874,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     fatal: true,
     check: func(ctx context.Context) error {
         if !hc.CNIEnabled {
-            return &SkipError{Reason: linkerdCNIDisabledSkipReason}
+            return SkipError{Reason: linkerdCNIDisabledSkipReason}
         }
         _, err := hc.kubeAPI.RbacV1().ClusterRoleBindings().Get(ctx, linkerdCNIResourceName, metav1.GetOptions{})
         if kerrors.IsNotFound(err) {

@@ -888,7 +889,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     fatal: true,
     check: func(ctx context.Context) error {
         if !hc.CNIEnabled {
-            return &SkipError{Reason: linkerdCNIDisabledSkipReason}
+            return SkipError{Reason: linkerdCNIDisabledSkipReason}
         }
         _, err := hc.kubeAPI.CoreV1().ServiceAccounts(hc.CNINamespace).Get(ctx, linkerdCNIResourceName, metav1.GetOptions{})
         if kerrors.IsNotFound(err) {

@@ -903,7 +904,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     fatal: true,
     check: func(ctx context.Context) (err error) {
         if !hc.CNIEnabled {
-            return &SkipError{Reason: linkerdCNIDisabledSkipReason}
+            return SkipError{Reason: linkerdCNIDisabledSkipReason}
         }
         hc.cniDaemonSet, err = hc.kubeAPI.Interface.AppsV1().DaemonSets(hc.CNINamespace).Get(ctx, linkerdCNIResourceName, metav1.GetOptions{})
         if kerrors.IsNotFound(err) {

@@ -920,7 +921,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     fatal: true,
     check: func(ctx context.Context) (err error) {
         if !hc.CNIEnabled {
-            return &SkipError{Reason: linkerdCNIDisabledSkipReason}
+            return SkipError{Reason: linkerdCNIDisabledSkipReason}
         }
         hc.cniDaemonSet, err = hc.kubeAPI.Interface.AppsV1().DaemonSets(hc.CNINamespace).Get(ctx, linkerdCNIResourceName, metav1.GetOptions{})
         if kerrors.IsNotFound(err) {

@@ -1007,7 +1008,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     fatal: true,
     check: func(context.Context) error {
         if err := issuercerts.CheckCertAlgoRequirements(hc.issuerCert.Certificate); err != nil {
-            return fmt.Errorf("issuer certificate %s", err)
+            return fmt.Errorf("issuer certificate %w", err)
         }
         return nil
     },

@@ -1018,7 +1019,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     fatal: true,
     check: func(ctx context.Context) error {
         if err := issuercerts.CheckCertValidityPeriod(hc.issuerCert.Certificate); err != nil {
-            return fmt.Errorf("issuer certificate is %s", err)
+            return fmt.Errorf("issuer certificate is %w", err)
         }
         return nil
     },

@@ -1029,7 +1030,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     hintAnchor: "l5d-identity-issuer-cert-not-expiring-soon",
     check: func(context.Context) error {
         if err := issuercerts.CheckExpiringSoon(hc.issuerCert.Certificate); err != nil {
-            return fmt.Errorf("issuer certificate %s", err)
+            return fmt.Errorf("issuer certificate %w", err)
         }
         return nil
     },

@@ -1127,14 +1128,14 @@ func (hc *HealthChecker) allCategories() []*Category {
     check: func(ctx context.Context) (err error) {
         anchors, err := hc.fetchWebhookCaBundle(ctx, k8s.PolicyValidatorWebhookConfigName)
         if kerrors.IsNotFound(err) {
-            return &SkipError{Reason: "policy-validator not installed"}
+            return SkipError{Reason: "policy-validator not installed"}
         }
         if err != nil {
             return err
         }
         cert, err := hc.FetchCredsFromSecret(ctx, hc.ControlPlaneNamespace, policyValidatorTLSSecretName)
         if kerrors.IsNotFound(err) {
-            return &SkipError{Reason: "policy-validator not installed"}
+            return SkipError{Reason: "policy-validator not installed"}
         }
         if err != nil {
             return err

@@ -1150,7 +1151,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     check: func(ctx context.Context) error {
         cert, err := hc.FetchCredsFromSecret(ctx, hc.ControlPlaneNamespace, policyValidatorTLSSecretName)
         if kerrors.IsNotFound(err) {
-            return &SkipError{Reason: "policy-validator not installed"}
+            return SkipError{Reason: "policy-validator not installed"}
         }
         if err != nil {
             return err

@@ -1403,7 +1404,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     if policy != nil && *policy == admissionRegistration.Fail {
         return hc.checkHAMetadataPresentOnKubeSystemNamespace(ctx)
     }
-    return &SkipError{Reason: "not run for non HA installs"}
+    return SkipError{Reason: "not run for non HA installs"}
     },
     },
     {

@@ -1415,7 +1416,7 @@ func (hc *HealthChecker) allCategories() []*Category {
     if hc.isHA() {
         return hc.checkMinReplicasAvailable(ctx)
     }
-    return &SkipError{Reason: "not run for non HA installs"}
+    return SkipError{Reason: "not run for non HA installs"}
     },
     },
     },

@@ -1481,11 +1482,11 @@ func (hc *HealthChecker) CheckCertAndAnchors(cert *tls.Cred, trustAnchors []*x50

     // check cert validity
     if err := issuercerts.CheckCertValidityPeriod(cert.Certificate); err != nil {
-        return fmt.Errorf("certificate is %s", err)
+        return fmt.Errorf("certificate is %w", err)
     }

     if err := cert.Verify(tls.CertificatesToPool(trustAnchors), identityName, time.Time{}); err != nil {
-        return fmt.Errorf("cert is not issued by the trust anchor: %s", err)
|
||||
return fmt.Errorf("cert is not issued by the trust anchor: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -1526,7 +1527,7 @@ func (hc *HealthChecker) CheckCertAndAnchorsExpiringSoon(cert *tls.Cred) error {
|
|||
|
||||
// check cert not expiring soon
|
||||
if err := issuercerts.CheckExpiringSoon(cert.Certificate); err != nil {
|
||||
return fmt.Errorf("certificate %s", err)
|
||||
return fmt.Errorf("certificate %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -1616,7 +1617,8 @@ func (hc *HealthChecker) runCheck(category *Category, c *Checker, observer Check
ctx, cancel := context.WithTimeout(context.Background(), RequestTimeout)
defer cancel()
err := c.check(ctx)
if se, ok := err.(*SkipError); ok {
var se SkipError
if errors.As(err, &se) {
log.Debugf("Skipping check: %s. Reason: %s", c.description, se.Reason)
return true
}

@ -1627,10 +1629,11 @@ func (hc *HealthChecker) runCheck(category *Category, c *Checker, observer Check
Warning: c.warning,
HintURL: fmt.Sprintf("%s%s", category.hintBaseURL, c.hintAnchor),
}
if vs, ok := err.(*VerboseSuccess); ok {
var vs VerboseSuccess
if errors.As(err, &vs) {
checkResult.Description = fmt.Sprintf("%s\n%s", checkResult.Description, vs.Message)
} else if err != nil {
checkResult.Err = &CategoryError{category.ID, err}
checkResult.Err = CategoryError{category.ID, err}
}

if checkResult.Err != nil && time.Now().Before(c.retryDeadline) {
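This hunk trades direct type assertions for `errors.As`. Besides satisfying the lint, the new form keeps working even if some intermediate layer starts wrapping the check's error with `%w`. A small sketch of that property, using a locally declared stand-in for `VerboseSuccess`:

```go
package main

import (
	"errors"
	"fmt"
)

// VerboseSuccess is a local stand-in with the same shape as the type above.
type VerboseSuccess struct{ Message string }

func (e VerboseSuccess) Error() string { return "" }

func main() {
	// Suppose a later refactor wraps the check's result with extra context.
	wrapped := fmt.Errorf("running category %s: %w", "linkerd-existence", VerboseSuccess{Message: "details"})

	// The old one-shot assertion only inspects the outermost value.
	_, ok := wrapped.(VerboseSuccess)
	fmt.Println(ok) // false

	// errors.As walks the %w chain and still recovers the VerboseSuccess.
	var vs VerboseSuccess
	fmt.Println(errors.As(wrapped, &vs), vs.Message) // true details
}
```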
@ -1875,7 +1878,7 @@ func (hc *HealthChecker) checkClusterNetworks(ctx context.Context) error {
}
if !podCIDRExists {
// DigitalOcean for example, doesn't expose spec.podCIDR (#6398)
return &SkipError{Reason: podCIDRUnavailableSkipReason}
return SkipError{Reason: podCIDRUnavailableSkipReason}
}
if len(badPodCIDRS) > 0 {
sort.Strings(badPodCIDRS)

@ -2213,7 +2216,7 @@ func checkResources(resourceName string, objects []runtime.Object, expectedNames
}
resources = append(resources, res)
}
return &ResourceError{resourceName, resources}
return ResourceError{resourceName, resources}
}
return nil
}
@ -2498,18 +2501,18 @@ func (hc *HealthChecker) checkCanCreateNonNamespacedResources(ctx context.Contex
for {
// Read single object YAML
objYAML, err := yamlReader.Read()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("error reading install manifest: %v", err)
if errors.Is(err, io.EOF) {
break
}
return fmt.Errorf("error reading install manifest: %w", err)
}

// Create unstructured object from YAML
objMap := map[string]interface{}{}
err = yaml.Unmarshal(objYAML, &objMap)
if err != nil {
return fmt.Errorf("error unmarshaling yaml object %s: %v", objYAML, err)
return fmt.Errorf("error unmarshaling yaml object %s: %w", objYAML, err)
}
if len(objMap) == 0 {
// Ignore header blocks with only comments
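The `err == io.EOF` comparison is replaced with `errors.Is(err, io.EOF)`. Plain equality only matches when the sentinel is returned as-is; `errors.Is` also matches once something along the way wraps it. An illustrative sketch (the wrapping helper is hypothetical, not part of this codebase):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// readNext is a hypothetical reader step that adds context before passing
// io.EOF along.
func readNext() error {
	return fmt.Errorf("reading install manifest: %w", io.EOF)
}

func main() {
	err := readNext()
	fmt.Println(err == io.EOF)          // false: the sentinel is hidden behind the wrap
	fmt.Println(errors.Is(err, io.EOF)) // true: errors.Is walks the chain
}
```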
@ -155,7 +155,7 @@ func RunExtensionsChecks(wout io.Writer, werr io.Writer, extensions []string, fl
if len(stderr.String()) > 0 {
err = errors.New(stderr.String())
} else {
err = fmt.Errorf("invalid extension check output from \"%s\" (JSON object expected):\n%s\n[%s]", command, stdout.String(), err)
err = fmt.Errorf("invalid extension check output from \"%s\" (JSON object expected):\n%s\n[%w]", command, stdout.String(), err)
}
results.Results = append(results.Results, CheckResult{
Category: CategoryID(extensionCmd),
@ -142,7 +142,7 @@ func TestHealthChecker(t *testing.T) {
{
description: "skip",
check: func(context.Context) error {
return &SkipError{Reason: "needs skipping"}
return SkipError{Reason: "needs skipping"}
},
retryDeadline: time.Time{},
},

@ -156,7 +156,7 @@ func TestHealthChecker(t *testing.T) {
{
description: "skipRpc",
check: func(context.Context) error {
return &SkipError{Reason: "needs skipping"}
return SkipError{Reason: "needs skipping"}
},
retryDeadline: time.Time{},
},
@ -14,12 +14,12 @@ import (
func GetServerVersion(ctx context.Context, controlPlaneNamespace string, kubeAPI *k8s.KubernetesAPI) (string, error) {
cm, err := config.FetchLinkerdConfigMap(ctx, kubeAPI, controlPlaneNamespace)
if err != nil {
return "", fmt.Errorf("failed to fetch linkerd-config: %s", err)
return "", fmt.Errorf("failed to fetch linkerd-config: %w", err)
}

values, err := linkerd2.ValuesFromConfigMap(cm)
if err != nil {
return "", fmt.Errorf("failed to load values from linkerd-config: %s", err)
return "", fmt.Errorf("failed to load values from linkerd-config: %w", err)
}

return values.LinkerdVersion, nil
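Most of this commit is the mechanical `%s`/`%v` to `%w` switch shown here: the rendered message is unchanged, but the cause is preserved for `errors.Is` and `errors.As` further up the stack. A hedged sketch of the difference (the helper and path are invented for the example):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// loadConfig is illustrative only; it is not the function shown above.
func loadConfig(path string) error {
	if _, err := os.Stat(path); err != nil {
		// %w records err in the chain; %s or %v would keep only its text.
		return fmt.Errorf("failed to fetch linkerd-config: %w", err)
	}
	return nil
}

func main() {
	err := loadConfig("/does/not/exist")
	fmt.Println(err)                            // same message a %s would have produced
	fmt.Println(errors.Is(err, os.ErrNotExist)) // true: the cause survives the wrap
}
```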
@ -119,12 +119,12 @@ func (svc *Service) loadCredentials() (tls.Issuer, error) {
|
|||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read CA from disk: %s", err)
|
||||
return nil, fmt.Errorf("failed to read CA from disk: %w", err)
|
||||
}
|
||||
|
||||
// Don't verify with dns name as this is not a leaf certificate
|
||||
if err := creds.Crt.Verify(svc.trustAnchors, "", time.Time{}); err != nil {
|
||||
return nil, fmt.Errorf("failed to verify issuer credentials for '%s' with trust anchors: %s", svc.expectedName, err)
|
||||
return nil, fmt.Errorf("failed to verify issuer credentials for '%s' with trust anchors: %w", svc.expectedName, err)
|
||||
}
|
||||
|
||||
if !creds.Certificate.IsCA {
|
||||
|
@ -202,18 +202,20 @@ func (svc *Service) Certify(ctx context.Context, req *pb.CertifyRequest) (*pb.Ce
log.Debugf("Validating token for %s", reqIdentity)
tokIdentity, err := svc.validator.Validate(ctx, tok)
if err != nil {
switch e := err.(type) {
case NotAuthenticated:
log.Infof("authentication failed for %s: %s", reqIdentity, e)
return nil, status.Error(codes.FailedPrecondition, e.Error())
case InvalidToken:
log.Debugf("invalid token provided for %s: %s", reqIdentity, e)
return nil, status.Error(codes.InvalidArgument, e.Error())
default:
msg := fmt.Sprintf("error validating token for %s: %s", reqIdentity, e)
log.Error(msg)
return nil, status.Error(codes.Internal, msg)
var nae NotAuthenticated
if errors.As(err, &nae) {
log.Infof("authentication failed for %s: %s", reqIdentity, nae)
return nil, status.Error(codes.FailedPrecondition, nae.Error())
}
var ite InvalidToken
if errors.As(err, &ite) {
log.Debugf("invalid token provided for %s: %s", reqIdentity, ite)
return nil, status.Error(codes.InvalidArgument, ite.Error())
}

msg := fmt.Sprintf("error validating token for %s: %s", reqIdentity, err)
log.Error(msg)
return nil, status.Error(codes.Internal, msg)
}

// Ensure the requested identity matches the token's identity.
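A `switch err.(type)` inspects only the outermost error, so the hunk above rewrites it as a ladder of `errors.As` checks with the same three outcomes. A compact, self-contained sketch of that translation (the error types here are local stand-ins, not the identity package's):

```go
package main

import (
	"errors"
	"fmt"
)

// NotAuthenticated and InvalidToken are stand-ins declared locally so the
// sketch compiles on its own.
type NotAuthenticated struct{}

func (NotAuthenticated) Error() string { return "not authenticated" }

type InvalidToken struct{ Reason string }

func (e InvalidToken) Error() string { return "invalid token: " + e.Reason }

// classify mirrors the errors.As ladder that replaced the type switch: each
// branch still gets a typed value to log, but wrapped errors are matched too.
func classify(err error) string {
	var nae NotAuthenticated
	if errors.As(err, &nae) {
		return "failed-precondition"
	}
	var ite InvalidToken
	if errors.As(err, &ite) {
		return "invalid-argument: " + ite.Reason
	}
	return "internal"
}

func main() {
	fmt.Println(classify(fmt.Errorf("validating token: %w", InvalidToken{Reason: "expired"})))
	fmt.Println(classify(errors.New("unexpected failure")))
}
```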
@ -276,13 +276,13 @@ func (conf *ResourceConfig) GetPodPatch(injectProxy bool) ([]byte, error) {
|
|||
|
||||
values, err := conf.GetOverriddenValues()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not generate Overridden Values: %s", err)
|
||||
return nil, fmt.Errorf("could not generate Overridden Values: %w", err)
|
||||
}
|
||||
|
||||
if values.ClusterNetworks != "" {
|
||||
for _, network := range strings.Split(strings.Trim(values.ClusterNetworks, ","), ",") {
|
||||
if _, _, err := net.ParseCIDR(network); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse destination get networks: %s", err)
|
||||
return nil, fmt.Errorf("cannot parse destination get networks: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -158,7 +158,7 @@ func CheckCertAlgoRequirements(cert *x509.Certificate) error {
|
|||
func (ic *IssuerCertData) VerifyAndBuildCreds() (*tls.Cred, error) {
|
||||
creds, err := tls.ValidateAndCreateCreds(ic.IssuerCrt, ic.IssuerKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read CA: %s", err)
|
||||
return nil, fmt.Errorf("failed to read CA: %w", err)
|
||||
}
|
||||
|
||||
// we check the time validity of the issuer cert
|
||||
|
|
|
@ -48,7 +48,7 @@ type KubernetesAPI struct {
|
|||
func NewAPI(configPath, kubeContext string, impersonate string, impersonateGroup []string, timeout time.Duration) (*KubernetesAPI, error) {
|
||||
config, err := GetConfig(configPath, kubeContext)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error configuring Kubernetes API client: %v", err)
|
||||
return nil, fmt.Errorf("error configuring Kubernetes API client: %w", err)
|
||||
}
|
||||
return NewAPIForConfig(config, impersonate, impersonateGroup, timeout)
|
||||
}
|
||||
|
@ -73,24 +73,24 @@ func NewAPIForConfig(config *rest.Config, impersonate string, impersonateGroup [
|
|||
|
||||
clientset, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error configuring Kubernetes API clientset: %v", err)
|
||||
return nil, fmt.Errorf("error configuring Kubernetes API clientset: %w", err)
|
||||
}
|
||||
apiextensions, err := apiextensionsclient.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error configuring Kubernetes API Extensions clientset: %v", err)
|
||||
return nil, fmt.Errorf("error configuring Kubernetes API Extensions clientset: %w", err)
|
||||
}
|
||||
aggregatorClient, err := apiregistration.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error configuring Kubernetes API server aggregator: %v", err)
|
||||
return nil, fmt.Errorf("error configuring Kubernetes API server aggregator: %w", err)
|
||||
}
|
||||
dynamicClient, err := dynamic.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error configuring Kubernetes Dynamic Client: %v", err)
|
||||
return nil, fmt.Errorf("error configuring Kubernetes Dynamic Client: %w", err)
|
||||
}
|
||||
|
||||
l5dCrdClient, err := crdclient.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error configuring Linkerd CRD clientset: %v", err)
|
||||
return nil, fmt.Errorf("error configuring Linkerd CRD clientset: %w", err)
|
||||
}
|
||||
|
||||
return &KubernetesAPI{
|
||||
|
@ -108,7 +108,7 @@ func NewAPIForConfig(config *rest.Config, impersonate string, impersonateGroup [
|
|||
func (kubeAPI *KubernetesAPI) NewClient() (*http.Client, error) {
|
||||
secureTransport, err := rest.TransportFor(kubeAPI.Config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error instantiating Kubernetes API client: %v", err)
|
||||
return nil, fmt.Errorf("error instantiating Kubernetes API client: %w", err)
|
||||
}
|
||||
|
||||
return &http.Client{
|
||||
|
|
|
@ -2,6 +2,7 @@ package k8s
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
|
@ -152,10 +153,10 @@ func newFakeClientSetsFromManifests(readers []io.Reader) (
|
|||
for {
|
||||
// Read a single YAML object
|
||||
bytes, err := r.Read()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
|
|
|
@ -192,7 +192,7 @@ func (pf *PortForward) run() error {
|
|||
|
||||
err = fw.ForwardPorts()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("%s for %s/%s", err, pf.namespace, pf.podName)
|
||||
err = fmt.Errorf("%w for %s/%s", err, pf.namespace, pf.podName)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -78,55 +78,55 @@ func FetchKubernetesResources(ctx context.Context, k *k8s.KubernetesAPI, options
|
|||
|
||||
clusterRoles, err := fetchClusterRoles(ctx, k, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch ClusterRole resources:%v", err)
|
||||
return nil, fmt.Errorf("could not fetch ClusterRole resources: %w", err)
|
||||
}
|
||||
resources = append(resources, clusterRoles...)
|
||||
|
||||
clusterRoleBindings, err := fetchClusterRoleBindings(ctx, k, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch ClusterRoleBinding resources:%v", err)
|
||||
return nil, fmt.Errorf("could not fetch ClusterRoleBinding resources: %w", err)
|
||||
}
|
||||
resources = append(resources, clusterRoleBindings...)
|
||||
|
||||
roles, err := fetchRoles(ctx, k, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch Roles:%v", err)
|
||||
return nil, fmt.Errorf("could not fetch Roles: %w", err)
|
||||
}
|
||||
resources = append(resources, roles...)
|
||||
|
||||
roleBindings, err := fetchRoleBindings(ctx, k, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch RoleBindings:%v", err)
|
||||
return nil, fmt.Errorf("could not fetch RoleBindings: %w", err)
|
||||
}
|
||||
resources = append(resources, roleBindings...)
|
||||
|
||||
crds, err := fetchCustomResourceDefinitions(ctx, k, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch CustomResourceDefinition resources:%v", err)
|
||||
return nil, fmt.Errorf("could not fetch CustomResourceDefinition resources: %w", err)
|
||||
}
|
||||
resources = append(resources, crds...)
|
||||
|
||||
apiCRDs, err := fetchAPIRegistrationResources(ctx, k, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch APIService CRDs:%v", err)
|
||||
return nil, fmt.Errorf("could not fetch APIService CRDs: %w", err)
|
||||
}
|
||||
resources = append(resources, apiCRDs...)
|
||||
|
||||
mutatinghooks, err := fetchMutatingWebhooksConfiguration(ctx, k, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch MutatingWebhookConfigurations:%v", err)
|
||||
return nil, fmt.Errorf("could not fetch MutatingWebhookConfigurations: %w", err)
|
||||
}
|
||||
resources = append(resources, mutatinghooks...)
|
||||
|
||||
validationhooks, err := fetchValidatingWebhooksConfiguration(ctx, k, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch ValidatingWebhookConfiguration:%v", err)
|
||||
return nil, fmt.Errorf("could not fetch ValidatingWebhookConfiguration: %w", err)
|
||||
}
|
||||
resources = append(resources, validationhooks...)
|
||||
|
||||
namespaces, err := fetchNamespace(ctx, k, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not fetch Namespace:%v", err)
|
||||
return nil, fmt.Errorf("could not fetch Namespace: %w", err)
|
||||
}
|
||||
resources = append(resources, namespaces...)
|
||||
|
||||
|
|
|
@ -31,17 +31,17 @@ func RenderOpenAPI(fileName, namespace, name, clusterDomain string, w io.Writer)
|
|||
|
||||
bytes, err := ioutil.ReadAll(input)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading file: %s", err)
|
||||
return fmt.Errorf("Error reading file: %w", err)
|
||||
}
|
||||
json, err := yaml.YAMLToJSON(bytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error parsing yaml: %s", err)
|
||||
return fmt.Errorf("Error parsing yaml: %w", err)
|
||||
}
|
||||
|
||||
swagger := spec.Swagger{}
|
||||
err = swagger.UnmarshalJSON(json)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error parsing OpenAPI spec: %s", err)
|
||||
return fmt.Errorf("Error parsing OpenAPI spec: %w", err)
|
||||
}
|
||||
|
||||
profile := swaggerToServiceProfile(swagger, namespace, name, clusterDomain)
|
||||
|
|
|
@ -54,38 +54,38 @@ func Validate(data []byte) error {
|
|||
var serviceProfile sp.ServiceProfile
|
||||
err := yaml.UnmarshalStrict(data, &serviceProfile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to validate ServiceProfile: %s", err)
|
||||
return fmt.Errorf("failed to validate ServiceProfile: %w", err)
|
||||
}
|
||||
|
||||
errs := validation.IsDNS1123Subdomain(serviceProfile.Name)
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("ServiceProfile \"%s\" has invalid name: %s", serviceProfile.Name, errs[0])
|
||||
return fmt.Errorf("ServiceProfile %q has invalid name: %s", serviceProfile.Name, errs[0])
|
||||
}
|
||||
|
||||
for _, route := range serviceProfile.Spec.Routes {
|
||||
if route.Name == "" {
|
||||
return fmt.Errorf("ServiceProfile \"%s\" has a route with no name", serviceProfile.Name)
|
||||
return fmt.Errorf("ServiceProfile %q has a route with no name", serviceProfile.Name)
|
||||
}
|
||||
if route.Timeout != "" {
|
||||
_, err := time.ParseDuration(route.Timeout)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ServiceProfile \"%s\" has a route with an invalid timeout: %s", serviceProfile.Name, err)
|
||||
return fmt.Errorf("ServiceProfile %q has a route with an invalid timeout: %w", serviceProfile.Name, err)
|
||||
}
|
||||
}
|
||||
if route.Condition == nil {
|
||||
return fmt.Errorf("ServiceProfile \"%s\" has a route with no condition", serviceProfile.Name)
|
||||
return fmt.Errorf("ServiceProfile %q has a route with no condition", serviceProfile.Name)
|
||||
}
|
||||
err := ValidateRequestMatch(route.Condition)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ServiceProfile \"%s\" has a route with an invalid condition: %s", serviceProfile.Name, err)
|
||||
return fmt.Errorf("ServiceProfile %q has a route with an invalid condition: %w", serviceProfile.Name, err)
|
||||
}
|
||||
for _, rc := range route.ResponseClasses {
|
||||
if rc.Condition == nil {
|
||||
return fmt.Errorf("ServiceProfile \"%s\" has a response class with no condition", serviceProfile.Name)
|
||||
return fmt.Errorf("ServiceProfile %q has a response class with no condition", serviceProfile.Name)
|
||||
}
|
||||
err = ValidateResponseMatch(rc.Condition)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ServiceProfile \"%s\" has a response class with an invalid condition: %s", serviceProfile.Name, err)
|
||||
return fmt.Errorf("ServiceProfile %q has a response class with an invalid condition: %w", serviceProfile.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -93,16 +93,16 @@ func Validate(data []byte) error {
|
|||
rb := serviceProfile.Spec.RetryBudget
|
||||
if rb != nil {
|
||||
if rb.RetryRatio < 0 {
|
||||
return fmt.Errorf("ServiceProfile \"%s\" RetryBudget RetryRatio must be non-negative: %f", serviceProfile.Name, rb.RetryRatio)
|
||||
return fmt.Errorf("ServiceProfile %q RetryBudget RetryRatio must be non-negative: %f", serviceProfile.Name, rb.RetryRatio)
|
||||
}
|
||||
|
||||
if rb.TTL == "" {
|
||||
return fmt.Errorf("ServiceProfile \"%s\" RetryBudget missing TTL field", serviceProfile.Name)
|
||||
return fmt.Errorf("ServiceProfile %q RetryBudget missing TTL field", serviceProfile.Name)
|
||||
}
|
||||
|
||||
_, err := time.ParseDuration(rb.TTL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ServiceProfile \"%s\" RetryBudget: %s", serviceProfile.Name, err)
|
||||
return fmt.Errorf("ServiceProfile %q RetryBudget: %w", serviceProfile.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -235,7 +235,7 @@ func readFile(fileName string) (io.Reader, error) {
|
|||
func writeProfile(profile sp.ServiceProfile, w io.Writer) error {
|
||||
output, err := yaml.Marshal(profile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error writing Service Profile: %s", err)
|
||||
return fmt.Errorf("Error writing Service Profile: %w", err)
|
||||
}
|
||||
_, err = w.Write(output)
|
||||
return err
|
||||
|
|
|
@ -47,11 +47,11 @@ func ServiceProfileYamlEquals(actual, expected v1alpha2.ServiceProfile) error {
|
|||
if !reflect.DeepEqual(actual, expected) {
|
||||
actualYaml, err := yaml.Marshal(actual)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Service profile mismatch but failed to marshal actual service profile: %v", err)
|
||||
return fmt.Errorf("Service profile mismatch but failed to marshal actual service profile: %w", err)
|
||||
}
|
||||
expectedYaml, err := yaml.Marshal(expected)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Service profile mismatch but failed to marshal expected service profile: %v", err)
|
||||
return fmt.Errorf("Service profile mismatch but failed to marshal expected service profile: %w", err)
|
||||
}
|
||||
return fmt.Errorf("Expected [%s] but got [%s]", string(expectedYaml), string(actualYaml))
|
||||
}
|
||||
|
|
|
@ -69,9 +69,10 @@ func WriteErrorToHTTPResponse(w http.ResponseWriter, errorObtained error) {
statusCode := defaultHTTPErrorStatusCode
errorToReturn := errorObtained

if httpErr, ok := errorObtained.(HTTPError); ok {
statusCode = httpErr.Code
errorToReturn = httpErr.WrappedError
var he HTTPError
if errors.As(errorObtained, &he) {
statusCode = he.Code
errorToReturn = he.WrappedError
}

w.Header().Set(errorHeader, http.StatusText(statusCode))

@ -158,7 +159,7 @@ func CheckIfResponseHasError(rsp *http.Response) error {

err := FromByteStreamToProtocolBuffers(reader, &apiError)
if err != nil {
return fmt.Errorf("Response has %s header [%s], but response body didn't contain protobuf error: %v", errorHeader, errorMsg, err)
return fmt.Errorf("response has %s header [%s], but response body didn't contain protobuf error: %w", errorHeader, errorMsg, err)
}

return errors.New(apiError.Error)
@ -103,10 +103,11 @@ func TestHttpRequestToProto(t *testing.T) {
|
|||
t.Fatalf("Expecting error, got nothing")
|
||||
}
|
||||
|
||||
if httpErr, ok := err.(HTTPError); ok {
|
||||
var he HTTPError
|
||||
if errors.As(err, &he) {
|
||||
expectedStatusCode := http.StatusBadRequest
|
||||
if httpErr.Code != expectedStatusCode || httpErr.WrappedError == nil {
|
||||
t.Fatalf("Expected error status to be [%d] and contain wrapper error, got status [%d] and error [%v]", expectedStatusCode, httpErr.Code, httpErr.WrappedError)
|
||||
if he.Code != expectedStatusCode || he.WrappedError == nil {
|
||||
t.Fatalf("Expected error status to be [%d] and contain wrapper error, got status [%d] and error [%s]", expectedStatusCode, he.Code, he.WrappedError)
|
||||
}
|
||||
} else {
|
||||
t.Fatalf("Expected error to be httpError, got: %v", err)
|
||||
|
|
|
@ -97,7 +97,7 @@ func (crt *Crt) Verify(roots *x509.CertPool, name string, currentTime time.Time)
}

if crtExpiryError(err) {
return fmt.Errorf("%s - Current Time : %s - Invalid before %s - Invalid After %s", err, currentTime, crt.Certificate.NotBefore, crt.Certificate.NotAfter)
return fmt.Errorf("%w - Current Time : %s - Invalid before %s - Invalid After %s", err, currentTime, crt.Certificate.NotBefore, crt.Certificate.NotAfter)
}
return err
}

@ -232,10 +232,9 @@ func DecodePEMCrt(txt string) (*Crt, error) {
}

func crtExpiryError(err error) bool {
switch v := err.(type) {
case x509.CertificateInvalidError:
return v.Reason == x509.Expired
default:
return false
var cie x509.CertificateInvalidError
if errors.As(err, &cie) {
return cie.Reason == x509.Expired
}
return false
}
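The new `crtExpiryError` shows the general recipe for matching a concrete standard-library error type and then reading its fields. A standalone sketch using hand-built `x509.CertificateInvalidError` values in place of real verification failures:

```go
package main

import (
	"crypto/x509"
	"errors"
	"fmt"
)

// crtExpiryError reports whether err (possibly wrapped) is an expiry failure.
func crtExpiryError(err error) bool {
	var cie x509.CertificateInvalidError
	if errors.As(err, &cie) {
		return cie.Reason == x509.Expired
	}
	return false
}

func main() {
	// Hand-built errors stand in for what (*x509.Certificate).Verify would return.
	expired := x509.CertificateInvalidError{Reason: x509.Expired}
	other := x509.CertificateInvalidError{Reason: x509.NotAuthorizedToSign}

	fmt.Println(crtExpiryError(fmt.Errorf("verifying issuer cert: %w", expired))) // true
	fmt.Println(crtExpiryError(other))                                            // false
}
```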
@ -78,7 +78,7 @@ LOOP:
|
|||
func (fscw *FsCredsWatcher) UpdateCert(certVal *atomic.Value) error {
|
||||
creds, err := ReadPEMCreds(fscw.keyFilePath, fscw.certFilePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read cert from disk: %s", err)
|
||||
return fmt.Errorf("failed to read cert from disk: %w", err)
|
||||
}
|
||||
|
||||
certPEM := creds.EncodePEM()
|
||||
|
|
|
@ -45,7 +45,7 @@ func (c Channels) Match(actualVersion string) error {
|
|||
|
||||
actual, err := parseChannelVersion(actualVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse actual version: %s", err)
|
||||
return fmt.Errorf("failed to parse actual version: %w", err)
|
||||
}
|
||||
|
||||
for _, cv := range c.array {
|
||||
|
@ -95,7 +95,7 @@ func getLatestVersions(ctx context.Context, client *http.Client, url string) (Ch
|
|||
for c, v := range versionRsp {
|
||||
cv, err := parseChannelVersion(v)
|
||||
if err != nil {
|
||||
return Channels{}, fmt.Errorf("unexpected versioncheck response: %s", err)
|
||||
return Channels{}, fmt.Errorf("unexpected versioncheck response: %w", err)
|
||||
}
|
||||
|
||||
if c != cv.channel {
|
||||
|
|
|
@ -51,11 +51,11 @@ func match(expectedVersion, actualVersion string) error {
|
|||
|
||||
actual, err := parseChannelVersion(actualVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse actual version: %s", err)
|
||||
return fmt.Errorf("failed to parse actual version: %w", err)
|
||||
}
|
||||
expected, err := parseChannelVersion(expectedVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse expected version: %s", err)
|
||||
return fmt.Errorf("failed to parse expected version: %w", err)
|
||||
}
|
||||
|
||||
if actual.channel != expected.channel {
|
||||
|
|
|
@ -135,11 +135,11 @@ func generateAndStoreCSR(p, id string, key *ecdsa.PrivateKey) ([]byte, error) {
|
|||
}
|
||||
csrb, err := x509.CreateCertificateRequest(rand.Reader, &csr, key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create CSR: %s", err)
|
||||
return nil, fmt.Errorf("failed to create CSR: %w", err)
|
||||
}
|
||||
|
||||
if err = ioutil.WriteFile(p, csrb, 0600); err != nil {
|
||||
return nil, fmt.Errorf("failed to write CSR: %s", err)
|
||||
return nil, fmt.Errorf("failed to write CSR: %w", err)
|
||||
}
|
||||
|
||||
return csrb, nil
|
||||
|
|
|
@ -42,6 +42,7 @@ func TestEgressHttp(t *testing.T) {

err = TestHelper.CheckPods(ctx, ns, "egress-test", 1)
if err != nil {
//nolint:errorlint
if rce, ok := err.(*testutil.RestartCountError); ok {
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
} else {
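Not every comparison is converted: the integration tests keep the plain `*testutil.RestartCountError` assertion and mark it `//nolint:errorlint`, presumably because `CheckPods` hands that error back directly rather than wrapped. A sketch of the shape, with a hypothetical local stand-in for the helper's error type:

```go
package main

import (
	"errors"
	"log"
)

// RestartCountError is a local stand-in for testutil.RestartCountError.
type RestartCountError struct{ Pod string }

func (e *RestartCountError) Error() string { return "too many restarts in pod " + e.Pod }

// handle mirrors the test pattern: when the error is known to arrive unwrapped,
// a plain assertion is acceptable and the lint is silenced on just that line.
func handle(err error) {
	if err == nil {
		return
	}
	//nolint:errorlint
	if rce, ok := err.(*RestartCountError); ok {
		log.Printf("warning only: %s", rce)
		return
	}
	log.Printf("fatal: %s", err)
}

func main() {
	handle(&RestartCountError{Pod: "egress-test-0"})
	handle(errors.New("kubectl apply failed"))
}
```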
@ -40,6 +40,7 @@ func TestGoodEndpoints(t *testing.T) {
|
|||
|
||||
err = TestHelper.CheckPods(ctx, ns, "nginx", 1)
|
||||
if err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -55,7 +56,7 @@ func TestGoodEndpoints(t *testing.T) {
|
|||
err = TestHelper.RetryFor(5*time.Second, func() error {
|
||||
out, err = TestHelper.LinkerdRun("diagnostics", "endpoints", endpointCase.authority, "-ojson")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get endpoints for %s: %s", endpointCase.authority, err)
|
||||
return fmt.Errorf("failed to get endpoints for %s: %w", endpointCase.authority, err)
|
||||
}
|
||||
|
||||
re := regexp.MustCompile(endpointCase.expectedRE)
|
||||
|
|
|
@ -49,6 +49,7 @@ func TestLocalhostServer(t *testing.T) {
|
|||
for _, deploy := range []string{"nginx", "slow-cooker"} {
|
||||
err = TestHelper.CheckPods(ctx, ns, deploy, 1)
|
||||
if err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
|
|
@ -60,6 +60,7 @@ func TestOpaquePorts(t *testing.T) {
|
|||
// with the rest of the test.
|
||||
for _, deploy := range []string{opaquePodApp, opaqueSvcApp, opaqueUnmeshedSvcPod} {
|
||||
if err := TestHelper.CheckPods(ctx, opaquePortsNs, deploy, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -82,11 +83,11 @@ func TestOpaquePorts(t *testing.T) {
|
|||
err := TestHelper.RetryFor(30*time.Second, func() error {
|
||||
pods, err := TestHelper.GetPods(ctx, opaquePortsNs, map[string]string{"app": opaquePodSC})
|
||||
if err != nil || len(pods) == 0 {
|
||||
return fmt.Errorf("error getting pods\n%s", err)
|
||||
return fmt.Errorf("error getting pods\n%w", err)
|
||||
}
|
||||
metrics, err := getPodMetrics(pods[0], opaquePortsNs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting metrics for pod\n%s", err)
|
||||
return fmt.Errorf("error getting metrics for pod\n%w", err)
|
||||
}
|
||||
if httpRequestTotalMetricRE.MatchString(metrics) {
|
||||
return fmt.Errorf("expected not to find HTTP outbound requests when pod is opaque\n%s", metrics)
|
||||
|
@ -94,11 +95,11 @@ func TestOpaquePorts(t *testing.T) {
|
|||
// Check the application metrics
|
||||
pods, err = TestHelper.GetPods(ctx, opaquePortsNs, map[string]string{"app": opaquePodApp})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting pods\n%s", err)
|
||||
return fmt.Errorf("error getting pods\n%w", err)
|
||||
}
|
||||
metrics, err = getPodMetrics(pods[0], opaquePortsNs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting metrics for pod\n%s", err)
|
||||
return fmt.Errorf("error getting metrics for pod\n%w", err)
|
||||
}
|
||||
if !tcpMetricRE.MatchString(metrics) {
|
||||
return fmt.Errorf("failed to find expected TCP metric when pod is opaque\n%s", metrics)
|
||||
|
@ -108,7 +109,7 @@ func TestOpaquePorts(t *testing.T) {
|
|||
})
|
||||
|
||||
if err != nil {
|
||||
testutil.AnnotatedFatalf(t, "unexpected metric output", "unexpected metric output: %v", err)
|
||||
testutil.AnnotatedFatalf(t, "unexpected metric output", "unexpected metric output: %s", err)
|
||||
}
|
||||
})
|
||||
|
||||
|
@ -117,11 +118,11 @@ func TestOpaquePorts(t *testing.T) {
|
|||
err := TestHelper.RetryFor(30*time.Second, func() error {
|
||||
pods, err := TestHelper.GetPods(ctx, opaquePortsNs, map[string]string{"app": opaqueSvcSC})
|
||||
if err != nil || len(pods) == 0 {
|
||||
return fmt.Errorf("error getting pods\n%s", err)
|
||||
return fmt.Errorf("error getting pods\n%w", err)
|
||||
}
|
||||
metrics, err := getPodMetrics(pods[0], opaquePortsNs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting metrics for pod\n%s", err)
|
||||
return fmt.Errorf("error getting metrics for pod\n%w", err)
|
||||
}
|
||||
if httpRequestTotalMetricRE.MatchString(metrics) {
|
||||
return fmt.Errorf("expected not to find HTTP outbound requests when service is opaque\n%s", metrics)
|
||||
|
@ -129,11 +130,11 @@ func TestOpaquePorts(t *testing.T) {
|
|||
// Check the application metrics
|
||||
pods, err = TestHelper.GetPods(ctx, opaquePortsNs, map[string]string{"app": opaqueSvcApp})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting pods\n%s", err)
|
||||
return fmt.Errorf("error getting pods\n%w", err)
|
||||
}
|
||||
metrics, err = getPodMetrics(pods[0], opaquePortsNs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting metrics for pod\n%s", err)
|
||||
return fmt.Errorf("error getting metrics for pod\n%w", err)
|
||||
}
|
||||
if !tcpMetricRE.MatchString(metrics) {
|
||||
return fmt.Errorf("failed to find expected TCP metric when pod is opaque\n%s", metrics)
|
||||
|
@ -154,11 +155,11 @@ func TestOpaquePorts(t *testing.T) {
|
|||
pods, err := TestHelper.GetPods(ctx, opaquePortsNs,
|
||||
map[string]string{"app": opaqueUnmeshedSvcSC})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting pods\n%s", err)
|
||||
return fmt.Errorf("error getting pods\n%w", err)
|
||||
}
|
||||
metrics, err := getPodMetrics(pods[0], opaquePortsNs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting metrics for pod\n%s", err)
|
||||
return fmt.Errorf("error getting metrics for pod\n%w", err)
|
||||
}
|
||||
|
||||
if httpRequestTotalUnmeshedRE.MatchString(metrics) {
|
||||
|
|
|
@ -53,6 +53,7 @@ func TestSkipInboundPorts(t *testing.T) {
|
|||
// Check all booksapp deployments are up and running
|
||||
for _, deploy := range booksappDeployments {
|
||||
if err := TestHelper.CheckPods(ctx, ns, deploy, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -67,7 +68,7 @@ func TestSkipInboundPorts(t *testing.T) {
|
|||
err := TestHelper.RetryFor(30*time.Second, func() error {
|
||||
pods, err := TestHelper.GetPods(ctx, ns, map[string]string{"app": "webapp"})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting pods\n%s", err)
|
||||
return fmt.Errorf("error getting pods\n%w", err)
|
||||
}
|
||||
|
||||
podName := fmt.Sprintf("pod/%s", pods[0].Name)
|
||||
|
@ -75,7 +76,7 @@ func TestSkipInboundPorts(t *testing.T) {
|
|||
|
||||
metrics, err := TestHelper.LinkerdRun(cmd...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting metrics for pod\n%s", err)
|
||||
return fmt.Errorf("error getting metrics for pod\n%w", err)
|
||||
}
|
||||
|
||||
if httpResponseTotalMetricRE.MatchString(metrics) {
|
||||
|
|
|
@ -51,6 +51,7 @@ func verifyInstallApp(ctx context.Context, t *testing.T) {
|
|||
}
|
||||
|
||||
if err := TestHelper.CheckPods(ctx, prefixedNs, TestAppBackendDeploymentName, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -59,6 +60,7 @@ func verifyInstallApp(ctx context.Context, t *testing.T) {
|
|||
}
|
||||
|
||||
if err := TestHelper.CheckPods(ctx, prefixedNs, "slow-cooker", 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
|
|
@ -37,6 +37,7 @@ func TestRabbitMQDeploy(t *testing.T) {
|
|||
testutil.AnnotatedFatalf(t, "kubectl apply command failed", "'kubectl apply' command failed: %s", err)
|
||||
}
|
||||
if err := TestHelper.CheckPods(ctx, testNamespace, "rabbitmq", 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out %s", rce)
|
||||
} else {
|
||||
|
@ -54,6 +55,7 @@ func TestRabbitMQDeploy(t *testing.T) {
|
|||
testutil.AnnotatedFatalf(t, "kubectl apply command failed", "'kubectl apply' command failed: %s", err)
|
||||
}
|
||||
if err := TestHelper.CheckPods(ctx, testNamespace, "rabbitmq-client", 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out %s", rce)
|
||||
} else {
|
||||
|
@ -66,11 +68,11 @@ func TestRabbitMQDeploy(t *testing.T) {
|
|||
err = TestHelper.RetryFor(timeout, func() error {
|
||||
out, err := TestHelper.Kubectl("", "-n", testNamespace, "logs", "-lapp=rabbitmq-client", "-crabbitmq-client")
|
||||
if err != nil {
|
||||
return fmt.Errorf("'kubectl logs -l app=rabbitmq-client -c rabbitmq-client' command failed\n%s", err)
|
||||
return fmt.Errorf("'kubectl logs -l app=rabbitmq-client -c rabbitmq-client' command failed\n%w", err)
|
||||
}
|
||||
err = TestHelper.ValidateOutput(out, golden)
|
||||
if err != nil {
|
||||
return fmt.Errorf("received unexpected output\n%s", err.Error())
|
||||
return fmt.Errorf("received unexpected output\n%w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
|
|
@ -173,7 +173,7 @@ func TestCheckVizWithExternalPrometheus(t *testing.T) {
|
|||
err = TestHelper.RetryFor(timeout, func() error {
|
||||
out, err := TestHelper.LinkerdRun(cmd...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("'linkerd viz check' command failed\n%s", err)
|
||||
return fmt.Errorf("'linkerd viz check' command failed\n%w", err)
|
||||
}
|
||||
|
||||
if out != expected.String() {
|
||||
|
|
|
@ -199,6 +199,7 @@ func TestCliStatForLinkerdNamespace(t *testing.T) {
|
|||
// wait for deployments to start
|
||||
for _, deploy := range []string{"backend", "failing", "slow-cooker"} {
|
||||
if err := TestHelper.CheckPods(ctx, prefixedNs, deploy, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -273,49 +274,49 @@ func TestCliStatForLinkerdNamespace(t *testing.T) {
|
|||
func validateRowStats(name, expectedMeshCount, expectedStatus string, rowStats map[string]*testutil.RowStat, isAuthority bool) error {
|
||||
stat, ok := rowStats[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("No stats found for [%s]", name)
|
||||
return fmt.Errorf("no stats found for [%s]", name)
|
||||
}
|
||||
|
||||
if stat.Status != expectedStatus {
|
||||
return fmt.Errorf("Expected status '%s' for '%s', got '%s'",
|
||||
return fmt.Errorf("expected status '%s' for '%s', got '%s'",
|
||||
expectedStatus, name, stat.Status)
|
||||
}
|
||||
|
||||
if stat.Meshed != expectedMeshCount {
|
||||
return fmt.Errorf("Expected mesh count [%s] for [%s], got [%s]",
|
||||
return fmt.Errorf("expected mesh count [%s] for [%s], got [%s]",
|
||||
expectedMeshCount, name, stat.Meshed)
|
||||
}
|
||||
|
||||
expectedSuccessRate := "100.00%"
|
||||
if stat.Success != expectedSuccessRate {
|
||||
return fmt.Errorf("Expected success rate [%s] for [%s], got [%s]",
|
||||
return fmt.Errorf("expected success rate [%s] for [%s], got [%s]",
|
||||
expectedSuccessRate, name, stat.Success)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(stat.Rps, "rps") {
|
||||
return fmt.Errorf("Unexpected rps for [%s], got [%s]",
|
||||
return fmt.Errorf("unexpected rps for [%s], got [%s]",
|
||||
name, stat.Rps)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(stat.P50Latency, "ms") {
|
||||
return fmt.Errorf("Unexpected p50 latency for [%s], got [%s]",
|
||||
return fmt.Errorf("unexpected p50 latency for [%s], got [%s]",
|
||||
name, stat.P50Latency)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(stat.P95Latency, "ms") {
|
||||
return fmt.Errorf("Unexpected p95 latency for [%s], got [%s]",
|
||||
return fmt.Errorf("unexpected p95 latency for [%s], got [%s]",
|
||||
name, stat.P95Latency)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(stat.P99Latency, "ms") {
|
||||
return fmt.Errorf("Unexpected p99 latency for [%s], got [%s]",
|
||||
return fmt.Errorf("unexpected p99 latency for [%s], got [%s]",
|
||||
name, stat.P99Latency)
|
||||
}
|
||||
|
||||
if stat.TCPOpenConnections != "-" && !isAuthority {
|
||||
_, err := strconv.Atoi(stat.TCPOpenConnections)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error parsing number of TCP connections [%s]: %s", stat.TCPOpenConnections, err.Error())
|
||||
return fmt.Errorf("error parsing number of TCP connections [%s]: %w", stat.TCPOpenConnections, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -107,6 +107,7 @@ func TestUpgradeTestAppWorksBeforeUpgrade(t *testing.T) {
|
|||
testAppNamespace := "upgrade-test"
|
||||
for _, deploy := range []string{"emoji", "voting", "web"} {
|
||||
if err := TestHelper.CheckPods(ctx, testAppNamespace, deploy, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -773,7 +774,7 @@ func testCheckCommand(t *testing.T, stage, expectedVersion, namespace, cliVersio
|
|||
out, err := TestHelper.LinkerdRun(cmd...)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("'linkerd check' command failed\n%s\n%s", err, out)
|
||||
return fmt.Errorf("'linkerd check' command failed\n%w\n%s", err, out)
|
||||
}
|
||||
|
||||
if !strings.Contains(out, expected) {
|
||||
|
@ -908,6 +909,7 @@ func TestRestarts(t *testing.T) {
|
|||
}
|
||||
for deploy, spec := range expectedDeployments {
|
||||
if err := TestHelper.CheckPods(context.Background(), spec.Namespace, deploy, spec.Replicas); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
|
|
@ -59,6 +59,7 @@ func TestSmoke(t *testing.T) {
|
|||
// Wait for pods to in smoke-test dpeloyment to come up
|
||||
for _, deploy := range []string{"smoke-test-terminus", "smoke-test-gateway"} {
|
||||
if err := TestHelper.CheckPods(ctx, ns, deploy, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -77,7 +78,7 @@ func TestSmoke(t *testing.T) {
|
|||
err = TestHelper.RetryFor(timeout, func() error {
|
||||
out, err := TestHelper.LinkerdRun(cmd...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("'linkerd check' command failed\n%s\n%s", err, out)
|
||||
return fmt.Errorf("'linkerd check' command failed\n%w\n%s", err, out)
|
||||
}
|
||||
|
||||
if !strings.Contains(out, expected) {
|
||||
|
|
|
@ -81,6 +81,7 @@ func TestResourcesPostInstall(t *testing.T) {
|
|||
}
|
||||
for deploy, spec := range expectedDeployments {
|
||||
if err := TestHelper.CheckPods(ctx, spec.Namespace, deploy, spec.Replicas); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
|
|
@ -110,11 +110,11 @@ func TestCheckMulticluster(t *testing.T) {
|
|||
err := TestHelper.RetryFor(timeout, func() error {
|
||||
out, err := TestHelper.LinkerdRun(cmd...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("'linkerd multicluster check' command failed\n%s", err)
|
||||
return fmt.Errorf("'linkerd multicluster check' command failed\n%w", err)
|
||||
}
|
||||
err = TestHelper.ValidateOutput(out, golden)
|
||||
if err != nil {
|
||||
return fmt.Errorf("received unexpected output\n%s", err.Error())
|
||||
return fmt.Errorf("received unexpected output\n%w", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
|
|
@ -65,7 +65,7 @@ func TestMulticlusterStatefulSetTargetTraffic(t *testing.T) {
|
|||
// Check gateway metrics
|
||||
metrics, err := TestHelper.LinkerdRun(dgCmd...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get metrics for gateway deployment: %s", err)
|
||||
return fmt.Errorf("failed to get metrics for gateway deployment: %w", err)
|
||||
}
|
||||
|
||||
// If no match, it means there are no open tcp conns from gateway to
|
||||
|
@ -90,7 +90,7 @@ func TestMulticlusterStatefulSetTargetTraffic(t *testing.T) {
|
|||
// Check gateway metrics
|
||||
metrics, err := TestHelper.LinkerdRun(dgCmd...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get metrics for gateway deployment: %s", err)
|
||||
return fmt.Errorf("failed to get metrics for gateway deployment: %w", err)
|
||||
}
|
||||
|
||||
// If no match, it means there are no outbound HTTP requests from
|
||||
|
|
|
@ -34,7 +34,7 @@ func TestTargetTraffic(t *testing.T) {
|
|||
"--container", "web-svc",
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s\n%s", err, out)
|
||||
return fmt.Errorf("%w\n%s", err, out)
|
||||
}
|
||||
// Check for expected error messages
|
||||
for _, row := range strings.Split(out, "\n") {
|
||||
|
|
|
@ -92,6 +92,7 @@ func TestDirectEdges(t *testing.T) {
|
|||
}
|
||||
|
||||
if err := TestHelper.CheckPods(ctx, testNamespace, "terminus", 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -130,6 +131,7 @@ func TestDirectEdges(t *testing.T) {
|
|||
}
|
||||
|
||||
if err := TestHelper.CheckPods(ctx, testNamespace, "slow-cooker", 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -156,7 +158,7 @@ func TestDirectEdges(t *testing.T) {
|
|||
}
|
||||
var buf bytes.Buffer
|
||||
if err := tpl.Execute(&buf, vars); err != nil {
|
||||
return fmt.Errorf("failed to parse direct_edges.golden template: %s", err)
|
||||
return fmt.Errorf("failed to parse direct_edges.golden template: %w", err)
|
||||
}
|
||||
|
||||
pods, err := TestHelper.Kubectl("", []string{"get", "pods", "-A"}...)
|
||||
|
|
|
@ -107,7 +107,7 @@ func TestCheckViz(t *testing.T) {
|
|||
err = TestHelper.RetryFor(timeout, func() error {
|
||||
out, err := TestHelper.LinkerdRun(cmd...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("'linkerd viz check' command failed\n%s", err)
|
||||
return fmt.Errorf("'linkerd viz check' command failed\n%w", err)
|
||||
}
|
||||
|
||||
if out != expected.String() {
|
||||
|
|
|
@ -64,6 +64,7 @@ func TestPolicy(t *testing.T) {
|
|||
// wait for deployments to start
|
||||
for _, deploy := range []string{"web", "emoji", "vote-bot", "voting"} {
|
||||
if err := TestHelper.CheckPods(ctx, prefixedNs, deploy, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -211,7 +212,7 @@ func validateAuthzRows(name string, rowStats map[string]*testutil.RowStat, isSer
|
|||
if isServer {
|
||||
_, err := strconv.Atoi(stat.TCPOpenConnections)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error parsing number of TCP connections [%s]: %s", stat.TCPOpenConnections, err.Error())
|
||||
return fmt.Errorf("Error parsing number of TCP connections [%s]: %w", stat.TCPOpenConnections, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -58,6 +58,7 @@ func testProfiles(t *testing.T) {
|
|||
// wait for deployments to start
|
||||
for _, deploy := range []string{"t1", "t2", "t3", "gateway"} {
|
||||
if err := TestHelper.CheckPods(ctx, testNamespace, deploy, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -228,7 +229,7 @@ func assertRouteStat(upstream, namespace, downstream string, t *testing.T, asser
|
|||
err := TestHelper.RetryFor(timeout, func() error {
|
||||
routes, err := getRoutes(upstream, namespace, []string{"--to", downstream})
|
||||
if err != nil {
|
||||
return fmt.Errorf("'linkerd routes' command failed: %s", err)
|
||||
return fmt.Errorf("'linkerd routes' command failed: %w", err)
|
||||
}
|
||||
|
||||
var testRoute *cmd2.JSONRouteStats
|
||||
|
|
|
@ -199,6 +199,7 @@ func TestCliStatForLinkerdNamespace(t *testing.T) {
|
|||
// wait for deployments to start
|
||||
for _, deploy := range []string{"backend", "failing", "slow-cooker"} {
|
||||
if err := TestHelper.CheckPods(ctx, prefixedNs, deploy, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
@ -315,7 +316,7 @@ func validateRowStats(name, expectedMeshCount, expectedStatus string, rowStats m
|
|||
if stat.TCPOpenConnections != "-" && !isAuthority {
|
||||
_, err := strconv.Atoi(stat.TCPOpenConnections)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error parsing number of TCP connections [%s]: %s", stat.TCPOpenConnections, err.Error())
|
||||
return fmt.Errorf("Error parsing number of TCP connections [%s]: %w", stat.TCPOpenConnections, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -86,6 +86,7 @@ func TestCliTap(t *testing.T) {
|
|||
// wait for deployments to start
|
||||
for _, deploy := range []string{"t1", "t2", "t3", "gateway"} {
|
||||
if err := TestHelper.CheckPods(ctx, prefixedNs, deploy, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
|
|
@ -70,7 +70,7 @@ func TestTracing(t *testing.T) {
|
|||
err = TestHelper.RetryFor(timeout, func() error {
|
||||
out, err := TestHelper.LinkerdRun(checkCmd...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("'linkerd jaeger check' command failed\n%s\n%s", err, out)
|
||||
return fmt.Errorf("'linkerd jaeger check' command failed\n%w\n%s", err, out)
|
||||
}
|
||||
|
||||
pods, err := TestHelper.KubernetesHelper.GetPods(context.Background(), tracingNs, nil)
|
||||
|
@ -133,6 +133,7 @@ func TestTracing(t *testing.T) {
|
|||
{ns: tracingNs, name: "jaeger"},
|
||||
} {
|
||||
if err := TestHelper.CheckPods(ctx, deploy.ns, deploy.name, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
|
|
@ -87,6 +87,7 @@ func TestTrafficSplitCliWithSP(t *testing.T) {
|
|||
// wait for deployments to start
|
||||
for _, deploy := range []string{"backend", "failing", "slow-cooker"} {
|
||||
if err := TestHelper.CheckPods(ctx, prefixedNs, deploy, 1); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*testutil.RestartCountError); ok {
|
||||
testutil.AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
|
|
@ -27,6 +27,7 @@ func TestResourcesPostInstall(namespace string, services []Service, deploys map[
|
|||
// Tests Pods and Deployments
|
||||
for deploy, spec := range deploys {
|
||||
if err := h.CheckPods(ctx, spec.Namespace, deploy, spec.Replicas); err != nil {
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*RestartCountError); ok {
|
||||
AnnotatedWarn(t, "CheckPods timed-out", rce)
|
||||
} else {
|
||||
|
|
|
@ -344,6 +344,7 @@ func (h *KubernetesHelper) WaitUntilDeployReady(deploys map[string]DeploySpec) {
|
|||
for deploy, spec := range deploys {
|
||||
if err := h.CheckPods(ctx, spec.Namespace, deploy, 1); err != nil {
|
||||
var out string
|
||||
//nolint:errorlint
|
||||
if rce, ok := err.(*RestartCountError); ok {
|
||||
out = fmt.Sprintf("error running test: failed to wait for deploy/%s to become 'ready', too many restarts (%v)\n", deploy, rce)
|
||||
} else {
|
||||
|
|
|
@ -421,7 +421,7 @@ type: kubernetes.io/tls`, base64.StdEncoding.EncodeToString([]byte(root)), base6
|
|||
func (h *TestHelper) LinkerdRun(arg ...string) (string, error) {
|
||||
out, stderr, err := h.PipeToLinkerdRun("", arg...)
|
||||
if err != nil {
|
||||
return out, fmt.Errorf("command failed: linkerd %s\n%s\n%s", strings.Join(arg, " "), err, stderr)
|
||||
return out, fmt.Errorf("command failed: linkerd %s\n%w\n%s", strings.Join(arg, " "), err, stderr)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
@ -634,7 +634,7 @@ func (h *TestHelper) HTTPGetURL(url string) (string, error) {
|
|||
defer resp.Body.Close()
|
||||
bytes, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading response body: %v", err)
|
||||
return fmt.Errorf("Error reading response body: %w", err)
|
||||
}
|
||||
body = string(bytes)
|
||||
|
||||
|
@ -763,7 +763,7 @@ func ParseRows(out string, expectedRowCount, expectedColumnCount int) (map[strin
|
|||
func ParseEvents(out string) ([]*corev1.Event, error) {
|
||||
var list corev1.List
|
||||
if err := json.Unmarshal([]byte(out), &list); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling list from `kubectl get events`: %s", err)
|
||||
return nil, fmt.Errorf("error unmarshaling list from `kubectl get events`: %w", err)
|
||||
}
|
||||
|
||||
if len(list.Items) == 0 {
|
||||
|
@ -774,7 +774,7 @@ func ParseEvents(out string) ([]*corev1.Event, error) {
|
|||
for _, i := range list.Items {
|
||||
var e corev1.Event
|
||||
if err := json.Unmarshal(i.Raw, &e); err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling list event from `kubectl get events`: %s", err)
|
||||
return nil, fmt.Errorf("error unmarshaling list event from `kubectl get events`: %w", err)
|
||||
}
|
||||
events = append(events, &e)
|
||||
}
|
||||
|
|
|
@ -68,7 +68,7 @@ code.`,
|
|||
func configureAndRunChecks(wout io.Writer, werr io.Writer, options *checkOptions) error {
|
||||
err := options.validate()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Validation error when executing check command: %v", err)
|
||||
return fmt.Errorf("validation error when executing check command: %w", err)
|
||||
}
|
||||
|
||||
hc := vizHealthCheck.NewHealthChecker([]healthcheck.CategoryID{}, &healthcheck.Options{
|
||||
|
@ -83,8 +83,7 @@ func configureAndRunChecks(wout io.Writer, werr io.Writer, options *checkOptions
|
|||
})
|
||||
err = hc.InitializeKubeAPIClient()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Error initializing k8s API client: %s", err)
|
||||
fmt.Fprintln(werr, err)
|
||||
fmt.Fprintf(werr, "Error initializing k8s API client: %s\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
|
|
@ -122,7 +122,7 @@ func NewCmdEdges() *cobra.Command {
|
|||
|
||||
reqs, err := buildEdgesRequests(args, options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating edges request: %s", err)
|
||||
return fmt.Errorf("Error creating edges request: %w", err)
|
||||
}
|
||||
|
||||
// The gRPC client is concurrency-safe, so we can reuse it in all the following goroutines
|
||||
|
@ -235,10 +235,10 @@ func edgesRespToRows(resp *pb.EdgesResponse) []*pb.Edge {
|
|||
func requestEdgesFromAPI(client pb.ApiClient, req *pb.EdgesRequest) (*pb.EdgesResponse, error) {
|
||||
resp, err := client.Edges(context.Background(), req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Edges API error: %+v", err)
|
||||
return nil, fmt.Errorf("Edges API error: %w", err)
|
||||
}
|
||||
if e := resp.GetError(); e != nil {
|
||||
return nil, fmt.Errorf("Edges API response error: %+v", e.Error)
|
||||
return nil, fmt.Errorf("Edges API response error: %s", e.Error)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
|
|
@ -165,7 +165,7 @@ func renderTapOutputProfile(ctx context.Context, k8sAPI *k8s.KubernetesAPI, tapR
|
|||
}
|
||||
output, err := yaml.Marshal(profile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error writing Service Profile: %s", err)
|
||||
return fmt.Errorf("Error writing Service Profile: %w", err)
|
||||
}
|
||||
w.Write(output)
|
||||
return nil
|
||||
|
@ -201,7 +201,7 @@ func routeSpecFromTap(tapByteStream *bufio.Reader, routeLimit int) []*sp.RouteSp
|
|||
if err != nil {
|
||||
// expected errors when hitting the tapDuration deadline
|
||||
var e net.Error
|
||||
if err != io.EOF &&
|
||||
if !errors.Is(err, io.EOF) &&
|
||||
!(errors.As(err, &e) && e.Timeout()) &&
|
||||
!errors.Is(err, context.DeadlineExceeded) &&
|
||||
!strings.HasSuffix(err.Error(), pkg.ErrClosedResponseBody) {
|
||||
|
|
|
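The hunk above also converts a net.Error type assertion into errors.As, which searches the whole wrap chain before matching the interface. A rough, standalone sketch of that pattern (the helper name is made up; only the errors.As usage mirrors the change):

package main

import (
	"errors"
	"fmt"
	"net"
	"os"
)

// isTimeout reports whether any error in the wrap chain is a net.Error timeout.
func isTimeout(err error) bool {
	var ne net.Error
	return errors.As(err, &ne) && ne.Timeout()
}

func main() {
	// os.ErrDeadlineExceeded satisfies net.Error, so it still matches after wrapping with %w.
	err := fmt.Errorf("reading tap stream: %w", os.ErrDeadlineExceeded)
	fmt.Println(isTimeout(err)) // true
}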
@ -71,7 +71,7 @@ This command will only display traffic which is sent to a service that has a Ser
|
|||
}
|
||||
req, err := buildTopRoutesRequest(args[0], options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating metrics request while making routes request: %v", err)
|
||||
return fmt.Errorf("error creating metrics request while making routes request: %w", err)
|
||||
}
|
||||
|
||||
output, err := requestRouteStatsFromAPI(
|
||||
|
@ -113,7 +113,7 @@ This command will only display traffic which is sent to a service that has a Ser
|
|||
func requestRouteStatsFromAPI(client pb.ApiClient, req *pb.TopRoutesRequest, options *routesOptions) (string, error) {
|
||||
resp, err := client.TopRoutes(context.Background(), req)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("TopRoutes API error: %v", err)
|
||||
return "", fmt.Errorf("TopRoutes API error: %w", err)
|
||||
}
|
||||
if e := resp.GetError(); e != nil {
|
||||
return "", errors.New(e.Error)
|
||||
|
|
|
@ -210,7 +210,7 @@ If no resource name is specified, displays stats about all resources of the spec
|
|||
|
||||
reqs, err := buildStatSummaryRequests(args, options)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating metrics request while making stats request: %v", err)
|
||||
return fmt.Errorf("error creating metrics request while making stats request: %w", err)
|
||||
}
|
||||
|
||||
// The gRPC client is concurrency-safe, so we can reuse it in all the following goroutines
|
||||
|
@ -283,7 +283,7 @@ func respToRows(resp *pb.StatSummaryResponse) []*pb.StatTable_PodGroup_Row {
|
|||
func requestStatsFromAPI(client pb.ApiClient, req *pb.StatSummaryRequest) (*pb.StatSummaryResponse, error) {
|
||||
resp, err := client.StatSummary(context.Background(), req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("StatSummary API error: %v", err)
|
||||
return nil, fmt.Errorf("StatSummary API error: %w", err)
|
||||
}
|
||||
if e := resp.GetError(); e != nil {
|
||||
return nil, fmt.Errorf("StatSummary API response error: %v", e.Error)
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"bufio"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
@ -230,24 +231,24 @@ func NewCmdTap() *cobra.Command {
|
|||
|
||||
err := options.validate()
|
||||
if err != nil {
|
||||
return fmt.Errorf("validation error when executing tap command: %v", err)
|
||||
return fmt.Errorf("validation error when executing tap command: %w", err)
|
||||
}
|
||||
|
||||
req, err := pkg.BuildTapByResourceRequest(requestParams)
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, err.Error())
|
||||
fmt.Fprintln(os.Stderr, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
k8sAPI, err := k8s.NewAPI(kubeconfigPath, kubeContext, impersonate, impersonateGroup, 0)
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, err.Error())
|
||||
fmt.Fprintln(os.Stderr, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
err = requestTapByResourceFromAPI(cmd.Context(), os.Stdout, k8sAPI, req, options)
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, err.Error())
|
||||
fmt.Fprintln(os.Stderr, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
|
@ -311,10 +312,10 @@ func renderTapEvents(tapByteStream *bufio.Reader, w io.Writer, render renderTapE
|
|||
log.Debug("Waiting for data...")
|
||||
event := tapPb.TapEvent{}
|
||||
err := protohttp.FromByteStreamToProtocolBuffers(tapByteStream, &event)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
break
|
||||
}
|
||||
|
|
|
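Sentinel errors such as io.EOF get the same treatment: err == io.EOF misses an EOF that arrives wrapped, so the loop above folds the check into the err != nil branch using errors.Is. A small illustrative sketch, assuming nothing about the real byte-stream decoder:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

func drain(r io.Reader) error {
	buf := make([]byte, 8)
	for {
		if _, err := r.Read(buf); err != nil {
			if errors.Is(err, io.EOF) { // also matches fmt.Errorf("...: %w", io.EOF)
				return nil // normal end of stream
			}
			return fmt.Errorf("read failed: %w", err)
		}
	}
}

func main() {
	fmt.Println(drain(strings.NewReader("tap event stream"))) // <nil>
}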
@ -3,6 +3,7 @@ package cmd
|
|||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
@ -478,7 +479,7 @@ func recvEvents(tapByteStream *bufio.Reader, eventCh chan<- *tapPb.TapEvent, clo
|
|||
event := &tapPb.TapEvent{}
|
||||
err := protohttp.FromByteStreamToProtocolBuffers(tapByteStream, event)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
if errors.Is(err, io.EOF) {
|
||||
fmt.Println("Tap stream terminated")
|
||||
} else if !strings.HasSuffix(err.Error(), pkg.ErrClosedResponseBody) {
|
||||
fmt.Println(err.Error())
|
||||
|
@ -598,7 +599,7 @@ func newRow(req topRequest) (tableRow, error) {
|
|||
|
||||
latency, err := ptypes.Duration(req.rspEnd.GetSinceRequestInit())
|
||||
if err != nil {
|
||||
return tableRow{}, fmt.Errorf("error parsing duration %v: %s", req.rspEnd.GetSinceRequestInit(), err)
|
||||
return tableRow{}, fmt.Errorf("error parsing duration %v: %w", req.rspEnd.GetSinceRequestInit(), err)
|
||||
}
|
||||
// TODO: Once tap events have a classification field, we should use that field
|
||||
// instead of determining success here.
|
||||
|
|
|
@ -2,6 +2,7 @@ package api
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
|
@ -66,7 +67,7 @@ func testEdges(t *testing.T, expectations []edgesExpected) {
|
|||
}
|
||||
|
||||
rsp, err := fakeGrpcServer.Edges(context.TODO(), exp.req)
|
||||
if err != exp.err {
|
||||
if !errors.Is(err, exp.err) {
|
||||
t.Fatalf("Expected error: %s, Got: %s", exp.err, err)
|
||||
}
|
||||
|
||||
|
|
|
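The test comparisons change for the same reason: err != exp.err only matches identical error values, while errors.Is also accepts an error that wraps the expected one and still treats nil/nil as a match. A hedged sketch of the pattern with a hypothetical function under test (this is not the real Edges test):

package api_test

import (
	"errors"
	"fmt"
	"testing"
)

var errBadRequest = errors.New("bad request") // hypothetical expected error

func doRequest(valid bool) error {
	if !valid {
		return fmt.Errorf("edges: %w", errBadRequest)
	}
	return nil
}

func TestDoRequest(t *testing.T) {
	cases := []struct {
		valid bool
		err   error
	}{{valid: true, err: nil}, {valid: false, err: errBadRequest}}

	for _, exp := range cases {
		err := doRequest(exp.valid)
		if !errors.Is(err, exp.err) { // wrap-aware, unlike err != exp.err
			t.Fatalf("Expected error: %v, Got: %v", exp.err, err)
		}
	}
}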
@ -84,7 +84,7 @@ func (s *grpcServer) ListPods(ctx context.Context, req *pb.ListPodsRequest) (*pb
|
|||
var err error
|
||||
labelSelector, err = labels.Parse(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid label selector \"%s\": %s", s, err)
|
||||
return nil, fmt.Errorf("invalid label selector %q: %w", s, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -2,6 +2,7 @@ package api
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
@ -509,7 +510,7 @@ metadata:
|
|||
k8sAPI.Sync(nil)
|
||||
|
||||
rsp, err := fakeGrpcServer.ListServices(context.TODO(), &pb.ListServicesRequest{})
|
||||
if err != exp.err {
|
||||
if !errors.Is(err, exp.err) {
|
||||
t.Fatalf("Expected error: %s, Got: %s", exp.err, err)
|
||||
}
|
||||
|
||||
|
|
|
@ -900,7 +900,7 @@ func getLabelSelector(req *pb.StatSummaryRequest) (labels.Selector, error) {
|
|||
var err error
|
||||
labelSelector, err = labels.Parse(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid label selector \"%s\": %s", s, err)
|
||||
return nil, fmt.Errorf("invalid label selector %q: %w", s, err)
|
||||
}
|
||||
}
|
||||
return labelSelector, nil
|
||||
|
|
|
@ -71,7 +71,7 @@ func testStatSummary(t *testing.T, expectations []statSumExpected) {
|
|||
}
|
||||
|
||||
rsp, err := fakeGrpcServer.StatSummary(context.TODO(), exp.req)
|
||||
if err != exp.err {
|
||||
if !errors.Is(err, exp.err) {
|
||||
t.Fatalf("Expected error: %s, Got: %s", exp.err, err)
|
||||
}
|
||||
|
||||
|
|
|
@ -363,7 +363,7 @@ func getTopLabelSelector(req *pb.TopRoutesRequest) (labels.Selector, error) {
|
|||
var err error
|
||||
labelSelector, err = labels.Parse(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid label selector \"%s\": %s", s, err)
|
||||
return nil, fmt.Errorf("invalid label selector %q: %w", s, err)
|
||||
}
|
||||
}
|
||||
return labelSelector, nil
|
||||
|
|
|
@ -2,6 +2,7 @@ package api
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"testing"
|
||||
|
@ -214,7 +215,7 @@ func testTopRoutes(t *testing.T, expectations []topRoutesExpected) {
|
|||
}
|
||||
|
||||
rsp, err := fakeGrpcServer.TopRoutes(context.TODO(), exp.req)
|
||||
if err != exp.err {
|
||||
if !errors.Is(err, exp.err) {
|
||||
t.Fatalf("Expected error: %s, Got: %s", exp.err, err)
|
||||
}
|
||||
|
||||
|
|
|
@ -213,7 +213,7 @@ func (hc *HealthChecker) VizCategory() *healthcheck.Category {
|
|||
Warning().
|
||||
WithCheck(func(ctx context.Context) error {
|
||||
if hc.externalPrometheusURL != "" {
|
||||
return &healthcheck.SkipError{Reason: "prometheus is disabled"}
|
||||
return healthcheck.SkipError{Reason: "prometheus is disabled"}
|
||||
}
|
||||
|
||||
// Check for ClusterRoles
|
||||
|
|
|
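The SkipError hunk is related but subtler: errors.Is and errors.As match the concrete type that is actually in the chain, so whether a custom error is returned by value or by pointer has to line up with what callers look for. A generic sketch with a hypothetical value-typed error (not Linkerd's actual SkipError definition):

package main

import (
	"errors"
	"fmt"
)

// skipError is a hypothetical stand-in for a value-typed custom error.
type skipError struct{ Reason string }

func (e skipError) Error() string { return "skipped: " + e.Reason }

func check() error {
	return skipError{Reason: "prometheus is disabled"} // returned by value, not &skipError{...}
}

func main() {
	err := check()

	var byValue skipError
	fmt.Println(errors.As(err, &byValue), byValue.Reason) // true "prometheus is disabled"

	var byPointer *skipError
	fmt.Println(errors.As(err, &byPointer)) // false: the chain holds a value, not a pointer
}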
@ -2,6 +2,7 @@ package api
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
@ -323,11 +324,11 @@ func (s *GRPCTapServer) tapProxy(ctx context.Context, maxRps float32, match *pro
|
|||
}
|
||||
for { // Stream loop
|
||||
event, err := rsp.Recv()
|
||||
if err == io.EOF {
|
||||
log.Debugf("[%s] proxy terminated the stream", addr)
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
log.Debugf("[%s] proxy terminated the stream", addr)
|
||||
break
|
||||
}
|
||||
log.Errorf("[%s] encountered an error: %s", addr, err)
|
||||
return
|
||||
}
|
||||
|
@ -678,7 +679,7 @@ func getLabelSelector(req *tapPb.TapByResourceRequest) (labels.Selector, error)
|
|||
var err error
|
||||
labelSelector, err = labels.Parse(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid label selector \"%s\": %s", s, err)
|
||||
return nil, fmt.Errorf("invalid label selector \"%s\": %w", s, err)
|
||||
}
|
||||
}
|
||||
return labelSelector, nil
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
|
@ -87,7 +88,7 @@ func NewServer(
|
|||
|
||||
lis, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("net.Listen failed with: %s", err)
|
||||
return nil, fmt.Errorf("net.Listen failed with: %w", err)
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
|
@ -102,7 +103,7 @@ func NewServer(
|
|||
httpServer.TLSConfig.GetCertificate = s.getCertificate
|
||||
|
||||
if err := watcher.UpdateCert(s.certValue); err != nil {
|
||||
return nil, fmt.Errorf("Failed to initialized certificate: %s", err)
|
||||
return nil, fmt.Errorf("failed to initialized certificate: %w", err)
|
||||
}
|
||||
|
||||
go watcher.ProcessEvents(log, s.certValue, updateEvent, errEvent)
|
||||
|
@ -114,7 +115,7 @@ func NewServer(
|
|||
func (a *Server) Start(ctx context.Context) {
|
||||
a.log.Infof("starting tap API server on %s", a.Server.Addr)
|
||||
if err := a.ServeTLS(a.listener, "", ""); err != nil {
|
||||
if err == http.ErrServerClosed {
|
||||
if errors.Is(err, http.ErrServerClosed) {
|
||||
return
|
||||
}
|
||||
a.log.Fatal(err)
|
||||
|
@ -167,13 +168,11 @@ func serverAuth(ctx context.Context, k8sAPI *k8s.API) (string, []string, string,
|
|||
cm, err := k8sAPI.Client.CoreV1().
|
||||
ConfigMaps(metav1.NamespaceSystem).
|
||||
Get(ctx, pkgk8s.ExtensionAPIServerAuthenticationConfigMapName, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
return "", nil, "", "", fmt.Errorf("failed to load [%s] config: %s", pkgk8s.ExtensionAPIServerAuthenticationConfigMapName, err)
|
||||
return "", nil, "", "", fmt.Errorf("failed to load [%s] config: %w", pkgk8s.ExtensionAPIServerAuthenticationConfigMapName, err)
|
||||
}
|
||||
|
||||
clientCAPem, ok := cm.Data[pkgk8s.ExtensionAPIServerAuthenticationRequestHeaderClientCAFileKey]
|
||||
|
||||
if !ok {
|
||||
return "", nil, "", "", fmt.Errorf("no client CA cert available for apiextension-server")
|
||||
}
|
||||
|
|
|
@ -26,7 +26,7 @@ func TestAPIServerAuth(t *testing.T) {
|
|||
err error
|
||||
}{
|
||||
{
|
||||
err: fmt.Errorf("failed to load [%s] config: configmaps \"%s\" not found", k8sutils.ExtensionAPIServerAuthenticationConfigMapName, k8sutils.ExtensionAPIServerAuthenticationConfigMapName),
|
||||
err: fmt.Errorf("failed to load [%s] config: configmaps %q not found", k8sutils.ExtensionAPIServerAuthenticationConfigMapName, k8sutils.ExtensionAPIServerAuthenticationConfigMapName),
|
||||
},
|
||||
{
|
||||
k8sRes: []string{`
|
||||
|
@ -63,20 +63,28 @@ data:
|
|||
}
|
||||
|
||||
clientCAPem, allowedNames, usernameHeader, groupHeader, err := serverAuth(ctx, k8sAPI)
|
||||
if !reflect.DeepEqual(err, exp.err) {
|
||||
t.Errorf("apiServerAuth returned unexpected error: %s, expected: %s", err, exp.err)
|
||||
|
||||
if err != nil && exp.err != nil {
|
||||
if err.Error() != exp.err.Error() {
|
||||
t.Errorf("apiServerAuth returned unexpected error: %q, expected: %q", err, exp.err)
|
||||
}
|
||||
} else if err != nil {
|
||||
t.Fatalf("Unexpected error: %s", err)
|
||||
} else if exp.err != nil {
|
||||
t.Fatalf("Did not encounter expected error: %s", err)
|
||||
}
|
||||
|
||||
if clientCAPem != exp.clientCAPem {
|
||||
t.Errorf("apiServerAuth returned unexpected clientCAPem: %s, expected: %s", clientCAPem, exp.clientCAPem)
|
||||
t.Errorf("apiServerAuth returned unexpected clientCAPem: %q, expected: %q", clientCAPem, exp.clientCAPem)
|
||||
}
|
||||
if !reflect.DeepEqual(allowedNames, exp.allowedNames) {
|
||||
t.Errorf("apiServerAuth returned unexpected allowedNames: %s, expected: %s", allowedNames, exp.allowedNames)
|
||||
t.Errorf("apiServerAuth returned unexpected allowedNames: %q, expected: %q", allowedNames, exp.allowedNames)
|
||||
}
|
||||
if usernameHeader != exp.usernameHeader {
|
||||
t.Errorf("apiServerAuth returned unexpected usernameHeader: %s, expected: %s", usernameHeader, exp.usernameHeader)
|
||||
t.Errorf("apiServerAuth returned unexpected usernameHeader: %q, expected: %q", usernameHeader, exp.usernameHeader)
|
||||
}
|
||||
if groupHeader != exp.groupHeader {
|
||||
t.Errorf("apiServerAuth returned unexpected groupHeader: %s, expected: %s", groupHeader, exp.groupHeader)
|
||||
t.Errorf("apiServerAuth returned unexpected groupHeader: %q, expected: %q", groupHeader, exp.groupHeader)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
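The rewritten assertion above drops reflect.DeepEqual, which is unreliable for errors (two independently constructed errors with the same text are not deeply equal, and wrapped errors carry unexported state), in favor of handling the nil cases and then comparing messages. A compact sketch of that comparison as a hypothetical helper:

package main

import (
	"errors"
	"fmt"
)

// sameError mirrors the message-based comparison: both nil, or both non-nil
// with identical Error() strings. It is an illustrative helper, not project code.
func sameError(got, want error) bool {
	if got == nil || want == nil {
		return errors.Is(got, want) // true only when both are nil
	}
	return got.Error() == want.Error()
}

func main() {
	want := fmt.Errorf("failed to load [%s] config: not found", "extension-apiserver-authentication")
	got := fmt.Errorf("failed to load [%s] config: not found", "extension-apiserver-authentication")
	fmt.Println(sameError(got, want)) // true, even though got and want are distinct values
}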
@ -45,7 +45,7 @@ type TapRequestParams struct {
|
|||
func BuildTapByResourceRequest(params TapRequestParams) (*tapPb.TapByResourceRequest, error) {
|
||||
target, err := util.BuildResource(params.Namespace, params.Resource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("target resource invalid: %s", err)
|
||||
return nil, fmt.Errorf("target resource invalid: %w", err)
|
||||
}
|
||||
if !contains(util.ValidTargets, target.Type) {
|
||||
return nil, fmt.Errorf("unsupported resource type [%s]", target.Type)
|
||||
|
@ -56,7 +56,7 @@ func BuildTapByResourceRequest(params TapRequestParams) (*tapPb.TapByResourceReq
|
|||
if params.ToResource != "" {
|
||||
destination, err := util.BuildResource(params.ToNamespace, params.ToResource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("destination resource invalid: %s", err)
|
||||
return nil, fmt.Errorf("destination resource invalid: %w", err)
|
||||
}
|
||||
if !contains(ValidTapDestinations, destination.Type) {
|
||||
return nil, fmt.Errorf("unsupported resource type [%s]", destination.Type)
|
||||
|
|
|
@ -270,7 +270,8 @@ func (h *handler) handleAPITap(w http.ResponseWriter, req *http.Request, p httpr
|
|||
// If there was a [403] error when initiating a tap, close the
|
||||
// socket with `ClosePolicyViolation` status code so that the error
|
||||
// renders without the error prefix in the banner
|
||||
if httpErr, ok := err.(protohttp.HTTPError); ok && httpErr.Code == http.StatusForbidden {
|
||||
var he protohttp.HTTPError
|
||||
if errors.As(err, &he) && he.Code == http.StatusForbidden {
|
||||
err := fmt.Errorf("missing authorization, visit %s to remedy", tappkg.TapRbacURL)
|
||||
websocketError(ws, websocket.ClosePolicyViolation, err)
|
||||
return
|
||||
|
@ -286,10 +287,10 @@ func (h *handler) handleAPITap(w http.ResponseWriter, req *http.Request, p httpr
|
|||
for {
|
||||
event := tapPb.TapEvent{}
|
||||
err := protohttp.FromByteStreamToProtocolBuffers(reader, &event)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
websocketError(ws, websocket.CloseInternalServerErr, err)
|
||||
break
|
||||
}
|
||||
|
|
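errorlint also flags direct type assertions on errors, as in the handler above, because a single layer of %w wrapping defeats them; errors.As is the wrap-aware equivalent. A standalone sketch with a hypothetical HTTP-style error type (not the actual protohttp.HTTPError):

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// httpError is a hypothetical error type carrying a status code.
type httpError struct{ Code int }

func (e httpError) Error() string { return fmt.Sprintf("http error: %d", e.Code) }

func main() {
	err := fmt.Errorf("initiating tap: %w", httpError{Code: http.StatusForbidden})

	// A plain type assertion, err.(httpError), fails here because of the wrapping.
	var he httpError
	if errors.As(err, &he) && he.Code == http.StatusForbidden {
		fmt.Println("missing authorization, closing websocket with ClosePolicyViolation")
	}
}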