diff --git a/Gopkg.lock b/Gopkg.lock index 3d28bd72..9dd994e0 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -119,6 +119,14 @@ revision = "026c730a0dcc5d11f93f1cf1cc65b01247ea7b6f" version = "v4.5.0" +[[projects]] + digest = "1:53becd66889185091b58ea3fc49294996f2179fb05a89702f4de7d15e581b509" + name = "github.com/go-logr/logr" + packages = ["."] + pruneopts = "NUT" + revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" + version = "v0.1.0" + [[projects]] digest = "1:abea725bcf0210887f5da19d804fffa1dd45a42a56bdf5f02322345e3fee4f0d" name = "github.com/gogo/protobuf" @@ -246,6 +254,14 @@ revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" version = "v0.5.0" +[[projects]] + digest = "1:b391e0e88682b33e62e6bcb005a4cfdebe9a14969dab6ebc5e7e4c166986e427" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "NUT" + revision = "66f88b4ae75f5edcc556623b96ff32c06360fbb7" + version = "v0.3.9" + [[projects]] digest = "1:1f2aebae7e7c856562355ec0198d8ca2fa222fb05e5b1b66632a1fce39631885" name = "github.com/jmespath/go-jmespath" @@ -260,6 +276,14 @@ pruneopts = "NUT" revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682" +[[projects]] + branch = "master" + digest = "1:cf68a79fd02ab0f6942b9567d03d054b6568212fcc22de2a4afdefd096923749" + name = "github.com/kballard/go-shellquote" + packages = ["."] + pruneopts = "NUT" + revision = "95032a82bc518f77982ea72343cc1ade730072f0" + [[projects]] digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" name = "github.com/matttproud/golang_protobuf_extensions" @@ -284,6 +308,14 @@ revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" version = "1.0.1" +[[projects]] + digest = "1:c01e66557785d9d75328704d11c44c26582980cd78209475cb141b3eedd11e9f" + name = "github.com/openzipkin/zipkin-go" + packages = ["model"] + pruneopts = "NUT" + revision = "c29478e51bfb2e9c93e0e9f5e015e5993a490399" + version = "v0.2.2" + [[projects]] digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24" name = "github.com/pkg/errors" @@ -731,7 +763,7 @@ version = "kubernetes-1.16.4" [[projects]] - digest = "1:952932759f1d7524d526016f70aeaa6a6947519616255ba44c30753f358b53fe" + digest = "1:faee627f5b8379d10023d8d4844a06f0b4f89f862a81d60f3bd7f3db1a16ffbd" name = "k8s.io/client-go" packages = [ "discovery", @@ -872,8 +904,12 @@ "rest", "rest/watch", "testing", + "tools/auth", "tools/cache", + "tools/clientcmd", "tools/clientcmd/api", + "tools/clientcmd/api/latest", + "tools/clientcmd/api/v1", "tools/metrics", "tools/pager", "tools/record", @@ -883,6 +919,7 @@ "util/cert", "util/connrotation", "util/flowcontrol", + "util/homedir", "util/keyutil", "util/retry", "util/workqueue", @@ -966,7 +1003,7 @@ [[projects]] branch = "master" - digest = "1:9b83d1c207c67003de88414626add45e9bce8c47502e07cbe6cb5fd48e39f2df" + digest = "1:462fe2b87ea48e65015f45b404de9db04ebd1332fca8f06e19642641308869f7" name = "knative.dev/pkg" packages = [ "apis", @@ -984,20 +1021,28 @@ "metrics", "metrics/metricskey", "reconciler", + "test", + "test/cmd", + "test/helpers", + "test/ingress", + "test/logging", + "test/monitoring", + "test/spoof", + "test/zipkin", ] pruneopts = "T" - revision = "92cdec5b35931192590f5d8affad86898b6c50d6" + revision = "466c676678ddd57b3ec05a06a2d0bc6cfca8c3db" [[projects]] branch = "master" - digest = "1:d8858077778bca77705b26d5b5262bf33a6bfbaa701fffca1578fd7ef4c4b975" + digest = "1:ce8214f24f811aa582a58b2680e3380ff3fac7668a00d490fb23349a21051dcb" name = "knative.dev/test-infra" packages = [ "scripts", "tools/dep-collector", ] pruneopts = "UT" - 
revision = "e84f0d1a364732918a6635cb07d895ff77ecba76" + revision = "a7b18bf0c37bf1a10743fba5c1ce988b7c2e0e30" [[projects]] digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c" diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/go-logr/logr/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go new file mode 100644 index 00000000..ad72e788 --- /dev/null +++ b/vendor/github.com/go-logr/logr/logr.go @@ -0,0 +1,151 @@ +// Package logr defines abstract interfaces for logging. Packages can depend on +// these interfaces and callers can implement logging in whatever way is +// appropriate. +// +// This design derives from Dave Cheney's blog: +// http://dave.cheney.net/2015/11/05/lets-talk-about-logging +// +// This is a BETA grade API. Until there is a significant 2nd implementation, +// I don't really know how it will change. +// +// The logging specifically makes it non-trivial to use format strings, to encourage +// attaching structured information instead of unstructured format strings. +// +// Usage +// +// Logging is done using a Logger. Loggers can have name prefixes and named values +// attached, so that all log messages logged with that Logger have some base context +// associated. +// +// The term "key" is used to refer to the name associated with a particular value, to +// disambiguate it from the general Logger name. +// +// For instance, suppose we're trying to reconcile the state of an object, and we want +// to log that we've made some decision. +// +// With the traditional log package, we might write +// log.Printf( +// "decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) +// +// With logr's structured logging, we'd write +// // elsewhere in the file, set up the logger to log with the prefix of "reconcilers", +// // and the named value target-type=Foo, for extra context. +// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") +// +// // later on... +// log.Info("setting field foo on object", "value", targetValue, "object", object) +// +// Depending on our logging implementation, we could then make logging decisions based on field values +// (like only logging such events for objects in a certain namespace), or copy the structured +// information into a structured log store. +// +// For logging errors, Logger has a method called Error. Suppose we wanted to log an +// error while reconciling. With the traditional log package, we might write +// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) +// +// With logr, we'd instead write +// // assuming the above setup for log +// log.Error(err, "unable to reconcile object", "object", object) +// +// This functions similarly to: +// log.Info("unable to reconcile object", "error", err, "object", object) +// +// However, it ensures that a standard key for the error value ("error") is used across all +// error logging. 
Furthermore, certain implementations may choose to attach additional
+// information (such as stack traces) on calls to Error, so it's preferred to use Error
+// to log errors.
+//
+// Parts of a log line
+//
+// Each log message from a Logger has four types of context:
+// logger name, log verbosity, log message, and the named values.
+//
+// The Logger name consists of a series of name "segments" added by successive calls to WithName.
+// These name segments will be joined in some way by the underlying implementation. It is strongly
+// recommended that name segments contain simple identifiers (letters, digits, and hyphen), and do
+// not contain characters that could muddle the log output or confuse the joining operation (e.g.
+// whitespace, commas, periods, slashes, brackets, quotes, etc).
+//
+// Log verbosity represents how little a log matters. Level zero, the default, matters most.
+// Increasing levels matter less and less. Try to avoid lots of different verbosity levels,
+// and instead provide useful keys, logger names, and log messages for users to filter on.
+// It's illegal to pass a log level below zero.
+//
+// The log message consists of a constant message attached to the log line. This
+// should generally be a simple description of what's occurring, and should never be a format string.
+//
+// Variable information can then be attached using named values (key/value pairs). Keys are arbitrary
+// strings, while values may be any Go value.
+//
+// Key Naming Conventions
+//
+// While users are generally free to use key names of their choice, it's generally best to avoid
+// using the following keys, as they're frequently used by implementations:
+//
+// - `"error"`: the underlying error value in the `Error` method.
+// - `"stacktrace"`: the stack trace associated with a particular log line or error
+//   (often from the `Error` message).
+// - `"caller"`: the calling information (file/line) of a particular log line.
+// - `"msg"`: the log message.
+// - `"level"`: the log level.
+// - `"ts"`: the timestamp for a log line.
+//
+// Implementations are encouraged to make use of these keys to represent the above
+// concepts, when necessary (for example, in a pure-JSON output form, it would be
+// necessary to represent at least message and timestamp as ordinary named values).
+package logr
+
+// TODO: consider adding back in format strings if they're really needed
+// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects)
+// TODO: consider other bits of glog functionality like Flush, InfoDepth, OutputStats
+
+// InfoLogger represents the ability to log non-error messages, at a particular verbosity.
+type InfoLogger interface {
+	// Info logs a non-error message with the given key/value pairs as context.
+	//
+	// The msg argument should be used to add some constant description to
+	// the log line. The key/value pairs can then be used to add additional
+	// variable information. The key/value pairs should alternate string
+	// keys and arbitrary values.
+	Info(msg string, keysAndValues ...interface{})
+
+	// Enabled tests whether this InfoLogger is enabled. For example,
+	// commandline flags might be used to set the logging verbosity and disable
+	// some info logs.
+	Enabled() bool
+}
+
+// Logger represents the ability to log messages, both errors and not.
+type Logger interface {
+	// All Loggers implement InfoLogger.
Calling InfoLogger methods directly on
+	// a Logger value is equivalent to calling them on a V(0) InfoLogger. For
+	// example, logger.Info() produces the same result as logger.V(0).Info.
+	InfoLogger
+
+	// Error logs an error, with the given message and key/value pairs as context.
+	// It functions similarly to calling Info with the "error" named value, but may
+	// have unique behavior, and should be preferred for logging errors (see the
+	// package documentation for more information).
+	//
+	// The msg field should be used to add context to any underlying error,
+	// while the err field should be used to attach the actual error that
+	// triggered this log line, if present.
+	Error(err error, msg string, keysAndValues ...interface{})
+
+	// V returns an InfoLogger value for a specific verbosity level. A higher
+	// verbosity level means a log message is less important. It's illegal to
+	// pass a log level less than zero.
+	V(level int) InfoLogger
+
+	// WithValues adds some key-value pairs of context to a logger.
+	// See Info for documentation on how key/value pairs work.
+	WithValues(keysAndValues ...interface{}) Logger
+
+	// WithName adds a new element to the logger's name.
+	// Successive calls to WithName continue to append
+	// suffixes to the logger's name. It's strongly recommended
+	// that name segments contain only letters, digits, and hyphens
+	// (see the package documentation for more information).
+	WithName(name string) Logger
+}
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
new file mode 100644
index 00000000..68668029
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+  * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+  * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
new file mode 100644
index 00000000..6e9aa7ba
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+
+Mergo won't merge unexported (private) fields but will recursively merge any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Usage
+
+From my own work-in-progress project:
+
+	type networkConfig struct {
+		Protocol   string
+		Address    string
+		ServerType string `json:"server_type"`
+		Port       uint16
+	}
+
+	type FssnConfig struct {
+		Network networkConfig
+	}
+
+	var fssnDefault = FssnConfig{
+		networkConfig{
+			"tcp",
+			"127.0.0.1",
+			"http",
+			31560,
+		},
+	}
+
+	// Inside a function [...]
+
+	if err := mergo.Merge(&config, fssnDefault); err != nil {
+		log.Fatal(err)
+	}
+
+	// More code [...]
+
+*/
+package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 00000000..d83258b4
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,176 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"fmt"
+	"reflect"
+	"unicode"
+	"unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+	if s == "" {
+		return s
+	}
+	r, n := utf8.DecodeRuneInString(s)
+	return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+	r, _ := utf8.DecodeRuneInString(field.Name)
+	return r >= 'A' && r <= 'Z'
+}
+
+// Traverses recursively both values, assigning src's fields values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+	overwrite := config.Overwrite
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	zeroValue := reflect.Value{}
+	switch dst.Kind() {
+	case reflect.Map:
+		dstMap := dst.Interface().(map[string]interface{})
+		for i, n := 0, src.NumField(); i < n; i++ {
+			srcType := src.Type()
+			field := srcType.Field(i)
+			if !isExported(field) {
+				continue
+			}
+			fieldName := field.Name
+			fieldName = changeInitialCase(fieldName, unicode.ToLower)
+			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+				dstMap[fieldName] = src.Field(i).Interface()
+			}
+		}
+	case reflect.Ptr:
+		if dst.IsNil() {
+			v := reflect.New(dst.Type().Elem())
+			dst.Set(v)
+		}
+		dst = dst.Elem()
+		fallthrough
+	case reflect.Struct:
+		srcMap := src.Interface().(map[string]interface{})
+		for key := range srcMap {
+			config.overwriteWithEmptyValue = true
+			srcValue := srcMap[key]
+			fieldName := changeInitialCase(key, unicode.ToUpper)
+			dstElement := dst.FieldByName(fieldName)
+			if dstElement == zeroValue {
+				// We discard it because the field doesn't exist.
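+				// (dstElement is the zero reflect.Value here: FieldByName found no
+				// field on dst matching this key.)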
+				continue
+			}
+			srcElement := reflect.ValueOf(srcValue)
+			dstKind := dstElement.Kind()
+			srcKind := srcElement.Kind()
+			if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+				srcElement = srcElement.Elem()
+				srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+			} else if dstKind == reflect.Ptr {
+				// Can this work? I guess it can't.
+				if srcKind != reflect.Ptr && srcElement.CanAddr() {
+					srcPtr := srcElement.Addr()
+					srcElement = reflect.ValueOf(srcPtr)
+					srcKind = reflect.Ptr
+				}
+			}
+
+			if !srcElement.IsValid() {
+				continue
+			}
+			if srcKind == dstKind {
+				if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+				if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else if srcKind == reflect.Map {
+				if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else {
+				return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+			}
+		}
+	}
+	return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to struct. If src is a struct,
+// dst must be map[string]interface{}.
+// It won't merge unexported (private) fields and will recursively merge any
+// exported field.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// Keys in src that don't match any field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and it keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}, opts ...func(*Config)) error {
+	return _map(dst, src, opts...)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: Use Map(…) with WithOverride
+func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+	return _map(dst, src, append(opts, WithOverride)...)
+}
+
+func _map(dst, src interface{}, opts ...func(*Config)) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+	config := &Config{}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	// To be friction-less, we redirect equal-type arguments
+	// to deepMerge. Only because arguments can be anything.
+	if vSrc.Kind() == vDst.Kind() {
+		_, err := deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+		return err
+	}
+	switch vSrc.Kind() {
+	case reflect.Struct:
+		if vDst.Kind() != reflect.Map {
+			return ErrExpectedMapAsDestination
+		}
+	case reflect.Map:
+		if vDst.Kind() != reflect.Struct {
+			return ErrExpectedStructAsDestination
+		}
+	default:
+		return ErrNotSupported
+	}
+	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 00000000..3332c9c2
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,338 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
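+
+// merge.go implements Merge and its functional options; the recursive work
+// happens in deepMerge below, which is guarded against cyclic values by the
+// visited map (see the visit type in mergo.go).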
+ +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unsafe" +) + +func hasExportedField(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if isExportedComponent(&field) { + return true + } + } + return +} + +func isExportedComponent(field *reflect.StructField) bool { + name := field.Name + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + +type Config struct { + Overwrite bool + AppendSlice bool + TypeCheck bool + Transformers Transformers + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (dst reflect.Value, err error) { + dst = dstIn + overwrite := config.Overwrite + typeCheck := config.TypeCheck + overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + + if !src.IsValid() { + return + } + + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return dst, nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + + if config.Transformers != nil && !isEmptyValue(dst) { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + if dst.IsValid() && src.IsValid() && src.Type() != dst.Type() { + err = fmt.Errorf("cannot append two different types (%s, %s)", src.Kind(), dst.Kind()) + return + } + + switch dst.Kind() { + case reflect.Struct: + if hasExportedField(dst) { + dstCp := reflect.New(dst.Type()).Elem() + for i, n := 0, dst.NumField(); i < n; i++ { + dstField := dst.Field(i) + structField := dst.Type().Field(i) + // copy un-exported struct fields + if !isExportedComponent(&structField) { + rf := dstCp.Field(i) + rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() //nolint:gosec + dstRF := dst.Field(i) + if !dst.Field(i).CanAddr() { + continue + } + + dstRF = reflect.NewAt(dstRF.Type(), unsafe.Pointer(dstRF.UnsafeAddr())).Elem() //nolint:gosec + rf.Set(dstRF) + continue + } + dstField, err = deepMerge(dstField, src.Field(i), visited, depth+1, config) + if err != nil { + return + } + dstCp.Field(i).Set(dstField) + } + + if dst.CanSet() { + dst.Set(dstCp) + } else { + dst = dstCp + } + return + } else { + if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { + dst = src + } + } + + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } + } + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + dstElement := dst.MapIndex(key) + if !srcElement.IsValid() { + continue + } + if dst.MapIndex(key).IsValid() { + k := dstElement.Interface() + dstElement = reflect.ValueOf(k) + } + if isReflectNil(srcElement) { + if overwrite || isReflectNil(dstElement) { + dst.SetMapIndex(key, srcElement) + } + continue + } + 
if !srcElement.CanInterface() { + continue + } + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + if dstElement.IsValid() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + } + dstElement, err = deepMerge(dstElement, srcElement, visited, depth+1, config) + if err != nil { + return + } + dst.SetMapIndex(key, dstElement) + + } + case reflect.Slice: + newSlice := dst + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if typeCheck && src.Type() != dst.Type() { + return dst, fmt.Errorf("cannot override two slices with different type (%s, %s)", src.Type(), dst.Type()) + } + newSlice = src + } else if config.AppendSlice { + if typeCheck && src.Type() != dst.Type() { + err = fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + return + } + newSlice = reflect.AppendSlice(dst, src) + } + if dst.CanSet() { + dst.Set(newSlice) + } else { + dst = newSlice + } + case reflect.Ptr, reflect.Interface: + if isReflectNil(src) { + break + } + + if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) { + if dst.IsNil() || overwrite { + if overwrite || isEmptyValue(dst) { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } + } + } + break + } + + if src.Kind() != reflect.Interface { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if dst, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + dst = dst.Addr() + } else if dst.Elem().Type() == src.Type() { + if dst, err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } else { + return dst, ErrDifferentArgumentsTypes + } + break + } + if dst.IsNil() || overwrite { + if (overwrite || isEmptyValue(dst)) && (overwriteWithEmptySrc || !isEmptyValue(src)) { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } + } + } else if _, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + default: + overwriteFull := (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) + if overwriteFull { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } + } + } + + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by +// non-empty src attribute values. +// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing to customize the merging of some types. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. 
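+// Like the other With* helpers here, it is a functional option: pass it in
+// the opts list of Merge or Map, e.g. mergo.Merge(&dst, src, mergo.WithOverride).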
+func WithOverride(config *Config) {
+	config.Overwrite = true
+}
+
+// WithOverwriteWithEmptyValue will make merge override non-empty dst attributes with empty src attribute values.
+func WithOverwriteWithEmptyValue(config *Config) {
+	config.overwriteWithEmptyValue = true
+}
+
+// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
+func WithOverrideEmptySlice(config *Config) {
+	config.overwriteSliceWithEmptyValue = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
+func WithAppendSlice(config *Config) {
+	config.AppendSlice = true
+}
+
+// WithTypeCheck will make merge check types while overwriting (must be used with WithOverride).
+func WithTypeCheck(config *Config) {
+	config.TypeCheck = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+
+	config := &Config{}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	if !vDst.CanSet() {
+		return fmt.Errorf("cannot set dst, needs reference")
+	}
+	if vDst.Type() != vSrc.Type() {
+		return ErrDifferentArgumentsTypes
+	}
+	_, err = deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+	return err
+}
+
+// isReflectNil reports whether the provided reflect value is nil.
+func isReflectNil(v reflect.Value) bool {
+	k := v.Kind()
+	switch k {
+	case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
+		// Both interface and slice are nil if first word is 0.
+		// Both are always bigger than a word; assume flagIndir.
+		return v.IsNil()
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 00000000..a82fea2f
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,97 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"errors"
+	"reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+	ErrNilArguments                = errors.New("src and dst must not be nil")
+	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
+	ErrNotSupported                = errors.New("only structs and maps are supported")
+	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
+	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it reencounters them.
+// Visited nodes are stored in a map indexed by 17 * a1 + a2.
+type visit struct {
+	ptr  uintptr
+	typ  reflect.Type
+	next *visit
+}
+
+// From src/pkg/encoding/json/encode.go.
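+// isEmptyValue reports whether v counts as "empty" for merging purposes:
+// zero for scalars, zero length for strings and containers, and, for
+// non-nil pointers and interfaces, the emptiness of the pointed-to value.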
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem()) + case reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml new file mode 100644 index 00000000..2f1ad008 --- /dev/null +++ b/vendor/github.com/imdario/mergo/testdata/license.yml @@ -0,0 +1,4 @@ +import: ../../../../fossene/db/schema/thing.yml +fields: + site: string + author: root diff --git a/vendor/github.com/kballard/go-shellquote/LICENSE b/vendor/github.com/kballard/go-shellquote/LICENSE new file mode 100644 index 00000000..a6d77312 --- /dev/null +++ b/vendor/github.com/kballard/go-shellquote/LICENSE @@ -0,0 +1,19 @@ +Copyright (C) 2014 Kevin Ballard + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
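As a quick orientation for the newly vendored go-shellquote package below: Join quotes arguments so that /bin/sh would split them back into the original words, and Split performs that word-splitting. A minimal sketch of a consumer, illustrative only and not part of this diff:

	package main

	import (
		"fmt"

		shellquote "github.com/kballard/go-shellquote"
	)

	func main() {
		// A space forces quote mode, so the multi-word argument is single-quoted.
		fmt.Println(shellquote.Join("grep", "-r", "foo bar", ".")) // grep -r 'foo bar' .

		// Split honors backslash-escapes, single quotes, and double quotes.
		words, err := shellquote.Split(`cp 'my file.txt' /tmp`)
		fmt.Println(words, err) // [cp my file.txt /tmp] <nil>
	}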
diff --git a/vendor/github.com/kballard/go-shellquote/doc.go b/vendor/github.com/kballard/go-shellquote/doc.go new file mode 100644 index 00000000..9445fa4a --- /dev/null +++ b/vendor/github.com/kballard/go-shellquote/doc.go @@ -0,0 +1,3 @@ +// Shellquote provides utilities for joining/splitting strings using sh's +// word-splitting rules. +package shellquote diff --git a/vendor/github.com/kballard/go-shellquote/quote.go b/vendor/github.com/kballard/go-shellquote/quote.go new file mode 100644 index 00000000..72a8cb38 --- /dev/null +++ b/vendor/github.com/kballard/go-shellquote/quote.go @@ -0,0 +1,102 @@ +package shellquote + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// Join quotes each argument and joins them with a space. +// If passed to /bin/sh, the resulting string will be split back into the +// original arguments. +func Join(args ...string) string { + var buf bytes.Buffer + for i, arg := range args { + if i != 0 { + buf.WriteByte(' ') + } + quote(arg, &buf) + } + return buf.String() +} + +const ( + specialChars = "\\'\"`${[|&;<>()*?!" + extraSpecialChars = " \t\n" + prefixChars = "~" +) + +func quote(word string, buf *bytes.Buffer) { + // We want to try to produce a "nice" output. As such, we will + // backslash-escape most characters, but if we encounter a space, or if we + // encounter an extra-special char (which doesn't work with + // backslash-escaping) we switch over to quoting the whole word. We do this + // with a space because it's typically easier for people to read multi-word + // arguments when quoted with a space rather than with ugly backslashes + // everywhere. + origLen := buf.Len() + + if len(word) == 0 { + // oops, no content + buf.WriteString("''") + return + } + + cur, prev := word, word + atStart := true + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if strings.ContainsRune(specialChars, c) || (atStart && strings.ContainsRune(prefixChars, c)) { + // copy the non-special chars up to this point + if len(cur) < len(prev) { + buf.WriteString(prev[0 : len(prev)-len(cur)-l]) + } + buf.WriteByte('\\') + buf.WriteRune(c) + prev = cur + } else if strings.ContainsRune(extraSpecialChars, c) { + // start over in quote mode + buf.Truncate(origLen) + goto quote + } + atStart = false + } + if len(prev) > 0 { + buf.WriteString(prev) + } + return + +quote: + // quote mode + // Use single-quotes, but if we find a single-quote in the word, we need + // to terminate the string, emit an escaped quote, and start the string up + // again + inQuote := false + for len(word) > 0 { + i := strings.IndexRune(word, '\'') + if i == -1 { + break + } + if i > 0 { + if !inQuote { + buf.WriteByte('\'') + inQuote = true + } + buf.WriteString(word[0:i]) + } + word = word[i+1:] + if inQuote { + buf.WriteByte('\'') + inQuote = false + } + buf.WriteString("\\'") + } + if len(word) > 0 { + if !inQuote { + buf.WriteByte('\'') + } + buf.WriteString(word) + buf.WriteByte('\'') + } +} diff --git a/vendor/github.com/kballard/go-shellquote/unquote.go b/vendor/github.com/kballard/go-shellquote/unquote.go new file mode 100644 index 00000000..b1b13da9 --- /dev/null +++ b/vendor/github.com/kballard/go-shellquote/unquote.go @@ -0,0 +1,156 @@ +package shellquote + +import ( + "bytes" + "errors" + "strings" + "unicode/utf8" +) + +var ( + UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string") + UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string") + UnterminatedEscapeError = errors.New("Unterminated backslash-escape") +) + 
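+// The variables below drive splitWord's state machine: splitChars delimit
+// words, singleChar and doubleChar open quoted spans, escapeChar starts a
+// backslash-escape, and doubleEscapeChars lists the only characters a
+// backslash can escape inside a double-quoted string.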
+var ( + splitChars = " \n\t" + singleChar = '\'' + doubleChar = '"' + escapeChar = '\\' + doubleEscapeChars = "$`\"\n\\" +) + +// Split splits a string according to /bin/sh's word-splitting rules. It +// supports backslash-escapes, single-quotes, and double-quotes. Notably it does +// not support the $'' style of quoting. It also doesn't attempt to perform any +// other sort of expansion, including brace expansion, shell expansion, or +// pathname expansion. +// +// If the given input has an unterminated quoted string or ends in a +// backslash-escape, one of UnterminatedSingleQuoteError, +// UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned. +func Split(input string) (words []string, err error) { + var buf bytes.Buffer + words = make([]string, 0) + + for len(input) > 0 { + // skip any splitChars at the start + c, l := utf8.DecodeRuneInString(input) + if strings.ContainsRune(splitChars, c) { + input = input[l:] + continue + } else if c == escapeChar { + // Look ahead for escaped newline so we can skip over it + next := input[l:] + if len(next) == 0 { + err = UnterminatedEscapeError + return + } + c2, l2 := utf8.DecodeRuneInString(next) + if c2 == '\n' { + input = next[l2:] + continue + } + } + + var word string + word, input, err = splitWord(input, &buf) + if err != nil { + return + } + words = append(words, word) + } + return +} + +func splitWord(input string, buf *bytes.Buffer) (word string, remainder string, err error) { + buf.Reset() + +raw: + { + cur := input + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if c == singleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto single + } else if c == doubleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto double + } else if c == escapeChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto escape + } else if strings.ContainsRune(splitChars, c) { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + return buf.String(), cur, nil + } + } + if len(input) > 0 { + buf.WriteString(input) + input = "" + } + goto done + } + +escape: + { + if len(input) == 0 { + return "", "", UnterminatedEscapeError + } + c, l := utf8.DecodeRuneInString(input) + if c == '\n' { + // a backslash-escaped newline is elided from the output entirely + } else { + buf.WriteString(input[:l]) + } + input = input[l:] + } + goto raw + +single: + { + i := strings.IndexRune(input, singleChar) + if i == -1 { + return "", "", UnterminatedSingleQuoteError + } + buf.WriteString(input[0:i]) + input = input[i+1:] + goto raw + } + +double: + { + cur := input + for len(cur) > 0 { + c, l := utf8.DecodeRuneInString(cur) + cur = cur[l:] + if c == doubleChar { + buf.WriteString(input[0 : len(input)-len(cur)-l]) + input = cur + goto raw + } else if c == escapeChar { + // bash only supports certain escapes in double-quoted strings + c2, l2 := utf8.DecodeRuneInString(cur) + cur = cur[l2:] + if strings.ContainsRune(doubleEscapeChars, c2) { + buf.WriteString(input[0 : len(input)-len(cur)-l-l2]) + if c2 == '\n' { + // newline is special, skip the backslash entirely + } else { + buf.WriteRune(c2) + } + input = cur + } + } + } + return "", "", UnterminatedDoubleQuoteError + } + +done: + return buf.String(), input, nil +} diff --git a/vendor/github.com/openzipkin/zipkin-go/LICENSE b/vendor/github.com/openzipkin/zipkin-go/LICENSE new file mode 100644 index 00000000..2ff72246 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/LICENSE @@ -0,0 +1,201 @@ +Apache 
License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, +and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by +the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all +other entities that control, are controlled by, or are under common +control with that entity. For the purposes of this definition, +"control" means (i) the power, direct or indirect, to cause the +direction or management of such entity, whether by contract or +otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity +exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, +including but not limited to software source code, documentation +source, and configuration files. + +"Object" form shall mean any form resulting from mechanical +transformation or translation of a Source form, including but +not limited to compiled object code, generated documentation, +and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or +Object form, made available under the License, as indicated by a +copyright notice that is included in or attached to the work +(an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object +form, that is based on (or derived from) the Work and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. For the purposes +of this License, Derivative Works shall not include works that remain +separable from, or merely link (or bind by name) to the interfaces of, +the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including +the original version of the Work and any modifications or additions +to that Work or Derivative Works thereof, that is intentionally +submitted to Licensor for inclusion in the Work by the copyright owner +or by an individual or Legal Entity authorized to submit on behalf of +the copyright owner. For the purposes of this definition, "submitted" +means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, +and issue tracking systems that are managed by, or on behalf of, the +Licensor for the purpose of discussing and improving the Work, but +excluding communication that is conspicuously marked or otherwise +designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity +on behalf of whom a Contribution has been received by Licensor and +subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the +Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of +this License, each Contributor hereby grants to You a perpetual, +worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except as stated in this section) patent license to make, have made, +use, offer to sell, sell, import, and otherwise transfer the Work, +where such license applies only to those patent claims licensable +by such Contributor that are necessarily infringed by their +Contribution(s) alone or by combination of their Contribution(s) +with the Work to which such Contribution(s) was submitted. If You +institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work +or a Contribution incorporated within the Work constitutes direct +or contributory patent infringement, then any patent licenses +granted to You under this License for that Work shall terminate +as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the +Work or Derivative Works thereof in any medium, with or without +modifications, and in Source or Object form, provided that You +meet the following conditions: + +(a) You must give any other recipients of the Work or +Derivative Works a copy of this License; and + +(b) You must cause any modified files to carry prominent notices +stating that You changed the files; and + +(c) You must retain, in the Source form of any Derivative Works +that You distribute, all copyright, patent, trademark, and +attribution notices from the Source form of the Work, +excluding those notices that do not pertain to any part of +the Derivative Works; and + +(d) If the Work includes a "NOTICE" text file as part of its +distribution, then any Derivative Works that You distribute must +include a readable copy of the attribution notices contained +within such NOTICE file, excluding those notices that do not +pertain to any part of the Derivative Works, in at least one +of the following places: within a NOTICE text file distributed +as part of the Derivative Works; within the Source form or +documentation, if provided along with the Derivative Works; or, +within a display generated by the Derivative Works, if and +wherever such third-party notices normally appear. The contents +of the NOTICE file are for informational purposes only and +do not modify the License. You may add Your own attribution +notices within Derivative Works that You distribute, alongside +or as an addendum to the NOTICE text from the Work, provided +that such additional attribution notices cannot be construed +as modifying the License. + +You may add Your own copyright statement to Your modifications and +may provide additional or different license terms and conditions +for use, reproduction, or distribution of Your modifications, or +for any such Derivative Works as a whole, provided Your use, +reproduction, and distribution of the Work otherwise complies with +the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, +any Contribution intentionally submitted for inclusion in the Work +by You to the Licensor shall be under the terms and conditions of +this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify +the terms of any separate license agreement you may have executed +with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade +names, trademarks, service marks, or product names of the Licensor, +except as required for reasonable and customary use in describing the +origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or +agreed to in writing, Licensor provides the Work (and each +Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied, including, without limitation, any warranties or conditions +of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A +PARTICULAR PURPOSE. You are solely responsible for determining the +appropriateness of using or redistributing the Work and assume any +risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, +whether in tort (including negligence), contract, or otherwise, +unless required by applicable law (such as deliberate and grossly +negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, +incidental, or consequential damages of any character arising as a +result of this License or out of the use or inability to use the +Work (including but not limited to damages for loss of goodwill, +work stoppage, computer failure or malfunction, or any and all +other commercial damages or losses), even if such Contributor +has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing +the Work or Derivative Works thereof, You may choose to offer, +and charge a fee for, acceptance of support, warranty, indemnity, +or other liability obligations and/or rights consistent with this +License. However, in accepting such obligations, You may act only +on Your own behalf and on Your sole responsibility, not on behalf +of any other Contributor, and only if You agree to indemnify, +defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason +of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following +boilerplate notice, with the fields enclosed by brackets "{}" +replaced with your own identifying information. (Don't include +the brackets!) The text should be enclosed in the appropriate +comment syntax for the file format. We also recommend that a +file or class name and description of purpose be included on the +same "printed page" as the copyright notice for easier +identification within third-party archives. + +Copyright 2017 The OpenZipkin Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/openzipkin/zipkin-go/model/annotation.go b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go new file mode 100644 index 00000000..795bc414 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/annotation.go @@ -0,0 +1,60 @@ +// Copyright 2019 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "errors" + "time" +) + +// ErrValidTimestampRequired error +var ErrValidTimestampRequired = errors.New("valid annotation timestamp required") + +// Annotation associates an event that explains latency with a timestamp. +type Annotation struct { + Timestamp time.Time + Value string +} + +// MarshalJSON implements custom JSON encoding +func (a *Annotation) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + Timestamp int64 `json:"timestamp"` + Value string `json:"value"` + }{ + Timestamp: a.Timestamp.Round(time.Microsecond).UnixNano() / 1e3, + Value: a.Value, + }) +} + +// UnmarshalJSON implements custom JSON decoding +func (a *Annotation) UnmarshalJSON(b []byte) error { + type Alias Annotation + annotation := &struct { + TimeStamp uint64 `json:"timestamp"` + *Alias + }{ + Alias: (*Alias)(a), + } + if err := json.Unmarshal(b, &annotation); err != nil { + return err + } + if annotation.TimeStamp < 1 { + return ErrValidTimestampRequired + } + a.Timestamp = time.Unix(0, int64(annotation.TimeStamp)*1e3) + return nil +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/doc.go b/vendor/github.com/openzipkin/zipkin-go/model/doc.go new file mode 100644 index 00000000..1b11b4df --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/doc.go @@ -0,0 +1,23 @@ +// Copyright 2019 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package model contains the Zipkin V2 model which is used by the Zipkin Go +tracer implementation. 
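// A minimal sketch (not part of the vendored sources) of the Annotation codec
// in annotation.go above: timestamps are marshalled as whole microseconds since
// the Unix epoch, and a zero timestamp fails to unmarshal. Values below are
// illustrative only.
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/openzipkin/zipkin-go/model"
)

func main() {
	a := &model.Annotation{
		Timestamp: time.Unix(1500000000, 123456789), // nanoseconds round to microseconds
		Value:     "cache.miss",
	}
	b, err := json.Marshal(a) // MarshalJSON is declared on *Annotation, so marshal a pointer
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"timestamp":1500000000123457,"value":"cache.miss"}

	var back model.Annotation
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err) // a missing or zero timestamp would return ErrValidTimestampRequired
	}
	fmt.Println(back.Timestamp.Equal(a.Timestamp.Round(time.Microsecond))) // true
}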
+ +Third party instrumentation libraries can use the model and transport packages +found in this Zipkin Go library to directly interface with the Zipkin Server or +Zipkin Collectors without the need to use the tracer implementation itself. +*/ +package model diff --git a/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go new file mode 100644 index 00000000..58880bd1 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/endpoint.go @@ -0,0 +1,31 @@ +// Copyright 2019 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import "net" + +// Endpoint holds the network context of a node in the service graph. +type Endpoint struct { + ServiceName string `json:"serviceName,omitempty"` + IPv4 net.IP `json:"ipv4,omitempty"` + IPv6 net.IP `json:"ipv6,omitempty"` + Port uint16 `json:"port,omitempty"` +} + +// Empty returns if all Endpoint properties are empty / unspecified. +func (e *Endpoint) Empty() bool { + return e == nil || + (e.ServiceName == "" && e.Port == 0 && len(e.IPv4) == 0 && len(e.IPv6) == 0) +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/kind.go b/vendor/github.com/openzipkin/zipkin-go/model/kind.go new file mode 100644 index 00000000..5d512ad9 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/kind.go @@ -0,0 +1,27 @@ +// Copyright 2019 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Kind clarifies context of timestamp, duration and remoteEndpoint in a span. +type Kind string + +// Available Kind values +const ( + Undetermined Kind = "" + Client Kind = "CLIENT" + Server Kind = "SERVER" + Producer Kind = "PRODUCER" + Consumer Kind = "CONSUMER" +) diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span.go b/vendor/github.com/openzipkin/zipkin-go/model/span.go new file mode 100644 index 00000000..f428413f --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/span.go @@ -0,0 +1,138 @@ +// Copyright 2019 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
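// A minimal sketch (not part of the vendored sources) of Endpoint.Empty from
// endpoint.go above: the nil check makes it safe to call on an unset *Endpoint,
// which span.go below relies on to drop empty endpoints from the JSON encoding.
// The service name, IP, and port are illustrative.
package main

import (
	"fmt"
	"net"

	"github.com/openzipkin/zipkin-go/model"
)

func main() {
	var unset *model.Endpoint
	fmt.Println(unset.Empty()) // true: the nil receiver is handled

	ep := &model.Endpoint{
		ServiceName: "frontend",
		IPv4:        net.ParseIP("10.0.0.1").To4(),
		Port:        8080,
	}
	fmt.Println(ep.Empty(), model.Client) // false CLIENT
}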
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "errors" + "time" +) + +// unmarshal errors +var ( + ErrValidTraceIDRequired = errors.New("valid traceId required") + ErrValidIDRequired = errors.New("valid span id required") + ErrValidDurationRequired = errors.New("valid duration required") +) + +// SpanContext holds the context of a Span. +type SpanContext struct { + TraceID TraceID `json:"traceId"` + ID ID `json:"id"` + ParentID *ID `json:"parentId,omitempty"` + Debug bool `json:"debug,omitempty"` + Sampled *bool `json:"-"` + Err error `json:"-"` +} + +// SpanModel structure. +// +// If using this library to instrument your application you will not need to +// directly access or modify this representation. The SpanModel is exported for +// use cases involving 3rd party Go instrumentation libraries desiring to +// export data to a Zipkin server using the Zipkin V2 Span model. +type SpanModel struct { + SpanContext + Name string `json:"name,omitempty"` + Kind Kind `json:"kind,omitempty"` + Timestamp time.Time `json:"-"` + Duration time.Duration `json:"-"` + Shared bool `json:"shared,omitempty"` + LocalEndpoint *Endpoint `json:"localEndpoint,omitempty"` + RemoteEndpoint *Endpoint `json:"remoteEndpoint,omitempty"` + Annotations []Annotation `json:"annotations,omitempty"` + Tags map[string]string `json:"tags,omitempty"` +} + +// MarshalJSON exports our Model into the correct format for the Zipkin V2 API. +func (s SpanModel) MarshalJSON() ([]byte, error) { + type Alias SpanModel + + var timestamp int64 + if !s.Timestamp.IsZero() { + if s.Timestamp.Unix() < 1 { + // Zipkin does not allow Timestamps before Unix epoch + return nil, ErrValidTimestampRequired + } + timestamp = s.Timestamp.Round(time.Microsecond).UnixNano() / 1e3 + } + + if s.Duration < time.Microsecond { + if s.Duration < 0 { + // negative duration is not allowed and signals a timing logic error + return nil, ErrValidDurationRequired + } else if s.Duration > 0 { + // sub microsecond durations are reported as 1 microsecond + s.Duration = 1 * time.Microsecond + } + } else { + // Duration will be rounded to nearest microsecond representation. + // + // NOTE: Duration.Round() is not available in Go 1.8 which we still support. + // To handle microsecond resolution rounding we'll add 500 nanoseconds to + // the duration. When truncated to microseconds in the call to marshal, it + // will be naturally rounded. See TestSpanDurationRounding in span_test.go + s.Duration += 500 * time.Nanosecond + } + + if s.LocalEndpoint.Empty() { + s.LocalEndpoint = nil + } + + if s.RemoteEndpoint.Empty() { + s.RemoteEndpoint = nil + } + + return json.Marshal(&struct { + T int64 `json:"timestamp,omitempty"` + D int64 `json:"duration,omitempty"` + Alias + }{ + T: timestamp, + D: s.Duration.Nanoseconds() / 1e3, + Alias: (Alias)(s), + }) +} + +// UnmarshalJSON imports our Model from a Zipkin V2 API compatible span +// representation. 
+func (s *SpanModel) UnmarshalJSON(b []byte) error { + type Alias SpanModel + span := &struct { + T uint64 `json:"timestamp,omitempty"` + D uint64 `json:"duration,omitempty"` + *Alias + }{ + Alias: (*Alias)(s), + } + if err := json.Unmarshal(b, &span); err != nil { + return err + } + if s.ID < 1 { + return ErrValidIDRequired + } + if span.T > 0 { + s.Timestamp = time.Unix(0, int64(span.T)*1e3) + } + s.Duration = time.Duration(span.D*1e3) * time.Nanosecond + if s.LocalEndpoint.Empty() { + s.LocalEndpoint = nil + } + + if s.RemoteEndpoint.Empty() { + s.RemoteEndpoint = nil + } + return nil +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/span_id.go b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go new file mode 100644 index 00000000..452dc871 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/span_id.go @@ -0,0 +1,44 @@ +// Copyright 2019 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// ID type +type ID uint64 + +// String outputs the 64-bit ID as hex string. +func (i ID) String() string { + return fmt.Sprintf("%016x", uint64(i)) +} + +// MarshalJSON serializes an ID type (SpanID, ParentSpanID) to HEX. +func (i ID) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf("%q", i.String())), nil +} + +// UnmarshalJSON deserializes an ID type (SpanID, ParentSpanID) from HEX. +func (i *ID) UnmarshalJSON(b []byte) (err error) { + var id uint64 + if len(b) < 3 { + return nil + } + id, err = strconv.ParseUint(string(b[1:len(b)-1]), 16, 64) + *i = ID(id) + return err +} diff --git a/vendor/github.com/openzipkin/zipkin-go/model/traceid.go b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go new file mode 100644 index 00000000..68d12d38 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go/model/traceid.go @@ -0,0 +1,75 @@ +// Copyright 2019 The OpenZipkin Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// TraceID is a 128 bit number internally stored as 2x uint64 (high & low). +// In case of 64 bit traceIDs, the value can be found in Low. +type TraceID struct { + High uint64 + Low uint64 +} + +// Empty returns if TraceID has zero value. +func (t TraceID) Empty() bool { + return t.Low == 0 && t.High == 0 +} + +// String outputs the 128-bit traceID as hex string. 
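// A minimal sketch (not part of the vendored sources) of SpanModel.MarshalJSON
// above: timestamps and durations are encoded as whole microseconds, a positive
// sub-microsecond duration is clamped up to 1, and IDs serialize as zero-padded
// hex strings. The span values are illustrative.
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/openzipkin/zipkin-go/model"
)

func main() {
	span := model.SpanModel{
		SpanContext: model.SpanContext{
			TraceID: model.TraceID{Low: 1},
			ID:      model.ID(2),
		},
		Name:      "get /users",
		Kind:      model.Server,
		Timestamp: time.Unix(1500000000, 0),
		Duration:  300 * time.Nanosecond, // below 1µs, reported as 1
	}
	b, err := json.Marshal(span)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // note "duration":1 and the zero-padded hex IDs
}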
+func (t TraceID) String() string {
+	if t.High == 0 {
+		return fmt.Sprintf("%016x", t.Low)
+	}
+	return fmt.Sprintf("%016x%016x", t.High, t.Low)
+}
+
+// TraceIDFromHex returns the TraceID from a hex string.
+func TraceIDFromHex(h string) (t TraceID, err error) {
+	if len(h) > 16 {
+		if t.High, err = strconv.ParseUint(h[0:len(h)-16], 16, 64); err != nil {
+			return
+		}
+		t.Low, err = strconv.ParseUint(h[len(h)-16:], 16, 64)
+		return
+	}
+	t.Low, err = strconv.ParseUint(h, 16, 64)
+	return
+}
+
+// MarshalJSON custom JSON serializer to export the TraceID in the required
+// zero padded hex representation.
+func (t TraceID) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("%q", t.String())), nil
+}
+
+// UnmarshalJSON custom JSON deserializer to retrieve the traceID from the hex
+// encoded representation.
+func (t *TraceID) UnmarshalJSON(traceID []byte) error {
+	if len(traceID) < 3 {
+		return ErrValidTraceIDRequired
+	}
+	// A valid JSON string is encoded wrapped in double quotes. We need to trim
+	// these before converting the hex payload.
+	tID, err := TraceIDFromHex(string(traceID[1 : len(traceID)-1]))
+	if err != nil {
+		return err
+	}
+	*t = tID
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/tools/auth/clientauth.go b/vendor/k8s.io/client-go/tools/auth/clientauth.go
new file mode 100644
index 00000000..c3417267
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/auth/clientauth.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package auth defines a file format for holding authentication
+information needed by clients of Kubernetes. Typically,
+a Kubernetes cluster will put auth info for the admin in a known
+location when it is created, and will (soon) put it in a known
+location within a Container's file tree for Containers that
+need access to the Kubernetes API.
+
+Having a defined format allows:
+  - clients to be implemented in multiple languages
+  - applications which link clients to be portable across
+    clusters with different authentication styles (e.g.
+    some may use SSL Client certs, others may not, etc)
+  - when the format changes, applications only
+    need to update this code.
+
+The file format is json, marshalled from a struct authcfg.Info.
+
+Client libraries in other languages should use the same format.
+
+It is not intended to store general preferences, such as default
+namespace, output options, etc. CLIs (such as kubectl) and UIs should
+develop their own format and may wish to inline the authcfg.Info type.
+
+The authcfg.Info is just a file format. It is distinct from
+client.Config which holds options for creating a client.Client.
+Helper functions are provided in this package to fill in a
+client.Client from an authcfg.Info.
+ +Example: + + import ( + "pkg/client" + "pkg/client/auth" + ) + + info, err := auth.LoadFromFile(filename) + if err != nil { + // handle error + } + clientConfig = client.Config{} + clientConfig.Host = "example.com:4901" + clientConfig = info.MergeWithConfig() + client := client.New(clientConfig) + client.Pods(ns).List() +*/ +package auth + +// TODO: need a way to rotate Tokens. Therefore, need a way for client object to be reset when the authcfg is updated. +import ( + "encoding/json" + "io/ioutil" + "os" + + restclient "k8s.io/client-go/rest" +) + +// Info holds Kubernetes API authorization config. It is intended +// to be read/written from a file as a JSON object. +type Info struct { + User string + Password string + CAFile string + CertFile string + KeyFile string + BearerToken string + Insecure *bool +} + +// LoadFromFile parses an Info object from a file path. +// If the file does not exist, then os.IsNotExist(err) == true +func LoadFromFile(path string) (*Info, error) { + var info Info + if _, err := os.Stat(path); os.IsNotExist(err) { + return nil, err + } + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + err = json.Unmarshal(data, &info) + if err != nil { + return nil, err + } + return &info, err +} + +// MergeWithConfig returns a copy of a client.Config with values from the Info. +// The fields of client.Config with a corresponding field in the Info are set +// with the value from the Info. +func (info Info) MergeWithConfig(c restclient.Config) (restclient.Config, error) { + var config = c + config.Username = info.User + config.Password = info.Password + config.CAFile = info.CAFile + config.CertFile = info.CertFile + config.KeyFile = info.KeyFile + config.BearerToken = info.BearerToken + if info.Insecure != nil { + config.Insecure = *info.Insecure + } + return config, nil +} + +// Complete returns true if the Kubernetes API authorization info is complete. +func (info Info) Complete() bool { + return len(info.User) > 0 || + len(info.CertFile) > 0 || + len(info.BearerToken) > 0 +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go new file mode 100644 index 00000000..35bb5dde --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/latest/latest.go @@ -0,0 +1,61 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package latest + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/tools/clientcmd/api/v1" +) + +// Version is the string that represents the current external default version. 
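// A minimal sketch (not part of the vendored sources) of the auth file format
// defined above: LoadFromFile reads the Info struct back from plain JSON, and
// MergeWithConfig copies it onto a rest.Config. The path and credentials are
// hypothetical.
package main

import (
	"fmt"
	"io/ioutil"

	restclient "k8s.io/client-go/rest"
	clientauth "k8s.io/client-go/tools/auth"
)

func main() {
	const path = "/tmp/kube-auth.json" // hypothetical location
	if err := ioutil.WriteFile(path, []byte(`{"User":"admin","Password":"secret"}`), 0600); err != nil {
		panic(err)
	}

	info, err := clientauth.LoadFromFile(path)
	if err != nil {
		panic(err)
	}
	cfg, err := info.MergeWithConfig(restclient.Config{Host: "https://example.com:6443"})
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Username, info.Complete()) // admin true
}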
+const Version = "v1" + +var ExternalVersion = schema.GroupVersion{Group: "", Version: "v1"} + +// OldestVersion is the string that represents the oldest server version supported, +// for client code that wants to hardcode the lowest common denominator. +const OldestVersion = "v1" + +// Versions is the list of versions that are recognized in code. The order provided +// may be assumed to be least feature rich to most feature rich, and clients may +// choose to prefer the latter items in the list over the former items when presented +// with a set of versions to choose. +var Versions = []string{"v1"} + +var ( + Codec runtime.Codec + Scheme *runtime.Scheme +) + +func init() { + Scheme = runtime.NewScheme() + utilruntime.Must(api.AddToScheme(Scheme)) + utilruntime.Must(v1.AddToScheme(Scheme)) + yamlSerializer := json.NewYAMLSerializer(json.DefaultMetaFactory, Scheme, Scheme) + Codec = versioning.NewDefaultingCodecForScheme( + Scheme, + yamlSerializer, + yamlSerializer, + schema.GroupVersion{Version: Version}, + runtime.InternalGroupVersioner, + ) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go new file mode 100644 index 00000000..2d7142e6 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go @@ -0,0 +1,244 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
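// A minimal sketch (not part of the vendored sources), assuming latest.Codec
// behaves the way clientcmd's Load does: it decodes the external v1 kubeconfig
// document into the internal api.Config. The kubeconfig below is illustrative.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/tools/clientcmd/api/latest"
)

func main() {
	kubeconfig := []byte(`apiVersion: v1
kind: Config
current-context: dev
clusters:
- name: dev
  cluster:
    server: https://example.com:6443
contexts:
- name: dev
  context:
    cluster: dev
    user: admin
users:
- name: admin
  user:
    username: admin
`)
	obj, err := runtime.Decode(latest.Codec, kubeconfig)
	if err != nil {
		panic(err)
	}
	cfg := obj.(*api.Config) // decoded into the internal type
	fmt.Println(cfg.CurrentContext, cfg.Clusters["dev"].Server)
}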
+*/ + +package v1 + +import ( + "fmt" + "sort" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/clientcmd/api" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + func(in *Cluster, out *api.Cluster, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Cluster, out *Cluster, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *Preferences, out *api.Preferences, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Preferences, out *Preferences, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *AuthInfo, out *api.AuthInfo, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.AuthInfo, out *AuthInfo, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *Context, out *api.Context, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + func(in *api.Context, out *Context, s conversion.Scope) error { + return s.DefaultConvert(in, out, conversion.IgnoreMissingFields) + }, + + func(in *Config, out *api.Config, s conversion.Scope) error { + out.CurrentContext = in.CurrentContext + if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { + return err + } + + out.Clusters = make(map[string]*api.Cluster) + if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { + return err + } + out.AuthInfos = make(map[string]*api.AuthInfo) + if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { + return err + } + out.Contexts = make(map[string]*api.Context) + if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { + return err + } + out.Extensions = make(map[string]runtime.Object) + if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { + return err + } + return nil + }, + func(in *api.Config, out *Config, s conversion.Scope) error { + out.CurrentContext = in.CurrentContext + if err := s.Convert(&in.Preferences, &out.Preferences, 0); err != nil { + return err + } + + out.Clusters = make([]NamedCluster, 0, 0) + if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { + return err + } + out.AuthInfos = make([]NamedAuthInfo, 0, 0) + if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { + return err + } + out.Contexts = make([]NamedContext, 0, 0) + if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { + return err + } + out.Extensions = make([]NamedExtension, 0, 0) + if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { + return err + } + return nil + }, + func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { + for _, curr := range *in { + newCluster := api.NewCluster() + if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newCluster + } else { + return fmt.Errorf("error converting *[]NamedCluster into *map[string]*api.Cluster: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + + return nil + }, + func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + 
sort.Strings(allKeys) + + for _, key := range allKeys { + newCluster := (*in)[key] + oldCluster := &Cluster{} + if err := s.Convert(newCluster, oldCluster, 0); err != nil { + return err + } + + namedCluster := NamedCluster{key, *oldCluster} + *out = append(*out, namedCluster) + } + + return nil + }, + func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { + for _, curr := range *in { + newAuthInfo := api.NewAuthInfo() + if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newAuthInfo + } else { + return fmt.Errorf("error converting *[]NamedAuthInfo into *map[string]*api.AuthInfo: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + + return nil + }, + func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newAuthInfo := (*in)[key] + oldAuthInfo := &AuthInfo{} + if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil { + return err + } + + namedAuthInfo := NamedAuthInfo{key, *oldAuthInfo} + *out = append(*out, namedAuthInfo) + } + + return nil + }, + func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { + for _, curr := range *in { + newContext := api.NewContext() + if err := s.Convert(&curr.Context, newContext, 0); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newContext + } else { + return fmt.Errorf("error converting *[]NamedContext into *map[string]*api.Context: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + + return nil + }, + func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newContext := (*in)[key] + oldContext := &Context{} + if err := s.Convert(newContext, oldContext, 0); err != nil { + return err + } + + namedContext := NamedContext{key, *oldContext} + *out = append(*out, namedContext) + } + + return nil + }, + func(in *[]NamedExtension, out *map[string]runtime.Object, s conversion.Scope) error { + for _, curr := range *in { + var newExtension runtime.Object + if err := s.Convert(&curr.Extension, &newExtension, 0); err != nil { + return err + } + if (*out)[curr.Name] == nil { + (*out)[curr.Name] = newExtension + } else { + return fmt.Errorf("error converting *[]NamedExtension into *map[string]runtime.Object: duplicate name \"%v\" in list: %v", curr.Name, *in) + } + } + + return nil + }, + func(in *map[string]runtime.Object, out *[]NamedExtension, s conversion.Scope) error { + allKeys := make([]string, 0, len(*in)) + for key := range *in { + allKeys = append(allKeys, key) + } + sort.Strings(allKeys) + + for _, key := range allKeys { + newExtension := (*in)[key] + oldExtension := &runtime.RawExtension{} + if err := s.Convert(newExtension, oldExtension, 0); err != nil { + return err + } + + namedExtension := NamedExtension{key, *oldExtension} + *out = append(*out, namedExtension) + } + + return nil + }, + ) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go new file mode 100644 index 00000000..cbf29ccf --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The 
Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package v1 diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go new file mode 100644 index 00000000..7b91d509 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "kubeconfig" group +var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes, addConversionFuncs) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + ) + return nil +} + +func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } +func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *Config) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go new file mode 100644 index 00000000..56afb608 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go @@ -0,0 +1,203 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// Where possible, json tags match the cli argument names.
+// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted.
+
+// Config holds the information needed to connect to remote kubernetes clusters as a given user
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type Config struct {
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +optional
+	Kind string `json:"kind,omitempty"`
+	// Legacy field from pkg/api/types.go TypeMeta.
+	// TODO(jlowdermilk): remove this after eliminating downstream dependencies.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty"`
+	// Preferences holds general information to be used for cli interactions
+	Preferences Preferences `json:"preferences"`
+	// Clusters is a map of referencable names to cluster configs
+	Clusters []NamedCluster `json:"clusters"`
+	// AuthInfos is a map of referencable names to user configs
+	AuthInfos []NamedAuthInfo `json:"users"`
+	// Contexts is a map of referencable names to context configs
+	Contexts []NamedContext `json:"contexts"`
+	// CurrentContext is the name of the context that you would like to use by default
+	CurrentContext string `json:"current-context"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+type Preferences struct {
+	// +optional
+	Colors bool `json:"colors,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Cluster contains information about how to communicate with a kubernetes cluster
+type Cluster struct {
+	// Server is the address of the kubernetes cluster (https://hostname:port).
+	Server string `json:"server"`
+	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
+	// +optional
+	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+	// CertificateAuthority is the path to a cert file for the certificate authority.
+	// +optional
+	CertificateAuthority string `json:"certificate-authority,omitempty"`
+	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
+	// +optional
+	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
+type AuthInfo struct {
+	// ClientCertificate is the path to a client cert file for TLS.
+	// +optional
+	ClientCertificate string `json:"client-certificate,omitempty"`
+	// ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate
+	// +optional
+	ClientCertificateData []byte `json:"client-certificate-data,omitempty"`
+	// ClientKey is the path to a client key file for TLS.
+	// +optional
+	ClientKey string `json:"client-key,omitempty"`
+	// ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey
+	// +optional
+	ClientKeyData []byte `json:"client-key-data,omitempty"`
+	// Token is the bearer token for authentication to the kubernetes cluster.
+	// +optional
+	Token string `json:"token,omitempty"`
+	// TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence.
+	// +optional
+	TokenFile string `json:"tokenFile,omitempty"`
+	// Impersonate is the username to impersonate. The name matches the flag.
+	// +optional
+	Impersonate string `json:"as,omitempty"`
+	// ImpersonateGroups is the groups to impersonate.
+	// +optional
+	ImpersonateGroups []string `json:"as-groups,omitempty"`
+	// ImpersonateUserExtra contains additional information for impersonated user.
+	// +optional
+	ImpersonateUserExtra map[string][]string `json:"as-user-extra,omitempty"`
+	// Username is the username for basic authentication to the kubernetes cluster.
+	// +optional
+	Username string `json:"username,omitempty"`
+	// Password is the password for basic authentication to the kubernetes cluster.
+	// +optional
+	Password string `json:"password,omitempty"`
+	// AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
+	// +optional
+	AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"`
+	// Exec specifies a custom exec-based authentication plugin for the kubernetes cluster.
+	// +optional
+	Exec *ExecConfig `json:"exec,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
+type Context struct {
+	// Cluster is the name of the cluster for this context
+	Cluster string `json:"cluster"`
+	// AuthInfo is the name of the authInfo for this context
+	AuthInfo string `json:"user"`
+	// Namespace is the default namespace to use on unspecified requests
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
+	// +optional
+	Extensions []NamedExtension `json:"extensions,omitempty"`
+}
+
+// NamedCluster relates nicknames to cluster information
+type NamedCluster struct {
+	// Name is the nickname for this Cluster
+	Name string `json:"name"`
+	// Cluster holds the cluster information
+	Cluster Cluster `json:"cluster"`
+}
+
+// NamedContext relates nicknames to context information
+type NamedContext struct {
+	// Name is the nickname for this Context
+	Name string `json:"name"`
+	// Context holds the context information
+	Context Context `json:"context"`
+}
+
+// NamedAuthInfo relates nicknames to auth information
+type NamedAuthInfo struct {
+	// Name is the nickname for this AuthInfo
+	Name string `json:"name"`
+	// AuthInfo holds the auth information
+	AuthInfo AuthInfo `json:"user"`
+}
+
+// NamedExtension relates nicknames to extension information
+type NamedExtension struct {
+	// Name is the nickname for this Extension
+	Name string `json:"name"`
+	// Extension holds the extension information
+	Extension runtime.RawExtension `json:"extension"`
+}
+
+// AuthProviderConfig holds the configuration for a specified auth provider.
+type AuthProviderConfig struct {
+	Name   string            `json:"name"`
+	Config map[string]string `json:"config"`
+}
+
+// ExecConfig specifies a command to provide client credentials. The command is exec'd
+// and outputs structured stdout holding credentials.
+//
+// See the client.authentication.k8s.io API group for specifications of the exact input
+// and output format
+type ExecConfig struct {
+	// Command to execute.
+	Command string `json:"command"`
+	// Arguments to pass to the command when executing it.
+	// +optional
+	Args []string `json:"args"`
+	// Env defines additional environment variables to expose to the process. These
+	// are unioned with the host's environment, as well as variables client-go uses
+	// to pass argument to the plugin.
+	// +optional
+	Env []ExecEnvVar `json:"env"`
+
+	// Preferred input version of the ExecInfo. The returned ExecCredentials MUST use
+	// the same encoding version as the input.
+	APIVersion string `json:"apiVersion,omitempty"`
+}
+
+// ExecEnvVar is used for setting environment variables when executing an exec-based
+// credential plugin.
+type ExecEnvVar struct {
+	Name  string `json:"name"`
+	Value string `json:"value"`
+}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..da519dfa
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.deepcopy.go
@@ -0,0 +1,348 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
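// A minimal sketch (not part of the vendored sources) of the v1-to-internal
// conversion registered in conversion.go above: the ordered named lists of the
// v1 types become maps keyed by name, and duplicate names fail the conversion.
// The cluster name and server are illustrative.
package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd/api"
	"k8s.io/client-go/tools/clientcmd/api/latest"
	clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

func main() {
	in := clientcmdv1.Config{
		CurrentContext: "dev",
		Clusters: []clientcmdv1.NamedCluster{
			{Name: "dev", Cluster: clientcmdv1.Cluster{Server: "https://example.com:6443"}},
		},
	}
	out := api.Config{}
	// latest.Scheme has both the internal and v1 types registered.
	if err := latest.Scheme.Convert(&in, &out, nil); err != nil {
		panic(err) // e.g. two NamedClusters sharing a Name
	}
	fmt.Println(out.Clusters["dev"].Server)
}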
+ +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthInfo) DeepCopyInto(out *AuthInfo) { + *out = *in + if in.ClientCertificateData != nil { + in, out := &in.ClientCertificateData, &out.ClientCertificateData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ClientKeyData != nil { + in, out := &in.ClientKeyData, &out.ClientKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ImpersonateGroups != nil { + in, out := &in.ImpersonateGroups, &out.ImpersonateGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ImpersonateUserExtra != nil { + in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.AuthProvider != nil { + in, out := &in.AuthProvider, &out.AuthProvider + *out = new(AuthProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecConfig) + (*in).DeepCopyInto(*out) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]NamedExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo. +func (in *AuthInfo) DeepCopy() *AuthInfo { + if in == nil { + return nil + } + out := new(AuthInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig. +func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig { + if in == nil { + return nil + } + out := new(AuthProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]NamedExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Config) DeepCopyInto(out *Config) { + *out = *in + in.Preferences.DeepCopyInto(&out.Preferences) + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make([]NamedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AuthInfos != nil { + in, out := &in.AuthInfos, &out.AuthInfos + *out = make([]NamedAuthInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Contexts != nil { + in, out := &in.Contexts, &out.Contexts + *out = make([]NamedContext, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]NamedExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Context) DeepCopyInto(out *Context) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]NamedExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context. +func (in *Context) DeepCopy() *Context { + if in == nil { + return nil + } + out := new(Context) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecConfig) DeepCopyInto(out *ExecConfig) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]ExecEnvVar, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig. +func (in *ExecConfig) DeepCopy() *ExecConfig { + if in == nil { + return nil + } + out := new(ExecConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar. +func (in *ExecEnvVar) DeepCopy() *ExecEnvVar { + if in == nil { + return nil + } + out := new(ExecEnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedAuthInfo) DeepCopyInto(out *NamedAuthInfo) { + *out = *in + in.AuthInfo.DeepCopyInto(&out.AuthInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedAuthInfo. 
+func (in *NamedAuthInfo) DeepCopy() *NamedAuthInfo { + if in == nil { + return nil + } + out := new(NamedAuthInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedCluster) DeepCopyInto(out *NamedCluster) { + *out = *in + in.Cluster.DeepCopyInto(&out.Cluster) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCluster. +func (in *NamedCluster) DeepCopy() *NamedCluster { + if in == nil { + return nil + } + out := new(NamedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedContext) DeepCopyInto(out *NamedContext) { + *out = *in + in.Context.DeepCopyInto(&out.Context) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedContext. +func (in *NamedContext) DeepCopy() *NamedContext { + if in == nil { + return nil + } + out := new(NamedContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedExtension) DeepCopyInto(out *NamedExtension) { + *out = *in + in.Extension.DeepCopyInto(&out.Extension) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedExtension. +func (in *NamedExtension) DeepCopy() *NamedExtension { + if in == nil { + return nil + } + out := new(NamedExtension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preferences) DeepCopyInto(out *Preferences) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]NamedExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences. +func (in *Preferences) DeepCopy() *Preferences { + if in == nil { + return nil + } + out := new(Preferences) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go new file mode 100644 index 00000000..1d3c11d8 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/auth_loaders.go @@ -0,0 +1,111 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + + "golang.org/x/crypto/ssh/terminal" + + clientauth "k8s.io/client-go/tools/auth" +) + +// AuthLoaders are used to build clientauth.Info objects. 
+type AuthLoader interface {
+	// LoadAuth takes a path to a config file and can then do anything it needs in order to return a valid clientauth.Info
+	LoadAuth(path string) (*clientauth.Info, error)
+}
+
+// default implementation of an AuthLoader
+type defaultAuthLoader struct{}
+
+// LoadAuth for defaultAuthLoader simply delegates to clientauth.LoadFromFile
+func (*defaultAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
+	return clientauth.LoadFromFile(path)
+}
+
+type PromptingAuthLoader struct {
+	reader io.Reader
+}
+
+// LoadAuth parses an AuthInfo object from a file path. It prompts the user and creates the file if it doesn't exist.
+func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
+	// Prompt for user/pass and write a file if none exists.
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		authPtr, err := a.Prompt()
+		auth := *authPtr
+		if err != nil {
+			return nil, err
+		}
+		data, err := json.Marshal(auth)
+		if err != nil {
+			return &auth, err
+		}
+		err = ioutil.WriteFile(path, data, 0600)
+		return &auth, err
+	}
+	authPtr, err := clientauth.LoadFromFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return authPtr, nil
+}
+
+// Prompt pulls the user and password from a reader
+func (a *PromptingAuthLoader) Prompt() (*clientauth.Info, error) {
+	var err error
+	auth := &clientauth.Info{}
+	auth.User, err = promptForString("Username", a.reader, true)
+	if err != nil {
+		return nil, err
+	}
+	auth.Password, err = promptForString("Password", nil, false)
+	if err != nil {
+		return nil, err
+	}
+	return auth, nil
+}
+
+func promptForString(field string, r io.Reader, show bool) (result string, err error) {
+	fmt.Printf("Please enter %s: ", field)
+	if show {
+		_, err = fmt.Fscan(r, &result)
+	} else {
+		var data []byte
+		if terminal.IsTerminal(int(os.Stdin.Fd())) {
+			data, err = terminal.ReadPassword(int(os.Stdin.Fd()))
+			result = string(data)
+		} else {
+			return "", fmt.Errorf("error reading input for %s", field)
+		}
+	}
+	return result, err
+}
+
+// NewPromptingAuthLoader is an AuthLoader that parses an AuthInfo object from a file path. It prompts the user and creates the file if it doesn't exist.
+func NewPromptingAuthLoader(reader io.Reader) *PromptingAuthLoader {
+	return &PromptingAuthLoader{reader}
+}
+
+// NewDefaultAuthLoader returns a default implementation of an AuthLoader that only reads from a config file
+func NewDefaultAuthLoader() AuthLoader {
+	return &defaultAuthLoader{}
+}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/client_config.go b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
new file mode 100644
index 00000000..9c6ac3b5
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/client_config.go
@@ -0,0 +1,561 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
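// A minimal sketch (not part of the vendored sources) of the default loader
// above: it only reads an existing clientauth.Info JSON file, while the
// prompting loader would ask for credentials when the file is missing. The
// path and contents are hypothetical.
package main

import (
	"fmt"
	"io/ioutil"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	const path = "/tmp/kube-auth.json" // hypothetical location
	if err := ioutil.WriteFile(path, []byte(`{"User":"admin"}`), 0600); err != nil {
		panic(err)
	}

	loader := clientcmd.NewDefaultAuthLoader()
	info, err := loader.LoadAuth(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.User) // admin
}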
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/url"
+	"os"
+	"strings"
+
+	"github.com/imdario/mergo"
+	"k8s.io/klog"
+
+	restclient "k8s.io/client-go/rest"
+	clientauth "k8s.io/client-go/tools/auth"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+var (
+	// ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
+	// DEPRECATED will be replaced
+	ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()}
+	// DefaultClientConfig represents the legacy behavior of this package for defaulting
+	// DEPRECATED will be replaced
+	DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{
+		ClusterDefaults: ClusterDefaults,
+	}, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
+)
+
+// getDefaultServer returns a default setting for DefaultClientConfig
+// DEPRECATED
+func getDefaultServer() string {
+	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
+		return server
+	}
+	return "http://localhost:8080"
+}
+
+// ClientConfig is used to make it easy to get an api server client
+type ClientConfig interface {
+	// RawConfig returns the merged result of all overrides
+	RawConfig() (clientcmdapi.Config, error)
+	// ClientConfig returns a complete client config
+	ClientConfig() (*restclient.Config, error)
+	// Namespace returns the namespace resulting from the merged
+	// result of all overrides and a boolean indicating if it was
+	// overridden
+	Namespace() (string, bool, error)
+	// ConfigAccess returns the rules for loading/persisting the config.
+	ConfigAccess() ConfigAccess
+}
+
+type PersistAuthProviderConfigForUser func(user string) restclient.AuthProviderConfigPersister
+
+type promptedCredentials struct {
+	username string
+	password string
+}
+
+// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
+type DirectClientConfig struct {
+	config         clientcmdapi.Config
+	contextName    string
+	overrides      *ConfigOverrides
+	fallbackReader io.Reader
+	configAccess   ConfigAccess
+	// promptedCredentials stores the credentials input by the user
+	promptedCredentials promptedCredentials
+}
+
+// NewDefaultClientConfig creates a DirectClientConfig using the config.CurrentContext as the context name
+func NewDefaultClientConfig(config clientcmdapi.Config, overrides *ConfigOverrides) ClientConfig {
+	return &DirectClientConfig{config, config.CurrentContext, overrides, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
+}
+
+// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
+func NewNonInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, configAccess ConfigAccess) ClientConfig {
+	return &DirectClientConfig{config, contextName, overrides, nil, configAccess, promptedCredentials{}}
+}
+
+// NewInteractiveClientConfig creates a DirectClientConfig using the passed context name and a reader in case auth information is not provided via files or flags
+func NewInteractiveClientConfig(config clientcmdapi.Config, contextName string, overrides *ConfigOverrides, fallbackReader io.Reader, configAccess ConfigAccess) ClientConfig {
+	return &DirectClientConfig{config, contextName, overrides, fallbackReader, configAccess, promptedCredentials{}}
+}
+
+// NewClientConfigFromBytes takes your kubeconfig and gives you back a ClientConfig
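+// For example (a minimal sketch; kubeconfigBytes is assumed to hold the raw kubeconfig contents):
+//
+//	cc, err := NewClientConfigFromBytes(kubeconfigBytes)
+//	if err != nil {
+//		// handle the error
+//	}
+//	restCfg, err := cc.ClientConfig()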
+func NewClientConfigFromBytes(configBytes []byte) (ClientConfig, error) {
+	config, err := Load(configBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return &DirectClientConfig{*config, "", &ConfigOverrides{}, nil, nil, promptedCredentials{}}, nil
+}
+
+// RESTConfigFromKubeConfig is a convenience method to give back a restconfig from your kubeconfig bytes.
+// For programmatic access, this is what you want 80% of the time
+func RESTConfigFromKubeConfig(configBytes []byte) (*restclient.Config, error) {
+	clientConfig, err := NewClientConfigFromBytes(configBytes)
+	if err != nil {
+		return nil, err
+	}
+	return clientConfig.ClientConfig()
+}
+
+func (config *DirectClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	return config.config, nil
+}
+
+// ClientConfig implements ClientConfig
+func (config *DirectClientConfig) ClientConfig() (*restclient.Config, error) {
+	// check that getAuthInfo, getContext, and getCluster do not return an error.
+	// Do this before checking if the current config is usable in the event that an
+	// AuthInfo, Context, or Cluster config with user-defined names is not found.
+	// This provides a user with the immediate cause for error if one is found
+	configAuthInfo, err := config.getAuthInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = config.getContext()
+	if err != nil {
+		return nil, err
+	}
+
+	configClusterInfo, err := config.getCluster()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := config.ConfirmUsable(); err != nil {
+		return nil, err
+	}
+
+	clientConfig := &restclient.Config{}
+	clientConfig.Host = configClusterInfo.Server
+
+	if len(config.overrides.Timeout) > 0 {
+		timeout, err := ParseTimeout(config.overrides.Timeout)
+		if err != nil {
+			return nil, err
+		}
+		clientConfig.Timeout = timeout
+	}
+
+	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
+		u.RawQuery = ""
+		u.Fragment = ""
+		clientConfig.Host = u.String()
+	}
+	if len(configAuthInfo.Impersonate) > 0 {
+		clientConfig.Impersonate = restclient.ImpersonationConfig{
+			UserName: configAuthInfo.Impersonate,
+			Groups:   configAuthInfo.ImpersonateGroups,
+			Extra:    configAuthInfo.ImpersonateUserExtra,
+		}
+	}
+
+	// only try to read the auth information if we are secure
+	if restclient.IsConfigTransportTLS(*clientConfig) {
+		var err error
+		var persister restclient.AuthProviderConfigPersister
+		if config.configAccess != nil {
+			authInfoName, _ := config.getAuthInfoName()
+			persister = PersisterForUser(config.configAccess, authInfoName)
+		}
+		userAuthPartialConfig, err := config.getUserIdentificationPartialConfig(configAuthInfo, config.fallbackReader, persister)
+		if err != nil {
+			return nil, err
+		}
+		mergo.MergeWithOverwrite(clientConfig, userAuthPartialConfig)
+
+		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
+		if err != nil {
+			return nil, err
+		}
+		mergo.MergeWithOverwrite(clientConfig, serverAuthPartialConfig)
+	}
+
+	return clientConfig, nil
+}
+
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for the server identification:
+// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. load the ~/.kubernetes_auth file as a default
+func getServerIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, configClusterInfo clientcmdapi.Cluster) (*restclient.Config, error) {
+	mergedConfig := &restclient.Config{}
+
+	// configClusterInfo holds the information identifying the server, as provided by the kubeconfig
+	configClientConfig := &restclient.Config{}
+	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
+	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
+	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
+	mergo.MergeWithOverwrite(mergedConfig, configClientConfig)
+
+	return mergedConfig, nil
+}
+
+// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
+// both, so we have to split the objects and merge them separately.
+// We want this order of precedence for user identification:
+// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
+// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
+// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
+// 4. if there is not enough information to identify the user, prompt if possible
+func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthInfo clientcmdapi.AuthInfo, fallbackReader io.Reader, persistAuthConfig restclient.AuthProviderConfigPersister) (*restclient.Config, error) {
+	mergedConfig := &restclient.Config{}
+
+	// blindly overwrite existing values based on precedence
+	if len(configAuthInfo.Token) > 0 {
+		mergedConfig.BearerToken = configAuthInfo.Token
+		mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
+	} else if len(configAuthInfo.TokenFile) > 0 {
+		tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
+		if err != nil {
+			return nil, err
+		}
+		mergedConfig.BearerToken = string(tokenBytes)
+		mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
+	}
+	if len(configAuthInfo.Impersonate) > 0 {
+		mergedConfig.Impersonate = restclient.ImpersonationConfig{
+			UserName: configAuthInfo.Impersonate,
+			Groups:   configAuthInfo.ImpersonateGroups,
+			Extra:    configAuthInfo.ImpersonateUserExtra,
+		}
+	}
+	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
+		mergedConfig.CertFile = configAuthInfo.ClientCertificate
+		mergedConfig.CertData = configAuthInfo.ClientCertificateData
+		mergedConfig.KeyFile = configAuthInfo.ClientKey
+		mergedConfig.KeyData = configAuthInfo.ClientKeyData
+	}
+	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
+		mergedConfig.Username = configAuthInfo.Username
+		mergedConfig.Password = configAuthInfo.Password
+	}
+	if configAuthInfo.AuthProvider != nil {
+		mergedConfig.AuthProvider = configAuthInfo.AuthProvider
+		mergedConfig.AuthConfigPersister = persistAuthConfig
+	}
+	if configAuthInfo.Exec != nil {
+		mergedConfig.ExecProvider = configAuthInfo.Exec
+	}
+
+	// if there still isn't enough information to authenticate the user, try prompting
+	if !canIdentifyUser(*mergedConfig) && (fallbackReader != nil) {
+		if len(config.promptedCredentials.username) > 0 && len(config.promptedCredentials.password) > 0 {
+			mergedConfig.Username = config.promptedCredentials.username
+			mergedConfig.Password = config.promptedCredentials.password
+			return mergedConfig, nil
+		}
+		prompter := NewPromptingAuthLoader(fallbackReader)
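+		// No cached credentials yet: prompt once via the fallback reader, then
+		// merge the prompted values underneath anything that is already set.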
+		promptedAuthInfo, err := prompter.Prompt()
+		if err != nil {
+			return nil, err
+		}
+		promptedConfig := makeUserIdentificationConfig(*promptedAuthInfo)
+		previouslyMergedConfig := mergedConfig
+		mergedConfig = &restclient.Config{}
+		mergo.MergeWithOverwrite(mergedConfig, promptedConfig)
+		mergo.MergeWithOverwrite(mergedConfig, previouslyMergedConfig)
+		config.promptedCredentials.username = mergedConfig.Username
+		config.promptedCredentials.password = mergedConfig.Password
+	}
+
+	return mergedConfig, nil
+}
+
+// makeUserIdentificationConfig returns a client.Config capable of being merged using mergo for only user identification information
+func makeUserIdentificationConfig(info clientauth.Info) *restclient.Config {
+	config := &restclient.Config{}
+	config.Username = info.User
+	config.Password = info.Password
+	config.CertFile = info.CertFile
+	config.KeyFile = info.KeyFile
+	config.BearerToken = info.BearerToken
+	return config
+}
+
+func canIdentifyUser(config restclient.Config) bool {
+	return len(config.Username) > 0 ||
+		(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
+		len(config.BearerToken) > 0 ||
+		config.AuthProvider != nil ||
+		config.ExecProvider != nil
+}
+
+// Namespace implements ClientConfig
+func (config *DirectClientConfig) Namespace() (string, bool, error) {
+	if config.overrides != nil && config.overrides.Context.Namespace != "" {
+		// In the event we have an empty config but we do have a namespace override, we should return
+		// the namespace override instead of having config.ConfirmUsable() return an error. This allows
+		// things like in-cluster clients to execute `kubectl get pods --namespace=foo` and have the
+		// --namespace flag honored instead of being ignored.
+		return config.overrides.Context.Namespace, true, nil
+	}
+
+	if err := config.ConfirmUsable(); err != nil {
+		return "", false, err
+	}
+
+	configContext, err := config.getContext()
+	if err != nil {
+		return "", false, err
+	}
+
+	if len(configContext.Namespace) == 0 {
+		return "default", false, nil
+	}
+
+	return configContext.Namespace, false, nil
+}
+
+// ConfigAccess implements ClientConfig
+func (config *DirectClientConfig) ConfigAccess() ConfigAccess {
+	return config.configAccess
+}
+
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func (config *DirectClientConfig) ConfirmUsable() error {
+	validationErrors := make([]error, 0)
+
+	var contextName string
+	if len(config.contextName) != 0 {
+		contextName = config.contextName
+	} else {
+		contextName = config.config.CurrentContext
+	}
+
+	if len(contextName) > 0 {
+		_, exists := config.config.Contexts[contextName]
+		if !exists {
+			validationErrors = append(validationErrors, &errContextNotFound{contextName})
+		}
+	}
+
+	authInfoName, _ := config.getAuthInfoName()
+	authInfo, _ := config.getAuthInfo()
+	validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...)
+	clusterName, _ := config.getClusterName()
+	cluster, _ := config.getCluster()
+	validationErrors = append(validationErrors, validateClusterInfo(clusterName, cluster)...)
+ // when direct client config is specified, and our only error is that no server is defined, we should + // return a standard "no config" error + if len(validationErrors) == 1 && validationErrors[0] == ErrEmptyCluster { + return newErrConfigurationInvalid([]error{ErrEmptyConfig}) + } + return newErrConfigurationInvalid(validationErrors) +} + +// getContextName returns the default, or user-set context name, and a boolean that indicates +// whether the default context name has been overwritten by a user-set flag, or left as its default value +func (config *DirectClientConfig) getContextName() (string, bool) { + if len(config.overrides.CurrentContext) != 0 { + return config.overrides.CurrentContext, true + } + if len(config.contextName) != 0 { + return config.contextName, false + } + + return config.config.CurrentContext, false +} + +// getAuthInfoName returns a string containing the current authinfo name for the current context, +// and a boolean indicating whether the default authInfo name is overwritten by a user-set flag, or +// left as its default value +func (config *DirectClientConfig) getAuthInfoName() (string, bool) { + if len(config.overrides.Context.AuthInfo) != 0 { + return config.overrides.Context.AuthInfo, true + } + context, _ := config.getContext() + return context.AuthInfo, false +} + +// getClusterName returns a string containing the default, or user-set cluster name, and a boolean +// indicating whether the default clusterName has been overwritten by a user-set flag, or left as +// its default value +func (config *DirectClientConfig) getClusterName() (string, bool) { + if len(config.overrides.Context.Cluster) != 0 { + return config.overrides.Context.Cluster, true + } + context, _ := config.getContext() + return context.Cluster, false +} + +// getContext returns the clientcmdapi.Context, or an error if a required context is not found. +func (config *DirectClientConfig) getContext() (clientcmdapi.Context, error) { + contexts := config.config.Contexts + contextName, required := config.getContextName() + + mergedContext := clientcmdapi.NewContext() + if configContext, exists := contexts[contextName]; exists { + mergo.MergeWithOverwrite(mergedContext, configContext) + } else if required { + return clientcmdapi.Context{}, fmt.Errorf("context %q does not exist", contextName) + } + mergo.MergeWithOverwrite(mergedContext, config.overrides.Context) + + return *mergedContext, nil +} + +// getAuthInfo returns the clientcmdapi.AuthInfo, or an error if a required auth info is not found. +func (config *DirectClientConfig) getAuthInfo() (clientcmdapi.AuthInfo, error) { + authInfos := config.config.AuthInfos + authInfoName, required := config.getAuthInfoName() + + mergedAuthInfo := clientcmdapi.NewAuthInfo() + if configAuthInfo, exists := authInfos[authInfoName]; exists { + mergo.MergeWithOverwrite(mergedAuthInfo, configAuthInfo) + } else if required { + return clientcmdapi.AuthInfo{}, fmt.Errorf("auth info %q does not exist", authInfoName) + } + mergo.MergeWithOverwrite(mergedAuthInfo, config.overrides.AuthInfo) + + return *mergedAuthInfo, nil +} + +// getCluster returns the clientcmdapi.Cluster, or an error if a required cluster is not found. 
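+// Merge precedence runs from lowest to highest: the overrides' ClusterDefaults,
+// then the cluster stanza selected from the kubeconfig, then the overrides' ClusterInfo.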
+func (config *DirectClientConfig) getCluster() (clientcmdapi.Cluster, error) { + clusterInfos := config.config.Clusters + clusterInfoName, required := config.getClusterName() + + mergedClusterInfo := clientcmdapi.NewCluster() + mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterDefaults) + if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists { + mergo.MergeWithOverwrite(mergedClusterInfo, configClusterInfo) + } else if required { + return clientcmdapi.Cluster{}, fmt.Errorf("cluster %q does not exist", clusterInfoName) + } + mergo.MergeWithOverwrite(mergedClusterInfo, config.overrides.ClusterInfo) + // An override of --insecure-skip-tls-verify=true and no accompanying CA/CA data should clear already-set CA/CA data + // otherwise, a kubeconfig containing a CA reference would return an error that "CA and insecure-skip-tls-verify couldn't both be set" + caLen := len(config.overrides.ClusterInfo.CertificateAuthority) + caDataLen := len(config.overrides.ClusterInfo.CertificateAuthorityData) + if config.overrides.ClusterInfo.InsecureSkipTLSVerify && caLen == 0 && caDataLen == 0 { + mergedClusterInfo.CertificateAuthority = "" + mergedClusterInfo.CertificateAuthorityData = nil + } + + return *mergedClusterInfo, nil +} + +// inClusterClientConfig makes a config that will work from within a kubernetes cluster container environment. +// Can take options overrides for flags explicitly provided to the command inside the cluster container. +type inClusterClientConfig struct { + overrides *ConfigOverrides + inClusterConfigProvider func() (*restclient.Config, error) +} + +var _ ClientConfig = &inClusterClientConfig{} + +func (config *inClusterClientConfig) RawConfig() (clientcmdapi.Config, error) { + return clientcmdapi.Config{}, fmt.Errorf("inCluster environment config doesn't support multiple clusters") +} + +func (config *inClusterClientConfig) ClientConfig() (*restclient.Config, error) { + if config.inClusterConfigProvider == nil { + config.inClusterConfigProvider = restclient.InClusterConfig + } + + icc, err := config.inClusterConfigProvider() + if err != nil { + return nil, err + } + + // in-cluster configs only takes a host, token, or CA file + // if any of them were individually provided, overwrite anything else + if config.overrides != nil { + if server := config.overrides.ClusterInfo.Server; len(server) > 0 { + icc.Host = server + } + if len(config.overrides.AuthInfo.Token) > 0 || len(config.overrides.AuthInfo.TokenFile) > 0 { + icc.BearerToken = config.overrides.AuthInfo.Token + icc.BearerTokenFile = config.overrides.AuthInfo.TokenFile + } + if certificateAuthorityFile := config.overrides.ClusterInfo.CertificateAuthority; len(certificateAuthorityFile) > 0 { + icc.TLSClientConfig.CAFile = certificateAuthorityFile + } + } + + return icc, err +} + +func (config *inClusterClientConfig) Namespace() (string, bool, error) { + // This way assumes you've set the POD_NAMESPACE environment variable using the downward API. 
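+	// A pod would typically surface it like this in its spec (sketch):
+	//
+	//   env:
+	//   - name: POD_NAMESPACE
+	//     valueFrom:
+	//       fieldRef:
+	//         fieldPath: metadata.namespace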
+ // This check has to be done first for backwards compatibility with the way InClusterConfig was originally set up + if ns := os.Getenv("POD_NAMESPACE"); ns != "" { + return ns, false, nil + } + + // Fall back to the namespace associated with the service account token, if available + if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { + if ns := strings.TrimSpace(string(data)); len(ns) > 0 { + return ns, false, nil + } + } + + return "default", false, nil +} + +func (config *inClusterClientConfig) ConfigAccess() ConfigAccess { + return NewDefaultClientConfigLoadingRules() +} + +// Possible returns true if loading an inside-kubernetes-cluster is possible. +func (config *inClusterClientConfig) Possible() bool { + fi, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token") + return os.Getenv("KUBERNETES_SERVICE_HOST") != "" && + os.Getenv("KUBERNETES_SERVICE_PORT") != "" && + err == nil && !fi.IsDir() +} + +// BuildConfigFromFlags is a helper function that builds configs from a master +// url or a kubeconfig filepath. These are passed in as command line flags for cluster +// components. Warnings should reflect this usage. If neither masterUrl or kubeconfigPath +// are passed in we fallback to inClusterConfig. If inClusterConfig fails, we fallback +// to the default config. +func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) { + if kubeconfigPath == "" && masterUrl == "" { + klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.") + kubeconfig, err := restclient.InClusterConfig() + if err == nil { + return kubeconfig, nil + } + klog.Warning("error creating inClusterConfig, falling back to default config: ", err) + } + return NewNonInteractiveDeferredLoadingClientConfig( + &ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, + &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}).ClientConfig() +} + +// BuildConfigFromKubeconfigGetter is a helper function that builds configs from a master +// url and a kubeconfigGetter. +func BuildConfigFromKubeconfigGetter(masterUrl string, kubeconfigGetter KubeconfigGetter) (*restclient.Config, error) { + // TODO: We do not need a DeferredLoader here. Refactor code and see if we can use DirectClientConfig here. + cc := NewNonInteractiveDeferredLoadingClientConfig( + &ClientConfigGetter{kubeconfigGetter: kubeconfigGetter}, + &ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterUrl}}) + return cc.ClientConfig() +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go new file mode 100644 index 00000000..b8cc3968 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go @@ -0,0 +1,490 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package clientcmd
+
+import (
+	"errors"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"sort"
+
+	"k8s.io/klog"
+
+	restclient "k8s.io/client-go/rest"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+// ConfigAccess is used by subcommands and methods in this package to load and modify the appropriate config files
+type ConfigAccess interface {
+	// GetLoadingPrecedence returns the slice of files that should be used for loading and inspecting the config
+	GetLoadingPrecedence() []string
+	// GetStartingConfig returns the config that subcommands should be operating against. It may or may not be merged depending on loading rules
+	GetStartingConfig() (*clientcmdapi.Config, error)
+	// GetDefaultFilename returns the name of the file you should write into (create if necessary), if you're trying to create a new stanza as opposed to updating an existing one.
+	GetDefaultFilename() string
+	// IsExplicitFile indicates whether or not this command is interested in exactly one file. This implementation only ever does that via a flag, but implementations that handle local, global, and flags may have more
+	IsExplicitFile() bool
+	// GetExplicitFile returns the particular file this command is operating against. This implementation only ever has one, but implementations that handle local, global, and flags may have more
+	GetExplicitFile() string
+}
+
+type PathOptions struct {
+	// GlobalFile is the full path to the file to load as the global (final) option
+	GlobalFile string
+	// EnvVar is the env var name that points to the list of kubeconfig files to load
+	EnvVar string
+	// ExplicitFileFlag is the name of the flag to use for prompting for the kubeconfig file
+	ExplicitFileFlag string
+
+	// GlobalFileSubpath is an optional value used for displaying help
+	GlobalFileSubpath string
+
+	LoadingRules *ClientConfigLoadingRules
+}
+
+func (o *PathOptions) GetEnvVarFiles() []string {
+	if len(o.EnvVar) == 0 {
+		return []string{}
+	}
+
+	envVarValue := os.Getenv(o.EnvVar)
+	if len(envVarValue) == 0 {
+		return []string{}
+	}
+
+	fileList := filepath.SplitList(envVarValue)
+	// prevent the same path from being loaded multiple times
+	return deduplicate(fileList)
+}
+
+func (o *PathOptions) GetLoadingPrecedence() []string {
+	if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
+		return envVarFiles
+	}
+
+	return []string{o.GlobalFile}
+}
+
+func (o *PathOptions) GetStartingConfig() (*clientcmdapi.Config, error) {
+	// don't mutate the original
+	loadingRules := *o.LoadingRules
+	loadingRules.Precedence = o.GetLoadingPrecedence()
+
+	clientConfig := NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, &ConfigOverrides{})
+	rawConfig, err := clientConfig.RawConfig()
+	if os.IsNotExist(err) {
+		return clientcmdapi.NewConfig(), nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &rawConfig, nil
+}
+
+func (o *PathOptions) GetDefaultFilename() string {
+	if o.IsExplicitFile() {
+		return o.GetExplicitFile()
+	}
+
+	if envVarFiles := o.GetEnvVarFiles(); len(envVarFiles) > 0 {
+		if len(envVarFiles) == 1 {
+			return envVarFiles[0]
+		}
+
+		// if any of the envvar files already exists, return it
+		for _, envVarFile := range envVarFiles {
+			if _, err := os.Stat(envVarFile); err == nil {
+				return envVarFile
+			}
+		}
+
+		// otherwise, return the last one in the list
+		return envVarFiles[len(envVarFiles)-1]
+	}
+
+	return o.GlobalFile
+}
+
+func (o *PathOptions) IsExplicitFile() bool {
+	if len(o.LoadingRules.ExplicitPath) > 0 {
+		return true
+	}
+
+	return false
+}
+
+func (o *PathOptions) GetExplicitFile() string {
+	return o.LoadingRules.ExplicitPath
+}
+
+func NewDefaultPathOptions() *PathOptions {
+	ret := &PathOptions{
+		GlobalFile:       RecommendedHomeFile,
+		EnvVar:           RecommendedConfigPathEnvVar,
+		ExplicitFileFlag: RecommendedConfigPathFlag,
+
+		GlobalFileSubpath: path.Join(RecommendedHomeDir, RecommendedFileName),
+
+		LoadingRules: NewDefaultClientConfigLoadingRules(),
+	}
+	ret.LoadingRules.DoNotResolvePaths = true
+
+	return ret
+}
+
+// ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, and uses the LocationOfOrigin if specified or
+// the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow.
+// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values
+// (no nil strings), we're forced to have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference,
+// which means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any
+// modified element.
+func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error {
+	possibleSources := configAccess.GetLoadingPrecedence()
+	// sort the possible kubeconfig files so we always "lock" in the same order
+	// to avoid deadlock (note: this can fail w/ symlinks, but... come on).
+	sort.Strings(possibleSources)
+	for _, filename := range possibleSources {
+		if err := lockFile(filename); err != nil {
+			return err
+		}
+		defer unlockFile(filename)
+	}
+
+	startingConfig, err := configAccess.GetStartingConfig()
+	if err != nil {
+		return err
+	}
+
+	// We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file.
+	// Special case the test for current context and preferences since those always write to the default file.
+	if reflect.DeepEqual(*startingConfig, newConfig) {
+		// nothing to do
+		return nil
+	}
+
+	if startingConfig.CurrentContext != newConfig.CurrentContext {
+		if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil {
+			return err
+		}
+	}
+
+	if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) {
+		if err := writePreferences(configAccess, newConfig.Preferences); err != nil {
+			return err
+		}
+	}
+
+	// Search every cluster, authInfo, and context.
First from new to old for differences, then from old to new for deletions + for key, cluster := range newConfig.Clusters { + startingCluster, exists := startingConfig.Clusters[key] + if !reflect.DeepEqual(cluster, startingCluster) || !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + t := *cluster + + configToWrite.Clusters[key] = &t + configToWrite.Clusters[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { + return err + } + } + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + // seenConfigs stores a map of config source filenames to computed config objects + seenConfigs := map[string]*clientcmdapi.Config{} + + for key, context := range newConfig.Contexts { + startingContext, exists := startingConfig.Contexts[key] + if !reflect.DeepEqual(context, startingContext) || !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + // we only obtain a fresh config object from its source file + // if we have not seen it already - this prevents us from + // reading and writing to the same number of files repeatedly + // when multiple / all contexts share the same destination file. + configToWrite, seen := seenConfigs[destinationFile] + if !seen { + var err error + configToWrite, err = getConfigFromFile(destinationFile) + if err != nil { + return err + } + seenConfigs[destinationFile] = configToWrite + } + + configToWrite.Contexts[key] = context + } + } + + // actually persist config object changes + for destinationFile, configToWrite := range seenConfigs { + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + + for key, authInfo := range newConfig.AuthInfos { + startingAuthInfo, exists := startingConfig.AuthInfos[key] + if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + t := *authInfo + configToWrite.AuthInfos[key] = &t + configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { + return err + } + } + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, cluster := range startingConfig.Clusters { + if _, exists := newConfig.Clusters[key]; !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != nil { + return err + } + delete(configToWrite.Clusters, key) + + if err := WriteToFile(*configToWrite, destinationFile); err != nil { + return err + } + } + } + + for key, context := range startingConfig.Contexts { + if _, exists := newConfig.Contexts[key]; !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } + + configToWrite, err := getConfigFromFile(destinationFile) + if err != 
nil {
+				return err
+			}
+			delete(configToWrite.Contexts, key)
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	for key, authInfo := range startingConfig.AuthInfos {
+		if _, exists := newConfig.AuthInfos[key]; !exists {
+			destinationFile := authInfo.LocationOfOrigin
+			if len(destinationFile) == 0 {
+				destinationFile = configAccess.GetDefaultFilename()
+			}
+
+			configToWrite, err := getConfigFromFile(destinationFile)
+			if err != nil {
+				return err
+			}
+			delete(configToWrite.AuthInfos, key)
+
+			if err := WriteToFile(*configToWrite, destinationFile); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func PersisterForUser(configAccess ConfigAccess, user string) restclient.AuthProviderConfigPersister {
+	return &persister{configAccess, user}
+}
+
+type persister struct {
+	configAccess ConfigAccess
+	user         string
+}
+
+func (p *persister) Persist(config map[string]string) error {
+	newConfig, err := p.configAccess.GetStartingConfig()
+	if err != nil {
+		return err
+	}
+	authInfo, ok := newConfig.AuthInfos[p.user]
+	if ok && authInfo.AuthProvider != nil {
+		authInfo.AuthProvider.Config = config
+		// propagate the write error instead of silently dropping it
+		return ModifyConfig(p.configAccess, *newConfig, false)
+	}
+	return nil
+}
+
+// writeCurrentContext takes three possible paths.
+// If newCurrentContext is the same as the startingConfig's current context, then we exit.
+// If newCurrentContext has a value, then that value is written into the default destination file.
+// If newCurrentContext is empty, then we find the config file that is setting the CurrentContext and clear the value from that file
+func writeCurrentContext(configAccess ConfigAccess, newCurrentContext string) error {
+	if startingConfig, err := configAccess.GetStartingConfig(); err != nil {
+		return err
+	} else if startingConfig.CurrentContext == newCurrentContext {
+		return nil
+	}
+
+	if configAccess.IsExplicitFile() {
+		file := configAccess.GetExplicitFile()
+		currConfig, err := getConfigFromFile(file)
+		if err != nil {
+			return err
+		}
+		currConfig.CurrentContext = newCurrentContext
+		if err := WriteToFile(*currConfig, file); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	if len(newCurrentContext) > 0 {
+		destinationFile := configAccess.GetDefaultFilename()
+		config, err := getConfigFromFile(destinationFile)
+		if err != nil {
+			return err
+		}
+		config.CurrentContext = newCurrentContext
+
+		if err := WriteToFile(*config, destinationFile); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// we're supposed to be clearing the current context.
We need to find the first spot in the chain that is setting it and clear it + for _, file := range configAccess.GetLoadingPrecedence() { + if _, err := os.Stat(file); err == nil { + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + + if len(currConfig.CurrentContext) > 0 { + currConfig.CurrentContext = newCurrentContext + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + } + + return errors.New("no config found to write context") +} + +func writePreferences(configAccess ConfigAccess, newPrefs clientcmdapi.Preferences) error { + if startingConfig, err := configAccess.GetStartingConfig(); err != nil { + return err + } else if reflect.DeepEqual(startingConfig.Preferences, newPrefs) { + return nil + } + + if configAccess.IsExplicitFile() { + file := configAccess.GetExplicitFile() + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + + for _, file := range configAccess.GetLoadingPrecedence() { + currConfig, err := getConfigFromFile(file) + if err != nil { + return err + } + + if !reflect.DeepEqual(currConfig.Preferences, newPrefs) { + currConfig.Preferences = newPrefs + if err := WriteToFile(*currConfig, file); err != nil { + return err + } + + return nil + } + } + + return errors.New("no config found to write preferences") +} + +// getConfigFromFile tries to read a kubeconfig file and if it can't, returns an error. One exception, missing files result in empty configs, not an error. +func getConfigFromFile(filename string) (*clientcmdapi.Config, error) { + config, err := LoadFromFile(filename) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if config == nil { + config = clientcmdapi.NewConfig() + } + return config, nil +} + +// GetConfigFromFileOrDie tries to read a kubeconfig file and if it can't, it calls exit. One exception, missing files result in empty configs, not an exit +func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config { + config, err := getConfigFromFile(filename) + if err != nil { + klog.FatalDepth(1, err) + } + + return config +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/doc.go new file mode 100644 index 00000000..424311ee --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/doc.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package clientcmd provides one stop shopping for building a working client from a fixed config, +from a .kubeconfig file, from command line flags, or from any merged combination. 
+
+Sample usage from merged .kubeconfig files (local directory, home directory):
+
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	// if you want to change the loading rules (which files in which order), you can do so here
+
+	configOverrides := &clientcmd.ConfigOverrides{}
+	// if you want to change override values or bind them to flags, there are methods to help you
+
+	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
+	config, err := kubeConfig.ClientConfig()
+	if err != nil {
+		// Do something
+	}
+	client, err := kubernetes.NewForConfig(config)
+	// ...
+*/
+package clientcmd // import "k8s.io/client-go/tools/clientcmd"
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/flag.go b/vendor/k8s.io/client-go/tools/clientcmd/flag.go
new file mode 100644
index 00000000..8d60d201
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/flag.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+// transformingStringValue implements pflag.Value to store string values,
+// allowing transforming them while being set
+type transformingStringValue struct {
+	target      *string
+	transformer func(string) (string, error)
+}
+
+func newTransformingStringValue(val string, target *string, transformer func(string) (string, error)) *transformingStringValue {
+	*target = val
+	return &transformingStringValue{
+		target:      target,
+		transformer: transformer,
+	}
+}
+
+func (t *transformingStringValue) Set(val string) error {
+	val, err := t.transformer(val)
+	if err != nil {
+		return err
+	}
+	*t.target = val
+	return nil
+}
+
+func (t *transformingStringValue) Type() string {
+	return "string"
+}
+
+func (t *transformingStringValue) String() string {
+	return *t.target
+}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/helpers.go
new file mode 100644
index 00000000..b609d1a7
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/helpers.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package clientcmd
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+)
+
+// ParseTimeout returns a parsed duration from a string.
+// A duration string value must be a positive integer, optionally followed by a corresponding time unit (s|m|h).
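+// For example (sketch):
+//
+//	ParseTimeout("30") // 30 * time.Second
+//	ParseTimeout("2m") // 2 * time.Minute, via time.ParseDuration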
+func ParseTimeout(duration string) (time.Duration, error) { + if i, err := strconv.ParseInt(duration, 10, 64); err == nil && i >= 0 { + return (time.Duration(i) * time.Second), nil + } + if requestTimeout, err := time.ParseDuration(duration); err == nil { + return requestTimeout, nil + } + return 0, fmt.Errorf("Invalid timeout value. Timeout must be a single integer in seconds, or an integer followed by a corresponding time unit (e.g. 1s | 2m | 3h)") +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/loader.go b/vendor/k8s.io/client-go/tools/clientcmd/loader.go new file mode 100644 index 00000000..4e37e792 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/loader.go @@ -0,0 +1,649 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + goruntime "runtime" + "strings" + + "github.com/imdario/mergo" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest" + "k8s.io/client-go/util/homedir" +) + +const ( + RecommendedConfigPathFlag = "kubeconfig" + RecommendedConfigPathEnvVar = "KUBECONFIG" + RecommendedHomeDir = ".kube" + RecommendedFileName = "config" + RecommendedSchemaName = "schema" +) + +var ( + RecommendedConfigDir = path.Join(homedir.HomeDir(), RecommendedHomeDir) + RecommendedHomeFile = path.Join(RecommendedConfigDir, RecommendedFileName) + RecommendedSchemaFile = path.Join(RecommendedConfigDir, RecommendedSchemaName) +) + +// currentMigrationRules returns a map that holds the history of recommended home directories used in previous versions. +// Any future changes to RecommendedHomeFile and related are expected to add a migration rule here, in order to make +// sure existing config files are migrated to their new locations properly. +func currentMigrationRules() map[string]string { + oldRecommendedHomeFile := path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig") + oldRecommendedWindowsHomeFile := path.Join(os.Getenv("HOME"), RecommendedHomeDir, RecommendedFileName) + + migrationRules := map[string]string{} + migrationRules[RecommendedHomeFile] = oldRecommendedHomeFile + if goruntime.GOOS == "windows" { + migrationRules[RecommendedHomeFile] = oldRecommendedWindowsHomeFile + } + return migrationRules +} + +type ClientConfigLoader interface { + ConfigAccess + // IsDefaultConfig returns true if the returned config matches the defaults. + IsDefaultConfig(*restclient.Config) bool + // Load returns the latest config + Load() (*clientcmdapi.Config, error) +} + +type KubeconfigGetter func() (*clientcmdapi.Config, error) + +type ClientConfigGetter struct { + kubeconfigGetter KubeconfigGetter +} + +// ClientConfigGetter implements the ClientConfigLoader interface. 
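+//
+// A KubeconfigGetter can stand in wherever a kubeconfig would otherwise be read
+// from disk, e.g. (a minimal sketch; rawBytes is assumed to hold kubeconfig data):
+//
+//	getter := func() (*clientcmdapi.Config, error) { return Load(rawBytes) }
+//	cfg, err := BuildConfigFromKubeconfigGetter("", getter)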
+var _ ClientConfigLoader = &ClientConfigGetter{}
+
+func (g *ClientConfigGetter) Load() (*clientcmdapi.Config, error) {
+	return g.kubeconfigGetter()
+}
+
+func (g *ClientConfigGetter) GetLoadingPrecedence() []string {
+	return nil
+}
+func (g *ClientConfigGetter) GetStartingConfig() (*clientcmdapi.Config, error) {
+	return g.kubeconfigGetter()
+}
+func (g *ClientConfigGetter) GetDefaultFilename() string {
+	return ""
+}
+func (g *ClientConfigGetter) IsExplicitFile() bool {
+	return false
+}
+func (g *ClientConfigGetter) GetExplicitFile() string {
+	return ""
+}
+func (g *ClientConfigGetter) IsDefaultConfig(config *restclient.Config) bool {
+	return false
+}
+
+// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config.
+// Callers can put the chain together however they want, but we'd recommend:
+// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath.
+// ExplicitPath is special, because if a user specifically requests a certain file be used, an error is reported if this file is not present.
+type ClientConfigLoadingRules struct {
+	ExplicitPath string
+	Precedence   []string
+
+	// MigrationRules is a map of destination files to source files. If a destination file is not present, then the source file is checked.
+	// If the source file is present, then it is copied to the destination file BEFORE any further loading happens.
+	MigrationRules map[string]string
+
+	// DoNotResolvePaths indicates whether or not to resolve paths with respect to the originating files. This is phrased as a negative so
+	// that a default object that doesn't set this will usually get the behavior it wants.
+	DoNotResolvePaths bool
+
+	// DefaultClientConfig is an optional field indicating what rules to use to calculate a default configuration.
+	// This should match the overrides passed in to ClientConfig loader.
+	DefaultClientConfig ClientConfig
+
+	// WarnIfAllMissing indicates whether the configuration files pointed to by the KUBECONFIG environment variable are present or not.
+	// In case of missing files, it warns the user about the missing files.
+	WarnIfAllMissing bool
+}
+
+// ClientConfigLoadingRules implements the ClientConfigLoader interface.
+var _ ClientConfigLoader = &ClientConfigLoadingRules{}
+
+// NewDefaultClientConfigLoadingRules returns a ClientConfigLoadingRules object with default fields filled in. You are not required to
+// use this constructor
+func NewDefaultClientConfigLoadingRules() *ClientConfigLoadingRules {
+	chain := []string{}
+	warnIfAllMissing := false
+
+	envVarFiles := os.Getenv(RecommendedConfigPathEnvVar)
+	if len(envVarFiles) != 0 {
+		fileList := filepath.SplitList(envVarFiles)
+		// prevent the same path from being loaded multiple times
+		chain = append(chain, deduplicate(fileList)...)
+		warnIfAllMissing = true
+
+	} else {
+		chain = append(chain, RecommendedHomeFile)
+	}
+
+	return &ClientConfigLoadingRules{
+		Precedence:       chain,
+		MigrationRules:   currentMigrationRules(),
+		WarnIfAllMissing: warnIfAllMissing,
+	}
+}
+
+// Load starts by running the MigrationRules and then
+// takes the loading rules and returns a Config object based on the following rules:
+// if ExplicitPath is set, return the unmerged explicit file;
+// otherwise, return a merged config based on the Precedence slice.
+// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
+// Read errors or files with non-deserializable content produce errors.
+// The first file to set a particular map key wins, and that map key's value is never changed.
+// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
+// This results in some odd-looking logic to merge in one direction, merge in the other, and then merge the two.
+// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
+// non-conflicting entries from the second file's "red-user" are discarded.
+// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
+// and only absolute file paths are returned.
+func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
+	if err := rules.Migrate(); err != nil {
+		return nil, err
+	}
+
+	errlist := []error{}
+	missingList := []string{}
+
+	kubeConfigFiles := []string{}
+
+	// Make sure a file we were explicitly told to use exists
+	if len(rules.ExplicitPath) > 0 {
+		if _, err := os.Stat(rules.ExplicitPath); os.IsNotExist(err) {
+			return nil, err
+		}
+		kubeConfigFiles = append(kubeConfigFiles, rules.ExplicitPath)
+
+	} else {
+		kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
+	}
+
+	kubeconfigs := []*clientcmdapi.Config{}
+	// read and cache the config files so that we only look at them once
+	for _, filename := range kubeConfigFiles {
+		if len(filename) == 0 {
+			// no work to do
+			continue
+		}
+
+		config, err := LoadFromFile(filename)
+
+		if os.IsNotExist(err) {
+			// skip missing files
+			// Add to the missing list to produce a warning
+			missingList = append(missingList, filename)
+			continue
+		}
+
+		if err != nil {
+			errlist = append(errlist, fmt.Errorf("error loading config file \"%s\": %v", filename, err))
+			continue
+		}
+
+		kubeconfigs = append(kubeconfigs, config)
+	}
+
+	if rules.WarnIfAllMissing && len(missingList) > 0 && len(kubeconfigs) == 0 {
+		klog.Warningf("Config not found: %s", strings.Join(missingList, ", "))
+	}
+
+	// first merge all of our maps
+	mapConfig := clientcmdapi.NewConfig()
+
+	for _, kubeconfig := range kubeconfigs {
+		mergo.MergeWithOverwrite(mapConfig, kubeconfig)
+	}
+
+	// merge all of the struct values in the reverse order so that priority is given correctly
+	// errors are not added to the list the second time
+	nonMapConfig := clientcmdapi.NewConfig()
+	for i := len(kubeconfigs) - 1; i >= 0; i-- {
+		kubeconfig := kubeconfigs[i]
+		mergo.MergeWithOverwrite(nonMapConfig, kubeconfig)
+	}
+
+	// since values are overwritten, but map values are not, we can merge the non-map config on top of the map config and
+	// get the values we expect.
+	config := clientcmdapi.NewConfig()
+	mergo.MergeWithOverwrite(config, mapConfig)
+	mergo.MergeWithOverwrite(config, nonMapConfig)
+
+	if rules.ResolvePaths() {
+		if err := ResolveLocalPaths(config); err != nil {
+			errlist = append(errlist, err)
+		}
+	}
+	return config, utilerrors.NewAggregate(errlist)
+}
+
+// Migrate uses the MigrationRules map. If a destination file is not present, then the source file is checked.
+// If the source file is present, then it is copied to the destination file BEFORE any further loading happens.
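+//
+// For example, the default rules migrate the legacy kubeconfig location to the
+// current one (a sketch mirroring currentMigrationRules above):
+//
+//	rules.MigrationRules = map[string]string{
+//		RecommendedHomeFile: path.Join(os.Getenv("HOME"), "/.kube/.kubeconfig"),
+//	}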
+func (rules *ClientConfigLoadingRules) Migrate() error { + if rules.MigrationRules == nil { + return nil + } + + for destination, source := range rules.MigrationRules { + if _, err := os.Stat(destination); err == nil { + // if the destination already exists, do nothing + continue + } else if os.IsPermission(err) { + // if we can't access the file, skip it + continue + } else if !os.IsNotExist(err) { + // if we had an error other than non-existence, fail + return err + } + + if sourceInfo, err := os.Stat(source); err != nil { + if os.IsNotExist(err) || os.IsPermission(err) { + // if the source file doesn't exist or we can't access it, there's no work to do. + continue + } + + // if we had an error other than non-existence, fail + return err + } else if sourceInfo.IsDir() { + return fmt.Errorf("cannot migrate %v to %v because it is a directory", source, destination) + } + + in, err := os.Open(source) + if err != nil { + return err + } + defer in.Close() + out, err := os.Create(destination) + if err != nil { + return err + } + defer out.Close() + + if _, err = io.Copy(out, in); err != nil { + return err + } + } + + return nil +} + +// GetLoadingPrecedence implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetLoadingPrecedence() []string { + return rules.Precedence +} + +// GetStartingConfig implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetStartingConfig() (*clientcmdapi.Config, error) { + clientConfig := NewNonInteractiveDeferredLoadingClientConfig(rules, &ConfigOverrides{}) + rawConfig, err := clientConfig.RawConfig() + if os.IsNotExist(err) { + return clientcmdapi.NewConfig(), nil + } + if err != nil { + return nil, err + } + + return &rawConfig, nil +} + +// GetDefaultFilename implements ConfigAccess +func (rules *ClientConfigLoadingRules) GetDefaultFilename() string { + // Explicit file if we have one. + if rules.IsExplicitFile() { + return rules.GetExplicitFile() + } + // Otherwise, first existing file from precedence. + for _, filename := range rules.GetLoadingPrecedence() { + if _, err := os.Stat(filename); err == nil { + return filename + } + } + // If none exists, use the first from precedence. 
+	if len(rules.Precedence) > 0 {
+		return rules.Precedence[0]
+	}
+	return ""
+}
+
+// IsExplicitFile implements ConfigAccess
+func (rules *ClientConfigLoadingRules) IsExplicitFile() bool {
+	return len(rules.ExplicitPath) > 0
+}
+
+// GetExplicitFile implements ConfigAccess
+func (rules *ClientConfigLoadingRules) GetExplicitFile() string {
+	return rules.ExplicitPath
+}
+
+// IsDefaultConfig returns true if the provided configuration matches the default
+func (rules *ClientConfigLoadingRules) IsDefaultConfig(config *restclient.Config) bool {
+	if rules.DefaultClientConfig == nil {
+		return false
+	}
+	defaultConfig, err := rules.DefaultClientConfig.ClientConfig()
+	if err != nil {
+		return false
+	}
+	return reflect.DeepEqual(config, defaultConfig)
+}
+
+// LoadFromFile takes a filename and deserializes the contents into a Config object
+func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
+	kubeconfigBytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	config, err := Load(kubeconfigBytes)
+	if err != nil {
+		return nil, err
+	}
+	klog.V(6).Infoln("Config loaded from file: ", filename)
+
+	// set LocationOfOrigin on every Cluster, User, and Context
+	for key, obj := range config.AuthInfos {
+		obj.LocationOfOrigin = filename
+		config.AuthInfos[key] = obj
+	}
+	for key, obj := range config.Clusters {
+		obj.LocationOfOrigin = filename
+		config.Clusters[key] = obj
+	}
+	for key, obj := range config.Contexts {
+		obj.LocationOfOrigin = filename
+		config.Contexts[key] = obj
+	}
+
+	if config.AuthInfos == nil {
+		config.AuthInfos = map[string]*clientcmdapi.AuthInfo{}
+	}
+	if config.Clusters == nil {
+		config.Clusters = map[string]*clientcmdapi.Cluster{}
+	}
+	if config.Contexts == nil {
+		config.Contexts = map[string]*clientcmdapi.Context{}
+	}
+
+	return config, nil
+}
+
+// Load takes a byte slice and deserializes the contents into a Config object.
+// Encapsulates deserialization without assuming the source is a file.
+func Load(data []byte) (*clientcmdapi.Config, error) {
+	config := clientcmdapi.NewConfig()
+	// if there's no data in a file, return the default object instead of failing (DecodeInto rejects empty input)
+	if len(data) == 0 {
+		return config, nil
+	}
+	decoded, _, err := clientcmdlatest.Codec.Decode(data, &schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"}, config)
+	if err != nil {
+		return nil, err
+	}
+	return decoded.(*clientcmdapi.Config), nil
+}
+
+// WriteToFile serializes the config to yaml and writes it out to a file. If not present, it creates the file with mode 0600. If it is present,
+// it stomps the contents.
+func WriteToFile(config clientcmdapi.Config, filename string) error {
+	content, err := Write(config)
+	if err != nil {
+		return err
+	}
+	dir := filepath.Dir(filename)
+	if _, err := os.Stat(dir); os.IsNotExist(err) {
+		if err = os.MkdirAll(dir, 0755); err != nil {
+			return err
+		}
+	}
+
+	if err := ioutil.WriteFile(filename, content, 0600); err != nil {
+		return err
+	}
+	return nil
+}
+
+func lockFile(filename string) error {
+	// TODO: find a way to do this with actual file locks. Will
+	// probably need separate solution for windows and Linux.
+
+	// Make sure the dir exists before we try to create a lock file.
+ dir := filepath.Dir(filename) + if _, err := os.Stat(dir); os.IsNotExist(err) { + if err = os.MkdirAll(dir, 0755); err != nil { + return err + } + } + f, err := os.OpenFile(lockName(filename), os.O_CREATE|os.O_EXCL, 0) + if err != nil { + return err + } + f.Close() + return nil +} + +func unlockFile(filename string) error { + return os.Remove(lockName(filename)) +} + +func lockName(filename string) string { + return filename + ".lock" +} + +// Write serializes the config to yaml. +// Encapsulates serialization without assuming the destination is a file. +func Write(config clientcmdapi.Config) ([]byte, error) { + return runtime.Encode(clientcmdlatest.Codec, &config) +} + +func (rules ClientConfigLoadingRules) ResolvePaths() bool { + return !rules.DoNotResolvePaths +} + +// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin +// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without +// modification of its contents. +func ResolveLocalPaths(config *clientcmdapi.Config) error { + for _, cluster := range config.Clusters { + if len(cluster.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { + return err + } + } + for _, authInfo := range config.AuthInfos { + if len(authInfo.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + } + + return nil +} + +// RelativizeClusterLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already +// absolute, but any existing path will be resolved relative to LocationOfOrigin +func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error { + if len(cluster.LocationOfOrigin) == 0 { + return fmt.Errorf("no location of origin for %s", cluster.Server) + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { + return err + } + if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil { + return err + } + + return nil +} + +// RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolveLocalPaths. 
This assumes that any NEW path is already
+// absolute, but any existing path will be resolved relative to LocationOfOrigin
+func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error {
+	if len(authInfo.LocationOfOrigin) == 0 {
+		return fmt.Errorf("no location of origin for %v", authInfo)
+	}
+	base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
+	if err != nil {
+		return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
+	}
+
+	if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil {
+		return err
+	}
+	if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// RelativizeConfigPaths relativizes every file reference in the config against the given base directory
+func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error {
+	return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base)
+}
+
+// ResolveConfigPaths resolves every file reference in the config against the given base directory
+func ResolveConfigPaths(config *clientcmdapi.Config, base string) error {
+	return ResolvePaths(GetConfigFileReferences(config), base)
+}
+
+// GetConfigFileReferences returns pointers to every file path referenced by the config's clusters and auth infos
+func GetConfigFileReferences(config *clientcmdapi.Config) []*string {
+	refs := []*string{}
+
+	for _, cluster := range config.Clusters {
+		refs = append(refs, GetClusterFileReferences(cluster)...)
+	}
+	for _, authInfo := range config.AuthInfos {
+		refs = append(refs, GetAuthInfoFileReferences(authInfo)...)
+	}
+
+	return refs
+}
+
+// GetClusterFileReferences returns pointers to the file paths referenced by the cluster stanza
+func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string {
+	return []*string{&cluster.CertificateAuthority}
+}
+
+// GetAuthInfoFileReferences returns pointers to the file paths referenced by the auth info stanza
+func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string {
+	s := []*string{&authInfo.ClientCertificate, &authInfo.ClientKey, &authInfo.TokenFile}
+	// Only resolve exec command if it isn't PATH based.
+	if authInfo.Exec != nil && strings.ContainsRune(authInfo.Exec.Command, filepath.Separator) {
+		s = append(s, &authInfo.Exec.Command)
+	}
+	return s
+}
+
+// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
+func ResolvePaths(refs []*string, base string) error {
+	for _, ref := range refs {
+		// Don't resolve empty paths
+		if len(*ref) > 0 {
+			// Don't resolve absolute paths
+			if !filepath.IsAbs(*ref) {
+				*ref = filepath.Join(base, *ref)
+			}
+		}
+	}
+	return nil
+}
+
+// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps.
+// Any path requiring a backstep is left as-is as long as it is absolute.
Any non-absolute path that can't be relativized produces an error +func RelativizePathWithNoBacksteps(refs []*string, base string) error { + for _, ref := range refs { + // Don't relativize empty paths + if len(*ref) > 0 { + rel, err := MakeRelative(*ref, base) + if err != nil { + return err + } + + // if we have a backstep, don't mess with the path + if strings.HasPrefix(rel, "../") { + if filepath.IsAbs(*ref) { + continue + } + + return fmt.Errorf("%v requires backsteps and is not absolute", *ref) + } + + *ref = rel + } + } + return nil +} + +func MakeRelative(path, base string) (string, error) { + if len(path) > 0 { + rel, err := filepath.Rel(base, path) + if err != nil { + return path, err + } + return rel, nil + } + return path, nil +} + +// deduplicate removes any duplicated values and returns a new slice, keeping the order unchanged +func deduplicate(s []string) []string { + encountered := map[string]bool{} + ret := make([]string, 0) + for i := range s { + if encountered[s[i]] { + continue + } + encountered[s[i]] = true + ret = append(ret, s[i]) + } + return ret +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go new file mode 100644 index 00000000..9cc112a2 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/merged_client_builder.go @@ -0,0 +1,173 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "io" + "sync" + + "k8s.io/klog" + + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a client config loader. +// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that +// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before +// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid +// passing extraneous information down a call stack +type DeferredLoadingClientConfig struct { + loader ClientConfigLoader + overrides *ConfigOverrides + fallbackReader io.Reader + + clientConfig ClientConfig + loadingLock sync.Mutex + + // provided for testing + icc InClusterConfig +} + +// InClusterConfig abstracts details of whether the client is running in a cluster for testing. 
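+// Possible reports whether in-cluster configuration appears to be available,
+// for example when running inside a pod with a service account mounted.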
+type InClusterConfig interface {
+	ClientConfig
+	Possible() bool
+}
+
+// NewNonInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loader and overrides
+func NewNonInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides) ClientConfig {
+	return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}}
+}
+
+// NewInteractiveDeferredLoadingClientConfig creates a ClientConfig using the passed loader, overrides, and fallback auth reader
+func NewInteractiveDeferredLoadingClientConfig(loader ClientConfigLoader, overrides *ConfigOverrides, fallbackReader io.Reader) ClientConfig {
+	return &DeferredLoadingClientConfig{loader: loader, overrides: overrides, icc: &inClusterClientConfig{overrides: overrides}, fallbackReader: fallbackReader}
+}
+
+func (config *DeferredLoadingClientConfig) createClientConfig() (ClientConfig, error) {
+	if config.clientConfig == nil {
+		config.loadingLock.Lock()
+		defer config.loadingLock.Unlock()
+
+		if config.clientConfig == nil {
+			mergedConfig, err := config.loader.Load()
+			if err != nil {
+				return nil, err
+			}
+
+			var mergedClientConfig ClientConfig
+			if config.fallbackReader != nil {
+				mergedClientConfig = NewInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.fallbackReader, config.loader)
+			} else {
+				mergedClientConfig = NewNonInteractiveClientConfig(*mergedConfig, config.overrides.CurrentContext, config.overrides, config.loader)
+			}
+
+			config.clientConfig = mergedClientConfig
+		}
+	}
+
+	return config.clientConfig, nil
+}
+
+func (config *DeferredLoadingClientConfig) RawConfig() (clientcmdapi.Config, error) {
+	mergedConfig, err := config.createClientConfig()
+	if err != nil {
+		return clientcmdapi.Config{}, err
+	}
+
+	return mergedConfig.RawConfig()
+}
+
+// ClientConfig implements ClientConfig
+func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, error) {
+	mergedClientConfig, err := config.createClientConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	// load the configuration; return on any error other than an empty-config
+	// error, and on any config whose content differs from the default config
+	mergedConfig, err := mergedClientConfig.ClientConfig()
+	switch {
+	case err != nil:
+		if !IsEmptyConfig(err) {
+			// return on any error except empty config
+			return nil, err
+		}
+	case mergedConfig != nil:
+		// the configuration is valid, but if this is equal to the defaults we should try
+		// in-cluster configuration
+		if !config.loader.IsDefaultConfig(mergedConfig) {
+			return mergedConfig, nil
+		}
+	}
+
+	// check for in-cluster configuration and use it
+	if config.icc.Possible() {
+		klog.V(4).Infof("Using in-cluster configuration")
+		return config.icc.ClientConfig()
+	}
+
+	// return the result of the merged client config
+	return mergedConfig, err
+}
+
+// Namespace implements KubeConfig
+func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) {
+	mergedKubeConfig, err := config.createClientConfig()
+	if err != nil {
+		return "", false, err
+	}
+
+	ns, overridden, err := mergedKubeConfig.Namespace()
+	// if we get an error and it is not empty config, or if the merged config defined an explicit namespace, or
+	// if in-cluster config is not possible, return immediately
+	if (err != nil && !IsEmptyConfig(err)) || overridden || !config.icc.Possible() {
+		// return on any error except empty config
+		return ns, overridden, err
+	}
+
+	if len(ns) > 0 {
+		// if we
got a non-default namespace from the kubeconfig, use it + if ns != "default" { + return ns, false, nil + } + + // if we got a default namespace, determine whether it was explicit or implicit + if raw, err := mergedKubeConfig.RawConfig(); err == nil { + // determine the current context + currentContext := raw.CurrentContext + if config.overrides != nil && len(config.overrides.CurrentContext) > 0 { + currentContext = config.overrides.CurrentContext + } + if context := raw.Contexts[currentContext]; context != nil && len(context.Namespace) > 0 { + return ns, false, nil + } + } + } + + klog.V(4).Infof("Using in-cluster namespace") + + // allow the namespace from the service account token directory to be used. + return config.icc.Namespace() +} + +// ConfigAccess implements ClientConfig +func (config *DeferredLoadingClientConfig) ConfigAccess() ConfigAccess { + return config.loader +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/overrides.go b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go new file mode 100644 index 00000000..bfca0328 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/overrides.go @@ -0,0 +1,247 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "strconv" + "strings" + + "github.com/spf13/pflag" + + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// ConfigOverrides holds values that should override whatever information is pulled from the actual Config object. You can't +// simply use an actual Config object, because Configs hold maps, but overrides are restricted to "at most one" +type ConfigOverrides struct { + AuthInfo clientcmdapi.AuthInfo + // ClusterDefaults are applied before the configured cluster info is loaded. + ClusterDefaults clientcmdapi.Cluster + ClusterInfo clientcmdapi.Cluster + Context clientcmdapi.Context + CurrentContext string + Timeout string +} + +// ConfigOverrideFlags holds the flag names to be used for binding command line flags. 
Notice that this structure tightly
+// corresponds to ConfigOverrides
+type ConfigOverrideFlags struct {
+	AuthOverrideFlags    AuthOverrideFlags
+	ClusterOverrideFlags ClusterOverrideFlags
+	ContextOverrideFlags ContextOverrideFlags
+	CurrentContext       FlagInfo
+	Timeout              FlagInfo
+}
+
+// AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects
+type AuthOverrideFlags struct {
+	ClientCertificate FlagInfo
+	ClientKey         FlagInfo
+	Token             FlagInfo
+	Impersonate       FlagInfo
+	ImpersonateGroups FlagInfo
+	Username          FlagInfo
+	Password          FlagInfo
+}
+
+// ContextOverrideFlags holds the flag names to be used for binding command line flags for Context objects
+type ContextOverrideFlags struct {
+	ClusterName  FlagInfo
+	AuthInfoName FlagInfo
+	Namespace    FlagInfo
+}
+
+// ClusterOverrideFlags holds the flag names to be used for binding command line flags for Cluster objects
+type ClusterOverrideFlags struct {
+	APIServer             FlagInfo
+	APIVersion            FlagInfo
+	CertificateAuthority  FlagInfo
+	InsecureSkipTLSVerify FlagInfo
+}
+
+// FlagInfo contains information about how to register a flag. This struct is useful if you want to provide a way for an extender to
+// get back a set of recommended flag names, descriptions, and defaults, but still allow the extender to customize them. This makes for
+// coherent extension, without full prescription
+type FlagInfo struct {
+	// LongName is the long string for a flag. If this is empty, then the flag will not be bound
+	LongName string
+	// ShortName is the single character for a flag. If this is empty, then there will be no short flag
+	ShortName string
+	// Default is the default value for the flag
+	Default string
+	// Description is the description for the flag
+	Description string
+}
+
+// AddSecretAnnotation marks the flag as secret by adding a "classified" annotation.
+func (f FlagInfo) AddSecretAnnotation(flags *pflag.FlagSet) FlagInfo {
+	flags.SetAnnotation(f.LongName, "classified", []string{"true"})
+	return f
+}
+
+// BindStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered
+func (f FlagInfo) BindStringFlag(flags *pflag.FlagSet, target *string) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		flags.StringVarP(target, f.LongName, f.ShortName, f.Default, f.Description)
+	}
+	return f
+}
+
+// BindTransformingStringFlag binds the flag based on the provided info. If LongName == "", nothing is registered
+func (f FlagInfo) BindTransformingStringFlag(flags *pflag.FlagSet, target *string, transformer func(string) (string, error)) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		flags.VarP(newTransformingStringValue(f.Default, target, transformer), f.LongName, f.ShortName, f.Description)
+	}
+	return f
+}
+
+// BindStringArrayFlag binds the flag based on the provided info. If LongName == "", nothing is registered
+func (f FlagInfo) BindStringArrayFlag(flags *pflag.FlagSet, target *[]string) FlagInfo {
+	// you can't register a flag without a long name
+	if len(f.LongName) > 0 {
+		sliceVal := []string{}
+		if len(f.Default) > 0 {
+			sliceVal = []string{f.Default}
+		}
+		flags.StringArrayVarP(target, f.LongName, f.ShortName, sliceVal, f.Description)
+	}
+	return f
+}
+
+// BindBoolFlag binds the flag based on the provided info.
If LongName == "", nothing is registered +func (f FlagInfo) BindBoolFlag(flags *pflag.FlagSet, target *bool) FlagInfo { + // you can't register a flag without a long name + if len(f.LongName) > 0 { + // try to parse Default as a bool. If it fails, assume false + boolVal, err := strconv.ParseBool(f.Default) + if err != nil { + boolVal = false + } + + flags.BoolVarP(target, f.LongName, f.ShortName, boolVal, f.Description) + } + return f +} + +const ( + FlagClusterName = "cluster" + FlagAuthInfoName = "user" + FlagContext = "context" + FlagNamespace = "namespace" + FlagAPIServer = "server" + FlagInsecure = "insecure-skip-tls-verify" + FlagCertFile = "client-certificate" + FlagKeyFile = "client-key" + FlagCAFile = "certificate-authority" + FlagEmbedCerts = "embed-certs" + FlagBearerToken = "token" + FlagImpersonate = "as" + FlagImpersonateGroup = "as-group" + FlagUsername = "username" + FlagPassword = "password" + FlagTimeout = "request-timeout" +) + +// RecommendedConfigOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags { + return ConfigOverrideFlags{ + AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix), + ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix), + ContextOverrideFlags: RecommendedContextOverrideFlags(prefix), + + CurrentContext: FlagInfo{prefix + FlagContext, "", "", "The name of the kubeconfig context to use"}, + Timeout: FlagInfo{prefix + FlagTimeout, "", "0", "The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests."}, + } +} + +// RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedAuthOverrideFlags(prefix string) AuthOverrideFlags { + return AuthOverrideFlags{ + ClientCertificate: FlagInfo{prefix + FlagCertFile, "", "", "Path to a client certificate file for TLS"}, + ClientKey: FlagInfo{prefix + FlagKeyFile, "", "", "Path to a client key file for TLS"}, + Token: FlagInfo{prefix + FlagBearerToken, "", "", "Bearer token for authentication to the API server"}, + Impersonate: FlagInfo{prefix + FlagImpersonate, "", "", "Username to impersonate for the operation"}, + ImpersonateGroups: FlagInfo{prefix + FlagImpersonateGroup, "", "", "Group to impersonate for the operation, this flag can be repeated to specify multiple groups."}, + Username: FlagInfo{prefix + FlagUsername, "", "", "Username for basic authentication to the API server"}, + Password: FlagInfo{prefix + FlagPassword, "", "", "Password for basic authentication to the API server"}, + } +} + +// RecommendedClusterOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing +func RecommendedClusterOverrideFlags(prefix string) ClusterOverrideFlags { + return ClusterOverrideFlags{ + APIServer: FlagInfo{prefix + FlagAPIServer, "", "", "The address and port of the Kubernetes API server"}, + CertificateAuthority: FlagInfo{prefix + FlagCAFile, "", "", "Path to a cert file for the certificate authority"}, + InsecureSkipTLSVerify: FlagInfo{prefix + FlagInsecure, "", "false", "If true, the server's certificate will not be checked for validity. 
This will make your HTTPS connections insecure"},
+	}
+}
+
+// RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags {
+	return ContextOverrideFlags{
+		ClusterName:  FlagInfo{prefix + FlagClusterName, "", "", "The name of the kubeconfig cluster to use"},
+		AuthInfoName: FlagInfo{prefix + FlagAuthInfoName, "", "", "The name of the kubeconfig user to use"},
+		Namespace:    FlagInfo{prefix + FlagNamespace, "n", "", "If present, the namespace scope for this CLI request"},
+	}
+}
+
+// BindOverrideFlags is a convenience method to bind the specified flags to their associated variables
+func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) {
+	BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags)
+	BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags)
+	BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags)
+	flagNames.CurrentContext.BindStringFlag(flags, &overrides.CurrentContext)
+	flagNames.Timeout.BindStringFlag(flags, &overrides.Timeout)
+}
+
+// BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables
+func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) {
+	flagNames.ClientCertificate.BindStringFlag(flags, &authInfo.ClientCertificate).AddSecretAnnotation(flags)
+	flagNames.ClientKey.BindStringFlag(flags, &authInfo.ClientKey).AddSecretAnnotation(flags)
+	flagNames.Token.BindStringFlag(flags, &authInfo.Token).AddSecretAnnotation(flags)
+	flagNames.Impersonate.BindStringFlag(flags, &authInfo.Impersonate).AddSecretAnnotation(flags)
+	flagNames.ImpersonateGroups.BindStringArrayFlag(flags, &authInfo.ImpersonateGroups).AddSecretAnnotation(flags)
+	flagNames.Username.BindStringFlag(flags, &authInfo.Username).AddSecretAnnotation(flags)
+	flagNames.Password.BindStringFlag(flags, &authInfo.Password).AddSecretAnnotation(flags)
+}
+
+// BindClusterFlags is a convenience method to bind the specified flags to their associated variables
+func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) {
+	flagNames.APIServer.BindStringFlag(flags, &clusterInfo.Server)
+	flagNames.CertificateAuthority.BindStringFlag(flags, &clusterInfo.CertificateAuthority)
+	flagNames.InsecureSkipTLSVerify.BindBoolFlag(flags, &clusterInfo.InsecureSkipTLSVerify)
+}
+
+// BindContextFlags is a convenience method to bind the specified flags to their associated variables
+func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) {
+	flagNames.ClusterName.BindStringFlag(flags, &contextInfo.Cluster)
+	flagNames.AuthInfoName.BindStringFlag(flags, &contextInfo.AuthInfo)
+	flagNames.Namespace.BindTransformingStringFlag(flags, &contextInfo.Namespace, RemoveNamespacesPrefix)
+}
+
+// RemoveNamespacesPrefix is a transformer that strips "ns/", "namespace/" and "namespaces/" prefixes case-insensitively
+func RemoveNamespacesPrefix(value string) (string, error) {
+	for _, prefix := range []string{"namespaces/", "namespace/", "ns/"} {
+		if len(value) > len(prefix) && strings.EqualFold(value[0:len(prefix)], prefix) {
+			value = value[len(prefix):]
+			break
+		}
+	}
+	return value, nil
+}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go
b/vendor/k8s.io/client-go/tools/clientcmd/validation.go new file mode 100644 index 00000000..2f927072 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go @@ -0,0 +1,299 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientcmd + +import ( + "errors" + "fmt" + "os" + "reflect" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/validation" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +var ( + ErrNoContext = errors.New("no context chosen") + ErrEmptyConfig = errors.New("no configuration has been provided") + // message is for consistency with old behavior + ErrEmptyCluster = errors.New("cluster has no server defined") +) + +type errContextNotFound struct { + ContextName string +} + +func (e *errContextNotFound) Error() string { + return fmt.Sprintf("context was not found for specified context: %v", e.ContextName) +} + +// IsContextNotFound returns a boolean indicating whether the error is known to +// report that a context was not found +func IsContextNotFound(err error) bool { + if err == nil { + return false + } + if _, ok := err.(*errContextNotFound); ok || err == ErrNoContext { + return true + } + return strings.Contains(err.Error(), "context was not found for specified context") +} + +// IsEmptyConfig returns true if the provided error indicates the provided configuration +// is empty. +func IsEmptyConfig(err error) bool { + switch t := err.(type) { + case errConfigurationInvalid: + return len(t) == 1 && t[0] == ErrEmptyConfig + } + return err == ErrEmptyConfig +} + +// errConfigurationInvalid is a set of errors indicating the configuration is invalid. +type errConfigurationInvalid []error + +// errConfigurationInvalid implements error and Aggregate +var _ error = errConfigurationInvalid{} +var _ utilerrors.Aggregate = errConfigurationInvalid{} + +func newErrConfigurationInvalid(errs []error) error { + switch len(errs) { + case 0: + return nil + default: + return errConfigurationInvalid(errs) + } +} + +// Error implements the error interface +func (e errConfigurationInvalid) Error() string { + return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error()) +} + +// Errors implements the AggregateError interface +func (e errConfigurationInvalid) Errors() []error { + return e +} + +// IsConfigurationInvalid returns true if the provided error indicates the configuration is invalid. +func IsConfigurationInvalid(err error) bool { + switch err.(type) { + case *errContextNotFound, errConfigurationInvalid: + return true + } + return IsContextNotFound(err) +} + +// Validate checks for errors in the Config. It does not return early so that it can find as many errors as possible. 
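+// The returned error, if non-nil, is an errConfigurationInvalid that
+// aggregates every problem found across the contexts, auth infos, and clusters.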
+func Validate(config clientcmdapi.Config) error {
+	validationErrors := make([]error, 0)
+
+	if clientcmdapi.IsConfigEmpty(&config) {
+		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+	}
+
+	if len(config.CurrentContext) != 0 {
+		if _, exists := config.Contexts[config.CurrentContext]; !exists {
+			validationErrors = append(validationErrors, &errContextNotFound{config.CurrentContext})
+		}
+	}
+
+	for contextName, context := range config.Contexts {
+		validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
+	}
+
+	for authInfoName, authInfo := range config.AuthInfos {
+		validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...)
+	}
+
+	for clusterName, clusterInfo := range config.Clusters {
+		validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...)
+	}
+
+	return newErrConfigurationInvalid(validationErrors)
+}
+
+// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
+// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
+func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error {
+	validationErrors := make([]error, 0)
+
+	if clientcmdapi.IsConfigEmpty(&config) {
+		return newErrConfigurationInvalid([]error{ErrEmptyConfig})
+	}
+
+	var contextName string
+	if len(passedContextName) != 0 {
+		contextName = passedContextName
+	} else {
+		contextName = config.CurrentContext
+	}
+
+	if len(contextName) == 0 {
+		return ErrNoContext
+	}
+
+	context, exists := config.Contexts[contextName]
+	if !exists {
+		validationErrors = append(validationErrors, &errContextNotFound{contextName})
+	}
+
+	if exists {
+		validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
+		validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...)
+		validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...)
+	}
+
+	return newErrConfigurationInvalid(validationErrors)
+}
+
+// validateClusterInfo looks for conflicts and errors in the cluster info
+func validateClusterInfo(clusterName string, clusterInfo clientcmdapi.Cluster) []error {
+	validationErrors := make([]error, 0)
+
+	emptyCluster := clientcmdapi.NewCluster()
+	if reflect.DeepEqual(*emptyCluster, clusterInfo) {
+		return []error{ErrEmptyCluster}
+	}
+
+	if len(clusterInfo.Server) == 0 {
+		if len(clusterName) == 0 {
+			validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined"))
+		} else {
+			validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
+		}
+	}
+	// Make sure CA data and CA file aren't both specified
+	if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
+		validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v.
certificate-authority-data will override.", clusterName)) + } + if len(clusterInfo.CertificateAuthority) != 0 { + clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) + } else { + defer clientCertCA.Close() + } + } + + return validationErrors +} + +// validateAuthInfo looks for conflicts and errors in the auth info +func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []error { + validationErrors := make([]error, 0) + + usingAuthPath := false + methods := make([]string, 0, 3) + if len(authInfo.Token) != 0 { + methods = append(methods, "token") + } + if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { + methods = append(methods, "basicAuth") + } + + if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { + // Make sure cert data and file aren't both specified + if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override.", authInfoName)) + } + // Make sure key data and file aren't both specified + if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) + } + // Make sure a key is specified + if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method.", authInfoName)) + } + + if len(authInfo.ClientCertificate) != 0 { + clientCertFile, err := os.Open(authInfo.ClientCertificate) + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) + } else { + defer clientCertFile.Close() + } + } + if len(authInfo.ClientKey) != 0 { + clientKeyFile, err := os.Open(authInfo.ClientKey) + if err != nil { + validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) + } else { + defer clientKeyFile.Close() + } + } + } + + if authInfo.Exec != nil { + if authInfo.AuthProvider != nil { + validationErrors = append(validationErrors, fmt.Errorf("authProvider cannot be provided in combination with an exec plugin for %s", authInfoName)) + } + if len(authInfo.Exec.Command) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("command must be specified for %v to use exec authentication plugin", authInfoName)) + } + if len(authInfo.Exec.APIVersion) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("apiVersion must be specified for %v to use exec authentication plugin", authInfoName)) + } + for _, v := range authInfo.Exec.Env { + if len(v.Name) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName)) + } + } + } + + // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case + if (len(methods) > 1) && (!usingAuthPath) { + validationErrors = append(validationErrors, 
fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) + } + + // ImpersonateGroups or ImpersonateUserExtra should be requested with a user + if (len(authInfo.ImpersonateGroups) > 0 || len(authInfo.ImpersonateUserExtra) > 0) && (len(authInfo.Impersonate) == 0) { + validationErrors = append(validationErrors, fmt.Errorf("requesting groups or user-extra for %v without impersonating a user", authInfoName)) + } + return validationErrors +} + +// validateContext looks for errors in the context. It is not transitive, so errors in the reference authInfo or cluster configs are not included in this return +func validateContext(contextName string, context clientcmdapi.Context, config clientcmdapi.Config) []error { + validationErrors := make([]error, 0) + + if len(contextName) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("empty context name for %#v is not allowed", context)) + } + + if len(context.AuthInfo) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("user was not specified for context %q", contextName)) + } else if _, exists := config.AuthInfos[context.AuthInfo]; !exists { + validationErrors = append(validationErrors, fmt.Errorf("user %q was not found for context %q", context.AuthInfo, contextName)) + } + + if len(context.Cluster) == 0 { + validationErrors = append(validationErrors, fmt.Errorf("cluster was not specified for context %q", contextName)) + } else if _, exists := config.Clusters[context.Cluster]; !exists { + validationErrors = append(validationErrors, fmt.Errorf("cluster %q was not found for context %q", context.Cluster, contextName)) + } + + if len(context.Namespace) != 0 { + if len(validation.IsDNS1123Label(context.Namespace)) != 0 { + validationErrors = append(validationErrors, fmt.Errorf("namespace %q for context %q does not conform to the kubernetes DNS_LABEL rules", context.Namespace, contextName)) + } + } + + return validationErrors +} diff --git a/vendor/k8s.io/client-go/util/homedir/homedir.go b/vendor/k8s.io/client-go/util/homedir/homedir.go new file mode 100644 index 00000000..3fdbeb8c --- /dev/null +++ b/vendor/k8s.io/client-go/util/homedir/homedir.go @@ -0,0 +1,92 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package homedir + +import ( + "os" + "path/filepath" + "runtime" +) + +// HomeDir returns the home directory for the current user. +// On Windows: +// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned. +// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned. +// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned. +// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned. 
+func HomeDir() string { + if runtime.GOOS == "windows" { + home := os.Getenv("HOME") + homeDriveHomePath := "" + if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { + homeDriveHomePath = homeDrive + homePath + } + userProfile := os.Getenv("USERPROFILE") + + // Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file. + // %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility. + for _, p := range []string{home, homeDriveHomePath, userProfile} { + if len(p) == 0 { + continue + } + if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil { + continue + } + return p + } + + firstSetPath := "" + firstExistingPath := "" + + // Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools + for _, p := range []string{home, userProfile, homeDriveHomePath} { + if len(p) == 0 { + continue + } + if len(firstSetPath) == 0 { + // remember the first path that is set + firstSetPath = p + } + info, err := os.Stat(p) + if err != nil { + continue + } + if len(firstExistingPath) == 0 { + // remember the first path that exists + firstExistingPath = p + } + if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 { + // return first path that is writeable + return p + } + } + + // If none are writeable, return first location that exists + if len(firstExistingPath) > 0 { + return firstExistingPath + } + + // If none exist, return first location that is set + if len(firstSetPath) > 0 { + return firstSetPath + } + + // We've got nothing + return "" + } + return os.Getenv("HOME") +} diff --git a/vendor/knative.dev/pkg/Gopkg.lock b/vendor/knative.dev/pkg/Gopkg.lock index 2f87417d..7d48f438 100644 --- a/vendor/knative.dev/pkg/Gopkg.lock +++ b/vendor/knative.dev/pkg/Gopkg.lock @@ -789,7 +789,7 @@ version = "v2.0.1" [[projects]] - digest = "1:081608ceb454c46b54d24b7561e5744088f3ff69478b23f50277ec83bd8636b0" + digest = "1:74ee58de746e67a8d7d44b8b71ca3b980f6b1f62211075832d2f427205ea18cd" name = "google.golang.org/api" packages = [ "container/v1beta1", @@ -800,16 +800,18 @@ "internal/third_party/uritemplates", "iterator", "option", + "option/internaloption", "storage/v1", "support/bundler", "transport", + "transport/cert", "transport/grpc", "transport/http", "transport/http/internal/propagation", ] pruneopts = "NUT" - revision = "aa5d4e47691e7ae1aebb5221ff8e4beea23fad72" - version = "v0.15.0" + revision = "52f0532eadbcc6f6b82d6f5edf66e610d10bfde6" + version = "v0.21.0" [[projects]] digest = "1:a955e7c44c2be14b61aa2ddda744edfdfbc6817e993703a16e303c277ba84449" @@ -857,10 +859,12 @@ revision = "3bdd9d9f5532d75d09efb230bd767d265245cfe5" [[projects]] - digest = "1:89cc3cf640fa24f7345509981e7ab088ee8d4d4f08cf3b5783508856b146b438" + digest = "1:c37329e8e9ea45a568f1804138ec01718acddb52e2ef2a9a503a5a57136fd2bc" name = "google.golang.org/grpc" packages = [ ".", + "attributes", + "backoff", "balancer", "balancer/base", "balancer/grpclb", @@ -887,10 +891,15 @@ "internal/backoff", "internal/balancerload", "internal/binarylog", + "internal/buffer", "internal/channelz", "internal/envconfig", + "internal/grpclog", "internal/grpcrand", "internal/grpcsync", + "internal/grpcutil", + "internal/resolver/dns", + "internal/resolver/passthrough", "internal/syscall", "internal/transport", "keepalive", @@ -898,16 +907,14 @@ "naming", "peer", "resolver", - "resolver/dns", - "resolver/passthrough", "serviceconfig", "stats", "status", "tap", ] pruneopts = "NUT" - 
revision = "1d89a3c832915b2314551c1d2a506874d62e53f7" - version = "v1.22.0" + revision = "142182889d38b76209f1d9f1d8e91d7608aff542" + version = "v1.28.0" [[projects]] digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" @@ -1516,6 +1523,7 @@ "k8s.io/client-go/tools/leaderelection", "k8s.io/client-go/tools/leaderelection/resourcelock", "k8s.io/client-go/tools/metrics", + "k8s.io/client-go/tools/pager", "k8s.io/client-go/tools/record", "k8s.io/client-go/util/retry", "k8s.io/client-go/util/workqueue", diff --git a/vendor/knative.dev/pkg/apiextensions/storageversion/cmd/migrate/config.go b/vendor/knative.dev/pkg/apiextensions/storageversion/cmd/migrate/config.go new file mode 100644 index 00000000..2507e737 --- /dev/null +++ b/vendor/knative.dev/pkg/apiextensions/storageversion/cmd/migrate/config.go @@ -0,0 +1,76 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// TODO - make a package that's reusable here and by sharedmain + +func configOrDie() *rest.Config { + var ( + masterURL = flag.String("master", "", + "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") + kubeconfig = flag.String("kubeconfig", "", + "Path to a kubeconfig. Only required if out-of-cluster.") + ) + + flag.Parse() + + cfg, err := getConfig(*masterURL, *kubeconfig) + if err != nil { + panic(fmt.Sprintf("Error building kubeconfig: %v", err)) + } + + return cfg +} + +// getConfig returns a rest.Config to be used for kubernetes client creation. +// It does so in the following order: +// 1. Use the passed kubeconfig/masterURL. +// 2. Fallback to the KUBECONFIG environment variable. +// 3. Fallback to in-cluster config. +// 4. Fallback to the ~/.kube/config. +func getConfig(masterURL, kubeconfig string) (*rest.Config, error) { + if kubeconfig == "" { + kubeconfig = os.Getenv("KUBECONFIG") + } + // If we have an explicit indication of where the kubernetes config lives, read that. + if kubeconfig != "" { + return clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) + } + // If not, try the in-cluster config. + if c, err := rest.InClusterConfig(); err == nil { + return c, nil + } + // If no in-cluster config, try the default location in the user's home directory. 
+ if usr, err := user.Current(); err == nil { + if c, err := clientcmd.BuildConfigFromFlags("", filepath.Join(usr.HomeDir, ".kube", "config")); err == nil { + return c, nil + } + } + + return nil, fmt.Errorf("could not create a valid kubeconfig") +} diff --git a/vendor/knative.dev/pkg/apiextensions/storageversion/cmd/migrate/main.go b/vendor/knative.dev/pkg/apiextensions/storageversion/cmd/migrate/main.go new file mode 100644 index 00000000..e17d0e50 --- /dev/null +++ b/vendor/knative.dev/pkg/apiextensions/storageversion/cmd/migrate/main.go @@ -0,0 +1,84 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "fmt" + "log" + + "go.uber.org/zap" + apixclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "knative.dev/pkg/apiextensions/storageversion" + "knative.dev/pkg/logging" + "knative.dev/pkg/signals" +) + +func main() { + logger := setupLogger() + defer logger.Sync() + + config := configOrDie() + grs, err := parseResources(flag.Args()) + if err != nil { + logger.Fatal(err) + } + + migrator := storageversion.NewMigrator( + dynamic.NewForConfigOrDie(config), + apixclient.NewForConfigOrDie(config), + ) + + ctx := signals.NewContext() + + logger.Infof("Migrating %d group resources", len(grs)) + + for _, gr := range grs { + logger.Infof("Migrating group resource %s", gr) + if err := migrator.Migrate(ctx, gr); err != nil { + logger.Fatalf("Failed to migrate: %s", err) + } + } + + logger.Info("Migration complete") +} + +func parseResources(args []string) ([]schema.GroupResource, error) { + grs := make([]schema.GroupResource, 0, len(args)) + for _, arg := range args { + gr := schema.ParseGroupResource(arg) + if gr.Empty() { + return nil, fmt.Errorf("unable to parse group version: %s", arg) + } + grs = append(grs, gr) + } + return grs, nil +} + +func setupLogger() *zap.SugaredLogger { + const component = "storage-migrator" + + config, err := logging.NewConfigFromMap(nil) + if err != nil { + log.Fatalf("Failed to create logging config: %s", err) + } + + logger, _ := logging.NewLoggerFromConfig(config, component) + return logger +} diff --git a/vendor/knative.dev/pkg/apiextensions/storageversion/migrator.go b/vendor/knative.dev/pkg/apiextensions/storageversion/migrator.go new file mode 100644 index 00000000..45a06ab9 --- /dev/null +++ b/vendor/knative.dev/pkg/apiextensions/storageversion/migrator.go @@ -0,0 +1,120 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storageversion + +import ( + "context" + "fmt" + + apix "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apixclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/pager" +) + +// Migrator will read custom resource definitions and upgrade +// the associated resources to the latest storage version +type Migrator struct { + dynamicClient dynamic.Interface + apixClient apixclient.Interface +} + +// NewMigrator will return a new Migrator +func NewMigrator(d dynamic.Interface, a apixclient.Interface) *Migrator { + return &Migrator{ + dynamicClient: d, + apixClient: a, + } +} + +// Migrate takes a group resource (ie. resource.some.group.dev) and +// updates instances of the resource to the latest storage version +// +// This is done by listing all the resources and performing an empty patch +// which triggers a migration on the K8s API server +// +// Finally the migrator will update the CRD's status and drop older storage +// versions +func (m *Migrator) Migrate(ctx context.Context, gr schema.GroupResource) error { + crdClient := m.apixClient.ApiextensionsV1beta1().CustomResourceDefinitions() + + crd, err := crdClient.Get(gr.String(), metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("unable to fetch crd %s - %w", gr, err) + } + + version := storageVersion(crd) + + if err := m.migrateResources(ctx, gr.WithVersion(version)); err != nil { + return err + } + + patch := `{"status":{"storedVersions":["` + version + `"]}}` + _, err = crdClient.Patch(crd.Name, types.StrategicMergePatchType, []byte(patch), "status") + if err != nil { + return fmt.Errorf("unable to drop storage version definition %s - %w", gr, err) + } + + return nil +} + +func (m *Migrator) migrateResources(ctx context.Context, gvr schema.GroupVersionResource) error { + client := m.dynamicClient.Resource(gvr) + + listFunc := func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) { + return client.Namespace(metav1.NamespaceAll).List(opts) + } + + onEach := func(obj runtime.Object) error { + item := obj.(metav1.Object) + + _, err := client.Namespace(item.GetNamespace()). 
+ Patch(item.GetName(), types.MergePatchType, []byte("{}"), metav1.PatchOptions{}) + + if err != nil { + return fmt.Errorf("unable to patch resource %s/%s (gvr: %s) - %w", + item.GetNamespace(), item.GetName(), + gvr, err) + } + + return nil + } + + pager := pager.New(listFunc) + return pager.EachListItem(ctx, metav1.ListOptions{}, onEach) +} + +func storageVersion(crd *apix.CustomResourceDefinition) string { + var version string + + for _, v := range crd.Spec.Versions { + if v.Storage { + version = v.Name + break + } + } + + if version == "" { + version = crd.Spec.Version + } + + return version +} diff --git a/vendor/knative.dev/pkg/apis/contexts.go b/vendor/knative.dev/pkg/apis/contexts.go index abb5706a..735c6191 100644 --- a/vendor/knative.dev/pkg/apis/contexts.go +++ b/vendor/knative.dev/pkg/apis/contexts.go @@ -198,3 +198,16 @@ func AllowDifferentNamespace(ctx context.Context) context.Context { func IsDifferentNamespaceAllowed(ctx context.Context) bool { return ctx.Value(allowDifferentNamespace{}) != nil } + +// This is attached to contexts passed to webhook interfaces when the user has request DryRun mode. +type isDryRun struct{} + +// WithDryRun is used to indicate that this call is in DryRun mode. +func WithDryRun(ctx context.Context) context.Context { + return context.WithValue(ctx, isDryRun{}, struct{}{}) +} + +// IsDryRun indicates that this request is in DryRun mode. +func IsDryRun(ctx context.Context) bool { + return ctx.Value(isDryRun{}) != nil +} diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/packages.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/packages.go index 75904502..ce8b6dbc 100644 --- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/packages.go +++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/packages.go @@ -206,11 +206,6 @@ func isNonNamespaced(t *types.Type) bool { return nonNamespaced } -// isInternal returns true if the tags for a member do not contain a json tag -func isInternal(m types.Member) bool { - return !strings.Contains(m.Tags, "json") -} - func vendorless(p string) string { if pos := strings.LastIndex(p, "/vendor/"); pos != -1 { return p[pos+len("/vendor/"):] diff --git a/vendor/knative.dev/pkg/leaderelection/config.go b/vendor/knative.dev/pkg/leaderelection/config.go index 4bba700a..04bacfe5 100644 --- a/vendor/knative.dev/pkg/leaderelection/config.go +++ b/vendor/knative.dev/pkg/leaderelection/config.go @@ -17,7 +17,6 @@ limitations under the License. package leaderelection import ( - "errors" "fmt" "os" "strings" @@ -30,10 +29,7 @@ import ( const ConfigMapNameEnv = "CONFIG_LEADERELECTION_NAME" -var ( - errEmptyLeaderElectionConfig = errors.New("empty leader election configuration") - validResourceLocks = sets.NewString("leases", "configmaps", "endpoints") -) +var validResourceLocks = sets.NewString("leases", "configmaps", "endpoints") // NewConfigFromMap returns a Config for the given map, or an error. func NewConfigFromMap(data map[string]string) (*Config, error) { diff --git a/vendor/knative.dev/pkg/metrics/monitored_resources.go b/vendor/knative.dev/pkg/metrics/monitored_resources.go index d8034efc..41819e0a 100644 --- a/vendor/knative.dev/pkg/metrics/monitored_resources.go +++ b/vendor/knative.dev/pkg/metrics/monitored_resources.go @@ -17,7 +17,6 @@ limitations under the License. 
package metrics import ( - "go.opencensus.io/tag" "knative.dev/pkg/metrics/metricskey" ) @@ -27,14 +26,6 @@ func (g *Global) MonitoredResource() (resType string, labels map[string]string) return "global", nil } -func getTagsMap(tags []tag.Tag) map[string]string { - tagsMap := map[string]string{} - for _, t := range tags { - tagsMap[t.Key.Name()] = t.Value - } - return tagsMap -} - func valueOrUnknown(key string, tagsMap map[string]string) string { if value, ok := tagsMap[key]; ok { return value diff --git a/vendor/knative.dev/pkg/resolver/addressable_resolver.go b/vendor/knative.dev/pkg/resolver/addressable_resolver.go index 36b5eed7..3dadd4f5 100644 --- a/vendor/knative.dev/pkg/resolver/addressable_resolver.go +++ b/vendor/knative.dev/pkg/resolver/addressable_resolver.go @@ -140,7 +140,12 @@ func (r *URIResolver) URIFromObjectReference(ref *corev1.ObjectReference, parent return nil, errors.New("ref is nil") } - if err := r.tracker.Track(*ref, parent); err != nil { + if err := r.tracker.TrackReference(tracker.Reference{ + APIVersion: ref.APIVersion, + Kind: ref.Kind, + Namespace: ref.Namespace, + Name: ref.Name, + }, parent); err != nil { return nil, fmt.Errorf("failed to track %+v: %v", ref, err) } diff --git a/vendor/knative.dev/pkg/test/cmd/command.go b/vendor/knative.dev/pkg/test/cmd/command.go index 5eb8a02f..7c4ce0da 100644 --- a/vendor/knative.dev/pkg/test/cmd/command.go +++ b/vendor/knative.dev/pkg/test/cmd/command.go @@ -33,6 +33,13 @@ const ( separator = "\n" ) +// These vars are defined for easy mocking in unit tests. +var ( + RunCommand = runCommand + RunCommands = runCommands + RunCommandsInParallel = runCommandsInParallel +) + // Option enables further configuration of a Cmd. type Option func(cmd *exec.Cmd) @@ -51,7 +58,7 @@ func WithDir(dir string) Option { } // RunCommand will run the command and return the standard output, plus error if there is one. -func RunCommand(cmdLine string, options ...Option) (string, error) { +func runCommand(cmdLine string, options ...Option) (string, error) { cmdSplit, err := shell.Split(cmdLine) if len(cmdSplit) == 0 || err != nil { return "", &CommandLineError{ @@ -85,7 +92,7 @@ func RunCommand(cmdLine string, options ...Option) (string, error) { // RunCommands will run the commands sequentially. // If there is an error when running a command, it will return directly with all standard output so far and the error. -func RunCommands(cmdLines ...string) (string, error) { +func runCommands(cmdLines ...string) (string, error) { var outputs []string for _, cmdLine := range cmdLines { output, err := RunCommand(cmdLine) @@ -99,7 +106,7 @@ func RunCommands(cmdLines ...string) (string, error) { // RunCommandsInParallel will run the commands in parallel. // It will always finish running all commands, and return all standard output and errors together. 
-func RunCommandsInParallel(cmdLines ...string) (string, error) { +func runCommandsInParallel(cmdLines ...string) (string, error) { errCh := make(chan error, len(cmdLines)) outputCh := make(chan string, len(cmdLines)) mx := sync.Mutex{} diff --git a/vendor/knative.dev/pkg/test/e2e_flags.go b/vendor/knative.dev/pkg/test/e2e_flags.go index bb974ca3..3dfb2eb3 100644 --- a/vendor/knative.dev/pkg/test/e2e_flags.go +++ b/vendor/knative.dev/pkg/test/e2e_flags.go @@ -25,21 +25,14 @@ import ( "os" "os/user" "path" - "sync" "text/template" "time" "knative.dev/pkg/test/logging" ) -const ( - // The recommended default log level https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md - klogDefaultLogLevel = "2" -) - var ( - flagsSetupOnce = &sync.Once{} - klogFlags = flag.NewFlagSet("klog", flag.ExitOnError) + klogFlags = flag.NewFlagSet("klog", flag.ExitOnError) // Flags holds the command line flags or defaults for settings in the user's environment. // See EnvironmentFlags for a list of supported fields. Flags = initializeFlags() diff --git a/vendor/knative.dev/pkg/test/logging/sugar.go b/vendor/knative.dev/pkg/test/logging/sugar.go index e03d1d4e..01fc6b29 100644 --- a/vendor/knative.dev/pkg/test/logging/sugar.go +++ b/vendor/knative.dev/pkg/test/logging/sugar.go @@ -32,9 +32,6 @@ import ( const ( _oddNumberErrMsg = "Ignored key without a value." _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys." - spewLevel1 = 2 - spewLevel2 = 4 - spewLevel3 = 6 ) var spewConfig *spew.ConfigState diff --git a/vendor/knative.dev/pkg/test/logging/tlogger.go b/vendor/knative.dev/pkg/test/logging/tlogger.go index 29f0948d..ea6f3f01 100644 --- a/vendor/knative.dev/pkg/test/logging/tlogger.go +++ b/vendor/knative.dev/pkg/test/logging/tlogger.go @@ -315,7 +315,7 @@ func newTLogger(t *testing.T, verbosity int, dontFail bool) (*TLogger, func()) { l: log, level: verbosity, t: t, - errs: make(map[string][]interface{}, 0), + errs: make(map[string][]interface{}), dontFail: dontFail, } return &tlogger, func() { diff --git a/vendor/knative.dev/pkg/test/testgrid/testgrid.go b/vendor/knative.dev/pkg/test/testgrid/testgrid.go index 263a87f6..633ff7f6 100644 --- a/vendor/knative.dev/pkg/test/testgrid/testgrid.go +++ b/vendor/knative.dev/pkg/test/testgrid/testgrid.go @@ -51,10 +51,10 @@ func CreateXMLOutput(tc []junit.TestCase, testName string) error { outputFile := path.Join(artifactsDir, filePrefix+testName+extension) log.Printf("Storing output in %s", outputFile) f, err := os.OpenFile(outputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - defer f.Close() if err != nil { return err } + defer f.Close() if _, err := f.WriteString(string(op) + "\n"); err != nil { return err } diff --git a/vendor/knative.dev/pkg/testing/resource.go b/vendor/knative.dev/pkg/testing/resource.go index a095d167..0ab4a4f9 100644 --- a/vendor/knative.dev/pkg/testing/resource.go +++ b/vendor/knative.dev/pkg/testing/resource.go @@ -48,6 +48,7 @@ type ResourceSpec struct { FieldWithValidation string `json:"fieldWithValidation,omitempty"` FieldThatsImmutable string `json:"fieldThatsImmutable,omitempty"` FieldThatsImmutableWithDefault string `json:"fieldThatsImmutableWithDefault,omitempty"` + FieldForCallbackValidation string `json:"fieldThatCallbackRejects,omitempty"` } // GetGroupVersionKind returns the GroupVersionKind. 
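// Illustrative sketch (an editorial addition, not part of the vendored
// sources): one way the clientcmd pieces added above are typically composed
// to build a *rest.Config, with the deferred loader providing the in-cluster
// fallback shown in merged_client_builder.go. The context name "my-context"
// is a hypothetical placeholder; error handling is minimal.
package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/homedir"
)

func main() {
	// The default loading rules honor $KUBECONFIG and fall back to
	// $HOME/.kube/config (see util/homedir for the Windows resolution).
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	overrides := &clientcmd.ConfigOverrides{CurrentContext: "my-context"}

	// The deferred loader re-reads the rules when ClientConfig() is called
	// and falls back to in-cluster configuration when the merged config is
	// empty or equal to the defaults.
	cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, overrides).ClientConfig()
	if err != nil {
		log.Fatalf("building rest.Config: %v", err)
	}
	fmt.Println("API server:", cfg.Host, "home dir:", homedir.HomeDir())
}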
diff --git a/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/gke.go b/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/gke.go index 31b509c2..8b80fe6d 100644 --- a/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/gke.go +++ b/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/gke.go @@ -24,113 +24,12 @@ import ( "github.com/davecgh/go-spew/spew" container "google.golang.org/api/container/v1beta1" + "google.golang.org/api/option" "knative.dev/pkg/test/gke" - "knative.dev/pkg/testutils/clustermanager/e2e-tests/boskos" "knative.dev/pkg/testutils/clustermanager/e2e-tests/common" ) -// GKEClient implements Client -type GKEClient struct { -} - -// GKERequest contains all requests collected for cluster creation -type GKERequest struct { - // Request holds settings for GKE native operations - gke.Request - - // BackupRegions: fall back regions to try out in case of cluster creation - // failure due to regional issue(s) - BackupRegions []string - - // SkipCreation: skips cluster creation - SkipCreation bool - - // ResourceType: the boskos resource type to acquire to hold the cluster in create - ResourceType string -} - -// GKECluster implements ClusterOperations -type GKECluster struct { - Request *GKERequest - // Project might be GKE specific, so put it here - Project string - // IsBoskos is true if the GCP project used is managed by boskos - IsBoskos bool - // AsyncCleanup tells whether the cluster needs to be deleted asynchronously afterwards - // It should be true on Prow but false on local. - AsyncCleanup bool - Cluster *container.Cluster - operations gke.SDKOperations - boskosOps boskos.Operation -} - -// Setup sets up a GKECluster client, takes GEKRequest as parameter and applies -// all defaults if not defined. -func (gs *GKEClient) Setup(r GKERequest) ClusterOperations { - gc := &GKECluster{} - - if r.Project != "" { // use provided project to create cluster - gc.Project = r.Project - gc.AsyncCleanup = true - } else if common.IsProw() { // if no project is provided and is on Prow, use boskos - gc.IsBoskos = true - } - - if r.MinNodes == 0 { - r.MinNodes = defaultGKEMinNodes - } - if r.MaxNodes == 0 { - r.MaxNodes = defaultGKEMaxNodes - // We don't want MaxNodes < MinNodes - if r.MinNodes > r.MaxNodes { - r.MaxNodes = r.MinNodes - } - } - if r.NodeType == "" { - r.NodeType = defaultGKENodeType - } - // Only use default backup regions if region is not provided - if len(r.BackupRegions) == 0 && r.Region == "" { - r.BackupRegions = defaultGKEBackupRegions - if common.GetOSEnv(backupRegionEnv) != "" { - r.BackupRegions = strings.Split(common.GetOSEnv(backupRegionEnv), " ") - } - } - if r.Region == "" { - r.Region = defaultGKERegion - if common.GetOSEnv(regionEnv) != "" { - r.Region = common.GetOSEnv(regionEnv) - } - } - if r.Zone == "" { - r.Zone = defaultGKEZone - } else { // No backupregions if zone is provided - r.BackupRegions = make([]string, 0) - } - - if r.ResourceType == "" { - r.ResourceType = defaultResourceType - } - - gc.Request = &r - - client, err := gke.NewSDKClient() - if err != nil { - log.Fatalf("Failed to create GKE SDK client: '%v'", err) - } - gc.operations = client - - gc.boskosOps, err = boskos.NewClient("", /* boskos owner */ - "", /* boskos user */ - "" /* boskos password file */) - if err != nil { - log.Fatalf("Failed to create boskos client: '%v", err) - } - - return gc -} - // Provider returns gke func (gc *GKECluster) Provider() string { return "gke" @@ -141,24 +40,21 @@ func (gc *GKECluster) Provider() string { // in us-central1, 
and default BackupRegions are us-west1 and us-east1. If // Region or Zone is provided then there is no retries func (gc *GKECluster) Acquire() error { - if err := gc.checkEnvironment(); err != nil { - return fmt.Errorf("failed checking project/cluster from environment: '%v'", err) - } - // If gc.Cluster is discovered above, then the cluster exists and it's - // project and name matches with requested, use it - if gc.Cluster != nil { - gc.ensureProtected() - return nil - } if gc.Request.SkipCreation { - return errors.New("cannot acquire cluster if SkipCreation is set") + if err := gc.checkEnvironment(); err != nil { + return fmt.Errorf("failed checking project/cluster from environment: '%w'", err) + } + // If gc.Cluster is discovered above, then the cluster exists and its + // project and name match the requested ones, so use it + if gc.Cluster != nil { + gc.ensureProtected() + return nil + } + return errors.New("failed acquiring an existing cluster") } - // If comes here we are very likely going to create a cluster, unless - // the cluster already exists - // If running on Prow and project name is not provided, get project name from boskos. - if gc.Project == "" && gc.IsBoskos { + if gc.Request.Project == "" && gc.isBoskos { project, err := gc.boskosOps.AcquireGKEProject(gc.Request.ResourceType) if err != nil { return fmt.Errorf("failed acquiring boskos project: '%w'", err) } @@ -171,6 +67,10 @@ func (gc *GKECluster) Acquire() error { gc.ensureProtected() log.Printf("Identified project %s for cluster creation", gc.Project) + client, err := gc.newGKEClient(gc.Project) + if err != nil { + return fmt.Errorf("failed creating the GKE client: '%w'", err) + } // Make a deep copy of the request struct, since the original request is supposed to be immutable request := gc.Request.DeepCopy() // We are going to use request for creating cluster, set its Project @@ -195,7 +95,7 @@ func (gc *GKECluster) Acquire() error { var cluster *container.Cluster rb, err := gke.NewCreateClusterRequest(request) if err != nil { - return fmt.Errorf("failed building the CreateClusterRequest: '%v'", err) + return fmt.Errorf("failed building the CreateClusterRequest: '%w'", err) } for i, region := range regions { // Restore innocence @@ -203,22 +103,22 @@ func (gc *GKECluster) Acquire() error { clusterName := request.ClusterName // Use cluster if it already exists and running - existingCluster, _ := gc.operations.GetCluster(gc.Project, region, request.Zone, clusterName) + existingCluster, _ := client.GetCluster(gc.Project, region, request.Zone, clusterName) if existingCluster != nil && existingCluster.Status == clusterRunning { gc.Cluster = existingCluster return nil } // Creating cluster log.Printf("Creating cluster %q in region %q zone %q with:\n%+v", clusterName, region, request.Zone, spew.Sdump(rb)) - err = gc.operations.CreateCluster(gc.Project, region, request.Zone, rb) + err = client.CreateCluster(gc.Project, region, request.Zone, rb) if err == nil { - cluster, err = gc.operations.GetCluster(gc.Project, region, request.Zone, rb.Cluster.Name) + cluster, err = client.GetCluster(gc.Project, region, request.Zone, rb.Cluster.Name) } if err != nil { errMsg := fmt.Sprintf("Error during cluster creation: '%v'. 
", err) if !common.IsProw() { // Delete half created cluster if it's user created errMsg = fmt.Sprintf("%sDeleting cluster %q in region %q zone %q in background...\n", errMsg, clusterName, region, request.Zone) - gc.operations.DeleteClusterAsync(gc.Project, region, request.Zone, clusterName) + client.DeleteClusterAsync(gc.Project, region, request.Zone, clusterName) } // Retry another region if cluster creation failed. if i != len(regions)-1 && needsRetryCreation(err.Error()) { @@ -253,29 +153,36 @@ func (gc *GKECluster) Delete() error { return fmt.Errorf("failed checking project/cluster from environment: '%w'", err) } gc.ensureProtected() + + // Should only get here if running locally and the cluster was created by + // this client, so at this point the cluster should have been set + if gc.Cluster == nil { + return errors.New("cluster doesn't exist") + } + + log.Printf("Deleting cluster %q in %q", gc.Cluster.Name, gc.Cluster.Location) + client, err := gc.newGKEClient(gc.Project) + if err != nil { + return fmt.Errorf("failed creating the GKE client: '%w'", err) + } + region, zone := gke.RegionZoneFromLoc(gc.Cluster.Location) + if gc.asyncCleanup { + _, err = client.DeleteClusterAsync(gc.Project, region, zone, gc.Cluster.Name) + } else { + err = client.DeleteCluster(gc.Project, region, zone, gc.Cluster.Name) + } + if err != nil { + return fmt.Errorf("failed deleting cluster: '%w'", err) + } + // Release Boskos if running in Prow - if gc.IsBoskos { + if gc.isBoskos { log.Printf("Releasing Boskos resource: '%v'", gc.Project) if err = gc.boskosOps.ReleaseGKEProject(gc.Project); err != nil { return fmt.Errorf("failed releasing boskos resource: '%w'", err) } } - // Should only get here if running locally and cluster created by this - // client, so at this moment cluster should have been set - if gc.Cluster == nil { - return fmt.Errorf("cluster doesn't exist") - } - log.Printf("Deleting cluster %q in %q", gc.Cluster.Name, gc.Cluster.Location) - region, zone := gke.RegionZoneFromLoc(gc.Cluster.Location) - if gc.AsyncCleanup { - _, err = gc.operations.DeleteClusterAsync(gc.Project, region, zone, gc.Cluster.Name) - } else { - err = gc.operations.DeleteCluster(gc.Project, region, zone, gc.Cluster.Name) - } - if err != nil { - return fmt.Errorf("failed deleting cluster: '%w'", err) - } return nil } @@ -325,7 +232,11 @@ func (gc *GKECluster) checkEnvironment() error { region, zone := gke.RegionZoneFromLoc(location) // Use the cluster only if project and clustername match if (gc.Request.Project == "" || gc.Request.Project == project) && (gc.Request.ClusterName == "" || gc.Request.ClusterName == clusterName) { - cluster, err := gc.operations.GetCluster(project, region, zone, clusterName) + client, err := gc.newGKEClient(project) + if err != nil { + return fmt.Errorf("failed creating the GKE client: '%v'", err) + } + cluster, err := client.GetCluster(project, region, zone, clusterName) if err != nil { return fmt.Errorf("couldn't find cluster %s in %s in %s, does it exist? %v", clusterName, project, location, err) } @@ -358,3 +269,15 @@ func (gc *GKECluster) checkEnvironment() error { } return nil } + +// newGKEClient returns a new GKE client; the project must be provided. +func (gc *GKECluster) newGKEClient(project string) (gke.SDKOperations, error) { + // HACK: this is merely used for unit tests. + // Return the operation directly if it's already initialized.
+ if gc.operations != nil { + return gc.operations, nil + } + + return gke.NewSDKClient( + option.WithQuotaProject(project)) +} diff --git a/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/setup.go b/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/setup.go new file mode 100644 index 00000000..7f4e7629 --- /dev/null +++ b/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/setup.go @@ -0,0 +1,132 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clustermanager + +import ( + "log" + "strings" + + container "google.golang.org/api/container/v1beta1" + + "knative.dev/pkg/test/gke" + "knative.dev/pkg/testutils/clustermanager/e2e-tests/boskos" + "knative.dev/pkg/testutils/clustermanager/e2e-tests/common" +) + +// GKEClient implements Client +type GKEClient struct { +} + +// GKERequest contains all requests collected for cluster creation +type GKERequest struct { + // Request holds settings for GKE native operations + gke.Request + + // BackupRegions: fallback regions to try in case of cluster creation + // failure due to regional issue(s) + BackupRegions []string + + // SkipCreation: skips cluster creation + SkipCreation bool + + // ResourceType: the boskos resource type to acquire to hold the cluster during creation + ResourceType string +} + +// GKECluster implements ClusterOperations +type GKECluster struct { + Request *GKERequest + // Project might be GKE specific, so put it here + Project string + Cluster *container.Cluster + + // isBoskos is true if the GCP project used is managed by boskos + isBoskos bool + // asyncCleanup tells whether the cluster needs to be deleted asynchronously afterwards + // It should be true on Prow but false for local runs. + asyncCleanup bool + operations gke.SDKOperations + boskosOps boskos.Operation +} + +// Setup sets up a GKECluster client, takes a GKERequest as parameter and applies + all defaults if not defined.
+func (gs *GKEClient) Setup(r GKERequest) ClusterOperations { + gc := &GKECluster{} + + if r.Project != "" { // use provided project to create cluster + gc.Project = r.Project + } else if common.IsProw() { // if no project is provided and is on Prow, use boskos + gc.isBoskos = true + gc.asyncCleanup = true + } + + if r.MinNodes == 0 { + r.MinNodes = defaultGKEMinNodes + } + if r.MaxNodes == 0 { + r.MaxNodes = defaultGKEMaxNodes + // We don't want MaxNodes < MinNodes + if r.MinNodes > r.MaxNodes { + r.MaxNodes = r.MinNodes + } + } + if r.NodeType == "" { + r.NodeType = defaultGKENodeType + } + // Only use default backup regions if region is not provided + if len(r.BackupRegions) == 0 && r.Region == "" { + r.BackupRegions = defaultGKEBackupRegions + if common.GetOSEnv(backupRegionEnv) != "" { + r.BackupRegions = strings.Split(common.GetOSEnv(backupRegionEnv), " ") + } + } + if r.Region == "" { + r.Region = defaultGKERegion + if common.GetOSEnv(regionEnv) != "" { + r.Region = common.GetOSEnv(regionEnv) + } + } + if r.Zone == "" { + r.Zone = defaultGKEZone + } else { // No backup regions if zone is provided + r.BackupRegions = make([]string, 0) + } + + loc := gke.GetClusterLocation(r.Region, r.Zone) + if version, err := resolveGKEVersion(r.GKEVersion, loc); err == nil { + r.GKEVersion = version + } else { + log.Fatalf("Failed to resolve GKE version: '%v'", err) + } + + if r.ResourceType == "" { + r.ResourceType = defaultResourceType + } + + gc.Request = &r + + client, err := boskos.NewClient("", /* boskos owner */ + "", /* boskos user */ + "" /* boskos password file */) + if err != nil { + log.Fatalf("Failed to create boskos client: '%v'", err) + } + gc.boskosOps = client + + return gc +} diff --git a/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/util.go b/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/util.go index d4b89569..8d9eefa8 100644 --- a/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/util.go +++ b/vendor/knative.dev/pkg/testutils/clustermanager/e2e-tests/util.go @@ -18,10 +18,18 @@ package clustermanager import ( "fmt" + "log" + "strings" + "knative.dev/pkg/test/cmd" "knative.dev/pkg/testutils/clustermanager/e2e-tests/common" ) +const ( + defaultGKEVersion = "default" + latestGKEVersion = "latest" +) + var ( ClusterResource ResourceType = "e2e-cls" ) @@ -51,3 +59,29 @@ func getResourceName(rt ResourceType) (string, error) { } return resName, nil } + +// resolveGKEVersion returns the actual GKE version based on the raw version and location +func resolveGKEVersion(raw, location string) (string, error) { + switch raw { + case defaultGKEVersion: + defaultCmd := "gcloud container get-server-config --format='value(defaultClusterVersion)' --zone=" + location + version, err := cmd.RunCommand(defaultCmd) + if err != nil && version != "" { + return "", fmt.Errorf("failed getting the default version: %w", err) + } + log.Printf("Using default version, %s", version) + return version, nil + case latestGKEVersion: + validCmd := "gcloud container get-server-config --format='value(validMasterVersions)' --zone=" + location + versionsStr, err := cmd.RunCommand(validCmd) + if err != nil && versionsStr != "" { + return "", fmt.Errorf("failed getting the list of valid versions: %w", err) + } + versions := strings.Split(versionsStr, ";") + log.Printf("Using the latest version, %s", strings.TrimSpace(versions[0])) + return strings.TrimSpace(versions[0]), nil + default: + log.Printf("Using the custom version, %s", raw) + return raw, nil + } +}
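Reviewer note: a minimal in-package test sketch for resolveGKEVersion above, stubbing cmd.RunCommand via the vars exported in test/cmd so no real gcloud call happens. Illustrative only, not part of the change; the canned version strings are hypothetical:

package clustermanager

import (
	"testing"

	"knative.dev/pkg/test/cmd"
)

func TestResolveGKEVersionSketch(t *testing.T) {
	orig := cmd.RunCommand
	defer func() { cmd.RunCommand = orig }()
	// Fake the gcloud call; "latest" picks the first entry of validMasterVersions.
	cmd.RunCommand = func(cmdLine string, options ...cmd.Option) (string, error) {
		return "1.16.8-gke.3;1.15.11-gke.1", nil
	}
	if v, _ := resolveGKEVersion("latest", "us-central1"); v != "1.16.8-gke.3" {
		t.Errorf("latest resolved to %q", v)
	}
	// Anything other than "default"/"latest" passes through untouched.
	if v, _ := resolveGKEVersion("1.14.10-gke.17", "us-central1"); v != "1.14.10-gke.17" {
		t.Errorf("custom version resolved to %q", v)
	}
}

diff --git 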
a/vendor/knative.dev/pkg/testutils/clustermanager/prow-cluster-operation/options/options.go b/vendor/knative.dev/pkg/testutils/clustermanager/prow-cluster-operation/options/options.go index 73982846..847e8a1a 100644 --- a/vendor/knative.dev/pkg/testutils/clustermanager/prow-cluster-operation/options/options.go +++ b/vendor/knative.dev/pkg/testutils/clustermanager/prow-cluster-operation/options/options.go @@ -55,6 +55,7 @@ func (rw *RequestWrapper) addOptions() { flag.StringVar(&rw.Request.Zone, "zone", "", "GCP zone") flag.StringVar(&rw.Request.Project, "project", "", "GCP project") flag.StringVar(&rw.Request.ClusterName, "name", "", "cluster name") + flag.StringVar(&rw.Request.GKEVersion, "version", "latest", "GKE version") flag.StringVar(&rw.Request.ReleaseChannel, "release-channel", "", "GKE release channel") flag.StringVar(&rw.Request.ResourceType, "resource-type", "", "Boskos Resource Type") flag.StringVar(&rw.BackupRegionsStr, "backup-regions", "", "GCP regions as backup, separated by comma") diff --git a/vendor/knative.dev/pkg/tracker/enqueue.go b/vendor/knative.dev/pkg/tracker/enqueue.go index a8f02ea6..fc036466 100644 --- a/vendor/knative.dev/pkg/tracker/enqueue.go +++ b/vendor/knative.dev/pkg/tracker/enqueue.go @@ -159,7 +159,7 @@ func (i *impl) TrackReference(ref Reference, obj interface{}) error { // doesn't create problems: // foo, err := lister.Get(key) // // Later... - // err := tracker.Track(fooRef, parent) + // err := tracker.TrackReference(fooRef, parent) // In this example, "Later" represents a window where "foo" may // have changed or been created while the Track is not active. // The simplest way of eliminating such a window is to call the @@ -192,7 +192,7 @@ func (i *impl) TrackReference(ref Reference, obj interface{}) error { // doesn't create problems: // foo, err := lister.Get(key) // // Later... - // err := tracker.Track(fooRef, parent) + // err := tracker.TrackReference(fooRef, parent) // In this example, "Later" represents a window where "foo" may // have changed or been created while the Track is not active. 
// The simplest way of eliminating such a window is to call the diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/conversion.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/conversion.go index 0d3f0ad2..e0d495b9 100644 --- a/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/conversion.go +++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/conversion.go @@ -109,11 +109,11 @@ func (r *reconciler) convert( out := outZygote.DeepCopyObject().(ConvertibleObject) hubGVK := inGVK.GroupKind().WithVersion(conv.HubVersion) - logger = logger.With( + ctx = logging.WithLogger(ctx, logger.With( zap.String("inputType", formatGVK(inGVK)), zap.String("outputType", formatGVK(outGVK)), zap.String("hubType", formatGVK(hubGVK)), - ) + )) // TODO(dprotaso) - potentially error on unknown fields if err = json.Unmarshal(inRaw.Raw, &in); err != nil { diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/controller.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/controller.go index c8e63f40..6e104ac4 100644 --- a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/controller.go +++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/controller.go @@ -40,6 +40,7 @@ func NewAdmissionController( handlers map[schema.GroupVersionKind]resourcesemantics.GenericCRD, wc func(context.Context) context.Context, disallowUnknownFields bool, + callbacks ...map[schema.GroupVersionKind]Callback, ) *controller.Impl { client := kubeclient.Get(ctx) @@ -47,10 +48,24 @@ secretInformer := secretinformer.Get(ctx) options := webhook.GetOptions(ctx) + // This is not ideal: we are using a variadic argument to effectively make callbacks optional + // This keeps the addition non-breaking for consumers of /pkg + // TODO: once all sub-repos have adopted this, we might move this back to a traditional param. + var unwrappedCallbacks map[schema.GroupVersionKind]Callback + switch len(callbacks) { + case 0: + unwrappedCallbacks = map[schema.GroupVersionKind]Callback{} + case 1: + unwrappedCallbacks = callbacks[0] + default: + panic("NewAdmissionController may not be called with multiple callback maps") + } + wh := &reconciler{ - name: name, - path: path, - handlers: handlers, + name: name, + path: path, + handlers: handlers, + callbacks: unwrappedCallbacks, withContext: wc, disallowUnknownFields: disallowUnknownFields, diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/validation.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go similarity index 58% rename from vendor/knative.dev/pkg/webhook/resourcesemantics/validation/validation.go rename to vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go index 80f7622a..30581fed 100644 --- a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/validation.go +++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Knative Authors +Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,23 +17,18 @@ limitations under the License.
package validation import ( - "bytes" "context" - "encoding/json" - "errors" "fmt" "sort" "strings" "github.com/markbates/inflect" "go.uber.org/zap" - admissionv1beta1 "k8s.io/api/admission/v1beta1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/kubernetes" admissionlisters "k8s.io/client-go/listers/admissionregistration/v1beta1" corelisters "k8s.io/client-go/listers/core/v1" - "knative.dev/pkg/apis" "knative.dev/pkg/controller" "knative.dev/pkg/kmp" "knative.dev/pkg/logging" @@ -44,15 +39,14 @@ import ( "knative.dev/pkg/webhook/resourcesemantics" ) -var errMissingNewObject = errors.New("the new object may not be nil") - // reconciler implements the AdmissionController for resources type reconciler struct { webhook.StatelessAdmissionImpl - name string - path string - handlers map[schema.GroupVersionKind]resourcesemantics.GenericCRD + name string + path string + handlers map[schema.GroupVersionKind]resourcesemantics.GenericCRD + callbacks map[schema.GroupVersionKind]Callback withContext func(context.Context) context.Context @@ -68,6 +62,11 @@ var _ controller.Reconciler = (*reconciler)(nil) var _ webhook.AdmissionController = (*reconciler)(nil) var _ webhook.StatelessAdmissionController = (*reconciler)(nil) +// Path implements AdmissionController +func (ac *reconciler) Path() string { + return ac.path +} + // Reconcile implements controller.Reconciler func (ac *reconciler) Reconcile(ctx context.Context, key string) error { logger := logging.FromContext(ctx) @@ -87,32 +86,6 @@ func (ac *reconciler) Reconcile(ctx context.Context, key string) error { return ac.reconcileValidatingWebhook(ctx, caCert) } -// Path implements AdmissionController -func (ac *reconciler) Path() string { - return ac.path -} - -// Admit implements AdmissionController -func (ac *reconciler) Admit(ctx context.Context, request *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { - if ac.withContext != nil { - ctx = ac.withContext(ctx) - } - - logger := logging.FromContext(ctx) - switch request.Operation { - case admissionv1beta1.Create, admissionv1beta1.Update: - default: - logger.Infof("Unhandled webhook operation, letting it through %v", request.Operation) - return &admissionv1beta1.AdmissionResponse{Allowed: true} - } - - if err := ac.validate(ctx, request); err != nil { - return webhook.MakeErrorStatus("validation failed: %v", err) - } - - return &admissionv1beta1.AdmissionResponse{Allowed: true} -} - func (ac *reconciler) reconcileValidatingWebhook(ctx context.Context, caCert []byte) error { logger := logging.FromContext(ctx) @@ -181,82 +154,3 @@ func (ac *reconciler) reconcileValidatingWebhook(ctx context.Context, caCert []b } return nil } - -func (ac *reconciler) validate(ctx context.Context, req *admissionv1beta1.AdmissionRequest) error { - kind := req.Kind - newBytes := req.Object.Raw - oldBytes := req.OldObject.Raw - // Why, oh why are these different types... - gvk := schema.GroupVersionKind{ - Group: kind.Group, - Version: kind.Version, - Kind: kind.Kind, - } - - logger := logging.FromContext(ctx) - handler, ok := ac.handlers[gvk] - if !ok { - logger.Errorf("Unhandled kind: %v", gvk) - return fmt.Errorf("unhandled kind: %v", gvk) - } - - // nil values denote absence of `old` (create) or `new` (delete) objects. 
- var oldObj, newObj resourcesemantics.GenericCRD - - if len(newBytes) != 0 { - newObj = handler.DeepCopyObject().(resourcesemantics.GenericCRD) - newDecoder := json.NewDecoder(bytes.NewBuffer(newBytes)) - if ac.disallowUnknownFields { - newDecoder.DisallowUnknownFields() - } - if err := newDecoder.Decode(&newObj); err != nil { - return fmt.Errorf("cannot decode incoming new object: %v", err) - } - } - if len(oldBytes) != 0 { - oldObj = handler.DeepCopyObject().(resourcesemantics.GenericCRD) - oldDecoder := json.NewDecoder(bytes.NewBuffer(oldBytes)) - if ac.disallowUnknownFields { - oldDecoder.DisallowUnknownFields() - } - if err := oldDecoder.Decode(&oldObj); err != nil { - return fmt.Errorf("cannot decode incoming old object: %v", err) - } - } - - // Set up the context for defaulting and validation - if oldObj != nil { - if req.SubResource == "" { - ctx = apis.WithinUpdate(ctx, oldObj) - } else { - ctx = apis.WithinSubResourceUpdate(ctx, oldObj, req.SubResource) - } - } else { - ctx = apis.WithinCreate(ctx) - } - ctx = apis.WithUserInfo(ctx, &req.UserInfo) - - // None of the validators will accept a nil value for newObj. - if newObj == nil { - return errMissingNewObject - } - - if err := validate(ctx, newObj); err != nil { - logger.Errorw("Failed the resource specific validation", zap.Error(err)) - // Return the error message as-is to give the validation callback - // discretion over (our portion of) the message that the user sees. - return err - } - - return nil -} - -// validate performs validation on the provided "new" CRD. -func validate(ctx context.Context, new apis.Validatable) error { - // Can't just `return new.Validate()` because it doesn't properly nil-check. - if err := new.Validate(ctx); err != nil { - return err - } - - return nil -} diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/validation_admit.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/validation_admit.go new file mode 100644 index 00000000..20e69fa3 --- /dev/null +++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/validation_admit.go @@ -0,0 +1,209 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + + "go.uber.org/zap" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + kubeclient "knative.dev/pkg/client/injection/kube/client" + "knative.dev/pkg/logging" + "knative.dev/pkg/webhook" + "knative.dev/pkg/webhook/resourcesemantics" +) + +var errMissingNewObject = errors.New("the new object may not be nil") + +// Callback is a generic function to be called by a consumer of validation +type Callback struct { + // function is the callback to be invoked + function func(ctx context.Context, unstructured *unstructured.Unstructured) error + + // supportedVerbs are the verbs supported for the callback. 
+ // The function will only be called on these actions. + supportedVerbs map[webhook.Operation]struct{} +} + +// NewCallback creates a new callback function to be invoked on supported verbs. +func NewCallback(function func(context.Context, *unstructured.Unstructured) error, supportedVerbs ...webhook.Operation) Callback { + m := make(map[webhook.Operation]struct{}) + for _, op := range supportedVerbs { + if _, has := m[op]; has { + panic("duplicate verbs not allowed") + } + m[op] = struct{}{} + } + return Callback{function: function, supportedVerbs: m} +} + +var _ webhook.AdmissionController = (*reconciler)(nil) + +// Admit implements AdmissionController +func (ac *reconciler) Admit(ctx context.Context, request *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { + if ac.withContext != nil { + ctx = ac.withContext(ctx) + } + + kind := request.Kind + gvk := schema.GroupVersionKind{ + Group: kind.Group, + Version: kind.Version, + Kind: kind.Kind, + } + + ctx, resource, err := ac.decodeRequestAndPrepareContext(ctx, request, gvk) + if err != nil { + return webhook.MakeErrorStatus("decoding request failed: %v", err) + } + + if err := validate(ctx, resource, request); err != nil { + return webhook.MakeErrorStatus("validation failed: %v", err) + } + + if err := ac.callback(ctx, request, gvk); err != nil { + return webhook.MakeErrorStatus("validation callback failed: %v", err) + } + + return &admissionv1beta1.AdmissionResponse{Allowed: true} +} + +// decodeRequestAndPrepareContext deserializes the old and new GenericCRDs from the incoming request and sets up the context. +// A nil oldObj or newObj denotes the absence of the `old` (create) or `new` (delete) object. +func (ac *reconciler) decodeRequestAndPrepareContext( + ctx context.Context, + req *admissionv1beta1.AdmissionRequest, + gvk schema.GroupVersionKind) (context.Context, resourcesemantics.GenericCRD, error) { + + logger := logging.FromContext(ctx) + handler, ok := ac.handlers[gvk] + if !ok { + logger.Errorf("Unhandled kind: %v", gvk) + return ctx, nil, fmt.Errorf("unhandled kind: %v", gvk) + } + + newBytes := req.Object.Raw + oldBytes := req.OldObject.Raw + + // Decode json to a GenericCRD + var newObj resourcesemantics.GenericCRD + if len(newBytes) != 0 { + newObj = handler.DeepCopyObject().(resourcesemantics.GenericCRD) + newDecoder := json.NewDecoder(bytes.NewBuffer(newBytes)) + if ac.disallowUnknownFields { + newDecoder.DisallowUnknownFields() + } + if err := newDecoder.Decode(&newObj); err != nil { + return ctx, nil, fmt.Errorf("cannot decode incoming new object: %w", err) + } + } + + var oldObj resourcesemantics.GenericCRD + if len(oldBytes) != 0 { + oldObj = handler.DeepCopyObject().(resourcesemantics.GenericCRD) + oldDecoder := json.NewDecoder(bytes.NewBuffer(oldBytes)) + if ac.disallowUnknownFields { + oldDecoder.DisallowUnknownFields() + } + if err := oldDecoder.Decode(&oldObj); err != nil { + return ctx, nil, fmt.Errorf("cannot decode incoming old object: %w", err) + } + } + + // Set up the context for validation + if oldObj != nil { + if req.SubResource == "" { + ctx = apis.WithinUpdate(ctx, oldObj) + } else { + ctx = apis.WithinSubResourceUpdate(ctx, oldObj, req.SubResource) + } + } else { + ctx = apis.WithinCreate(ctx) + } + ctx = apis.WithUserInfo(ctx, &req.UserInfo) + ctx = context.WithValue(ctx, kubeclient.Key{}, ac.client) + + if req.DryRun != nil && *req.DryRun { + ctx = apis.WithDryRun(ctx) + } + + return ctx, newObj, nil +} + +func validate(ctx context.Context, resource resourcesemantics.GenericCRD, req 
*admissionv1beta1.AdmissionRequest) error { + logger := logging.FromContext(ctx) + + // Only run validation for supported create and update operations. + switch req.Operation { + case admissionv1beta1.Create, admissionv1beta1.Update: + // Supported verbs + default: + logger.Infof("Unhandled webhook validation operation, letting it through %v", req.Operation) + return nil + } + + // None of the validators will accept a nil value for newObj. + if resource == nil { + return errMissingNewObject + } + + if err := resource.Validate(ctx); err != nil { + logger.Errorw("Failed the resource specific validation", zap.Error(err)) + // Return the error message as-is to give the validation callback + // discretion over (our portion of) the message that the user sees. + return err + } + + return nil +} + +// callback runs optional callbacks on admission +func (ac *reconciler) callback(ctx context.Context, req *admissionv1beta1.AdmissionRequest, gvk schema.GroupVersionKind) error { + var toDecode []byte + if req.Operation == admissionv1beta1.Delete { + toDecode = req.OldObject.Raw + } else { + toDecode = req.Object.Raw + } + if toDecode == nil { + logger := logging.FromContext(ctx) + logger.Errorf("No incoming object found: %v for verb %v", gvk, req.Operation) + return nil + } + + // Generically invoke the callback if one is provided for the resource. + if c, ok := ac.callbacks[gvk]; ok { + if _, supported := c.supportedVerbs[req.Operation]; supported { + unstruct := &unstructured.Unstructured{} + newDecoder := json.NewDecoder(bytes.NewBuffer(toDecode)) + if err := newDecoder.Decode(&unstruct); err != nil { + return fmt.Errorf("cannot decode incoming new object: %w", err) + } + + return c.function(ctx, unstruct) + } + } + + return nil +} diff --git a/vendor/knative.dev/pkg/webhook/testing/testing.go b/vendor/knative.dev/pkg/webhook/testing/testing.go index 202443e0..6c072fde 100644 --- a/vendor/knative.dev/pkg/webhook/testing/testing.go +++ b/vendor/knative.dev/pkg/webhook/testing/testing.go @@ -37,6 +37,10 @@ import ( // CreateResource creates a testing.Resource with the given name in the system namespace. func CreateResource(name string) *pkgtest.Resource { return &pkgtest.Resource{ + TypeMeta: metav1.TypeMeta{ + Kind: "Resource", + APIVersion: "v1", + }, ObjectMeta: metav1.ObjectMeta{ Namespace: system.Namespace(), Name: name, diff --git a/vendor/knative.dev/pkg/webhook/webhook.go b/vendor/knative.dev/pkg/webhook/webhook.go index 99cb48d0..a15d9303 100644 --- a/vendor/knative.dev/pkg/webhook/webhook.go +++ b/vendor/knative.dev/pkg/webhook/webhook.go @@ -29,6 +29,7 @@ import ( "go.uber.org/zap" "golang.org/x/sync/errgroup" + admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" "knative.dev/pkg/logging" @@ -58,6 +59,18 @@ type Options struct { StatsReporter StatsReporter } +// Operation is the verb being operated on +// it is aliased in Validation from the k8s admission package +type Operation = admissionv1beta1.Operation + +// Operation types +const ( + Create Operation = admissionv1beta1.Create + Update Operation = admissionv1beta1.Update + Delete Operation = admissionv1beta1.Delete + Connect Operation = admissionv1beta1.Connect +) +
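Reviewer note: a sketch of how a consumer of /pkg might wire the new optional callback map into validation.NewAdmissionController, using the Operation aliases above. Illustrative only, not part of the change; the GVK, label, webhook name and path are hypothetical placeholders:

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"knative.dev/pkg/controller"
	"knative.dev/pkg/webhook"
	"knative.dev/pkg/webhook/resourcesemantics"
	"knative.dev/pkg/webhook/resourcesemantics/validation"
)

func newValidationController(ctx context.Context, handlers map[schema.GroupVersionKind]resourcesemantics.GenericCRD) *controller.Impl {
	gvk := schema.GroupVersionKind{Group: "example.dev", Version: "v1", Kind: "Resource"}
	callbacks := map[schema.GroupVersionKind]validation.Callback{
		// The callback fires only for the verbs listed; duplicate verbs panic in NewCallback.
		gvk: validation.NewCallback(func(ctx context.Context, u *unstructured.Unstructured) error {
			if u.GetLabels()["example.dev/protected"] == "true" {
				return fmt.Errorf("%q is protected and may not be changed", u.GetName())
			}
			return nil
		}, webhook.Create, webhook.Update, webhook.Delete),
	}
	return validation.NewAdmissionController(ctx,
		"validation.webhook.example.dev", // hypothetical webhook name
		"/resource-validation",           // hypothetical path served by the webhook
		handlers,                         // GVK -> GenericCRD zygotes
		func(ctx context.Context) context.Context { return ctx }, // context decorator
		true,      // disallowUnknownFields
		callbacks, // at most one map may be passed; more than one panics
	)
}

// Webhook implements the external webhook for validation of // resources and configuration.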
type Webhook struct { diff --git a/vendor/knative.dev/test-infra/tools/dep-collector/gobuild.go b/vendor/knative.dev/test-infra/tools/dep-collector/gobuild.go new file mode 100644 index 00000000..c85b62bb --- /dev/null +++ b/vendor/knative.dev/test-infra/tools/dep-collector/gobuild.go @@ -0,0 +1,87 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "encoding/json" + "fmt" + gb "go/build" + "path/filepath" + "strings" + + "knative.dev/pkg/test/cmd" +) + +// https://golang.org/pkg/cmd/go/internal/modinfo/#ModulePublic +type modInfo struct { + Path string + Dir string +} + +type gobuild struct { + mod *modInfo +} + +// moduleInfo returns the module path and module root directory for a project +// using go modules, otherwise returns nil. +// If there is something wrong in getting the module info, it will return an error. +// +// Related: https://github.com/golang/go/issues/26504 +func moduleInfo() (*modInfo, error) { + // If `go list -m` returns an error, the project is not using Go modules. + _, err := cmd.RunCommands("go list -m") + if err != nil { + return nil, nil + } + + output, err := cmd.RunCommand("go list -mod=readonly -m -json") + if err != nil { + return nil, fmt.Errorf("failed getting module info: %v", err) + } + var info modInfo + if err := json.Unmarshal([]byte(output), &info); err != nil { + return nil, fmt.Errorf("failed parsing module info %q: %v", output, err) + } + return &info, nil +} + +// importPackage wraps go/build.Import to handle go modules. +// +// Note that we will fall back to GOPATH if the project isn't using go modules. 
+func (g *gobuild) importPackage(s string) (*gb.Package, error) { + if g.mod == nil { + return gb.Import(s, WorkingDir, gb.ImportComment) + } + + // If we're inside a go modules project, try to use the module's directory + // as our source root to import: + // * paths that match module path prefix (they should be in this project) + // * relative paths (they should also be in this project) + gp, err := gb.Import(s, g.mod.Dir, gb.ImportComment) + return gp, err +} + +func (g *gobuild) qualifyLocalImport(ip string) (string, error) { + if g.mod == nil { + gopathsrc := filepath.Join(gb.Default.GOPATH, "src") + if !strings.HasPrefix(WorkingDir, gopathsrc) { + return "", fmt.Errorf("working directory must be on ${GOPATH}/src = %s", gopathsrc) + } + return filepath.Join(strings.TrimPrefix(WorkingDir, gopathsrc+string(filepath.Separator)), ip), nil + } + return filepath.Join(g.mod.Path, ip), nil +} diff --git a/vendor/knative.dev/test-infra/tools/dep-collector/imports.go b/vendor/knative.dev/test-infra/tools/dep-collector/imports.go index 4f0ff76a..d1e23536 100644 --- a/vendor/knative.dev/test-infra/tools/dep-collector/imports.go +++ b/vendor/knative.dev/test-infra/tools/dep-collector/imports.go @@ -19,66 +19,72 @@ package main import ( "fmt" gb "go/build" - "path/filepath" "sort" "strings" ) -func CollectTransitiveImports(binaries []string) ([]string, error) { +type ImportInfo struct { + ImportPath string + Dir string +} + +func CollectTransitiveImports(binaries []string) ([]ImportInfo, error) { // Perform a simple DFS to collect the binaries' transitive dependencies. - visited := make(map[string]struct{}) + visited := make(map[string]ImportInfo) + mi, err := moduleInfo() + if err != nil { + return nil, fmt.Errorf("failed getting Go module info: %v", err) + } + g := &gobuild{mi} for _, importpath := range binaries { if gb.IsLocalImport(importpath) { - ip, err := qualifyLocalImport(importpath) + ip, err := g.qualifyLocalImport(importpath) if err != nil { return nil, err } importpath = ip } - pkg, err := gb.Import(importpath, WorkingDir, gb.ImportComment) + pkg, err := g.importPackage(importpath) if err != nil { return nil, err } - if err := visit(pkg, visited); err != nil { + if err := visit(g, pkg, visited); err != nil { return nil, err } } // Sort the dependencies deterministically. 
var list sort.StringSlice - for ip := range visited { - if !strings.Contains(ip, "/vendor/") { + for dir := range visited { + if !strings.Contains(dir, "/vendor/") { // Skip files outside of vendor continue } - list = append(list, ip) + list = append(list, dir) } list.Sort() - return list, nil -} - -func qualifyLocalImport(ip string) (string, error) { - gopathsrc := filepath.Join(gb.Default.GOPATH, "src") - if !strings.HasPrefix(WorkingDir, gopathsrc) { - return "", fmt.Errorf("working directory must be on ${GOPATH}/src = %s", gopathsrc) + iiList := make([]ImportInfo, len(list)) + for i := range iiList { + iiList[i] = visited[list[i]] } - return filepath.Join(strings.TrimPrefix(WorkingDir, gopathsrc+string(filepath.Separator)), ip), nil + + return iiList, nil } -func visit(pkg *gb.Package, visited map[string]struct{}) error { - if _, ok := visited[pkg.ImportPath]; ok { +func visit(g *gobuild, pkg *gb.Package, visited map[string]ImportInfo) error { + if _, ok := visited[pkg.Dir]; ok { return nil } - visited[pkg.ImportPath] = struct{}{} + visited[pkg.Dir] = ImportInfo{Dir: pkg.Dir, ImportPath: pkg.ImportPath} for _, ip := range pkg.Imports { if ip == "C" { // skip cgo continue } - subpkg, err := gb.Import(ip, WorkingDir, gb.ImportComment) + subpkg, err := g.importPackage(ip) if err != nil { return fmt.Errorf("%v\n -> %v", pkg.ImportPath, err) } @@ -86,7 +92,7 @@ func visit(pkg *gb.Package, visited map[string]struct{}) error { // Skip import paths outside of our workspace (std library) continue } - if err := visit(subpkg, visited); err != nil { + if err := visit(g, subpkg, visited); err != nil { return fmt.Errorf("%v (%v)\n -> %v", pkg.ImportPath, pkg.Dir, err) } } diff --git a/vendor/knative.dev/test-infra/tools/dep-collector/licenses.go b/vendor/knative.dev/test-infra/tools/dep-collector/licenses.go index cb1df9ab..fda5e04d 100644 --- a/vendor/knative.dev/test-infra/tools/dep-collector/licenses.go +++ b/vendor/knative.dev/test-infra/tools/dep-collector/licenses.go @@ -18,7 +18,6 @@ package main import ( "fmt" - gb "go/build" "io/ioutil" "os" "path/filepath" @@ -72,7 +71,7 @@ func (lt *LicenseFile) Check(classifier *licenseclassifier.License) error { } ms := classifier.MultipleMatch(body, false) for _, m := range ms { - return fmt.Errorf("Found matching forbidden license in %q: %v", lt.EnclosingImportPath, m.Name) + return fmt.Errorf("found matching forbidden license in %q: %v", lt.EnclosingImportPath, m.Name) } return nil } @@ -108,17 +107,13 @@ func (lt *LicenseFile) CSVRow(classifier *licenseclassifier.License) (string, er }, ","), nil } -func findLicense(ip string) (*LicenseFile, error) { - pkg, err := gb.Import(ip, WorkingDir, gb.ImportComment) - if err != nil { - return nil, err - } - dir := pkg.Dir - +func findLicense(ii ImportInfo) (*LicenseFile, error) { + dir := ii.Dir + ip := ii.ImportPath for { // When we reach the root of our workspace, stop searching. if dir == WorkingDir { - return nil, fmt.Errorf("unable to find license for %q", pkg.ImportPath) + return nil, fmt.Errorf("unable to find license for %q", ip) } for _, name := range LicenseNames { @@ -178,11 +173,12 @@ func (lc LicenseCollection) Check(classifier *licenseclassifier.License) error { return fmt.Errorf("Errors validating licenses:\n%v", strings.Join(errors, "\n")) } -func CollectLicenses(imports []string) (LicenseCollection, error) { +// CollectLicenses collects a list of licenses for the given imports. 
+func CollectLicenses(importInfos []ImportInfo) (LicenseCollection, error) { // for each of the import paths, search for a license file. licenseFiles := make(map[string]*LicenseFile) - for _, ip := range imports { - lf, err := findLicense(ip) + for _, info := range importInfos { + lf, err := findLicense(info) if err != nil { return nil, err }