mirror of https://github.com/knative/caching.git
Auto-update dependencies (#59)
Produced via: `dep ensure -update github.com/knative/test-infra knative.dev/pkg`

/assign @mattmoor
This commit is contained in:
parent 2032732871
commit ce106ecb61
@@ -262,14 +262,14 @@

 [[projects]]
   branch = "master"
-  digest = "1:f91a467423c58e30754cb830f66a04fa3e6f28fe567e3e68e04aa61f3e51f377"
+  digest = "1:c5fcddf864b41dd694b4348b06406904ba8b2e403a3a1bb1a565104beb2b4092"
   name = "github.com/knative/test-infra"
   packages = [
     "scripts",
     "tools/dep-collector",
   ]
   pruneopts = "UT"
-  revision = "cad8ba5b64ca151d6435a914fd29f0b8a5cdec74"
+  revision = "88caf452c91594b30b06b2b62c1ece171d77415c"

 [[projects]]
   digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"

@@ -938,7 +938,7 @@

 [[projects]]
   branch = "master"
-  digest = "1:52d224343dc361c41a93a8c01685d2f491b52366830bc6a783cd3f7315b01716"
+  digest = "1:ebc565a6cd5ee8b686a82263489b4038979293df11dfe8fbbd1f7a3b54564232"
   name = "knative.dev/pkg"
   packages = [
     "apis",

@@ -957,7 +957,7 @@
     "metrics/metricskey",
   ]
   pruneopts = "T"
-  revision = "8fe96d53cc1856e044988703a4e46e7b431cfcd3"
+  revision = "154a0848d46b569c96b9eb18d5535bbc43bb108c"

 [solve-meta]
   analyzer-name = "dep"

@@ -83,7 +83,7 @@ function go_test_e2e() {
   local go_options=""
   (( EMIT_METRICS )) && test_options="-emitmetrics"
   [[ ! " $@" == *" -tags="* ]] && go_options="-tags=e2e"
-  report_go_test -v -count=1 ${go_options} $@ ${test_options}
+  report_go_test -v -race -count=1 ${go_options} $@ ${test_options}
 }

 # Dump info about the test cluster. If dump_extra_cluster_info() is defined, calls it too.

@@ -214,8 +214,11 @@ function create_test_cluster() {
   [[ -n "${GCP_PROJECT}" ]] && test_cmd_args+=" --gcp-project ${GCP_PROJECT}"
   [[ -n "${E2E_SCRIPT_CUSTOM_FLAGS[@]}" ]] && test_cmd_args+=" ${E2E_SCRIPT_CUSTOM_FLAGS[@]}"
   local extra_flags=()
-  # If using boskos, save time and let it tear down the cluster
-  (( ! IS_BOSKOS )) && extra_flags+=(--down)
+  if (( IS_BOSKOS )); then # Add arbitrary duration, wait for Boskos projects acquisition before error out
+    extra_flags+=(--boskos-wait-duration=20m)
+  else # Only let kubetest tear down the cluster if not using Boskos, it's done by Janitor if using Boskos
+    extra_flags+=(--down)
+  fi

   # Set a minimal kubernetes environment that satisfies kubetest
   # TODO(adrcunha): Remove once https://github.com/kubernetes/test-infra/issues/13029 is fixed.

@@ -257,7 +257,7 @@ function dump_app_logs() {
   for pod in $(get_app_pods "$1" "$2")
   do
     echo ">>> Pod: $pod"
-    kubectl -n "$2" logs "$pod" -c "$1"
+    kubectl -n "$2" logs "$pod" --all-containers
   done
 }

@@ -340,7 +340,7 @@ function report_go_test() {
   # Run tests in verbose mode to capture details.
   # go doesn't like repeating -v, so remove if passed.
   local args=" $@ "
-  local go_test="go test -race -v ${args/ -v / }"
+  local go_test="go test -v ${args/ -v / }"
   # Just run regular go tests if not on Prow.
   echo "Running tests with '${go_test}'"
   local report="$(mktemp)"

@@ -224,7 +224,7 @@ function run_unit_tests() {

 # Default unit test runner that runs all go tests in the repo.
 function default_unit_test_runner() {
-  report_go_test ./...
+  report_go_test -race ./...
 }

 # Run integration tests. If there's no `integration_tests` function, run the

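Note on the -race changes above: the race detector now runs via the callers (report_go_test -race ./... for unit tests, -v -race for e2e) instead of being hard-coded inside report_go_test. As a rough illustration of what the detector buys, here is a minimal hypothetical test (not from this repo) that usually passes under plain `go test` but is reliably flagged by `go test -race`:

package example

import (
	"sync"
	"testing"
)

// Two goroutines increment a shared counter without synchronization;
// the race detector reports the unsynchronized read-modify-write.
func TestRacyCounter(t *testing.T) {
	counter := 0
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			counter++ // data race: concurrent write to counter
		}()
	}
	wg.Wait()
	t.Log(counter)
}
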
@@ -313,14 +313,14 @@

 [[projects]]
   branch = "master"
-  digest = "1:5197df8038ae4c7d05c2edda32a77d7f16faa635df3c560f01fb73fa42682f69"
+  digest = "1:f91a467423c58e30754cb830f66a04fa3e6f28fe567e3e68e04aa61f3e51f377"
   name = "github.com/knative/test-infra"
   packages = [
     "scripts",
     "tools/dep-collector",
   ]
   pruneopts = "UT"
-  revision = "81861c7c2060af68e3dbdc72bcd3a2f0584566d2"
+  revision = "cad8ba5b64ca151d6435a914fd29f0b8a5cdec74"

 [[projects]]
   digest = "1:56dbf15e091bf7926cb33a57cb6bdfc658fc6d3498d2f76f10a97ce7856f1fde"

@@ -60,6 +60,9 @@ type ConditionManager interface {
 	// If there is an update, Conditions are stored back sorted.
 	SetCondition(new Condition)

+	// ClearCondition removes the non terminal condition that matches the ConditionType
+	ClearCondition(t ConditionType) error
+
 	// MarkTrue sets the status of t to true, and then marks the happy condition to
 	// true if all dependents are true.
 	MarkTrue(t ConditionType)

@@ -192,12 +195,7 @@ func (r conditionsImpl) isTerminal(t ConditionType) bool {
 			return true
 		}
 	}
-
-	if t == r.happy {
-		return true
-	}
-
-	return false
+	return t == r.happy
 }

 func (r conditionsImpl) severity(t ConditionType) ConditionSeverity {

@@ -207,6 +205,35 @@ func (r conditionsImpl) severity(t ConditionType) ConditionSeverity {
 	return ConditionSeverityInfo
 }

+// RemoveCondition removes the non terminal condition that matches the ConditionType
+// Not implemented for terminal conditions
+func (r conditionsImpl) ClearCondition(t ConditionType) error {
+	var conditions Conditions
+
+	if r.accessor == nil {
+		return nil
+	}
+	// Terminal conditions are not handled as they can't be nil
+	if r.isTerminal(t) {
+		return fmt.Errorf("Clearing terminal conditions not implemented")
+	}
+	cond := r.GetCondition(t)
+	if cond == nil {
+		return nil
+	}
+	for _, c := range r.accessor.GetConditions() {
+		if c.Type != t {
+			conditions = append(conditions, c)
+		}
+	}
+
+	// Sorted for convenience of the consumer, i.e. kubectl.
+	sort.Slice(conditions, func(i, j int) bool { return conditions[i].Type < conditions[j].Type })
+	r.accessor.SetConditions(conditions)
+
+	return nil
+}
+
 // MarkTrue sets the status of t to true, and then marks the happy condition to
 // true if all other dependents are also true.
 func (r conditionsImpl) MarkTrue(t ConditionType) {

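The ClearCondition added above rebuilds the condition slice without the cleared type and stores it back sorted, refusing terminal conditions. A self-contained sketch of that filter-and-sort pattern, using simplified stand-in types rather than the real knative.dev/pkg ones:

package main

import (
	"fmt"
	"sort"
)

// Condition is a simplified stand-in for the duck-typed condition
// used by knative.dev/pkg; only the Type field matters here.
type Condition struct {
	Type string
}

// clearCondition returns conds without any entry of type t, sorted
// by Type for stable output (convenient for consumers like kubectl).
func clearCondition(conds []Condition, t string) []Condition {
	var out []Condition
	for _, c := range conds {
		if c.Type != t {
			out = append(out, c)
		}
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Type < out[j].Type })
	return out
}

func main() {
	conds := []Condition{{Type: "Ready"}, {Type: "Seeded"}, {Type: "Active"}}
	fmt.Println(clearCondition(conds, "Seeded")) // [{Active} {Ready}]
}
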
@@ -81,8 +81,8 @@ func CheckDeprecatedUpdate(ctx context.Context, obj interface{}, original interf
 }

 func getPrefixedNamedFieldValues(prefix string, obj interface{}) (map[string]reflect.Value, map[string]interface{}) {
-	fields := make(map[string]reflect.Value, 0)
-	inlined := make(map[string]interface{}, 0)
+	fields := map[string]reflect.Value{}
+	inlined := map[string]interface{}{}

 	objValue := reflect.Indirect(reflect.ValueOf(obj))

@@ -60,7 +60,7 @@ func (dif *TypedInformerFactory) Get(gvr schema.GroupVersionResource) (cache.Sha
 	go inf.Run(dif.StopChannel)

 	if ok := cache.WaitForCacheSync(dif.StopChannel, inf.HasSynced); !ok {
-		return nil, nil, fmt.Errorf("Failed starting shared index informer for %v with type %T", gvr, dif.Type)
+		return nil, nil, fmt.Errorf("failed starting shared index informer for %v with type %T", gvr, dif.Type)
 	}

 	return inf, lister, nil

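This hunk and most of the ones below apply the Go convention that error strings start lowercase and carry no trailing punctuation (the check staticcheck ships as ST1005), because errors usually appear mid-sentence once wrapped. A small illustration of why:

package main

import (
	"errors"
	"fmt"
)

func main() {
	// A capitalized, period-terminated error string reads badly once wrapped:
	bad := errors.New("Failed to sync cache.")
	fmt.Println(fmt.Errorf("starting informers: %v", bad))
	// starting informers: Failed to sync cache.

	// Lowercase with no trailing period composes cleanly:
	good := errors.New("failed to sync cache")
	fmt.Println(fmt.Errorf("starting informers: %v", good))
	// starting informers: failed to sync cache
}
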
@@ -218,11 +218,7 @@ func (r conditionsImpl) isTerminal(t ConditionType) bool {
 		}
 	}
-
-	if t == r.happy {
-		return true
-	}
-
-	return false
+	return t == r.happy
 }

 func (r conditionsImpl) severity(t ConditionType) ConditionSeverity {

@@ -116,13 +116,6 @@ func anyError(errs ...error) error {
 	return nil
 }

-func require(name string, value string) error {
-	if len(value) == 0 {
-		return fmt.Errorf("missing required field %q", name)
-	}
-	return nil
-}
-
 // The Cloud-Events spec allows two forms of JSON encoding:
 // 1. The overall message (Structured JSON encoding)
 // 2. Just the event data, where the context will be in HTTP headers instead

@@ -160,7 +153,7 @@ func unmarshalEventData(encoding string, reader io.Reader, data interface{}) err
 		return xml.NewDecoder(reader).Decode(&data)
 	}

-	return fmt.Errorf("Cannot decode content type %q", encoding)
+	return fmt.Errorf("cannot decode content type %q", encoding)
 }

 func marshalEventData(encoding string, data interface{}) ([]byte, error) {

@@ -172,7 +165,7 @@ func marshalEventData(encoding string, data interface{}) ([]byte, error) {
 	} else if isXMLEncoding(encoding) {
 		b, err = xml.Marshal(data)
 	} else {
-		err = fmt.Errorf("Cannot encode content type %q", encoding)
+		err = fmt.Errorf("cannot encode content type %q", encoding)
 	}

 	if err != nil {

@@ -40,11 +40,9 @@ func (w *ManualWatcher) Watch(name string, o Observer) {
 	defer w.m.Unlock()

 	if w.observers == nil {
-		w.observers = make(map[string][]Observer)
+		w.observers = make(map[string][]Observer, 1)
 	}

-	wl, _ := w.observers[name]
-	w.observers[name] = append(wl, o)
+	w.observers[name] = append(w.observers[name], o)
 }

 func (w *ManualWatcher) Start(<-chan struct{}) error {

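The Watch change above works because indexing a map with an absent key yields the zero value, and the zero value of a slice is nil, which append accepts and allocates for; the `wl, _ :=` lookup was redundant. A quick demonstration:

package main

import "fmt"

func main() {
	observers := make(map[string][]string, 1)

	// "cfg" is not present yet, so observers["cfg"] is a nil slice;
	// append on a nil slice allocates a new backing array.
	observers["cfg"] = append(observers["cfg"], "first")
	observers["cfg"] = append(observers["cfg"], "second")

	fmt.Println(observers["cfg"]) // [first second]
}
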
@@ -409,7 +409,7 @@ func StartInformers(stopCh <-chan struct{}, informers ...Informer) error {

 	for i, informer := range informers {
 		if ok := cache.WaitForCacheSync(stopCh, informer.HasSynced); !ok {
-			return fmt.Errorf("Failed to wait for cache at index %d to sync", i)
+			return fmt.Errorf("failed to wait for cache at index %d to sync", i)
 		}
 	}
 	return nil

@@ -82,11 +82,11 @@ func DeletionHandlingAccessor(obj interface{}) (Accessor, error) {
 		// To handle obj deletion, try to fetch info from DeletedFinalStateUnknown.
 		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 		if !ok {
-			return nil, fmt.Errorf("Couldn't get Accessor from tombstone %#v", obj)
+			return nil, fmt.Errorf("couldn't get Accessor from tombstone %#v", obj)
 		}
 		accessor, ok = tombstone.Obj.(Accessor)
 		if !ok {
-			return nil, fmt.Errorf("The object that Tombstone contained is not of kmeta.Accessor %#v", obj)
+			return nil, fmt.Errorf("the object that Tombstone contained is not of kmeta.Accessor %#v", obj)
 		}
 	}

@@ -29,7 +29,7 @@ const (
 	head = longest - md5Len
 )

-// ChildName generates a name for the resource based upong the parent resource and suffix.
+// ChildName generates a name for the resource based upon the parent resource and suffix.
 // If the concatenated name is longer than K8s permits the name is hashed and truncated to permit
 // construction of the resource, but still keeps it unique.
 func ChildName(parent, suffix string) string {

@@ -112,7 +112,7 @@ func (r *ShortDiffReporter) Report(rs cmp.Result) {
 	var diff string
 	// Prefix struct values with the types to add clarity in output
 	if !vx.IsValid() && !vy.IsValid() {
-		r.err = fmt.Errorf("Unable to diff %+v and %+v on path %#v", vx, vy, r.path)
+		r.err = fmt.Errorf("unable to diff %+v and %+v on path %#v", vx, vy, r.path)
 	} else {
 		diff = fmt.Sprintf("%#v:\n", r.path)
 		if vx.IsValid() {

@@ -50,7 +50,7 @@ func newMetricsExporter(config *metricsConfig, logger *zap.SugaredLogger) (view.
 	case Prometheus:
 		e, err = newPrometheusExporter(config, logger)
 	default:
-		err = fmt.Errorf("Unsupported metrics backend %v", config.backendDestination)
+		err = fmt.Errorf("unsupported metrics backend %v", config.backendDestination)
 	}
 	if err != nil {
 		return nil, err

@@ -70,7 +70,7 @@ func (scc *signalContext) Err() error {
 	select {
 	case _, ok := <-scc.Done():
 		if !ok {
-			return errors.New("received a termination signal.")
+			return errors.New("received a termination signal")
 		}
 	default:
 	}

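The surrounding Err implementation relies on a select with a default case, which makes the receive from Done() non-blocking: a closed channel yields immediately with ok == false, otherwise control falls through to default and nil is returned. A stand-in sketch of the pattern (not the real signalContext):

package main

import (
	"errors"
	"fmt"
)

// errIfDone reports an error only once done has been closed;
// the default case keeps the check non-blocking.
func errIfDone(done <-chan struct{}) error {
	select {
	case _, ok := <-done:
		if !ok {
			return errors.New("received a termination signal")
		}
	default:
	}
	return nil
}

func main() {
	done := make(chan struct{})
	fmt.Println(errIfDone(done)) // <nil>
	close(done)
	fmt.Println(errIfDone(done)) // received a termination signal
}
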
@@ -110,5 +110,5 @@ func (client *KubeClient) PodLogs(podName, containerName, namespace string) ([]b
 			return result.Raw()
 		}
 	}
-	return nil, fmt.Errorf("Could not find logs for %s/%s", podName, containerName)
+	return nil, fmt.Errorf("could not find logs for %s/%s", podName, containerName)
 }

@@ -57,7 +57,7 @@ func GetIngressEndpoint(kubeClientset *kubernetes.Clientset) (*string, error) {
 func EndpointFromService(svc *v1.Service) (string, error) {
 	ingresses := svc.Status.LoadBalancer.Ingress
 	if len(ingresses) != 1 {
-		return "", fmt.Errorf("Expected exactly one ingress load balancer, instead had %d: %v", len(ingresses), ingresses)
+		return "", fmt.Errorf("expected exactly one ingress load balancer, instead had %d: %v", len(ingresses), ingresses)
 	}
 	itu := ingresses[0]

@@ -67,6 +67,6 @@ func EndpointFromService(svc *v1.Service) (string, error) {
 	case itu.Hostname != "":
 		return itu.Hostname, nil
 	default:
-		return "", fmt.Errorf("Expected ingress loadbalancer IP or hostname for %s to be set, instead was empty", svc.Name)
+		return "", fmt.Errorf("expected ingress loadbalancer IP or hostname for %s to be set, instead was empty", svc.Name)
 	}
 }

@@ -46,7 +46,7 @@ func CheckPortAvailability(port int) error {
 func GetPods(kubeClientset *kubernetes.Clientset, app, namespace string) (*v1.PodList, error) {
 	pods, err := kubeClientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: fmt.Sprintf("app=%s", app)})
 	if err == nil && len(pods.Items) == 0 {
-		err = fmt.Errorf("No %s Pod found on the cluster. Ensure monitoring is switched on for your Knative Setup", app)
+		err = fmt.Errorf("no %s Pod found on the cluster. Ensure monitoring is switched on for your Knative Setup", app)
 	}

 	return pods, err

@@ -65,7 +65,7 @@ func PortForward(logf logging.FormatLogger, podList *v1.PodList, localPort, remo
 	portFwdProcess, err := executeCmdBackground(logf, portFwdCmd)

 	if err != nil {
-		return 0, fmt.Errorf("Failed to port forward: %v", err)
+		return 0, fmt.Errorf("failed to port forward: %v", err)
 	}

 	logf("running %s port-forward in background, pid = %d", podName, portFwdProcess.Pid)

@@ -27,7 +27,7 @@ import (
 	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
-	ingress "knative.dev/pkg/test/ingress"
+	"knative.dev/pkg/test/ingress"
 	"knative.dev/pkg/test/logging"
 	"knative.dev/pkg/test/zipkin"

@@ -101,6 +101,23 @@ func New(kubeClientset *kubernetes.Clientset, logf logging.FormatLogger, domain
 		logf: logf,
 	}

+	var err error
+	if sc.endpoint, err = ResolveEndpoint(kubeClientset, domain, resolvable, endpointOverride); err != nil {
+		return nil, err
+	}
+
+	if !resolvable {
+		sc.domain = domain
+	}
+
+	return &sc, nil
+}
+
+// ResolveEndpoint resolves the endpoint address considering whether the domain is resolvable and taking into
+// account whether the user overrode the endpoint address externally
+func ResolveEndpoint(kubeClientset *kubernetes.Clientset, domain string, resolvable bool, endpointOverride string) (string, error) {
+	// If the domain is resolvable, we can use it directly when we make requests.
+	endpoint := domain
 	if !resolvable {
 		e := &endpointOverride
 		if endpointOverride == "" {

@@ -108,20 +125,13 @@ func New(kubeClientset *kubernetes.Clientset, logf logging.FormatLogger, domain
 			// If the domain that the Route controller is configured to assign to Route.Status.Domain
 			// (the domainSuffix) is not resolvable, we need to retrieve the endpoint and spoof
 			// the Host in our requests.
-			e, err = ingress.GetIngressEndpoint(kubeClientset)
-			if err != nil {
-				return nil, err
+			if e, err = ingress.GetIngressEndpoint(kubeClientset); err != nil {
+				return "", err
 			}
 		}

-		sc.endpoint = *e
-		sc.domain = domain
-	} else {
-		// If the domain is resolvable, we can use it directly when we make requests.
-		sc.endpoint = domain
+		endpoint = *e
 	}

-	return &sc, nil
+	return endpoint, nil
 }

 // Do dispatches to the underlying http.Client.Do, spoofing domains as needed

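With ResolveEndpoint exported, callers can resolve the ingress address without constructing a SpoofingClient first. A hedged usage sketch based on the signature in the hunk above; the spoof package name and the kubeconfig plumbing here are assumptions, not part of this diff:

package main

import (
	"log"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"knative.dev/pkg/test/spoof"
)

func main() {
	// Hypothetical setup: load the kubeconfig from its default path.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	kubeClientset := kubernetes.NewForConfigOrDie(cfg)

	// The domain is not resolvable and no override is given, so the
	// ingress endpoint is looked up and returned.
	endpoint, err := spoof.ResolveEndpoint(kubeClientset, "example.com", false, "")
	if err != nil {
		log.Fatalf("failed to resolve endpoint: %v", err)
	}
	log.Printf("send requests to %s with Host: example.com", endpoint)
}
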
@@ -57,14 +57,14 @@ func NewTracingConfigFromMap(cfgMap map[string]string) (*Config, error) {
 	if enable, ok := cfgMap[enableKey]; ok {
 		enableBool, err := strconv.ParseBool(enable)
 		if err != nil {
-			return nil, fmt.Errorf("Failed parsing tracing config %q: %v", enableKey, err)
+			return nil, fmt.Errorf("failed parsing tracing config %q: %v", enableKey, err)
 		}
 		tc.Enable = enableBool
 	}

 	if endpoint, ok := cfgMap[zipkinEndpointKey]; !ok {
 		if tc.Enable {
-			return nil, errors.New("Tracing enabled but no zipkin endpoint specified")
+			return nil, errors.New("tracing enabled but no zipkin endpoint specified")
 		}
 	} else {
 		tc.ZipkinEndpoint = endpoint

@@ -73,7 +73,7 @@ func NewTracingConfigFromMap(cfgMap map[string]string) (*Config, error) {
 	if debug, ok := cfgMap[debugKey]; ok {
 		debugBool, err := strconv.ParseBool(debug)
 		if err != nil {
-			return nil, fmt.Errorf("Failed parsing tracing config %q", debugKey)
+			return nil, fmt.Errorf("failed parsing tracing config %q", debugKey)
 		}
 		tc.Debug = debugBool
 	}

@@ -81,7 +81,7 @@ func NewTracingConfigFromMap(cfgMap map[string]string) (*Config, error) {
 	if sampleRate, ok := cfgMap[sampleRateKey]; ok {
 		sampleRateFloat, err := strconv.ParseFloat(sampleRate, 64)
 		if err != nil {
-			return nil, fmt.Errorf("Failed to parse sampleRate in tracing config: %v", err)
+			return nil, fmt.Errorf("failed to parse sampleRate in tracing config: %v", err)
 		}
 		tc.SampleRate = sampleRateFloat
 	}

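The three hunks above leave the parsing shape of NewTracingConfigFromMap intact: booleans via strconv.ParseBool, the sample rate via strconv.ParseFloat, each failure wrapped with the offending key. A sketch of that approach; the key names below are placeholders, since the real enableKey/sampleRateKey constants are defined elsewhere in the package:

package main

import (
	"fmt"
	"strconv"
)

// Placeholder key names; the real constants in the tracing
// config package may differ.
const (
	enableKey     = "enable"
	sampleRateKey = "sample-rate"
)

func main() {
	data := map[string]string{enableKey: "true", sampleRateKey: "0.1"}

	enable, err := strconv.ParseBool(data[enableKey])
	if err != nil {
		panic(fmt.Errorf("failed parsing tracing config %q: %v", enableKey, err))
	}
	rate, err := strconv.ParseFloat(data[sampleRateKey], 64)
	if err != nil {
		panic(fmt.Errorf("failed to parse sampleRate in tracing config: %v", err))
	}
	fmt.Println(enable, rate) // true 0.1
}
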
@@ -62,7 +62,7 @@ func (oct *OpenCensusTracer) Finish() error {
 	err := oct.acquireGlobal()
 	defer octMutex.Unlock()
 	if err != nil {
-		return errors.New("Finish called on OpenTracer which is not the global OpenCensusTracer.")
+		return errors.New("finish called on OpenTracer which is not the global OpenCensusTracer")
 	}

 	for _, configOpt := range oct.configOptions {

@@ -79,7 +79,7 @@ func (oct *OpenCensusTracer) acquireGlobal() error {
 	if globalOct == nil {
 		globalOct = oct
 	} else if globalOct != oct {
-		return errors.New("A OpenCensusTracer already exists and only one can be run at a time.")
+		return errors.New("an OpenCensusTracer already exists and only one can be run at a time")
 	}

 	return nil

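acquireGlobal implements a mutex-guarded process-wide singleton: an empty global slot is claimed by the caller, any different instance is rejected, and the mutex is left locked for the caller to release (hence the defer octMutex.Unlock() in Finish above). A stand-in sketch of the pattern:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type tracer struct{ name string }

var (
	mu     sync.Mutex
	global *tracer
)

// acquire claims the global slot for t, failing if another tracer
// already holds it. It returns with mu still held, mirroring the
// acquireGlobal/defer-Unlock split in the diff above.
func acquire(t *tracer) error {
	mu.Lock()
	if global == nil {
		global = t
	} else if global != t {
		return errors.New("an OpenCensusTracer already exists and only one can be run at a time")
	}
	return nil
}

func main() {
	a, b := &tracer{"a"}, &tracer{"b"}
	err := acquire(a)
	mu.Unlock()
	fmt.Println(err) // <nil>
	err = acquire(b)
	mu.Unlock()
	fmt.Println(err) // an OpenCensusTracer already exists ...
}
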
@@ -80,7 +80,7 @@ func (i *impl) Track(ref corev1.ObjectReference, obj interface{}) error {
 	}
 	if len(fieldErrors) > 0 {
 		sort.Strings(fieldErrors)
-		return fmt.Errorf("Invalid ObjectReference:\n%s", strings.Join(fieldErrors, "\n"))
+		return fmt.Errorf("invalid ObjectReference:\n%s", strings.Join(fieldErrors, "\n"))
 	}

 	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)

@@ -127,10 +127,6 @@ type AdmissionController struct {
 	DisallowUnknownFields bool
 }

-func nop(ctx context.Context) context.Context {
-	return ctx
-}
-
 // GenericCRD is the interface definition that allows us to perform the generic
 // CRD actions like deciding whether to increment generation and so forth.
 type GenericCRD interface {

@@ -214,13 +210,15 @@ func getOrGenerateKeyCertsFromSecret(ctx context.Context, client kubernetes.Inte
 		return nil, nil, nil, err
 	}
 	secret, err = client.CoreV1().Secrets(newSecret.Namespace).Create(newSecret)
-	if err != nil && !apierrors.IsAlreadyExists(err) {
-		return nil, nil, nil, err
-	}
-	// Ok, so something else might have created, try fetching it one more time
-	secret, err = client.CoreV1().Secrets(options.Namespace).Get(options.SecretName, metav1.GetOptions{})
 	if err != nil {
-		return nil, nil, nil, err
+		if !apierrors.IsAlreadyExists(err) {
+			return nil, nil, nil, err
+		}
+		// OK, so something else might have created, try fetching it instead.
+		secret, err = client.CoreV1().Secrets(options.Namespace).Get(options.SecretName, metav1.GetOptions{})
+		if err != nil {
+			return nil, nil, nil, err
+		}
 	}
 }

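The rework above is the standard create-then-get idiom for objects that several actors may create concurrently: attempt the Create, treat IsAlreadyExists as losing a benign race, and fetch the winner's object; any other error still aborts. A condensed self-contained sketch using the same pre-context client-go calls as the hunk (the helper name is hypothetical):

package main

import (
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getOrCreateSecret returns the named secret, creating it if needed.
// If a concurrent actor creates it first, the AlreadyExists error is
// swallowed and the existing object is fetched instead.
func getOrCreateSecret(client kubernetes.Interface, ns string, desired *corev1.Secret) (*corev1.Secret, error) {
	secret, err := client.CoreV1().Secrets(ns).Create(desired)
	if err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return nil, err
		}
		// Lost the race: fetch what the other actor created.
		return client.CoreV1().Secrets(ns).Get(desired.Name, metav1.GetOptions{})
	}
	return secret, nil
}
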