mirror of https://github.com/knative/caching.git
Auto-update dependencies (#105)
Produced via: `dep ensure -update knative.dev/test-infra knative.dev/pkg`

/assign mattmoor
This commit is contained in:

parent 70927bec9a
commit ff16d22461
@@ -927,7 +927,7 @@

 [[projects]]
   branch = "master"
-  digest = "1:40bc98faaf4e29673da841dfc4d4a5740ceb8c225576b91438472c8542af0849"
+  digest = "1:ee6796d9df631ca8c5cf5617521af3086f2c67f8c941e440cd5c7a935b63da76"
   name = "knative.dev/pkg"
   packages = [
     "apis",
@@ -946,18 +946,18 @@
     "metrics/metricskey",
   ]
   pruneopts = "T"
-  revision = "1fb9a433083f3e74ff9de88a5ff6bd37ab7bd709"
+  revision = "4a790dd36c6c194892fd3cdb8039667ad391e210"

 [[projects]]
   branch = "master"
-  digest = "1:964a4f9e12d5021b9d550e04c36cf23feb880e286ceede1b246d5c0e43b523b9"
+  digest = "1:2226e46ebce37abefef1e100a1c77cdbbff6e76bfe0f99b061abb5bdf1849a3c"
   name = "knative.dev/test-infra"
   packages = [
     "scripts",
     "tools/dep-collector",
   ]
   pruneopts = "UT"
-  revision = "eab25edcb2c13ed4eaa76f8cdd9fb5e13b26797e"
+  revision = "cc2b86828e9e7d4992029981667f9cc9a69acd96"

 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
@@ -22,13 +22,6 @@ import (
     corev1 "k8s.io/api/core/v1"
 )

-// NewFixedWatcher returns a StaticWatcher that exposes a collection of ConfigMaps.
-//
-// Deprecated: Use NewStaticWatcher
-func NewFixedWatcher(cms ...*corev1.ConfigMap) *StaticWatcher {
-    return NewStaticWatcher(cms...)
-}
-
 // NewStaticWatcher returns an StaticWatcher that exposes a collection of ConfigMaps.
 func NewStaticWatcher(cms ...*corev1.ConfigMap) *StaticWatcher {
     cmm := make(map[string]*corev1.ConfigMap)
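For context, the hunk above drops the deprecated NewFixedWatcher alias from what appears to be the vendored knative.dev/pkg configmap package. Below is a minimal sketch of constructing the replacement, NewStaticWatcher, directly; the import path knative.dev/pkg/configmap and the ConfigMap namespace, name, and data are assumptions for illustration.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "knative.dev/pkg/configmap"
)

func main() {
    // A hypothetical ConfigMap to seed the watcher with.
    cm := &corev1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{Namespace: "knative-serving", Name: "config-logging"},
        Data:       map[string]string{"zap-logger-config": "{}"},
    }

    // NewStaticWatcher replaces the removed NewFixedWatcher alias.
    watcher := configmap.NewStaticWatcher(cm)
    fmt.Printf("watcher: %#v\n", watcher)
}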
@@ -27,6 +27,7 @@ import (
     "go.uber.org/zap"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/tools/cache"
@@ -155,23 +156,23 @@ func NewImplWithStats(r Reconciler, logger *zap.SugaredLogger, workQueueName str
 // EnqueueAfter takes a resource, converts it into a namespace/name string,
 // and passes it to EnqueueKey.
 func (c *Impl) EnqueueAfter(obj interface{}, after time.Duration) {
-    key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+    object, err := kmeta.DeletionHandlingAccessor(obj)
     if err != nil {
         c.logger.Errorw("Enqueue", zap.Error(err))
         return
     }
-    c.EnqueueKeyAfter(key, after)
+    c.EnqueueKeyAfter(types.NamespacedName{Namespace: object.GetNamespace(), Name: object.GetName()}, after)
 }

 // Enqueue takes a resource, converts it into a namespace/name string,
 // and passes it to EnqueueKey.
 func (c *Impl) Enqueue(obj interface{}) {
-    key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+    object, err := kmeta.DeletionHandlingAccessor(obj)
     if err != nil {
         c.logger.Errorw("Enqueue", zap.Error(err))
         return
     }
-    c.EnqueueKey(key)
+    c.EnqueueKey(types.NamespacedName{Namespace: object.GetNamespace(), Name: object.GetName()})
 }

 // EnqueueControllerOf takes a resource, identifies its controller resource,
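The hunk above swaps the string keys produced by cache.DeletionHandlingMetaNamespaceKeyFunc for kmeta.DeletionHandlingAccessor plus a types.NamespacedName. A minimal, self-contained sketch of the new key type follows; the namespace and name are hypothetical. Note that NamespacedName.String() renders the same "namespace/name" form the old string keys used.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/types"
)

func main() {
    // Build the key the way the updated Enqueue/EnqueueAfter methods do.
    key := types.NamespacedName{Namespace: "default", Name: "my-revision"}

    // String() yields "default/my-revision", matching the old string keys.
    fmt.Println(key.String())
}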
@@ -186,7 +187,7 @@ func (c *Impl) EnqueueControllerOf(obj interface{}) {
     // If we can determine the controller ref of this object, then
     // add that object to our workqueue.
     if owner := metav1.GetControllerOf(object); owner != nil {
-        c.EnqueueKey(object.GetNamespace() + "/" + owner.Name)
+        c.EnqueueKey(types.NamespacedName{Namespace: object.GetNamespace(), Name: owner.Name})
     }
 }

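EnqueueControllerOf now builds the same typed key from the owning controller's name instead of concatenating strings. A small sketch of metav1.GetControllerOf on a hypothetical Pod owned by a controller; the owner fields below are illustrative only.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
)

func main() {
    isController := true
    pod := &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "default",
            Name:      "example-pod",
            OwnerReferences: []metav1.OwnerReference{{
                APIVersion: "apps/v1",
                Kind:       "ReplicaSet",
                Name:       "example-rs",
                Controller: &isController,
            }},
        },
    }

    // GetControllerOf returns the owner reference marked Controller=true, if any.
    if owner := metav1.GetControllerOf(pod); owner != nil {
        key := types.NamespacedName{Namespace: pod.GetNamespace(), Name: owner.Name}
        fmt.Println(key.String()) // default/example-rs
    }
}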
@@ -218,14 +219,14 @@ func (c *Impl) EnqueueLabelOfNamespaceScopedResource(namespaceLabel, nameLabel s
                 return
             }

-            c.EnqueueKey(fmt.Sprintf("%s/%s", controllerNamespace, controllerKey))
+            c.EnqueueKey(types.NamespacedName{Namespace: controllerNamespace, Name: controllerKey})
             return
         }

         // Pass through namespace of the object itself if no namespace label specified.
         // This is for the scenario that object and the parent resource are of same namespace,
         // e.g. to enqueue the revision of an endpoint.
-        c.EnqueueKey(fmt.Sprintf("%s/%s", object.GetNamespace(), controllerKey))
+        c.EnqueueKey(types.NamespacedName{Namespace: object.GetNamespace(), Name: controllerKey})
     }
 }
@@ -249,21 +250,27 @@ func (c *Impl) EnqueueLabelOfClusterScopedResource(nameLabel string) func(obj in
             return
         }

-        c.EnqueueKey(controllerKey)
+        ns, name, err := cache.SplitMetaNamespaceKey(controllerKey)
+        if err != nil {
+            c.logger.Error(err)
+            return
+        }
+
+        c.EnqueueKey(types.NamespacedName{Namespace: ns, Name: name})
     }
 }

 // EnqueueKey takes a namespace/name string and puts it onto the work queue.
-func (c *Impl) EnqueueKey(key string) {
+func (c *Impl) EnqueueKey(key types.NamespacedName) {
     c.WorkQueue.Add(key)
-    c.logger.Debugf("Adding to queue %s (depth: %d)", key, c.WorkQueue.Len())
+    c.logger.Debugf("Adding to queue %s (depth: %d)", key.String(), c.WorkQueue.Len())
 }

 // EnqueueKeyAfter takes a namespace/name string and schedules its execution in
 // the work queue after given delay.
-func (c *Impl) EnqueueKeyAfter(key string, delay time.Duration) {
+func (c *Impl) EnqueueKeyAfter(key types.NamespacedName, delay time.Duration) {
     c.WorkQueue.AddAfter(key, delay)
-    c.logger.Debugf("Adding to queue %s (delay: %v, depth: %d)", key, delay, c.WorkQueue.Len())
+    c.logger.Debugf("Adding to queue %s (delay: %v, depth: %d)", key.String(), delay, c.WorkQueue.Len())
 }

 // Run starts the controller's worker threads, the number of which is threadiness.
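In the cluster-scoped path above, the label still carries a "namespace/name" string, so the updated code splits it with cache.SplitMetaNamespaceKey before building the typed key. A minimal sketch with a hypothetical key value:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/tools/cache"
)

func main() {
    // SplitMetaNamespaceKey accepts "name" or "namespace/name" and splits it.
    ns, name, err := cache.SplitMetaNamespaceKey("knative-serving/controller")
    if err != nil {
        fmt.Println("unexpected key format:", err)
        return
    }

    key := types.NamespacedName{Namespace: ns, Name: name}
    fmt.Println(key.String()) // knative-serving/controller
}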
@@ -306,7 +313,8 @@ func (c *Impl) processNextWorkItem() bool {
     if shutdown {
         return false
     }
-    key := obj.(string)
+    key := obj.(types.NamespacedName)
+    keyStr := key.String()

     c.logger.Debugf("Processing from queue %s (depth: %d)", key, c.WorkQueue.Len())

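Because the client-go work queue stores items as interface{}, switching the key type only requires changing the type assertion where items are dequeued, which is what the hunk above does. Below is a minimal sketch using a rate-limiting work queue holding NamespacedName items; the queue name is a placeholder.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/util/workqueue"
)

func main() {
    q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "example")

    // Enqueue a typed key instead of a "namespace/name" string.
    q.Add(types.NamespacedName{Namespace: "default", Name: "my-image"})

    item, shutdown := q.Get()
    if shutdown {
        return
    }
    defer q.Done(item)

    // The same assertion the updated processNextWorkItem performs.
    key := item.(types.NamespacedName)
    fmt.Println(key.String()) // default/my-image
}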
@@ -327,17 +335,17 @@ func (c *Impl) processNextWorkItem() bool {
         if err != nil {
             status = falseString
         }
-        c.statsReporter.ReportReconcile(time.Since(startTime), key, status)
+        c.statsReporter.ReportReconcile(time.Since(startTime), keyStr, status)
     }()

     // Embed the key into the logger and attach that to the context we pass
     // to the Reconciler.
-    logger := c.logger.With(zap.String(logkey.TraceId, uuid.New().String()), zap.String(logkey.Key, key))
+    logger := c.logger.With(zap.String(logkey.TraceId, uuid.New().String()), zap.String(logkey.Key, keyStr))
     ctx := logging.WithLogger(context.TODO(), logger)

     // Run Reconcile, passing it the namespace/name string of the
     // resource to be synced.
-    if err = c.Reconciler.Reconcile(ctx, key); err != nil {
+    if err = c.Reconciler.Reconcile(ctx, keyStr); err != nil {
         c.handleErr(err, key)
         logger.Infof("Reconcile failed. Time taken: %v.", time.Since(startTime))
         return true
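The reconcile loop above now threads keyStr (the stringified key) into the stats reporter, the structured logger, and the Reconcile call, while handleErr keeps the typed key for requeueing. A small sketch of attaching a stringified key to a zap SugaredLogger as a structured field; the field name "key" stands in for the logkey constant used upstream.

package main

import (
    "time"

    "go.uber.org/zap"

    "k8s.io/apimachinery/pkg/types"
)

func main() {
    base, _ := zap.NewDevelopment()
    logger := base.Sugar()

    key := types.NamespacedName{Namespace: "default", Name: "my-image"}
    keyStr := key.String()

    // Attach the key as a structured field, then log as the reconcile loop does.
    logger = logger.With(zap.String("key", keyStr))

    start := time.Now()
    logger.Infof("Reconcile succeeded. Time taken: %v.", time.Since(start))
}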
@@ -351,7 +359,7 @@ func (c *Impl) processNextWorkItem() bool {
     return true
 }

-func (c *Impl) handleErr(err error, key string) {
+func (c *Impl) handleErr(err error, key types.NamespacedName) {
     c.logger.Errorw("Reconcile error", zap.Error(err))

     // Re-queue the key if it's an transient error.
@@ -360,7 +368,7 @@ func (c *Impl) handleErr(err error, key string) {
     // being processed, queue.Len==0).
     if !IsPermanentError(err) && !c.WorkQueue.ShuttingDown() {
         c.WorkQueue.AddRateLimited(key)
-        c.logger.Debugf("Requeuing key %s due to non-permanent error (depth: %d)", key, c.WorkQueue.Len())
+        c.logger.Debugf("Requeuing key %s due to non-permanent error (depth: %d)", key.String(), c.WorkQueue.Len())
         return
     }

@@ -79,7 +79,7 @@ func GetLoggingConfig(ctx context.Context) (*logging.Config, error) {
     loggingConfigMap, err := kubeclient.Get(ctx).CoreV1().ConfigMaps(system.Namespace()).Get(logging.ConfigMapName(), metav1.GetOptions{})
     if err != nil {
         if apierrors.IsNotFound(err) {
-            return logging.NewConfigFromMap(make(map[string]string))
+            return logging.NewConfigFromMap(nil)
         } else {
             return nil, err
         }
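This logging hunk and the next one pass nil instead of an empty map when falling back to the default config. In Go, reading from and ranging over a nil map behaves exactly like an empty map, so NewConfigFromMap(nil) should produce the same defaults; a quick illustration of that language behavior:

package main

import "fmt"

func main() {
    var m map[string]string // nil map

    // Lookups on a nil map return the zero value, just like an empty map.
    v, ok := m["loglevel.controller"]
    fmt.Println(v, ok) // "" false

    // Ranging over a nil map executes zero iterations.
    for k := range m {
        fmt.Println(k)
    }
    fmt.Println(len(m)) // 0
}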
@@ -214,7 +214,7 @@ func JsonToLoggingConfig(jsonCfg string) (*Config, error) {
     cfg, err := NewConfigFromMap(configMap)
     if err != nil {
         // Get the default config from logging package.
-        if cfg, err = NewConfigFromMap(map[string]string{}); err != nil {
+        if cfg, err = NewConfigFromMap(nil); err != nil {
             return nil, err
         }
     }
@@ -65,7 +65,7 @@ func CheckMinimumVersion(versioner ServerVersioner) error {
     // Compare returns 1 if the first version is greater than the
     // second version.
     if semver.Compare(minimumVersion, currentVersion) == 1 {
-        return fmt.Errorf("kubernetes version %q is not compatible, need at least %q (this can be overriden with the env var %q)",
+        return fmt.Errorf("kubernetes version %q is not compatible, need at least %q (this can be overridden with the env var %q)",
             currentVersion, minimumVersion, KubernetesMinVersionKey)
     }
     return nil
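The version-check hunk fixes an "overriden" typo in the error message; the surrounding semver.Compare call is unchanged. As a reference for how that comparison behaves, here is a small sketch using golang.org/x/mod/semver, which exposes a Compare function with the same semantics (the semver package actually vendored in the diff may be a different import); the version values are made up.

package main

import (
    "fmt"

    "golang.org/x/mod/semver"
)

func main() {
    minimumVersion := "v1.14.0"
    currentVersion := "v1.12.3"

    // Compare returns +1 when the first argument is the greater version,
    // so this branch means the cluster is older than the required minimum.
    if semver.Compare(minimumVersion, currentVersion) == 1 {
        fmt.Printf("kubernetes version %q is not compatible, need at least %q\n",
            currentVersion, minimumVersion)
    }
}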
@@ -94,12 +94,14 @@ function dump_cluster_state() {
   echo "*** Start of information dump ***"
   echo "***************************************"
   echo ">>> All resources:"
-  kubectl get all --all-namespaces
+  kubectl get all --all-namespaces --show-labels -o wide
   echo ">>> Services:"
-  kubectl get services --all-namespaces
+  kubectl get services --all-namespaces --show-labels -o wide
   echo ">>> Events:"
-  kubectl get events --all-namespaces
+  kubectl get events --all-namespaces --show-labels -o wide
   function_exists dump_extra_cluster_state && dump_extra_cluster_state
   echo ">>> Full dump: ${ARTIFACTS}/k8s.dump.txt"
   kubectl get all --all-namespaces -o yaml > ${ARTIFACTS}/k8s.dump.txt
   echo "***************************************"
   echo "*** E2E TEST FAILED ***"
   echo "*** End of information dump ***"