[master] Auto-update dependencies (#289)

Produced via:
  `./hack/update-deps.sh --upgrade && ./hack/update-codegen.sh`
/assign n3wscott vagababov
/cc n3wscott vagababov
This commit is contained in:
Matt Moore 2020-07-20 10:07:36 -07:00 committed by GitHub
parent ea602fec68
commit 3c3c283e9f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 229 additions and 47 deletions

4
go.mod
View File

@ -16,8 +16,8 @@ require (
k8s.io/client-go v11.0.1-0.20190805182717-6502b5e7b1b5+incompatible
k8s.io/code-generator v0.18.0
k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29
knative.dev/pkg v0.0.0-20200716125933-cb729b8ae661
knative.dev/test-infra v0.0.0-20200715185233-6964ba126fee
knative.dev/pkg v0.0.0-20200719221534-601c9ff3832e
knative.dev/test-infra v0.0.0-20200719034534-5adf654f5ed5
)
replace (

14
go.sum
View File

@ -1430,8 +1430,6 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200709181711-e327e1019dfe/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1 h1:rD1FcWVsRaMY+l8biE9jbWP5MS/CJJ/90a9TMkMgNrM=
golang.org/x/tools v0.0.0-20200710042808-f1c4188a97a1/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200714190737-9048b464a08d h1:hYhnolbefSSt3WZp66sgmgnEOFv5PD6a5PIcnKJ8jdU=
golang.org/x/tools v0.0.0-20200714190737-9048b464a08d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
@ -1528,8 +1526,6 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200709005830-7a2ca40e9dc3/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e h1:k+p/u26/lVeNEpdxSeUrm7rTvoFckBKaf7gTzgmHyDA=
google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200715011427-11fb19a81f2c h1:6DWnZZ6EY/59QRRQttZKiktVL23UuQYs7uy75MhhLRM=
google.golang.org/genproto v0.0.0-20200715011427-11fb19a81f2c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@ -1671,7 +1667,6 @@ k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuB
k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw=
k8s.io/test-infra v0.0.0-20200514184223-ba32c8aae783/go.mod h1:bW6thaPZfL2hW7ecjx2WYwlP9KQLM47/xIJyttkVk5s=
k8s.io/test-infra v0.0.0-20200617221206-ea73eaeab7ff/go.mod h1:L3+cRvwftUq8IW1TrHji5m3msnc4uck/7LsE/GR/aZk=
k8s.io/test-infra v0.0.0-20200710134549-5891a1a4cc17/go.mod h1:L3+cRvwftUq8IW1TrHji5m3msnc4uck/7LsE/GR/aZk=
k8s.io/test-infra v0.0.0-20200715094037-cc150f5ae724/go.mod h1:D2jUSuQFYy6McY2qbknsLUE9stqN0yIuJ+rjdUAxSCs=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200124190032-861946025e34 h1:HjlUD6M0K3P8nRXmr2B9o4F9dUy9TCj/aEpReeyi6+k=
@ -1679,12 +1674,12 @@ k8s.io/utils v0.0.0-20200124190032-861946025e34/go.mod h1:sZAwmy6armz5eXlNoLmJcl
knative.dev/caching v0.0.0-20200116200605-67bca2c83dfa/go.mod h1:dHXFU6CGlLlbzaWc32g80cR92iuBSpsslDNBWI8C7eg=
knative.dev/eventing-contrib v0.11.2/go.mod h1:SnXZgSGgMSMLNFTwTnpaOH7hXDzTFtw0J8OmHflNx3g=
knative.dev/pkg v0.0.0-20200207155214-fef852970f43/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q=
knative.dev/pkg v0.0.0-20200716125933-cb729b8ae661 h1:qQTj9dZIrtO2YXS3XfoWFq6a3MCUWrl+9FXSHyEy6ac=
knative.dev/pkg v0.0.0-20200716125933-cb729b8ae661/go.mod h1:yFXTzV2GIB30Qs6pdJNCbtKaIEhxH9fsmrrltAqfjWE=
knative.dev/test-infra v0.0.0-20200713220518-5a4c4cad5372 h1:NZzdNmKYP3L7fut/SNOxLgTgXVvQrygXiYpAeIMGMwM=
knative.dev/test-infra v0.0.0-20200713220518-5a4c4cad5372/go.mod h1:vtT6dLs/iNj8pKcfag8CSVqHKNMgyCFtU/g1pV7Bovs=
knative.dev/pkg v0.0.0-20200719221534-601c9ff3832e h1:QA6k96vYFVWNR+VXzbhfnZ6jHNXtpa7DmOjFQLvX6Rg=
knative.dev/pkg v0.0.0-20200719221534-601c9ff3832e/go.mod h1:3mm5ZffkmyYnqN+SOq1cN9TX0KTjhEbiZL8YBpP4C4Y=
knative.dev/test-infra v0.0.0-20200715185233-6964ba126fee h1:SH4N5kSRiEgmOcgjFwsyLMipS3sPJlN6dpp783C/ILQ=
knative.dev/test-infra v0.0.0-20200715185233-6964ba126fee/go.mod h1:mAsPDmFmlsTJjRWplWBz8xtEiarSgvGiiOjkGj4Or1g=
knative.dev/test-infra v0.0.0-20200719034534-5adf654f5ed5 h1:ZspjtLBz7pzkB9PvPxSgDNHVNftTU1mmjtYh+j5LZJE=
knative.dev/test-infra v0.0.0-20200719034534-5adf654f5ed5/go.mod h1:mAsPDmFmlsTJjRWplWBz8xtEiarSgvGiiOjkGj4Or1g=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
@ -1702,7 +1697,6 @@ rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/boskos v0.0.0-20200526191642-45fc818e2d00/go.mod h1:L1ubP7d1CCMSQSjKiZv6dGbh7b4kfoG+dFPj8cfYDnI=
sigs.k8s.io/boskos v0.0.0-20200617235605-f289ba6555ba/go.mod h1:ZO5RV+VxJS9mb6DvZ1yAjywoyq/wQ8b0vDoZxcIA5kE=
sigs.k8s.io/boskos v0.0.0-20200710214748-f5935686c7fc/go.mod h1:ZO5RV+VxJS9mb6DvZ1yAjywoyq/wQ8b0vDoZxcIA5kE=
sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8=
sigs.k8s.io/controller-runtime v0.5.4/go.mod h1:JZUwSMVbxDupo0lTJSSFP5pimEyxGynROImSsqIOx1A=

View File

@ -213,12 +213,9 @@ func NewImpl(r Reconciler, logger *zap.SugaredLogger, workQueueName string) *Imp
func NewImplWithStats(r Reconciler, logger *zap.SugaredLogger, workQueueName string, reporter StatsReporter) *Impl {
logger = logger.Named(workQueueName)
return &Impl{
Name: workQueueName,
Reconciler: r,
WorkQueue: workqueue.NewNamedRateLimitingQueue(
workqueue.DefaultControllerRateLimiter(),
workQueueName,
),
Name: workQueueName,
Reconciler: r,
WorkQueue: newTwoLaneWorkQueue(workQueueName),
logger: logger,
statsReporter: reporter,
}

View File

@ -0,0 +1,159 @@
/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"k8s.io/client-go/util/workqueue"
)
// twoLaneQueue is a rate limited queue that wraps around two queues
// -- fast queue (anonymously aliased), whose contents are processed with priority.
// -- slow queue (slowLane queue), whose contents are processed if fast queue has no items.
// All the default methods operate on the fast queue, unless noted otherwise.
type twoLaneQueue struct {
workqueue.RateLimitingInterface
// slowLane holds the lower-priority items; drained only when the fast lane is empty.
slowLane workqueue.RateLimitingInterface
// consumerQueue is necessary to ensure that we're not reconciling
// the same object at the exact same time (e.g. if it had been enqueued
// in both fast and slow and is the only object there).
consumerQueue workqueue.Interface
// name is the base name used to derive the "-fast"/"-slow"/"-consumer" queue names.
name string
// fastChan/slowChan carry items from the respective lane's producer
// goroutine to the consumer goroutine (see process and runConsumer).
fastChan chan interface{}
slowChan chan interface{}
}
// newTwoLaneWorkQueue creates a twoLaneQueue whose fast and slow lanes
// share a single rate limiter, and starts the goroutines that shuttle
// items from both lanes into the merged consumer queue.
func newTwoLaneWorkQueue(name string) *twoLaneQueue {
	limiter := workqueue.DefaultControllerRateLimiter()
	tlq := &twoLaneQueue{
		RateLimitingInterface: workqueue.NewNamedRateLimitingQueue(limiter, name+"-fast"),
		slowLane:              workqueue.NewNamedRateLimitingQueue(limiter, name+"-slow"),
		consumerQueue:         workqueue.NewNamed(name + "-consumer"),
		name:                  name,
		fastChan:              make(chan interface{}),
		slowChan:              make(chan interface{}),
	}
	// Producer goroutines drain each lane into its channel...
	go process(tlq.RateLimitingInterface, tlq.fastChan)
	go process(tlq.slowLane, tlq.slowChan)
	// ...and the consumer goroutine merges both channels into consumerQueue.
	go tlq.runConsumer()
	return tlq
}
// process is the producer loop for one lane: it pulls items off the queue
// and forwards them to ch until the queue is shut down and drained.
// The producer owns ch and closes it on exit so the consumer can observe
// the shutdown.
func process(q workqueue.Interface, ch chan interface{}) {
	defer close(ch)
	for {
		item, shutdown := q.Get()
		// Queue is empty and shutting down — terminate the producer.
		if shutdown {
			return
		}
		ch <- item
		q.Done(item)
	}
}
// runConsumer merges the fast and slow channels into consumerQueue,
// always preferring fast-lane items when any are available. It exits —
// and shuts down consumerQueue — once both producer channels are closed.
func (tlq *twoLaneQueue) runConsumer() {
// Shutdown flags.
fast, slow := true, true
// When both producer queues are shutdown stop the consumerQueue.
defer tlq.consumerQueue.ShutDown()
// While any of the queues is still running, try to read off of them.
for fast || slow {
// By default drain the fast lane.
// Channels in a select are picked at random, so first
// we have a select that only looks at the fast lane queue.
if fast {
select {
case item, ok := <-tlq.fastChan:
if !ok {
// This queue is shutdown and drained. Stop looking at it.
fast = false
continue
}
tlq.consumerQueue.Add(item)
continue
default:
// This immediately exits the wait if the fast chan is empty.
}
}
// If the fast lane queue had no items, we can select from both.
// Obviously if suddenly both are populated at the same time there's a
// 50% chance that the slow would be picked first, but this should be
// a rare occasion not to really worry about it.
select {
case item, ok := <-tlq.fastChan:
if !ok {
// This queue is shutdown and drained. Stop looking at it.
fast = false
continue
}
tlq.consumerQueue.Add(item)
case item, ok := <-tlq.slowChan:
if !ok {
// This queue is shutdown and drained. Stop looking at it.
slow = false
continue
}
tlq.consumerQueue.Add(item)
}
}
}
// ShutDown implements workqueue.Interface.
// ShutDown shuts down both the fast and the slow lane queues; the
// consumer queue is shut down by runConsumer once both lanes drain.
func (tlq *twoLaneQueue) ShutDown() {
tlq.RateLimitingInterface.ShutDown()
tlq.slowLane.ShutDown()
}
// Done implements workqueue.Interface.
// Done marks the item as completed only in the consumer queue: the fast
// and slow lane queues already had Done called for it by the producer
// goroutine (see process) when the item was forwarded.
func (tlq *twoLaneQueue) Done(i interface{}) {
tlq.consumerQueue.Done(i)
}
// Get implements workqueue.Interface.
// It returns the next item from the merged consumer queue, which the
// consumer goroutine fills from the fast lane first, and from the slow
// lane only when the fast lane is empty.
func (tlq *twoLaneQueue) Get() (interface{}, bool) {
return tlq.consumerQueue.Get()
}
// Len returns the combined length of the fast lane, the slow lane, and
// the consumer queue.
// NB: the actual _number_ of unique objects may be smaller than this sum,
// since the same key can be present in more than one of the queues.
func (tlq *twoLaneQueue) Len() int {
	total := tlq.RateLimitingInterface.Len()
	total += tlq.slowLane.Len()
	return total + tlq.consumerQueue.Len()
}
// SlowLane gives direct access to the slow queue, allowing callers to
// enqueue items at lower priority than via the embedded fast lane.
func (tlq *twoLaneQueue) SlowLane() workqueue.RateLimitingInterface {
return tlq.slowLane
}

View File

@ -19,6 +19,7 @@ package leaderelection
import (
"context"
"fmt"
"hash/fnv"
"strings"
"sync"
@ -81,11 +82,11 @@ type Elector interface {
// BuildElector builds a leaderelection.LeaderElector for the named LeaderAware
// reconciler using a builder added to the context via WithStandardLeaderElectorBuilder.
func BuildElector(ctx context.Context, la reconciler.LeaderAware, queueName string, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) {
func BuildElector(ctx context.Context, la reconciler.LeaderAware, name string, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) {
if val := ctx.Value(builderKey{}); val != nil {
switch builder := val.(type) {
case *standardBuilder:
return builder.buildElector(ctx, la, queueName, enq)
return builder.buildElector(ctx, la, name, enq)
case *statefulSetBuilder:
return builder.buildElector(ctx, la, enq)
}
@ -106,7 +107,7 @@ type standardBuilder struct {
}
func (b *standardBuilder) buildElector(ctx context.Context, la reconciler.LeaderAware,
queueName string, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) {
name string, enq func(reconciler.Bucket, types.NamespacedName)) (Elector, error) {
logger := logging.FromContext(ctx)
id, err := UniqueID()
@ -114,9 +115,16 @@ func (b *standardBuilder) buildElector(ctx context.Context, la reconciler.Leader
return nil, err
}
bkts := newStandardBuckets(queueName, b.lec)
electors := make([]Elector, 0, b.lec.Buckets)
for _, bkt := range bkts {
buckets := make([]Elector, 0, b.lec.Buckets)
for i := uint32(0); i < b.lec.Buckets; i++ {
bkt := &bucket{
// The resource name is the lowercase:
// {component}.{workqueue}.{index}-of-{total}
name: strings.ToLower(fmt.Sprintf("%s.%s.%02d-of-%02d", b.lec.Component, name, i, b.lec.Buckets)),
index: i,
total: b.lec.Buckets,
}
rl, err := resourcelock.New(KnativeResourceLock,
system.Namespace(), // use namespace we are running in
bkt.Name(),
@ -160,27 +168,9 @@ func (b *standardBuilder) buildElector(ctx context.Context, la reconciler.Leader
// if lec.WatchDog != nil {
// lec.WatchDog.SetLeaderElection(le)
// }
electors = append(electors, &runUntilCancelled{Elector: le})
buckets = append(buckets, &runUntilCancelled{Elector: le})
}
return &runAll{les: electors}, nil
}
func newStandardBuckets(queueName string, cc ComponentConfig) []reconciler.Bucket {
names := make(sets.String, cc.Buckets)
for i := uint32(0); i < cc.Buckets; i++ {
names.Insert(standardBucketName(i, queueName, cc))
}
bs := hash.NewBucketSet(names)
bkts := make([]reconciler.Bucket, 0, cc.Buckets)
for name := range names {
bkts = append(bkts, hash.NewBucket(name, bs))
}
return bkts
}
func standardBucketName(ordinal uint32, queueName string, cc ComponentConfig) string {
return strings.ToLower(fmt.Sprintf("%s.%s.%02d-of-%02d", cc.Component, queueName, ordinal, cc.Buckets))
return &runAll{les: buckets}, nil
}
type statefulSetBuilder struct {
@ -279,3 +269,26 @@ func (ruc *runUntilCancelled) Run(ctx context.Context) {
}
}
}
// bucket assigns objects to one of {total} partitions by hashing the
// object's namespaced name (see Has).
type bucket struct {
// name is this bucket's resource-lock name.
name string
// We are bucket {index} of {total}
index uint32
total uint32
}
// Compile-time assertion that bucket satisfies reconciler.Bucket.
var _ reconciler.Bucket = (*bucket)(nil)
// Name implements reconciler.Bucket; it returns the bucket's
// resource-lock name.
func (b *bucket) Name() string {
return b.name
}
// Has implements reconciler.Bucket: an object belongs to this bucket
// when the FNV-32a hash of "namespace.name" modulo the total bucket
// count equals this bucket's index.
func (b *bucket) Has(nn types.NamespacedName) bool {
	hasher := fnv.New32a()
	hasher.Write([]byte(nn.Namespace + "." + nn.Name))
	return hasher.Sum32()%b.total == b.index
}

View File

@ -25,6 +25,8 @@ import (
// ErrorHandler sets up a handler suitable for use with the ErrorHandler field on
// httputil's reverse proxy.
// TODO(mattmoor): Move the implementation into handlers/error.go once downstream consumers
// have adopted the alias.
func ErrorHandler(logger *zap.SugaredLogger) func(http.ResponseWriter, *http.Request, error) {
return func(w http.ResponseWriter, req *http.Request, err error) {
ss := readSockStat(logger)

View File

@ -85,6 +85,19 @@ function go_test_e2e() {
report_go_test -v -race -count=1 ${go_options} $@ "${test_options}"
}
# Dumps the k8s api server metrics. Spins up a proxy, waits a little bit and
# dumps the metrics to ${ARTIFACTS}/k8s.metrics.txt
# Globals:   ARTIFACTS (read) - output directory for the metrics dump.
function dump_metrics() {
  header ">> Starting kube proxy"
  kubectl proxy --port=8080 &
  local proxy_pid=$!
  # Give the proxy a moment to come up before scraping.
  sleep 5
  header ">> Grabbing k8s metrics"
  # Quote the redirect target so an ARTIFACTS path containing spaces
  # doesn't word-split (SC2086).
  curl -s http://localhost:8080/metrics > "${ARTIFACTS}/k8s.metrics.txt"
  # Clean up proxy so it doesn't interfere with job shutting down
  kill "${proxy_pid}" || true
}
# Dump info about the test cluster. If dump_extra_cluster_info() is defined, calls it too.
# This is intended to be called when a test fails to provide debugging information.
function dump_cluster_state() {
@ -410,6 +423,7 @@ function success() {
echo "**************************************"
echo "*** E2E TESTS PASSED ***"
echo "**************************************"
dump_metrics
exit 0
}
@ -419,6 +433,7 @@ function fail_test() {
set_test_return_code 1
[[ -n $1 ]] && echo "ERROR: $1"
dump_cluster_state
dump_metrics
exit 1
}

View File

@ -134,7 +134,9 @@ function wait_until_pods_running() {
echo -n "Waiting until all pods in namespace $1 are up"
local failed_pod=""
for i in {1..150}; do # timeout after 5 minutes
local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)"
# List all pods. Ignore Terminating pods as those have either been replaced through
# a deployment or terminated on purpose (through chaosduck for example).
local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null | grep -v Terminating)"
# All pods must be running (ignore ImagePull error to allow the pod to retry)
local not_running_pods=$(echo "${pods}" | grep -v Running | grep -v Completed | grep -v ErrImagePull | grep -v ImagePullBackOff)
if [[ -n "${pods}" ]] && [[ -z "${not_running_pods}" ]]; then

4
vendor/modules.txt vendored
View File

@ -747,7 +747,7 @@ k8s.io/kube-openapi/pkg/util/sets
k8s.io/utils/buffer
k8s.io/utils/integer
k8s.io/utils/trace
# knative.dev/pkg v0.0.0-20200716125933-cb729b8ae661
# knative.dev/pkg v0.0.0-20200719221534-601c9ff3832e
## explicit
knative.dev/pkg/apis
knative.dev/pkg/apis/duck/ducktypes
@ -771,7 +771,7 @@ knative.dev/pkg/metrics/metricskey
knative.dev/pkg/network
knative.dev/pkg/reconciler
knative.dev/pkg/system
# knative.dev/test-infra v0.0.0-20200715185233-6964ba126fee
# knative.dev/test-infra v0.0.0-20200719034534-5adf654f5ed5
## explicit
knative.dev/test-infra/scripts
knative.dev/test-infra/tools/dep-collector