[master] Auto-update dependencies (#257)

Produced via:
  `./hack/update-deps.sh --upgrade && ./hack/update-codegen.sh`
/assign n3wscott vagababov
/cc n3wscott vagababov
Matt Moore authored 2020-04-27 09:11:51 -07:00; committed by GitHub
parent c8036952a6
commit 732bdd7594
16 changed files with 143 additions and 55 deletions

Gopkg.lock (generated)

@@ -966,7 +966,7 @@

 [[projects]]
   branch = "master"
-  digest = "1:bcff22fa5a7779335e4812c47866681e266cdd83a10356b6e967eefac4563de1"
+  digest = "1:04f788ac12f571fa1ca01efc29cff6ae68de257287c7f3aa4485f768cceaf1b1"
   name = "knative.dev/pkg"
   packages = [
     "apis",
@@ -986,18 +986,18 @@
     "reconciler",
   ]
   pruneopts = "T"
-  revision = "4945766b290cbd486c9e3fdaded78985875e516b"
+  revision = "7b6e21a57a3169cfe4acf71b2fa34ca0f6e3898e"

 [[projects]]
   branch = "master"
-  digest = "1:dc9ba2b25034dbe099b604ce1e243b42502d4ea7b096f844abd1f1617e1151ac"
+  digest = "1:4d8d06fc8e0f7dbfad243aa377651234d422d523c2a3297366343b9ff5165922"
   name = "knative.dev/test-infra"
   packages = [
     "scripts",
     "tools/dep-collector",
   ]
   pruneopts = "UT"
-  revision = "f645de8d9a500a3fd00149f1b4e693029d678132"
+  revision = "e6e89d29e93a3f4dba44c4a694d704e3e8921c64"

 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"

vendor/knative.dev/pkg/Gopkg.lock (generated, vendored)

@@ -1369,14 +1369,14 @@

 [[projects]]
   branch = "master"
-  digest = "1:8a965ebe2d83033f6a07e926357f7341b6c7f42e165a3e13c7c8113b953a265b"
+  digest = "1:dc9ba2b25034dbe099b604ce1e243b42502d4ea7b096f844abd1f1617e1151ac"
   name = "knative.dev/test-infra"
   packages = [
     "scripts",
     "tools/dep-collector",
   ]
   pruneopts = "UT"
-  revision = "1be83cfc7702b712d4dcec6209105b012e930308"
+  revision = "f645de8d9a500a3fd00149f1b4e693029d678132"

 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"


@@ -196,11 +196,11 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
     if reconcileEvent != nil {
         var event *reconciler.ReconcilerEvent
         if reconciler.EventAs(reconcileEvent, &event) {
-            logger.Infow("returned an event", zap.Any("event", reconcileEvent))
+            logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
             r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...)
             return nil
         } else {
-            logger.Errorw("returned an error", zap.Error(reconcileEvent))
+            logger.Errorw("Returned an error", zap.Error(reconcileEvent))
             r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
             return reconcileEvent
         }


@@ -195,11 +195,11 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
     if reconcileEvent != nil {
         var event *reconciler.ReconcilerEvent
         if reconciler.EventAs(reconcileEvent, &event) {
-            logger.Infow("returned an event", zap.Any("event", reconcileEvent))
+            logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
             r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...)
             return nil
         } else {
-            logger.Errorw("returned an error", zap.Error(reconcileEvent))
+            logger.Errorw("Returned an error", zap.Error(reconcileEvent))
             r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
             return reconcileEvent
         }


@@ -329,10 +329,11 @@ func (c *Impl) EnqueueKeyAfter(key types.NamespacedName, delay time.Duration) {
     c.logger.Debugf("Adding to queue %s (delay: %v, depth: %d)", safeKey(key), delay, c.WorkQueue.Len())
 }

-// Run starts the controller's worker threads, the number of which is threadiness.
-// It then blocks until stopCh is closed, at which point it shuts down its internal
-// work queue and waits for workers to finish processing their current work items.
-func (c *Impl) Run(threadiness int, stopCh <-chan struct{}) error {
+// RunContext starts the controller's worker threads, the number of which is threadiness.
+// It then blocks until the context is cancelled, at which point it shuts down its
+// internal work queue and waits for workers to finish processing their current
+// work items.
+func (c *Impl) RunContext(ctx context.Context, threadiness int) error {
     defer runtime.HandleCrash()
     sg := sync.WaitGroup{}
     defer sg.Wait()
@@ -356,12 +357,23 @@ func (c *Impl) Run(threadiness int, stopCh <-chan struct{}) error {
     }

     logger.Info("Started workers")
-    <-stopCh
+    <-ctx.Done()
     logger.Info("Shutting down workers")

     return nil
 }

+// DEPRECATED use RunContext instead.
+func (c *Impl) Run(threadiness int, stopCh <-chan struct{}) error {
+    // Create a context that is cancelled when stopCh is closed.
+    ctx, cancel := context.WithCancel(context.Background())
+    go func() {
+        <-stopCh
+        cancel()
+    }()
+    return c.RunContext(ctx, threadiness)
+}
+
 // processNextWorkItem will read a single work item off the workqueue and
 // attempt to process it, by calling Reconcile on our Reconciler.
 func (c *Impl) processNextWorkItem() bool {
@@ -529,14 +541,14 @@ func RunInformers(stopCh <-chan struct{}, informers ...Informer) (func(), error)
 }

 // StartAll kicks off all of the passed controllers with DefaultThreadsPerController.
-func StartAll(stopCh <-chan struct{}, controllers ...*Impl) {
+func StartAll(ctx context.Context, controllers ...*Impl) {
     wg := sync.WaitGroup{}
     // Start all of the controllers.
     for _, ctrlr := range controllers {
         wg.Add(1)
         go func(c *Impl) {
             defer wg.Done()
-            c.Run(DefaultThreadsPerController, stopCh)
+            c.RunContext(ctx, DefaultThreadsPerController)
         }(ctrlr)
     }
     wg.Wait()
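The deprecated Run shim above is also the migration recipe for existing callers: wrap a legacy stop channel in a cancellable context and pass that to RunContext and StartAll. A minimal, stdlib-only sketch of that conversion (the `ctrl`/`controllers` names in the comments are placeholders, not part of this change):

```go
package main

import (
	"context"
	"time"
)

// stopChToContext mirrors the deprecation shim: the returned context is
// cancelled as soon as the legacy stop channel is closed.
func stopChToContext(stopCh <-chan struct{}) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		<-stopCh
		cancel()
	}()
	return ctx
}

func main() {
	stopCh := make(chan struct{})
	ctx := stopChToContext(stopCh)

	// Before: ctrl.Run(2, stopCh)     and  go controller.StartAll(stopCh, controllers...)
	// After:  ctrl.RunContext(ctx, 2) and  go controller.StartAll(ctx, controllers...)

	time.AfterFunc(10*time.Millisecond, func() { close(stopCh) })
	<-ctx.Done() // unblocks once stopCh is closed
}
```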


@@ -109,14 +109,11 @@ func GetLoggingConfig(ctx context.Context) (*logging.Config, error) {

 // GetLeaderElectionConfig gets the leader election config.
 func GetLeaderElectionConfig(ctx context.Context) (*kle.Config, error) {
     leaderElectionConfigMap, err := kubeclient.Get(ctx).CoreV1().ConfigMaps(system.Namespace()).Get(kle.ConfigMapName(), metav1.GetOptions{})
-    if err != nil {
-        if apierrors.IsNotFound(err) {
-            return kle.NewConfigFromConfigMap(nil)
-        }
+    if apierrors.IsNotFound(err) {
+        return kle.NewConfigFromConfigMap(nil)
+    } else if err != nil {
         return nil, err
     }
     return kle.NewConfigFromConfigMap(leaderElectionConfigMap)
 }
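The flattened error handling above relies on apierrors.IsNotFound treating a nil error as "not a NotFound error", so a successful Get falls through to the final return. A small sketch verifying that property (k8s.io/apimachinery only; the resource and name used are illustrative):

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// A nil error is not NotFound, so the new ordering is safe when Get succeeds.
	fmt.Println(apierrors.IsNotFound(nil)) // false

	// A genuine NotFound error is still caught before the generic err branch.
	nf := apierrors.NewNotFound(schema.GroupResource{Resource: "configmaps"}, "config-leader-election")
	fmt.Println(apierrors.IsNotFound(nf)) // true
}
```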
@@ -183,7 +180,7 @@ func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, cto
         logger.Fatalw("Failed to start informers", zap.Error(err))
     }
     logger.Info("Starting controllers...")
-    go controller.StartAll(ctx.Done(), controllers...)
+    go controller.StartAll(ctx, controllers...)

     <-ctx.Done()
 }
@@ -199,7 +196,7 @@ func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, cto
         logger.Infof("%v will not run in leader-elected mode", component)
         run(ctx)
     } else {
-        RunLeaderElected(ctx, logger, run, component, leConfig)
+        RunLeaderElected(ctx, logger, run, leConfig)
     }
 }
@@ -269,7 +266,7 @@ func WebhookMainWithConfig(ctx context.Context, component string, cfg *rest.Conf
         wh.InformersHaveSynced()
     }
     logger.Info("Starting controllers...")
-    go controller.StartAll(ctx.Done(), controllers...)
+    go controller.StartAll(ctx, controllers...)

     // This will block until either a signal arrives or one of the grouped functions
     // returns an error.
@@ -419,7 +416,7 @@ func ControllersAndWebhooksFromCtors(ctx context.Context,

 // RunLeaderElected runs the given function in leader elected mode. The function
 // will be run only once the leader election lock is obtained.
-func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(context.Context), component string, leConfig kle.ComponentConfig) {
+func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(context.Context), leConfig kle.ComponentConfig) {
     recorder := controller.GetEventRecorder(ctx)
     if recorder == nil {
         // Create event broadcaster
@@ -431,7 +428,7 @@ func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(c
             &typedcorev1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events(system.Namespace())}),
         }
         recorder = eventBroadcaster.NewRecorder(
-            scheme.Scheme, corev1.EventSource{Component: component})
+            scheme.Scheme, corev1.EventSource{Component: leConfig.Component})

         go func() {
             <-ctx.Done()
             for _, w := range watches {
@@ -446,12 +443,12 @@ func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(c
     if err != nil {
         logger.Fatalw("Failed to get unique ID for leader election", zap.Error(err))
     }
-    logger.Infof("%v will run in leader-elected mode with id %v", component, id)
+    logger.Infof("%v will run in leader-elected mode with id %v", leConfig.Component, id)

     // rl is the resource used to hold the leader election lock.
     rl, err := resourcelock.New(leConfig.ResourceLock,
         system.Namespace(), // use namespace we are running in
-        component,          // component is used as the resource name
+        leConfig.Component, // component is used as the resource name
         kubeclient.Get(ctx).CoreV1(),
         kubeclient.Get(ctx).CoordinationV1(),
         resourcelock.ResourceLockConfig{
@@ -476,6 +473,6 @@ func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(c
         },
         ReleaseOnCancel: true,
         // TODO: use health check watchdog, knative/pkg#1048
-        Name: component,
+        Name: leConfig.Component,
     })
 }


@@ -98,6 +98,7 @@ type Config struct {
 func (c *Config) GetComponentConfig(name string) ComponentConfig {
     if c.EnabledComponents.Has(name) {
         return ComponentConfig{
+            Component:     name,
             LeaderElect:   true,
             ResourceLock:  c.ResourceLock,
             LeaseDuration: c.LeaseDuration,
@@ -106,7 +107,7 @@ func (c *Config) GetComponentConfig(name string) ComponentConfig {
         }
     }

-    return defaultComponentConfig()
+    return defaultComponentConfig(name)
 }

 func defaultConfig() *Config {
@@ -121,6 +122,7 @@ func defaultConfig() *Config {

 // ComponentConfig represents the leader election config for a single component.
 type ComponentConfig struct {
+    Component     string
     LeaderElect   bool
     ResourceLock  string
     LeaseDuration time.Duration
@@ -128,8 +130,9 @@ type ComponentConfig struct {
     RetryPeriod   time.Duration
 }

-func defaultComponentConfig() ComponentConfig {
+func defaultComponentConfig(name string) ComponentConfig {
     return ComponentConfig{
+        Component:   name,
         LeaderElect: false,
     }
 }
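Because the component name now rides inside ComponentConfig, consumers such as RunLeaderElected can read it from the config instead of taking a separate argument, which is what the sharedmain changes above do. A toy model of that flow, with local types standing in for the kle package rather than the real knative.dev API:

```go
package main

import "fmt"

// componentConfig mirrors the shape of ComponentConfig after this change.
type componentConfig struct {
	Component   string
	LeaderElect bool
}

func defaultComponentConfig(name string) componentConfig {
	return componentConfig{Component: name, LeaderElect: false}
}

// runLeaderElected stands in for RunLeaderElected: the component name comes
// from the config and doubles as the resource-lock name.
func runLeaderElected(cfg componentConfig) {
	fmt.Printf("%s: leaderElect=%v, lock name=%q\n", cfg.Component, cfg.LeaderElect, cfg.Component)
}

func main() {
	runLeaderElected(defaultComponentConfig("webhook"))
}
```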


@@ -20,6 +20,8 @@ import (
     "sync"

     corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/types"

+    "knative.dev/pkg/kmeta"
     "knative.dev/pkg/tracker"
 )
@@ -31,7 +33,7 @@ type NullTracker = FakeTracker

 // FakeTracker implements Tracker.
 type FakeTracker struct {
     sync.Mutex
-    references []tracker.Reference
+    references map[tracker.Reference]map[types.NamespacedName]struct{}
 }

 var _ tracker.Interface = (*FakeTracker)(nil)
@@ -39,6 +41,25 @@ var _ tracker.Interface = (*FakeTracker)(nil)

 // OnChanged implements OnChanged.
 func (*FakeTracker) OnChanged(interface{}) {}

+// OnDeletedObserver implements OnDeletedObserver.
+func (n *FakeTracker) OnDeletedObserver(obj interface{}) {
+    item, err := kmeta.DeletionHandlingAccessor(obj)
+    if err != nil {
+        return
+    }
+    key := types.NamespacedName{Namespace: item.GetNamespace(), Name: item.GetName()}
+
+    n.Lock()
+    defer n.Unlock()
+
+    for ref, objs := range n.references {
+        delete(objs, key)
+        if len(objs) == 0 {
+            delete(n.references, ref)
+        }
+    }
+}
+
 // Track implements tracker.Interface.
 func (n *FakeTracker) Track(ref corev1.ObjectReference, obj interface{}) error {
     return n.TrackReference(tracker.Reference{
@@ -51,10 +72,26 @@ func (n *FakeTracker) Track(ref corev1.ObjectReference, obj interface{}) error {

 // TrackReference implements tracker.Interface.
 func (n *FakeTracker) TrackReference(ref tracker.Reference, obj interface{}) error {
+    item, err := kmeta.DeletionHandlingAccessor(obj)
+    if err != nil {
+        return err
+    }
+    key := types.NamespacedName{Namespace: item.GetNamespace(), Name: item.GetName()}
+
     n.Lock()
     defer n.Unlock()

-    n.references = append(n.references, ref)
+    if n.references == nil {
+        n.references = make(map[tracker.Reference]map[types.NamespacedName]struct{}, 1)
+    }
+    objs := n.references[ref]
+    if objs == nil {
+        objs = make(map[types.NamespacedName]struct{}, 1)
+    }
+    objs[key] = struct{}{}
+    n.references[ref] = objs
+
     return nil
 }
@@ -63,5 +100,10 @@ func (n *FakeTracker) References() []tracker.Reference {
     n.Lock()
     defer n.Unlock()

-    return append(n.references[:0:0], n.references...)
+    refs := make([]tracker.Reference, 0, len(n.references))
+    for ref := range n.references {
+        refs = append(refs, ref)
+    }
+    return refs
 }
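The fake tracker now maps each tracked Reference to the set of observers (as NamespacedNames) that registered it, so OnDeletedObserver can drop one observer everywhere and prune references with no remaining observers. A self-contained miniature of that bookkeeping, using toy types rather than the tracker package:

```go
package main

import "fmt"

type ref struct{ Kind, Name string }
type observer struct{ Namespace, Name string }

func main() {
	references := map[ref]map[observer]struct{}{}

	// track records that an observer registered interest in a reference.
	track := func(r ref, o observer) {
		objs := references[r]
		if objs == nil {
			objs = map[observer]struct{}{}
			references[r] = objs
		}
		objs[o] = struct{}{}
	}

	// onDeletedObserver removes the observer from every reference and prunes
	// references whose observer set became empty.
	onDeletedObserver := func(o observer) {
		for r, objs := range references {
			delete(objs, o)
			if len(objs) == 0 {
				delete(references, r)
			}
		}
	}

	parent := observer{Namespace: "default", Name: "my-parent"}
	track(ref{Kind: "ConfigMap", Name: "cfg"}, parent)
	onDeletedObserver(parent)
	fmt.Println(len(references)) // 0: the reference is pruned with its last observer
}
```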


@@ -18,10 +18,10 @@ package mako

 import (
     "context"
-    "fmt"
     "log"
     "path/filepath"
     "runtime"
+    "strconv"
     "strings"

     "cloud.google.com/go/compute/metadata"
@@ -119,7 +119,7 @@ func SetupHelper(ctx context.Context, benchmarkKey *string, benchmarkName *strin
     if err != nil {
         return nil, err
     }
-    tags = append(tags, "nodes="+fmt.Sprintf("%d", len(nodes.Items)))
+    tags = append(tags, "nodes="+strconv.Itoa(len(nodes.Items)))

     // Decorate GCP metadata as tags (when we're running on GCP).
     if projectID, err := metadata.ProjectID(); err != nil {


@@ -52,9 +52,9 @@ type server struct {
 func (s *server) Store(ctx context.Context, in *qspb.StoreInput) (*qspb.StoreOutput, error) {
     m := jsonpb.Marshaler{}
     qi, _ := m.MarshalToString(in.GetQuickstoreInput())
-    fmt.Printf("# Received input")
-    fmt.Fprintf(s.sb, "# %s\n", qi)
+    fmt.Println("# Received input")
+    fmt.Fprintln(s.sb, "#", qi)
     writer := csv.NewWriter(s.sb)
     kv := calculateKeyIndexColumnsMap(s.info)
@@ -62,30 +62,30 @@ func (s *server) Store(ctx context.Context, in *qspb.StoreInput) (*qspb.StoreOut
     for k, i := range kv {
         cols[i] = k
     }
-    fmt.Fprintf(s.sb, "# %s\n", strings.Join(cols, ","))
+    fmt.Fprintln(s.sb, "#", strings.Join(cols, ","))
     for _, sp := range in.GetSamplePoints() {
         for _, mv := range sp.GetMetricValueList() {
-            vals := map[string]string{"inputValue": fmt.Sprintf("%f", sp.GetInputValue())}
-            vals[mv.GetValueKey()] = fmt.Sprintf("%f", mv.GetValue())
+            vals := map[string]string{"inputValue": fmt.Sprint(sp.GetInputValue())}
+            vals[mv.GetValueKey()] = fmt.Sprint(mv.GetValue())
             writer.Write(makeRow(vals, kv))
         }
     }

     for _, ra := range in.GetRunAggregates() {
-        vals := map[string]string{ra.GetValueKey(): fmt.Sprintf("%f", ra.GetValue())}
+        vals := map[string]string{ra.GetValueKey(): fmt.Sprint(ra.GetValue())}
         writer.Write(makeRow(vals, kv))
     }

     for _, sa := range in.GetSampleErrors() {
-        vals := map[string]string{"inputValue": fmt.Sprintf("%f", sa.GetInputValue()), "errorMessage": sa.GetErrorMessage()}
+        vals := map[string]string{"inputValue": fmt.Sprint(sa.GetInputValue()), "errorMessage": sa.GetErrorMessage()}
         writer.Write(makeRow(vals, kv))
     }

     writer.Flush()
-    fmt.Fprintf(s.sb, "# CSV end\n")
-    fmt.Printf("# Input completed")
+    fmt.Fprintln(s.sb, "# CSV end")
+    fmt.Println("# Input completed")
     return &qspb.StoreOutput{}, nil
 }
@@ -124,7 +124,7 @@ func main() {
     lis, err := net.Listen("tcp", port)
     if err != nil {
-        log.Fatalf("failed to listen: %v", err)
+        log.Fatal("Failed to listen:", err)
     }
     s := grpc.NewServer(grpc.MaxRecvMsgSize(defaultServerMaxReceiveMessageSize))
     stopCh := make(chan struct{})
@@ -136,7 +136,7 @@ func main() {
     go func() {
         qspb.RegisterQuickstoreServer(s, &server{info: info, stopCh: stopCh, sb: &sb})
         if err := s.Serve(lis); err != nil {
-            log.Fatalf("failed to serve: %v", err)
+            log.Fatal("Failed to serve:", err)
         }
     }()
     <-stopCh
@@ -161,8 +161,8 @@ func main() {
         if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed {
             log.Fatal(err)
         }
-        fmt.Print("Successfully served the results")
+        fmt.Println("Successfully served the results")
     } else {
-        fmt.Print(sb.String())
+        fmt.Println(sb.String())
     }
 }
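The switch from fmt.Sprintf("%f", …) to fmt.Sprint changes how floats are rendered in the emitted CSV: %f always pads to six decimal places, while Sprint uses Go's default (shortest) float formatting. A quick illustration:

```go
package main

import "fmt"

func main() {
	v := 12.5
	fmt.Println(fmt.Sprintf("%f", v)) // 12.500000 (fixed six decimals)
	fmt.Println(fmt.Sprint(v))        // 12.5 (shortest representation)
}
```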


@@ -73,7 +73,7 @@ func (b *BasicTypeKindNode) string(v reflect.Value) string {
         }
     case reflect.Float32, reflect.Float64:
         if v.Float() != 0 {
-            return fmt.Sprintf("%f", v.Float())
+            return fmt.Sprint(v.Float())
         }
     case reflect.String:
         if v.Len() != 0 {


@@ -275,3 +275,32 @@ func (i *impl) OnChanged(obj interface{}) {
         }
     }
 }
+
+// OnDeletedObserver implements Interface.
+func (i *impl) OnDeletedObserver(obj interface{}) {
+    item, err := kmeta.DeletionHandlingAccessor(obj)
+    if err != nil {
+        return
+    }
+    key := types.NamespacedName{Namespace: item.GetNamespace(), Name: item.GetName()}
+
+    i.m.Lock()
+    defer i.m.Unlock()
+
+    // Remove exact matches.
+    for ref, matchers := range i.exact {
+        delete(matchers, key)
+        if len(matchers) == 0 {
+            delete(i.exact, ref)
+        }
+    }
+
+    // Remove inexact matches.
+    for ref, matchers := range i.inexact {
+        delete(matchers, key)
+        if len(matchers) == 0 {
+            delete(i.inexact, ref)
+        }
+    }
+}


@@ -70,6 +70,11 @@ type Interface interface {
     // OnChanged is a callback to register with the InformerFactory
     // so that we are notified for appropriate object changes.
     OnChanged(obj interface{})
+
+    // OnDeletedObserver is a callback to register with the InformerFactory
+    // so that we are notified for deletions of a watching parent to
+    // remove the respective tracking.
+    OnDeletedObserver(obj interface{})
 }

 // GroupVersionKind returns the GroupVersion of the object referenced.


@@ -62,7 +62,7 @@ func NewAdmissionController(
     }

     logger := logging.FromContext(ctx)
-    c := controller.NewImpl(wh, logger, "ConfigMapWebhook")
+    c := controller.NewImpl(wh, logger, "DefaultingWebhook")

     // Reconcile when the named MutatingWebhookConfiguration changes.
     mwhInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{


@@ -77,7 +77,7 @@ func NewAdmissionController(
     }

     logger := logging.FromContext(ctx)
-    c := controller.NewImpl(wh, logger, "ConfigMapWebhook")
+    c := controller.NewImpl(wh, logger, "ValidationWebhook")

     // Reconcile when the named ValidatingWebhookConfiguration changes.
     vwhInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{


@@ -49,7 +49,7 @@ function pr_only_contains() {

 # List changed files in the current PR.
 # This is implemented as a function so it can be mocked in unit tests.
 function list_changed_files() {
-  /workspace/githubhelper -list-changed-files -github-token /etc/repoview-token/token
+  git --no-pager diff --name-only ${PULL_BASE_SHA}..${PULL_SHA}
 }

 # Initialize flags and context for presubmit tests: