mirror of https://github.com/knative/caching.git
[master] Auto-update dependencies (#257)
Produced via:
  `./hack/update-deps.sh --upgrade && ./hack/update-codegen.sh`
/assign n3wscott vagababov
/cc n3wscott vagababov
parent c8036952a6
commit 732bdd7594
@@ -966,7 +966,7 @@
 [[projects]]
   branch = "master"
-  digest = "1:bcff22fa5a7779335e4812c47866681e266cdd83a10356b6e967eefac4563de1"
+  digest = "1:04f788ac12f571fa1ca01efc29cff6ae68de257287c7f3aa4485f768cceaf1b1"
   name = "knative.dev/pkg"
   packages = [
     "apis",
@@ -986,18 +986,18 @@
     "reconciler",
   ]
   pruneopts = "T"
-  revision = "4945766b290cbd486c9e3fdaded78985875e516b"
+  revision = "7b6e21a57a3169cfe4acf71b2fa34ca0f6e3898e"

 [[projects]]
   branch = "master"
-  digest = "1:dc9ba2b25034dbe099b604ce1e243b42502d4ea7b096f844abd1f1617e1151ac"
+  digest = "1:4d8d06fc8e0f7dbfad243aa377651234d422d523c2a3297366343b9ff5165922"
   name = "knative.dev/test-infra"
   packages = [
     "scripts",
     "tools/dep-collector",
   ]
   pruneopts = "UT"
-  revision = "f645de8d9a500a3fd00149f1b4e693029d678132"
+  revision = "e6e89d29e93a3f4dba44c4a694d704e3e8921c64"

 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
@@ -1369,14 +1369,14 @@

 [[projects]]
   branch = "master"
-  digest = "1:8a965ebe2d83033f6a07e926357f7341b6c7f42e165a3e13c7c8113b953a265b"
+  digest = "1:dc9ba2b25034dbe099b604ce1e243b42502d4ea7b096f844abd1f1617e1151ac"
   name = "knative.dev/test-infra"
   packages = [
     "scripts",
     "tools/dep-collector",
   ]
   pruneopts = "UT"
-  revision = "1be83cfc7702b712d4dcec6209105b012e930308"
+  revision = "f645de8d9a500a3fd00149f1b4e693029d678132"

 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
@@ -196,11 +196,11 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
     if reconcileEvent != nil {
         var event *reconciler.ReconcilerEvent
         if reconciler.EventAs(reconcileEvent, &event) {
-            logger.Infow("returned an event", zap.Any("event", reconcileEvent))
+            logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
             r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...)
             return nil
         } else {
-            logger.Errorw("returned an error", zap.Error(reconcileEvent))
+            logger.Errorw("Returned an error", zap.Error(reconcileEvent))
             r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
             return reconcileEvent
         }
@@ -195,11 +195,11 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
     if reconcileEvent != nil {
         var event *reconciler.ReconcilerEvent
        if reconciler.EventAs(reconcileEvent, &event) {
-            logger.Infow("returned an event", zap.Any("event", reconcileEvent))
+            logger.Infow("Returned an event", zap.Any("event", reconcileEvent))
             r.Recorder.Eventf(resource, event.EventType, event.Reason, event.Format, event.Args...)
             return nil
         } else {
-            logger.Errorw("returned an error", zap.Error(reconcileEvent))
+            logger.Errorw("Returned an error", zap.Error(reconcileEvent))
             r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error())
             return reconcileEvent
         }
@@ -329,10 +329,11 @@ func (c *Impl) EnqueueKeyAfter(key types.NamespacedName, delay time.Duration) {
     c.logger.Debugf("Adding to queue %s (delay: %v, depth: %d)", safeKey(key), delay, c.WorkQueue.Len())
 }

-// Run starts the controller's worker threads, the number of which is threadiness.
-// It then blocks until stopCh is closed, at which point it shuts down its internal
-// work queue and waits for workers to finish processing their current work items.
-func (c *Impl) Run(threadiness int, stopCh <-chan struct{}) error {
+// RunContext starts the controller's worker threads, the number of which is threadiness.
+// It then blocks until the context is cancelled, at which point it shuts down its
+// internal work queue and waits for workers to finish processing their current
+// work items.
+func (c *Impl) RunContext(ctx context.Context, threadiness int) error {
     defer runtime.HandleCrash()
     sg := sync.WaitGroup{}
     defer sg.Wait()
@@ -356,12 +357,23 @@ func (c *Impl) Run(threadiness int, stopCh <-chan struct{}) error {
     }

     logger.Info("Started workers")
-    <-stopCh
+    <-ctx.Done()
     logger.Info("Shutting down workers")

     return nil
 }

+// DEPRECATED use RunContext instead.
+func (c *Impl) Run(threadiness int, stopCh <-chan struct{}) error {
+    // Create a context that is cancelled when the stopCh is called.
+    ctx, cancel := context.WithCancel(context.Background())
+    go func() {
+        <-stopCh
+        cancel()
+    }()
+    return c.RunContext(ctx, threadiness)
+}
+
 // processNextWorkItem will read a single work item off the workqueue and
 // attempt to process it, by calling Reconcile on our Reconciler.
 func (c *Impl) processNextWorkItem() bool {
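For downstream callers, the Run to RunContext migration looks roughly like the sketch below; the helper name, the impl variable, and the thread count are illustrative assumptions, not part of this commit.

package example

import (
    "context"
    "log"

    "knative.dev/pkg/controller"
)

// startController is a hypothetical helper showing a caller moving from the
// deprecated Run(threadiness, stopCh) to RunContext(ctx, threadiness).
func startController(ctx context.Context, impl *controller.Impl) {
    go func() {
        // RunContext blocks until ctx is cancelled, then shuts down the
        // work queue and waits for in-flight items to finish.
        if err := impl.RunContext(ctx, 2); err != nil {
            log.Fatal("Error running controller: ", err)
        }
    }()
}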
@@ -529,14 +541,14 @@ func RunInformers(stopCh <-chan struct{}, informers ...Informer) (func(), error)
 }

 // StartAll kicks off all of the passed controllers with DefaultThreadsPerController.
-func StartAll(stopCh <-chan struct{}, controllers ...*Impl) {
+func StartAll(ctx context.Context, controllers ...*Impl) {
     wg := sync.WaitGroup{}
     // Start all of the controllers.
     for _, ctrlr := range controllers {
         wg.Add(1)
         go func(c *Impl) {
             defer wg.Done()
-            c.Run(DefaultThreadsPerController, stopCh)
+            c.RunContext(ctx, DefaultThreadsPerController)
         }(ctrlr)
     }
     wg.Wait()
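StartAll's new signature is the caller-side counterpart of RunContext; a minimal sketch of the new call shape (the wrapper itself is made up):

package example

import (
    "context"

    "knative.dev/pkg/controller"
)

// runAll mirrors the call-site change: StartAll now receives the context
// itself rather than ctx.Done(), so cancellation reaches each controller's
// RunContext directly.
func runAll(ctx context.Context, controllers ...*controller.Impl) {
    // Blocks until ctx is cancelled and every controller has returned.
    controller.StartAll(ctx, controllers...)
}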
@@ -109,14 +109,11 @@ func GetLoggingConfig(ctx context.Context) (*logging.Config, error) {
 // GetLeaderElectionConfig gets the leader election config.
 func GetLeaderElectionConfig(ctx context.Context) (*kle.Config, error) {
     leaderElectionConfigMap, err := kubeclient.Get(ctx).CoreV1().ConfigMaps(system.Namespace()).Get(kle.ConfigMapName(), metav1.GetOptions{})
-    if err != nil {
-        if apierrors.IsNotFound(err) {
-            return kle.NewConfigFromConfigMap(nil)
-        }
-
+    if apierrors.IsNotFound(err) {
+        return kle.NewConfigFromConfigMap(nil)
+    } else if err != nil {
         return nil, err
     }

     return kle.NewConfigFromConfigMap(leaderElectionConfigMap)
 }
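The rework flattens the nested error check into a not-found-first chain. The same pattern in isolation, as a hedged sketch (the getter and parse helper are illustrative stand-ins for the kubeclient call and kle.NewConfigFromConfigMap):

package example

import (
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// loadOrDefault treats "not found" as "build from defaults" and propagates
// every other error; parse(nil) is assumed to return the default config.
func loadOrDefault(get func() (*corev1.ConfigMap, error), parse func(*corev1.ConfigMap) (map[string]string, error)) (map[string]string, error) {
    cm, err := get()
    if apierrors.IsNotFound(err) {
        return parse(nil)
    } else if err != nil {
        return nil, err
    }
    return parse(cm)
}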
@@ -183,7 +180,7 @@ func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, cto
         logger.Fatalw("Failed to start informers", zap.Error(err))
     }
     logger.Info("Starting controllers...")
-    go controller.StartAll(ctx.Done(), controllers...)
+    go controller.StartAll(ctx, controllers...)

     <-ctx.Done()
 }
@@ -199,7 +196,7 @@ func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, cto
         logger.Infof("%v will not run in leader-elected mode", component)
         run(ctx)
     } else {
-        RunLeaderElected(ctx, logger, run, component, leConfig)
+        RunLeaderElected(ctx, logger, run, leConfig)
     }
 }
@@ -269,7 +266,7 @@ func WebhookMainWithConfig(ctx context.Context, component string, cfg *rest.Conf
         wh.InformersHaveSynced()
     }
     logger.Info("Starting controllers...")
-    go controller.StartAll(ctx.Done(), controllers...)
+    go controller.StartAll(ctx, controllers...)

     // This will block until either a signal arrives or one of the grouped functions
     // returns an error.
@@ -419,7 +416,7 @@ func ControllersAndWebhooksFromCtors(ctx context.Context,

 // RunLeaderElected runs the given function in leader elected mode. The function
 // will be run only once the leader election lock is obtained.
-func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(context.Context), component string, leConfig kle.ComponentConfig) {
+func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(context.Context), leConfig kle.ComponentConfig) {
     recorder := controller.GetEventRecorder(ctx)
     if recorder == nil {
         // Create event broadcaster
@@ -431,7 +428,7 @@ func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(c
             &typedcorev1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events(system.Namespace())}),
         }
         recorder = eventBroadcaster.NewRecorder(
-            scheme.Scheme, corev1.EventSource{Component: component})
+            scheme.Scheme, corev1.EventSource{Component: leConfig.Component})
         go func() {
             <-ctx.Done()
             for _, w := range watches {
@@ -446,12 +443,12 @@ func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(c
     if err != nil {
         logger.Fatalw("Failed to get unique ID for leader election", zap.Error(err))
     }
-    logger.Infof("%v will run in leader-elected mode with id %v", component, id)
+    logger.Infof("%v will run in leader-elected mode with id %v", leConfig.Component, id)

     // rl is the resource used to hold the leader election lock.
     rl, err := resourcelock.New(leConfig.ResourceLock,
         system.Namespace(), // use namespace we are running in
-        component,          // component is used as the resource name
+        leConfig.Component, // component is used as the resource name
         kubeclient.Get(ctx).CoreV1(),
         kubeclient.Get(ctx).CoordinationV1(),
         resourcelock.ResourceLockConfig{
@@ -476,6 +473,6 @@ func RunLeaderElected(ctx context.Context, logger *zap.SugaredLogger, run func(c
         },
         ReleaseOnCancel: true,
         // TODO: use health check watchdog, knative/pkg#1048
-        Name: component,
+        Name: leConfig.Component,
     })
 }
@@ -98,6 +98,7 @@ type Config struct {
 func (c *Config) GetComponentConfig(name string) ComponentConfig {
     if c.EnabledComponents.Has(name) {
         return ComponentConfig{
+            Component:     name,
             LeaderElect:   true,
             ResourceLock:  c.ResourceLock,
             LeaseDuration: c.LeaseDuration,
@@ -106,7 +107,7 @@ func (c *Config) GetComponentConfig(name string) ComponentConfig {
         }
     }

-    return defaultComponentConfig()
+    return defaultComponentConfig(name)
 }

 func defaultConfig() *Config {
@@ -121,6 +122,7 @@ func defaultConfig() *Config {

 // ComponentConfig represents the leader election config for a single component.
 type ComponentConfig struct {
+    Component     string
     LeaderElect   bool
     ResourceLock  string
     LeaseDuration time.Duration
@@ -128,8 +130,9 @@ type ComponentConfig struct {
     RetryPeriod   time.Duration
 }

-func defaultComponentConfig() ComponentConfig {
+func defaultComponentConfig(name string) ComponentConfig {
     return ComponentConfig{
+        Component:   name,
         LeaderElect: false,
     }
 }
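With Component carried inside ComponentConfig, callers such as RunLeaderElected no longer take a separate component argument; the same name now feeds the resource-lock name and the event source. A hedged usage sketch (the helper is an assumption, not this commit's API):

package example

import (
    "fmt"

    kle "knative.dev/pkg/leaderelection"
)

// describe prints the per-component settings resolved from the global config;
// Component is populated whether the component is enabled or defaulted.
func describe(cfg *kle.Config, name string) {
    cc := cfg.GetComponentConfig(name)
    fmt.Printf("component=%s leaderElect=%v lock=%s\n", cc.Component, cc.LeaderElect, cc.ResourceLock)
}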
@@ -20,6 +20,8 @@ import (
     "sync"

     corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/types"
+    "knative.dev/pkg/kmeta"
     "knative.dev/pkg/tracker"
 )
@@ -31,7 +33,7 @@ type NullTracker = FakeTracker
 // FakeTracker implements Tracker.
 type FakeTracker struct {
     sync.Mutex
-    references []tracker.Reference
+    references map[tracker.Reference]map[types.NamespacedName]struct{}
 }

 var _ tracker.Interface = (*FakeTracker)(nil)
@@ -39,6 +41,25 @@ var _ tracker.Interface = (*FakeTracker)(nil)
 // OnChanged implements OnChanged.
 func (*FakeTracker) OnChanged(interface{}) {}

+// OnDeletedObserver implements OnDeletedObserver.
+func (n *FakeTracker) OnDeletedObserver(obj interface{}) {
+    item, err := kmeta.DeletionHandlingAccessor(obj)
+    if err != nil {
+        return
+    }
+    key := types.NamespacedName{Namespace: item.GetNamespace(), Name: item.GetName()}
+
+    n.Lock()
+    defer n.Unlock()
+
+    for ref, objs := range n.references {
+        delete(objs, key)
+        if len(objs) == 0 {
+            delete(n.references, ref)
+        }
+    }
+}
+
 // Track implements tracker.Interface.
 func (n *FakeTracker) Track(ref corev1.ObjectReference, obj interface{}) error {
     return n.TrackReference(tracker.Reference{
@@ -51,10 +72,26 @@ func (n *FakeTracker) Track(ref corev1.ObjectReference, obj interface{}) error {

 // TrackReference implements tracker.Interface.
 func (n *FakeTracker) TrackReference(ref tracker.Reference, obj interface{}) error {
+    item, err := kmeta.DeletionHandlingAccessor(obj)
+    if err != nil {
+        return err
+    }
+    key := types.NamespacedName{Namespace: item.GetNamespace(), Name: item.GetName()}
+
     n.Lock()
     defer n.Unlock()

-    n.references = append(n.references, ref)
+    if n.references == nil {
+        n.references = make(map[tracker.Reference]map[types.NamespacedName]struct{}, 1)
+    }
+
+    objs := n.references[ref]
+    if objs == nil {
+        objs = make(map[types.NamespacedName]struct{}, 1)
+    }
+    objs[key] = struct{}{}
+    n.references[ref] = objs
+
     return nil
 }
@@ -63,5 +100,10 @@ func (n *FakeTracker) References() []tracker.Reference {
     n.Lock()
     defer n.Unlock()

-    return append(n.references[:0:0], n.references...)
+    refs := make([]tracker.Reference, 0, len(n.references))
+    for ref := range n.references {
+        refs = append(refs, ref)
+    }
+
+    return refs
 }
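A stripped-down, runnable model of the new bookkeeping (object names are made up): the nested map deduplicates Track calls and lets OnDeletedObserver remove one observer everywhere, pruning reference entries whose observer set becomes empty.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/types"
)

// ref stands in for tracker.Reference; both are comparable map keys.
type ref struct{ kind, namespace, name string }

func main() {
    refs := map[ref]map[types.NamespacedName]struct{}{}
    observer := types.NamespacedName{Namespace: "default", Name: "observer-a"}

    // TrackReference: register the observer under the reference, deduplicated.
    r := ref{"Service", "default", "svc"}
    if refs[r] == nil {
        refs[r] = map[types.NamespacedName]struct{}{}
    }
    refs[r][observer] = struct{}{}

    // OnDeletedObserver: drop the observer everywhere, prune empty sets.
    for rr, objs := range refs {
        delete(objs, observer)
        if len(objs) == 0 {
            delete(refs, rr)
        }
    }
    fmt.Println(len(refs)) // 0
}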
@@ -18,10 +18,10 @@ package mako

 import (
     "context"
-    "fmt"
     "log"
     "path/filepath"
     "runtime"
+    "strconv"
     "strings"

     "cloud.google.com/go/compute/metadata"
@@ -119,7 +119,7 @@ func SetupHelper(ctx context.Context, benchmarkKey *string, benchmarkName *strin
     if err != nil {
         return nil, err
     }
-    tags = append(tags, "nodes="+fmt.Sprintf("%d", len(nodes.Items)))
+    tags = append(tags, "nodes="+strconv.Itoa(len(nodes.Items)))

     // Decorate GCP metadata as tags (when we're running on GCP).
     if projectID, err := metadata.ProjectID(); err != nil {
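Both forms render the count identically; strconv.Itoa just skips the format-string machinery. A tiny runnable comparison:

package main

import (
    "fmt"
    "strconv"
)

func main() {
    n := 3
    fmt.Println("nodes=" + strconv.Itoa(n))      // nodes=3
    fmt.Println("nodes=" + fmt.Sprintf("%d", n)) // nodes=3 (old form)
}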
@@ -52,9 +52,9 @@ type server struct {
 func (s *server) Store(ctx context.Context, in *qspb.StoreInput) (*qspb.StoreOutput, error) {
     m := jsonpb.Marshaler{}
     qi, _ := m.MarshalToString(in.GetQuickstoreInput())
-    fmt.Printf("# Received input")
+    fmt.Println("# Received input")

-    fmt.Fprintf(s.sb, "# %s\n", qi)
+    fmt.Fprintln(s.sb, "#", qi)
     writer := csv.NewWriter(s.sb)

     kv := calculateKeyIndexColumnsMap(s.info)
@@ -62,30 +62,30 @@ func (s *server) Store(ctx context.Context, in *qspb.StoreInput) (*qspb.StoreOut
     for k, i := range kv {
         cols[i] = k
     }
-    fmt.Fprintf(s.sb, "# %s\n", strings.Join(cols, ","))
+    fmt.Fprintln(s.sb, "#", strings.Join(cols, ","))

     for _, sp := range in.GetSamplePoints() {
         for _, mv := range sp.GetMetricValueList() {
-            vals := map[string]string{"inputValue": fmt.Sprintf("%f", sp.GetInputValue())}
-            vals[mv.GetValueKey()] = fmt.Sprintf("%f", mv.GetValue())
+            vals := map[string]string{"inputValue": fmt.Sprint(sp.GetInputValue())}
+            vals[mv.GetValueKey()] = fmt.Sprint(mv.GetValue())
             writer.Write(makeRow(vals, kv))
         }
     }

     for _, ra := range in.GetRunAggregates() {
-        vals := map[string]string{ra.GetValueKey(): fmt.Sprintf("%f", ra.GetValue())}
+        vals := map[string]string{ra.GetValueKey(): fmt.Sprint(ra.GetValue())}
         writer.Write(makeRow(vals, kv))
     }

     for _, sa := range in.GetSampleErrors() {
-        vals := map[string]string{"inputValue": fmt.Sprintf("%f", sa.GetInputValue()), "errorMessage": sa.GetErrorMessage()}
+        vals := map[string]string{"inputValue": fmt.Sprint(sa.GetInputValue()), "errorMessage": sa.GetErrorMessage()}
         writer.Write(makeRow(vals, kv))
     }

     writer.Flush()

-    fmt.Fprintf(s.sb, "# CSV end\n")
-    fmt.Printf("# Input completed")
+    fmt.Fprintln(s.sb, "# CSV end")
+    fmt.Println("# Input completed")

     return &qspb.StoreOutput{}, nil
 }
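The switch to fmt.Sprint is behavior-visible in the CSV: %f always pads to six decimal places, while fmt.Sprint uses the shortest %v representation. A runnable comparison:

package main

import "fmt"

func main() {
    v := 1.5
    fmt.Println(fmt.Sprintf("%f", v)) // 1.500000 (old form)
    fmt.Println(fmt.Sprint(v))        // 1.5
}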
@@ -124,7 +124,7 @@ func main() {

     lis, err := net.Listen("tcp", port)
     if err != nil {
-        log.Fatalf("failed to listen: %v", err)
+        log.Fatal("Failed to listen:", err)
     }
     s := grpc.NewServer(grpc.MaxRecvMsgSize(defaultServerMaxReceiveMessageSize))
     stopCh := make(chan struct{})
@@ -136,7 +136,7 @@ func main() {
     go func() {
         qspb.RegisterQuickstoreServer(s, &server{info: info, stopCh: stopCh, sb: &sb})
         if err := s.Serve(lis); err != nil {
-            log.Fatalf("failed to serve: %v", err)
+            log.Fatal("Failed to serve:", err)
         }
     }()
     <-stopCh
@@ -161,8 +161,8 @@ func main() {
         if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed {
             log.Fatal(err)
         }
-        fmt.Print("Successfully served the results")
+        fmt.Println("Successfully served the results")
     } else {
-        fmt.Print(sb.String())
+        fmt.Println(sb.String())
     }
 }
@@ -73,7 +73,7 @@ func (b *BasicTypeKindNode) string(v reflect.Value) string {
         }
     case reflect.Float32, reflect.Float64:
         if v.Float() != 0 {
-            return fmt.Sprintf("%f", v.Float())
+            return fmt.Sprint(v.Float())
         }
     case reflect.String:
         if v.Len() != 0 {
@@ -275,3 +275,32 @@ func (i *impl) OnChanged(obj interface{}) {
         }
     }
 }
+
+// OnDeletedObserver implements Interface.
+func (i *impl) OnDeletedObserver(obj interface{}) {
+    item, err := kmeta.DeletionHandlingAccessor(obj)
+    if err != nil {
+        return
+    }
+
+    key := types.NamespacedName{Namespace: item.GetNamespace(), Name: item.GetName()}
+
+    i.m.Lock()
+    defer i.m.Unlock()
+
+    // Remove exact matches.
+    for ref, matchers := range i.exact {
+        delete(matchers, key)
+        if len(matchers) == 0 {
+            delete(i.exact, ref)
+        }
+    }
+
+    // Remove inexact matches.
+    for ref, matchers := range i.inexact {
+        delete(matchers, key)
+        if len(matchers) == 0 {
+            delete(i.inexact, ref)
+        }
+    }
+}
@@ -70,6 +70,11 @@ type Interface interface {
     // OnChanged is a callback to register with the InformerFactory
     // so that we are notified for appropriate object changes.
     OnChanged(obj interface{})
+
+    // OnDeletedObserver is a callback to register with the InformerFactory
+    // so that we are notified for deletions of a watching parent to
+    // remove the respective tracking.
+    OnDeletedObserver(obj interface{})
 }

 // GroupVersionKind returns the GroupVersion of the object referenced.
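One plausible wiring for the new callback, as a hedged sketch; the helper and the handler registration are assumptions, not taken from this commit:

package example

import (
    "k8s.io/client-go/tools/cache"
    "knative.dev/pkg/tracker"
)

// watchForDeletes registers OnDeletedObserver as the informer's DeleteFunc,
// so deleting a watching parent releases the references it tracked.
func watchForDeletes(informer cache.SharedIndexInformer, t tracker.Interface) {
    informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        DeleteFunc: t.OnDeletedObserver,
    })
}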
@@ -62,7 +62,7 @@ func NewAdmissionController(
     }

     logger := logging.FromContext(ctx)
-    c := controller.NewImpl(wh, logger, "ConfigMapWebhook")
+    c := controller.NewImpl(wh, logger, "DefaultingWebhook")

     // Reconcile when the named MutatingWebhookConfiguration changes.
     mwhInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
@@ -77,7 +77,7 @@ func NewAdmissionController(
     }

     logger := logging.FromContext(ctx)
-    c := controller.NewImpl(wh, logger, "ConfigMapWebhook")
+    c := controller.NewImpl(wh, logger, "ValidationWebhook")

     // Reconcile when the named ValidatingWebhookConfiguration changes.
     vwhInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
@@ -49,7 +49,7 @@ function pr_only_contains() {
 # List changed files in the current PR.
 # This is implemented as a function so it can be mocked in unit tests.
 function list_changed_files() {
-  /workspace/githubhelper -list-changed-files -github-token /etc/repoview-token/token
+  git --no-pager diff --name-only ${PULL_BASE_SHA}..${PULL_SHA}
 }

 # Initialize flags and context for presubmit tests: