Add configMap informer (#326)

Signed-off-by: laminar <fangtian@kubesphere.io>

Co-authored-by: Aaron Schlesinger <70865+arschles@users.noreply.github.com>
laminar 2021-12-15 02:06:27 +08:00 committed by GitHub
parent 8622cf5dee
commit 201c7cb7bf
8 changed files with 255 additions and 68 deletions

View File

@@ -1,6 +1,8 @@
 package config
 
 import (
+	"time"
+
 	"github.com/kelseyhightower/envconfig"
 )
@@ -16,15 +18,9 @@ type Serving struct {
 	// This is the server that the external scaler will issue metrics
 	// requests to
 	AdminPort int `envconfig:"KEDA_HTTP_ADMIN_PORT" required:"true"`
-	// RoutingTableUpdateDurationMS is the interval (in milliseconds) representing how
-	// often to do a complete update of the routing table ConfigMap.
-	//
-	// The interceptor will also open a watch stream to the routing table
-	// ConfigMap and attempt to update the routing table on every update.
-	//
-	// Since it does full updates alongside watch stream updates, it can
-	// only process one at a time. Therefore, this is a best-effort timeout.
-	RoutingTableUpdateDurationMS int `envconfig:"KEDA_HTTP_ROUTING_TABLE_UPDATE_DURATION_MS" default:"500"`
+	// ConfigMapCacheRsyncPeriod is the time interval
+	// for the configmap informer to resync the local cache.
+	ConfigMapCacheRsyncPeriod time.Duration `envconfig:"KEDA_HTTP_SCALER_CONFIG_MAP_INFORMER_RSYNC_PERIOD" default:"60m"`
 	// The interceptor has an internal process that periodically fetches the state
 	// of the deployment that is running the servers it forwards to.
 	//

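An aside on the new setting: envconfig parses `time.Duration` fields with Go's `time.ParseDuration` syntax, so values like `60m`, `90s`, or `1h30m` are all accepted and the `60m` default is resolved to a real duration. A minimal, self-contained sketch of how the field above is parsed; the `serving` struct and `main` wrapper here are illustrative, not repo code:

```go
package main

import (
	"fmt"
	"time"

	"github.com/kelseyhightower/envconfig"
)

// serving mirrors the config field added above (illustrative only).
type serving struct {
	ConfigMapCacheRsyncPeriod time.Duration `envconfig:"KEDA_HTTP_SCALER_CONFIG_MAP_INFORMER_RSYNC_PERIOD" default:"60m"`
}

func main() {
	// envconfig falls back to the default tag when the
	// environment variable is unset.
	var s serving
	if err := envconfig.Process("", &s); err != nil {
		panic(err)
	}
	fmt.Println(s.ConfigMapCacheRsyncPeriod) // 1h0m0s when the env var is unset
}
```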
View File

@@ -73,6 +73,15 @@ func main() {
 	q := queue.NewMemory()
 	routingTable := routing.NewTable()
 
+	// Create the informer for the ConfigMap resource.
+	// The resynchronization period of the informer must be at least 1s,
+	// see: https://github.com/kubernetes/client-go/blob/v0.22.2/tools/cache/shared_informer.go#L475
+	configMapInformer := k8s.NewInformerConfigMapUpdater(
+		lggr,
+		cl,
+		servingCfg.ConfigMapCacheRsyncPeriod,
+	)
+
 	lggr.Info(
 		"Fetching initial routing table",
 	)
@@ -109,10 +118,11 @@ func main() {
 		err := routing.StartConfigMapRoutingTableUpdater(
 			ctx,
 			lggr,
-			time.Duration(servingCfg.RoutingTableUpdateDurationMS)*time.Millisecond,
-			configMapsInterface,
+			configMapInformer,
+			servingCfg.CurrentNamespace,
 			routingTable,
 			q,
+			nil,
 		)
 		lggr.Error(err, "config map routing table updater failed")
 		return err

View File

@@ -0,0 +1,132 @@
package k8s

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/informers"
	infcorev1 "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

type InformerConfigMapUpdater struct {
	lggr       logr.Logger
	cmInformer infcorev1.ConfigMapInformer
	bcaster    *watch.Broadcaster
}

func (i *InformerConfigMapUpdater) MarshalJSON() ([]byte, error) {
	lst := i.cmInformer.Lister()
	cms, err := lst.List(labels.Everything())
	if err != nil {
		return nil, err
	}
	return json.Marshal(&cms)
}

func (i *InformerConfigMapUpdater) Start(ctx context.Context) error {
	i.cmInformer.Informer().Run(ctx.Done())
	return errors.Wrap(
		ctx.Err(),
		"configMap informer was stopped",
	)
}

func (i *InformerConfigMapUpdater) Get(
	ns,
	name string,
) (corev1.ConfigMap, error) {
	cm, err := i.cmInformer.Lister().ConfigMaps(ns).Get(name)
	if err != nil {
		return corev1.ConfigMap{}, err
	}
	return *cm, nil
}

func (i *InformerConfigMapUpdater) Watch(
	ns,
	name string,
) watch.Interface {
	return watch.Filter(i.bcaster.Watch(), func(e watch.Event) (watch.Event, bool) {
		cm, ok := e.Object.(*corev1.ConfigMap)
		if !ok {
			i.lggr.Error(
				fmt.Errorf("informer expected a ConfigMap"),
				"ignoring event",
				"event", e,
			)
			return e, false
		}
		if cm.Namespace == ns && cm.Name == name {
			return e, true
		}
		return e, false
	})
}

func (i *InformerConfigMapUpdater) addEvtHandler(obj interface{}) {
	cm, ok := obj.(*corev1.ConfigMap)
	if !ok {
		i.lggr.Error(
			fmt.Errorf("informer expected configMap, got %v", obj),
			"not forwarding event",
		)
		return
	}
	i.bcaster.Action(watch.Added, cm)
}

func (i *InformerConfigMapUpdater) updateEvtHandler(oldObj, newObj interface{}) {
	cm, ok := newObj.(*corev1.ConfigMap)
	if !ok {
		i.lggr.Error(
			fmt.Errorf("informer expected configMap, got %v", newObj),
			"not forwarding event",
		)
		return
	}
	i.bcaster.Action(watch.Modified, cm)
}

func (i *InformerConfigMapUpdater) deleteEvtHandler(obj interface{}) {
	cm, ok := obj.(*corev1.ConfigMap)
	if !ok {
		i.lggr.Error(
			fmt.Errorf("informer expected configMap, got %v", obj),
			"not forwarding event",
		)
		return
	}
	i.bcaster.Action(watch.Deleted, cm)
}

func NewInformerConfigMapUpdater(
	lggr logr.Logger,
	cl kubernetes.Interface,
	defaultResync time.Duration,
) *InformerConfigMapUpdater {
	factory := informers.NewSharedInformerFactory(
		cl,
		defaultResync,
	)
	cmInformer := factory.Core().V1().ConfigMaps()
	ret := &InformerConfigMapUpdater{
		lggr:       lggr,
		bcaster:    watch.NewBroadcaster(0, watch.WaitIfChannelFull),
		cmInformer: cmInformer,
	}
	ret.cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    ret.addEvtHandler,
		UpdateFunc: ret.updateEvtHandler,
		DeleteFunc: ret.deleteEvtHandler,
	})
	return ret
}
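For orientation, here is a sketch of how a caller can drive this type end to end, using a fake clientset in place of a real cluster connection. It assumes a logr version that provides `logr.Discard()`; the namespace and ConfigMap name are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/kedacore/http-add-on/pkg/k8s"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// A fake clientset stands in for a real cluster connection.
	cl := fake.NewSimpleClientset()
	updater := k8s.NewInformerConfigMapUpdater(logr.Discard(), cl, time.Minute)

	// Start blocks until ctx is cancelled, so run it in its own goroutine.
	go func() { _ = updater.Start(ctx) }()

	// Watch only forwards events for the named ConfigMap in the namespace.
	w := updater.Watch("keda", "routing-table") // placeholder names
	defer w.Stop()

	// Creating the ConfigMap makes the informer emit an Added event,
	// which the filtered watch passes through.
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: "keda", Name: "routing-table"},
	}
	if _, err := cl.CoreV1().ConfigMaps("keda").Create(
		ctx, cm, metav1.CreateOptions{},
	); err != nil {
		panic(err)
	}

	evt := <-w.ResultChan()
	fmt.Println(evt.Type, evt.Object.(*corev1.ConfigMap).Name) // ADDED routing-table
}
```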

View File

@@ -2,15 +2,13 @@ package routing
 
 import (
 	"context"
-	"time"
 
 	"github.com/go-logr/logr"
 	"github.com/kedacore/http-add-on/pkg/k8s"
 	"github.com/kedacore/http-add-on/pkg/queue"
 	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
 	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/watch"
 )
 
 // StartConfigMapRoutingTableUpdater starts a loop that does the following:
@@ -21,49 +19,42 @@ import (
 // called ConfigMapRoutingTableName. On either of those events, decodes
 // that ConfigMap into a routing table and stores the new table into table
 // using table.Replace(newTable)
+// - Executes the callback function, if one exists
 // - Returns an appropriate non-nil error if ctx.Done() receives
 func StartConfigMapRoutingTableUpdater(
 	ctx context.Context,
 	lggr logr.Logger,
-	updateEvery time.Duration,
-	getterWatcher k8s.ConfigMapGetterWatcher,
+	cmInformer *k8s.InformerConfigMapUpdater,
+	ns string,
 	table *Table,
 	q queue.Counter,
+	cbFunc func() error,
 ) error {
 	lggr = lggr.WithName("pkg.routing.StartConfigMapRoutingTableUpdater")
-	watchIface, err := getterWatcher.Watch(ctx, metav1.ListOptions{})
-	if err != nil {
-		return err
-	}
-	defer watchIface.Stop()
-	ticker := time.NewTicker(updateEvery)
-	defer ticker.Stop()
+	watcher := cmInformer.Watch(ns, ConfigMapRoutingTableName)
+	defer watcher.Stop()
+
+	ctx, done := context.WithCancel(ctx)
+	defer done()
+	grp, ctx := errgroup.WithContext(ctx)
+
+	grp.Go(func() error {
+		defer done()
+		return cmInformer.Start(ctx)
+	})
+
+	grp.Go(func() error {
+		defer done()
 		for {
 			select {
-			case <-ctx.Done():
-				return errors.Wrap(ctx.Err(), "context is done")
-			case <-ticker.C:
-				if err := GetTable(ctx, lggr, getterWatcher, table, q); err != nil {
-					return errors.Wrap(err, "failed to fetch routing table")
-				}
-			case evt := <-watchIface.ResultChan():
-				evtType := evt.Type
-				obj := evt.Object
-				if evtType == watch.Added || evtType == watch.Modified {
-					cm, ok := obj.(*corev1.ConfigMap)
-					// by definition of watchIface, all returned objects should
-					// be assertable to a ConfigMap. In the likely impossible
-					// case that it isn't, just ignore and move on.
-					// This check is just to be defensive.
-					if !ok {
-						continue
-					}
-					// the watcher is open on all ConfigMaps in the namespace, so
-					// bail out of this loop iteration immediately if the event
-					// isn't for the routing table ConfigMap.
-					if cm.Name != ConfigMapRoutingTableName {
-						continue
-					}
+			case event := <-watcher.ResultChan():
+				cm, ok := event.Object.(*corev1.ConfigMap)
+				// theoretically this will not happen: the watch only
+				// forwards ConfigMap objects
+				if !ok {
+					lggr.Info(
+						"The event object observed is not a configmap",
+					)
+					continue
+				}
 				newTable, err := FetchTableFromConfigMap(cm, q)
@@ -81,8 +72,25 @@ func StartConfigMapRoutingTableUpdater(
 					)
 					continue
 				}
+				// Execute the callback function, if one exists
+				if cbFunc != nil {
+					if err := cbFunc(); err != nil {
+						lggr.Error(
+							err,
+							"failed to exec the callback function",
+						)
+						continue
+					}
+				}
+			case <-ctx.Done():
+				return errors.Wrap(ctx.Err(), "context is done")
 			}
 		}
+	})
+
+	if err := grp.Wait(); err != nil {
+		lggr.Error(err, "config map routing table updater failed")
+		return err
+	}
+	return nil
 }
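The rewritten updater runs the informer and the event loop as two errgroup goroutines sharing a cancellable context, so either one exiting tears the other down and `Wait` surfaces the first error. A standalone sketch of that shutdown pattern, with placeholder work standing in for the informer and the loop:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	ctx, done := context.WithCancel(context.Background())
	defer done()
	grp, ctx := errgroup.WithContext(ctx)

	// Stands in for cmInformer.Start(ctx), which blocks
	// until the shared context is cancelled.
	grp.Go(func() error {
		defer done()
		<-ctx.Done()
		return fmt.Errorf("informer stopped: %w", ctx.Err())
	})

	// Stands in for the event loop; when it returns, the deferred
	// done() cancels the context and unblocks the goroutine above.
	grp.Go(func() error {
		defer done()
		time.Sleep(100 * time.Millisecond) // placeholder work
		return nil
	})

	// Wait returns the first non-nil error from either goroutine.
	if err := grp.Wait(); err != nil {
		fmt.Println("updater failed:", err)
	}
}
```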

View File

@@ -3,6 +3,7 @@ package routing
 
 import (
 	"context"
 	"errors"
+	"strings"
 	"testing"
 	"time"
@@ -41,10 +42,6 @@ import (
 // approach. The fake watcher documentation is linked below:
 //
 // (https://pkg.go.dev/k8s.io/apimachinery@v0.21.3/pkg/watch#NewFake),
-type fakeCMGetterWatcher struct {
-	k8s.ConfigMapGetter
-	k8s.ConfigMapWatcher
-}
 
 type fakeConfigMapWatcher struct {
 	watchIface watch.Interface
@@ -88,15 +85,13 @@ func TestStartUpdateLoop(t *testing.T) {
 	}
 	r.NoError(SaveTableToConfigMap(table, cm))
 
-	fakeWatcher := watch.NewFake()
 	fakeGetter := fake.NewSimpleClientset(cm)
-	getterWatcher := fakeCMGetterWatcher{
-		ConfigMapGetter: fakeGetter.
-			CoreV1().
-			ConfigMaps(ns),
-		ConfigMapWatcher: fakeConfigMapWatcher{fakeWatcher},
-	}
-	defer fakeWatcher.Stop()
+	configMapInformer := k8s.NewInformerConfigMapUpdater(
+		lggr,
+		fakeGetter,
+		time.Second*1,
+	)
 
 	grp, ctx := errgroup.WithContext(ctx)
@@ -104,10 +99,11 @@ func TestStartUpdateLoop(t *testing.T) {
 		err := StartConfigMapRoutingTableUpdater(
 			ctx,
 			lggr,
-			interval,
-			getterWatcher,
+			configMapInformer,
+			ns,
 			table,
 			q,
+			nil,
 		)
 		// we purposefully cancel the context below,
 		// so we need to ignore that error.
@@ -120,7 +116,26 @@ func TestStartUpdateLoop(t *testing.T) {
 	// send a watch event in parallel. we'll ensure that it
 	// made it through in the below loop
 	grp.Go(func() error {
-		fakeWatcher.Add(cm)
+		if _, err := fakeGetter.
+			CoreV1().
+			ConfigMaps(ns).
+			Create(ctx, cm, metav1.CreateOptions{}); err != nil && strings.Contains(
+			err.Error(),
+			"already exists",
+		) {
+			if err := fakeGetter.
+				CoreV1().
+				ConfigMaps(ns).
+				Delete(ctx, cm.Name, metav1.DeleteOptions{}); err != nil {
+				return err
+			}
+			if _, err := fakeGetter.
+				CoreV1().
+				ConfigMaps(ns).
+				Create(ctx, cm, metav1.CreateOptions{}); err != nil {
+				return err
+			}
+		}
 		return nil
 	})
@@ -129,6 +144,12 @@ func TestStartUpdateLoop(t *testing.T) {
 	const waitDur = interval * 5
 	time.Sleep(waitDur)
 
+	_, err := fakeGetter.
+		CoreV1().
+		ConfigMaps(ns).
+		Get(ctx, ConfigMapRoutingTableName, metav1.GetOptions{})
+	r.NoError(err)
+
 	for _, action := range fakeGetter.Actions() {
 		verb := action.GetVerb()
 		resource := action.GetResource().Resource

View File

@@ -26,9 +26,9 @@ type config struct {
 	// KEDA, if that value is not set on an incoming
 	// `HTTPScaledObject`
 	TargetPendingRequests int `envconfig:"KEDA_HTTP_SCALER_TARGET_PENDING_REQUESTS" default:"100"`
-	// UpdateRoutingTableDur is the duration between manual
-	// updates to the routing table.
-	UpdateRoutingTableDur time.Duration `envconfig:"KEDA_HTTP_SCALER_ROUTING_TABLE_UPDATE_DUR" default:"100ms"`
+	// ConfigMapCacheRsyncPeriod is the time interval
+	// for the configmap informer to resync the local cache.
+	ConfigMapCacheRsyncPeriod time.Duration `envconfig:"KEDA_HTTP_SCALER_CONFIG_MAP_INFORMER_RSYNC_PERIOD" default:"60m"`
 	// QueueTickDuration is the duration between queue requests
 	QueueTickDuration time.Duration `envconfig:"KEDA_HTTP_QUEUE_TICK_DURATION" default:"500ms"`
 	// This will be the 'Target Pending Requests' for the interceptor

View File

@@ -62,8 +62,27 @@ func main() {
 		os.Exit(1)
 	}
 
+	// This callback function fetches and saves the current queue
+	// counts from the interceptor immediately after the routing
+	// table is updated.
+	callbackWhenRoutingTableUpdate := func() error {
+		if err := pinger.fetchAndSaveCounts(ctx); err != nil {
+			return err
+		}
+		return nil
+	}
+
 	table := routing.NewTable()
+
+	// Create the informer for the ConfigMap resource.
+	// The resynchronization period of the informer must be at least 1s,
+	// see: https://github.com/kubernetes/client-go/blob/v0.22.2/tools/cache/shared_informer.go#L475
+	configMapInformer := k8s.NewInformerConfigMapUpdater(
+		lggr,
+		k8sCl,
+		cfg.ConfigMapCacheRsyncPeriod,
+	)
+
 	grp, ctx := errgroup.WithContext(ctx)
 	grp.Go(func() error {
@@ -92,16 +111,18 @@ func main() {
 		return routing.StartConfigMapRoutingTableUpdater(
 			ctx,
 			lggr,
-			cfg.UpdateRoutingTableDur,
-			k8sCl.CoreV1().ConfigMaps(cfg.TargetNamespace),
+			configMapInformer,
+			cfg.TargetNamespace,
 			table,
 			// we don't care about the queue here.
 			// we just want to update the routing table
 			// so that the scaler can use it to determine
 			// the target metrics for given hosts.
			queue.NewMemory(),
+			callbackWhenRoutingTableUpdate,
 		)
 	})
 	grp.Go(func() error {
 		defer done()
 		return startHealthcheckServer(

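For clarity on the new `cbFunc` parameter: `nil` disables the callback (as the interceptor passes above), and a returned error is logged while the update loop keeps running. A small sketch of that contract; `runAfterUpdate` and `fetchCounts` are illustrative names, with `fetchCounts` standing in for `pinger.fetchAndSaveCounts`:

```go
package main

import (
	"context"
	"fmt"
)

// runAfterUpdate mirrors how the updater treats cbFunc: nil means
// "no callback", and an error is reported without stopping the loop.
func runAfterUpdate(cbFunc func() error) {
	if cbFunc == nil {
		return
	}
	if err := cbFunc(); err != nil {
		fmt.Println("callback failed:", err)
	}
}

func main() {
	ctx := context.Background()

	// A closure capturing ctx, like callbackWhenRoutingTableUpdate above.
	fetchCounts := func(ctx context.Context) error { return nil } // placeholder
	cb := func() error {
		return fetchCounts(ctx)
	}

	runAfterUpdate(cb)  // invokes the callback after a table update
	runAfterUpdate(nil) // the interceptor's case: a no-op
}
```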
View File

@@ -29,7 +29,6 @@ func TestHealthChecks(t *testing.T) {
 		TargetService:         "testsvc",
 		TargetPort:            port + 123,
 		TargetPendingRequests: 100,
-		UpdateRoutingTableDur: 100 * time.Millisecond,
 	}
 
 	errgrp, ctx := errgroup.WithContext(ctx)