Merge pull request #1380 from Garrybest/pr_estimator

Descheduler: Implement GetUnschedulableReplicas for estimator
karmada-bot 2022-02-23 14:54:04 +08:00 committed by GitHub
commit 670a77aead
15 changed files with 1173 additions and 32 deletions


@@ -11,6 +11,8 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/cobra"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
@@ -56,9 +58,11 @@ func run(ctx context.Context, opts *options.Options) error {
}
restConfig.QPS, restConfig.Burst = opts.ClusterAPIQPS, opts.ClusterAPIBurst
kubeClientSet := kubernetes.NewForConfigOrDie(restConfig)
kubeClient := kubernetes.NewForConfigOrDie(restConfig)
dynamicClient := dynamic.NewForConfigOrDie(restConfig)
discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(restConfig)
e := server.NewEstimatorServer(kubeClientSet, opts)
e := server.NewEstimatorServer(kubeClient, dynamicClient, discoveryClient, opts, ctx.Done())
if err = e.Start(ctx); err != nil {
klog.Errorf("estimator server exits unexpectedly: %v", err)
return err
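
Pulling the new lines of this hunk together, a minimal sketch of how run() now wires the estimator, assuming a rest.Config built earlier in the function; the helper name startEstimator and the package clause are illustrative, not part of the commit.

package example

import (
	"context"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/klog/v2"

	"github.com/karmada-io/karmada/cmd/scheduler-estimator/app/options"
	"github.com/karmada-io/karmada/pkg/estimator/server"
)

// startEstimator builds the three clients from one rest.Config and hands them,
// together with a stop channel, to the new NewEstimatorServer signature.
func startEstimator(ctx context.Context, restConfig *restclient.Config, opts *options.Options) error {
	restConfig.QPS, restConfig.Burst = opts.ClusterAPIQPS, opts.ClusterAPIBurst

	kubeClient := kubernetes.NewForConfigOrDie(restConfig)
	dynamicClient := dynamic.NewForConfigOrDie(restConfig)
	discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(restConfig)

	e := server.NewEstimatorServer(kubeClient, dynamicClient, discoveryClient, opts, ctx.Done())
	if err := e.Start(ctx); err != nil {
		klog.Errorf("estimator server exits unexpectedly: %v", err)
		return err
	}
	return nil
}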


@@ -133,7 +133,7 @@ func (c *Controller) tryDeleteWorkload(clusterName string, work *workv1alpha1.Wo
return err
}
clusterObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey, c.Client, c.ClusterClientSetFunc)
clusterObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
@@ -234,7 +234,7 @@ func (c *Controller) tryUpdateWorkload(clusterName string, workload *unstructure
return err
}
clusterObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey, c.Client, c.ClusterClientSetFunc)
clusterObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey)
if err != nil {
if !apierrors.IsNotFound(err) {
klog.Errorf("Failed to get resource %v from member cluster, err is %v ", workload.GetName(), err)


@@ -264,7 +264,7 @@ func (c *ServiceExportController) genHandlerDeleteFunc(clusterName string) func(
// For ServiceExport create or update event, reports the referencing service's EndpointSlice.
// For ServiceExport delete event, cleanup the previously reported EndpointSlice.
func (c *ServiceExportController) handleServiceExportEvent(serviceExportKey keys.FederatedKey) error {
_, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, serviceExportKey, c.Client, c.ClusterDynamicClientSetFunc)
_, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, serviceExportKey)
if err != nil {
if apierrors.IsNotFound(err) {
return cleanupWorkWithServiceExportDelete(c.Client, serviceExportKey)
@@ -289,7 +289,7 @@ func (c *ServiceExportController) handleServiceExportEvent(serviceExportKey keys
// For EndpointSlice create or update event, reports the EndpointSlice when referencing service has been exported.
// For EndpointSlice delete event, cleanup the previously reported EndpointSlice.
func (c *ServiceExportController) handleEndpointSliceEvent(endpointSliceKey keys.FederatedKey) error {
endpointSliceObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, endpointSliceKey, c.Client, c.ClusterDynamicClientSetFunc)
endpointSliceObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, endpointSliceKey)
if err != nil {
if apierrors.IsNotFound(err) {
return cleanupWorkWithEndpointSliceDelete(c.Client, endpointSliceKey)


@@ -159,7 +159,7 @@ func (c *WorkStatusController) syncWorkStatus(key util.QueueKey) error {
return fmt.Errorf("invalid key")
}
observedObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey, c.Client, c.ClusterClientSetFunc)
observedObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey)
if err != nil {
if apierrors.IsNotFound(err) {
return c.handleDeleteEvent(fedKey)


@@ -15,6 +15,8 @@ const SchedulerEstimatorSubsystem = "karmada_scheduler_estimator"
const (
// EstimatingTypeMaxAvailableReplicas - label of estimating type
EstimatingTypeMaxAvailableReplicas = "MaxAvailableReplicas"
// EstimatingTypeGetUnschedulableReplicas - label of estimating type
EstimatingTypeGetUnschedulableReplicas = "GetUnschedulableReplicas"
)
const (
@@ -22,6 +24,10 @@ const (
EstimatingStepListNodesByNodeClaim = "ListNodesByNodeClaim"
// EstimatingStepMaxAvailableReplicas - label of estimating step
EstimatingStepMaxAvailableReplicas = "MaxAvailableReplicas"
// EstimatingStepGetObjectFromCache - label of estimating step
EstimatingStepGetObjectFromCache = "GetObjectFromCache"
// EstimatingStepGetUnschedulablePodsOfWorkload - label of estimating step
EstimatingStepGetUnschedulablePodsOfWorkload = "GetWorkloadUnschedulablePods"
// EstimatingStepTotal - label of estimating step, total step
EstimatingStepTotal = "Total"
)


@@ -1,9 +1,21 @@
package replica
import (
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
listappsv1 "k8s.io/client-go/listers/apps/v1"
listcorev1 "k8s.io/client-go/listers/core/v1"
"k8s.io/klog/v2"
"github.com/karmada-io/karmada/pkg/estimator/server/nodes"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/helper"
utilworkload "github.com/karmada-io/karmada/pkg/util/workload"
)
// NodeMaxAvailableReplica calculates max available replicas of a node, based on
@@ -15,3 +27,67 @@ func NodeMaxAvailableReplica(node *corev1.Node, pods []*corev1.Pod, request core
}
return int32(ni.MaxReplicaDivided(request)), nil
}
// ListerWrapper wraps a Pod lister and a ReplicaSet lister.
type ListerWrapper struct {
listcorev1.PodLister
listappsv1.ReplicaSetLister
}
// GetUnschedulablePodsOfWorkload returns the number of unschedulable pods that the given workload owns.
func GetUnschedulablePodsOfWorkload(unstructObj *unstructured.Unstructured, threshold time.Duration, listers *ListerWrapper) (int32, error) {
if threshold < 0 {
threshold = 0
}
unschedulable := 0
// Workloads fall into two categories: those that own Pods through a ReplicaSet
// (e.g. Deployment) and those that own Pods directly.
switch unstructObj.GetKind() {
case util.DeploymentKind:
deployment, err := helper.ConvertToDeployment(unstructObj)
if err != nil {
return 0, fmt.Errorf("failed to convert ReplicaSet from unstructured object: %v", err)
}
pods, err := listDeploymentPods(deployment, listers)
if err != nil {
return 0, err
}
for _, pod := range pods {
if podUnschedulable(pod, threshold) {
unschedulable++
}
}
default:
// TODO(Garrybest): add abstract workload
return 0, fmt.Errorf("kind(%s) of workload(%s) is not supported", unstructObj.GetKind(), klog.KObj(unstructObj).String())
}
return int32(unschedulable), nil
}
func podUnschedulable(pod *corev1.Pod, threshold time.Duration) bool {
_, cond := helper.GetPodCondition(&pod.Status, corev1.PodScheduled)
return cond != nil && cond.Status == corev1.ConditionFalse && cond.Reason == corev1.PodReasonUnschedulable &&
cond.LastTransitionTime.Add(threshold).Before(time.Now())
}
func listDeploymentPods(deployment *appsv1.Deployment, listers *ListerWrapper) ([]*corev1.Pod, error) {
// Get ReplicaSet
rsListFunc := func(namespace string, selector labels.Selector) ([]*appsv1.ReplicaSet, error) {
return listers.ReplicaSetLister.ReplicaSets(namespace).List(selector)
}
rs, err := utilworkload.GetNewReplicaSet(deployment, rsListFunc)
if err != nil {
return nil, err
}
podListFunc := func(namespace string, selector labels.Selector) ([]*corev1.Pod, error) {
return listers.PodLister.Pods(namespace).List(selector)
}
pods, err := utilworkload.ListPodsByRS(deployment, []*appsv1.ReplicaSet{rs}, podListFunc)
if err != nil {
return nil, err
}
return pods, nil
}
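
A minimal usage sketch of the new helper: wire Pod and ReplicaSet listers into a ListerWrapper (as the estimator server does) and count pods of an unstructured Deployment that have been unschedulable for longer than five minutes. The wrapper function and its name are illustrative.

package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"

	"github.com/karmada-io/karmada/pkg/estimator/server/replica"
)

// countUnschedulable builds listers from a shared informer factory, wraps them,
// and delegates to GetUnschedulablePodsOfWorkload.
func countUnschedulable(kubeClient kubernetes.Interface, deploymentObj *unstructured.Unstructured) (int32, error) {
	factory := informers.NewSharedInformerFactory(kubeClient, 0)
	listers := &replica.ListerWrapper{
		PodLister:        factory.Core().V1().Pods().Lister(),
		ReplicaSetLister: factory.Apps().V1().ReplicaSets().Lister(),
	}

	stopCh := make(chan struct{})
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	unschedulable, err := replica.GetUnschedulablePodsOfWorkload(deploymentObj, 5*time.Minute, listers)
	if err != nil {
		return 0, fmt.Errorf("failed to count unschedulable pods: %v", err)
	}
	return unschedulable, nil
}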


@@ -9,11 +9,18 @@ import (
"github.com/kr/pretty"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
infov1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
listv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
@@ -24,38 +31,62 @@ import (
"github.com/karmada-io/karmada/pkg/estimator/server/replica"
estimatorservice "github.com/karmada-io/karmada/pkg/estimator/service"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/helper"
"github.com/karmada-io/karmada/pkg/util/informermanager"
"github.com/karmada-io/karmada/pkg/util/informermanager/keys"
)
const (
nodeNameKeyIndex = "spec.nodeName"
)
var (
// TODO(Garrybest): make this an option
supportedGVRs = []schema.GroupVersionResource{
appsv1.SchemeGroupVersion.WithResource("deployments"),
}
)
// AccurateSchedulerEstimatorServer is the gRPC server of a cluster accurate scheduler estimator.
// Please see https://github.com/karmada-io/karmada/pull/580 (#580).
type AccurateSchedulerEstimatorServer struct {
port int
clusterName string
kubeClient kubernetes.Interface
restMapper meta.RESTMapper
informerFactory informers.SharedInformerFactory
nodeInformer infov1.NodeInformer
podInformer infov1.PodInformer
nodeLister listv1.NodeLister
podLister listv1.PodLister
replicaLister *replica.ListerWrapper
getPodFunc func(nodeName string) ([]*corev1.Pod, error)
informerManager informermanager.SingleClusterInformerManager
}
// NewEstimatorServer creates an instance of AccurateSchedulerEstimatorServer.
func NewEstimatorServer(kubeClient kubernetes.Interface, opts *options.Options) *AccurateSchedulerEstimatorServer {
func NewEstimatorServer(
kubeClient kubernetes.Interface,
dynamicClient dynamic.Interface,
discoveryClient discovery.DiscoveryInterface,
opts *options.Options,
stopChan <-chan struct{},
) *AccurateSchedulerEstimatorServer {
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient)
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
es := &AccurateSchedulerEstimatorServer{
port: opts.ServerPort,
clusterName: opts.ClusterName,
kubeClient: kubeClient,
restMapper: restMapper,
informerFactory: informerFactory,
nodeInformer: informerFactory.Core().V1().Nodes(),
podInformer: informerFactory.Core().V1().Pods(),
nodeLister: informerFactory.Core().V1().Nodes().Lister(),
podLister: informerFactory.Core().V1().Pods().Lister(),
replicaLister: &replica.ListerWrapper{
PodLister: informerFactory.Core().V1().Pods().Lister(),
ReplicaSetLister: informerFactory.Apps().V1().ReplicaSets().Lister(),
},
}
// Establish a connection between the pods and their assigned nodes.
@@ -93,20 +124,30 @@ func NewEstimatorServer(kubeClient kubernetes.Interface, opts *options.Options)
}
return pods, nil
}
es.informerManager = informermanager.NewSingleClusterInformerManager(dynamicClient, 0, stopChan)
for _, gvr := range supportedGVRs {
es.informerManager.Lister(gvr)
}
return es
}
// Start runs the accurate replica estimator server.
func (es *AccurateSchedulerEstimatorServer) Start(ctx context.Context) error {
stopCh := ctx.Done()
klog.Infof("Starting karmada cluster(%s) accurate replica estimator", es.clusterName)
defer klog.Infof("Shutting down cluster(%s) accurate replica estimator", es.clusterName)
klog.Infof("Starting karmada cluster(%s) accurate scheduler estimator", es.clusterName)
defer klog.Infof("Shutting down cluster(%s) accurate scheduler estimator", es.clusterName)
es.informerFactory.Start(stopCh)
if !es.waitForCacheSync(stopCh) {
return fmt.Errorf("failed to wait for cache sync")
}
es.informerManager.Start()
synced := es.informerManager.WaitForCacheSync()
if synced == nil {
return fmt.Errorf("informer factory for cluster does not exist")
}
// Listen a port and register the gRPC server.
l, err := net.Listen("tcp", fmt.Sprintf(":%d", es.port))
if err != nil {
@@ -169,9 +210,46 @@ func (es *AccurateSchedulerEstimatorServer) MaxAvailableReplicas(ctx context.Con
// GetUnschedulableReplicas is the implementation of gRPC interface. It will return the
// unschedulable replicas of a workload.
func (es *AccurateSchedulerEstimatorServer) GetUnschedulableReplicas(ctx context.Context, request *pb.UnschedulableReplicasRequest) (*pb.UnschedulableReplicasResponse, error) {
//TODO(Garrybest): implement me
return nil, nil
func (es *AccurateSchedulerEstimatorServer) GetUnschedulableReplicas(ctx context.Context, request *pb.UnschedulableReplicasRequest) (response *pb.UnschedulableReplicasResponse, rerr error) {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
klog.Warningf("No metadata from context.")
}
var object string
if m := md.Get(string(util.ContextKeyObject)); len(m) != 0 {
object = m[0]
}
defer traceGetUnschedulableReplicas(object, time.Now(), request)(&response, &rerr)
if request.Cluster != es.clusterName {
return nil, fmt.Errorf("cluster name does not match, got: %s, desire: %s", request.Cluster, es.clusterName)
}
// Get the workload.
startTime := time.Now()
gvk := schema.FromAPIVersionAndKind(request.Resource.APIVersion, request.Resource.Kind)
unstructObj, err := helper.GetObjectFromSingleClusterCache(es.restMapper, es.informerManager, &keys.ClusterWideKey{
Group: gvk.Group,
Version: gvk.Version,
Kind: gvk.Kind,
Namespace: request.Resource.Namespace,
Name: request.Resource.Name,
})
metrics.UpdateEstimatingAlgorithmLatency(err, metrics.EstimatingTypeGetUnschedulableReplicas, metrics.EstimatingStepGetObjectFromCache, startTime)
if err != nil {
return nil, err
}
// List all unschedulable replicas.
startTime = time.Now()
unschedulables, err := replica.GetUnschedulablePodsOfWorkload(unstructObj, request.UnschedulableThreshold, es.replicaLister)
metrics.UpdateEstimatingAlgorithmLatency(err, metrics.EstimatingTypeGetUnschedulableReplicas, metrics.EstimatingStepGetUnschedulablePodsOfWorkload, startTime)
if err != nil {
return nil, err
}
return &pb.UnschedulableReplicasResponse{UnschedulableReplicas: unschedulables}, err
}
func (es *AccurateSchedulerEstimatorServer) maxAvailableReplicas(nodes []*corev1.Node, request corev1.ResourceList) int32 {
@@ -216,3 +294,16 @@ func traceMaxAvailableReplicas(object string, start time.Time, request *pb.MaxAv
klog.Infof("Finish calculating cluster available replicas of resource(%s), max replicas: %d, time elapsed: %s", object, (*response).MaxReplicas, time.Since(start))
}
}
func traceGetUnschedulableReplicas(object string, start time.Time, request *pb.UnschedulableReplicasRequest) func(response **pb.UnschedulableReplicasResponse, err *error) {
klog.V(4).Infof("Begin detecting cluster unscheduable replicas of resource(%s), request: %s", object, pretty.Sprint(*request))
return func(response **pb.UnschedulableReplicasResponse, err *error) {
metrics.CountRequests(*err, metrics.EstimatingTypeGetUnschedulableReplicas)
metrics.UpdateEstimatingAlgorithmLatency(*err, metrics.EstimatingTypeGetUnschedulableReplicas, metrics.EstimatingStepTotal, start)
if *err != nil {
klog.Errorf("Failed to detect cluster unscheduable replicas: %v", *err)
return
}
klog.Infof("Finish detecting cluster unscheduable replicas of resource(%s), unschedulable replicas: %d, time elapsed: %s", object, (*response).UnschedulableReplicas, time.Since(start))
}
}
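
For reference, a hedged sketch of a caller exercising the new handler directly (as the unit test below does for MaxAvailableReplicas). The Resource field type pb.ObjectReference and the value-typed UnschedulableThreshold are assumptions inferred from the fields the handler reads above; they are not confirmed by this diff. The helper function is illustrative.

package example

import (
	"context"
	"time"

	"k8s.io/klog/v2"

	"github.com/karmada-io/karmada/pkg/estimator/pb"
	"github.com/karmada-io/karmada/pkg/estimator/server"
)

// getUnschedulable asks an already-constructed, cache-synced estimator server
// how many replicas of a Deployment are currently unschedulable.
func getUnschedulable(ctx context.Context, es *server.AccurateSchedulerEstimatorServer) (int32, error) {
	req := &pb.UnschedulableReplicasRequest{
		Cluster: "member1",
		// pb.ObjectReference as the field type is an assumption based on the
		// APIVersion/Kind/Namespace/Name fields the handler reads.
		Resource: pb.ObjectReference{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
			Namespace:  "default",
			Name:       "nginx",
		},
		// Passed straight through to replica.GetUnschedulablePodsOfWorkload.
		UnschedulableThreshold: 5 * time.Minute,
	}
	resp, err := es.GetUnschedulableReplicas(ctx, req)
	if err != nil {
		return 0, err
	}
	klog.Infof("unschedulable replicas: %d", resp.UnschedulableReplicas)
	return resp.UnschedulableReplicas, nil
}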


@@ -6,9 +6,15 @@ import (
"reflect"
"testing"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
discoveryfake "k8s.io/client-go/discovery/fake"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/fake"
coretesting "k8s.io/client-go/testing"
"github.com/karmada-io/karmada/cmd/scheduler-estimator/app/options"
"github.com/karmada-io/karmada/pkg/estimator/pb"
@@ -205,11 +211,27 @@ func TestAccurateSchedulerEstimatorServer_MaxAvailableReplicas(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
es := NewEstimatorServer(fake.NewSimpleClientset(tt.objs...), opt)
gvrToListKind := map[schema.GroupVersionResource]string{
{Group: "apps", Version: "v1", Resource: "deployments"}: "DeploymentList",
}
dynamicClient := dynamicfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), gvrToListKind)
discoveryClient := &discoveryfake.FakeDiscovery{
Fake: &coretesting.Fake{},
}
discoveryClient.Resources = []*metav1.APIResourceList{
{
GroupVersion: appsv1.SchemeGroupVersion.String(),
APIResources: []metav1.APIResource{
{Name: "deployments", Namespaced: true, Kind: "Deployment"},
},
},
}
es := NewEstimatorServer(fake.NewSimpleClientset(tt.objs...), dynamicClient, discoveryClient, opt, ctx.Done())
es.informerFactory.Start(ctx.Done())
if !es.waitForCacheSync(ctx.Done()) {
t.Errorf("MaxAvailableReplicas() error = %v, wantErr %v", fmt.Errorf("failed to wait for cache sync"), tt.wantErr)
t.Fatalf("MaxAvailableReplicas() error = %v, wantErr %v", fmt.Errorf("failed to wait for cache sync"), tt.wantErr)
}
gotResponse, err := es.MaxAvailableReplicas(ctx, tt.args.request)


@@ -10,20 +10,20 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/karmada-io/karmada/pkg/util"
"github.com/karmada-io/karmada/pkg/util/informermanager"
"github.com/karmada-io/karmada/pkg/util/informermanager/keys"
"github.com/karmada-io/karmada/pkg/util/restmapper"
)
type clusterDynamicClientSetFunc func(clusterName string, client client.Client) (*util.DynamicClusterClient, error)
// GetObjectFromCache gets full object information from cache by key in worker queue.
func GetObjectFromCache(restMapper meta.RESTMapper, manager informermanager.MultiClusterInformerManager, fedKey keys.FederatedKey,
client client.Client, clientSetFunc clusterDynamicClientSetFunc) (*unstructured.Unstructured, error) {
func GetObjectFromCache(
restMapper meta.RESTMapper,
manager informermanager.MultiClusterInformerManager,
fedKey keys.FederatedKey,
) (*unstructured.Unstructured, error) {
gvr, err := restmapper.GetGroupVersionResource(restMapper, fedKey.GroupVersionKind())
if err != nil {
klog.Errorf("Failed to get GVR from GVK %s. Error: %v", fedKey.GroupVersionKind(), err)
@@ -41,7 +41,7 @@ func GetObjectFromCache(restMapper meta.RESTMapper, manager informermanager.Mult
if !singleClusterManager.IsInformerSynced(gvr) {
// fall back to call api server in case the cache has not been synchronized yet
return getObjectFromMemberCluster(gvr, fedKey, client, clientSetFunc)
return getObjectFromSingleCluster(gvr, &fedKey.ClusterWideKey, singleClusterManager.GetClient())
}
var obj runtime.Object
@@ -59,19 +59,44 @@ func GetObjectFromCache(restMapper meta.RESTMapper, manager informermanager.Mult
return obj.(*unstructured.Unstructured), nil
}
// getObjectFromMemberCluster will try to get resource from member cluster by DynamicClientSet.
func getObjectFromMemberCluster(gvr schema.GroupVersionResource, fedKey keys.FederatedKey, client client.Client,
clientSetFunc clusterDynamicClientSetFunc) (*unstructured.Unstructured, error) {
dynamicClusterClient, err := clientSetFunc(fedKey.Cluster, client)
// GetObjectFromSingleClusterCache gets full object information from single cluster cache by key in worker queue.
func GetObjectFromSingleClusterCache(restMapper meta.RESTMapper, manager informermanager.SingleClusterInformerManager,
cwk *keys.ClusterWideKey) (*unstructured.Unstructured, error) {
gvr, err := restmapper.GetGroupVersionResource(restMapper, cwk.GroupVersionKind())
if err != nil {
klog.Errorf("Failed to build dynamic cluster client for cluster %s.", fedKey.Cluster)
klog.Errorf("Failed to get GVR from GVK %s. Error: %v", cwk.GroupVersionKind(), err)
return nil, err
}
existObj, err := dynamicClusterClient.DynamicClientSet.Resource(gvr).Namespace(fedKey.Namespace).Get(context.TODO(),
fedKey.Name, metav1.GetOptions{})
if !manager.IsInformerSynced(gvr) {
// fall back to call api server in case the cache has not been synchronized yet
return getObjectFromSingleCluster(gvr, cwk, manager.GetClient())
}
lister := manager.Lister(gvr)
obj, err := lister.Get(cwk.NamespaceKey())
if err != nil {
if apierrors.IsNotFound(err) {
return nil, err
}
// print logs only for real error.
klog.Errorf("Failed to get obj %s. error: %v.", cwk.String(), err)
return nil, err
}
return obj.(*unstructured.Unstructured), nil
}
// getObjectFromSingleCluster will try to get resource from single cluster by DynamicClientSet.
func getObjectFromSingleCluster(
gvr schema.GroupVersionResource,
cwk *keys.ClusterWideKey,
dynamicClient dynamic.Interface,
) (*unstructured.Unstructured, error) {
obj, err := dynamicClient.Resource(gvr).Namespace(cwk.Namespace).Get(context.TODO(), cwk.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return existObj, nil
return obj, nil
}
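
A short sketch of the new single-cluster lookup helper, assuming a manager that has already been created, started, and synced as the estimator server does earlier in this diff; the wrapper function is illustrative.

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/karmada-io/karmada/pkg/util/helper"
	"github.com/karmada-io/karmada/pkg/util/informermanager"
	"github.com/karmada-io/karmada/pkg/util/informermanager/keys"
)

// lookupDeployment resolves a Deployment through the cache-first path; if the
// informer has not synced yet, the helper falls back to the dynamic client.
func lookupDeployment(restMapper meta.RESTMapper, manager informermanager.SingleClusterInformerManager,
	namespace, name string) (*unstructured.Unstructured, error) {
	return helper.GetObjectFromSingleClusterCache(restMapper, manager, &keys.ClusterWideKey{
		Group:     "apps",
		Version:   "v1",
		Kind:      "Deployment",
		Namespace: namespace,
		Name:      name,
	})
}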

pkg/util/helper/pod.go (new file, 28 additions)

@@ -0,0 +1,28 @@
package helper
import (
corev1 "k8s.io/api/core/v1"
)
// GetPodCondition extracts the provided condition from the given status and returns that.
// Returns the index of the located condition and the condition itself; returns -1 and nil if the condition is not present.
func GetPodCondition(status *corev1.PodStatus, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) {
if status == nil {
return -1, nil
}
return GetPodConditionFromList(status.Conditions, conditionType)
}
// GetPodConditionFromList extracts the provided condition from the given list of conditions and
// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present.
func GetPodConditionFromList(conditions []corev1.PodCondition, conditionType corev1.PodConditionType) (int, *corev1.PodCondition) {
if conditions == nil {
return -1, nil
}
for i := range conditions {
if conditions[i].Type == conditionType {
return i, &conditions[i]
}
}
return -1, nil
}
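
A small sketch of how these helpers are intended to be used, mirroring the podUnschedulable check in the estimator's replica package; the function name is illustrative.

package example

import (
	"time"

	corev1 "k8s.io/api/core/v1"

	"github.com/karmada-io/karmada/pkg/util/helper"
)

// isUnschedulableFor reports whether pod has carried an Unschedulable PodScheduled
// condition for longer than threshold.
func isUnschedulableFor(pod *corev1.Pod, threshold time.Duration) bool {
	_, cond := helper.GetPodCondition(&pod.Status, corev1.PodScheduled)
	return cond != nil &&
		cond.Status == corev1.ConditionFalse &&
		cond.Reason == corev1.PodReasonUnschedulable &&
		cond.LastTransitionTime.Add(threshold).Before(time.Now())
}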


@@ -51,6 +51,9 @@ type SingleClusterInformerManager interface {
// Context returns the single cluster context.
Context() context.Context
// GetClient returns the dynamic client.
GetClient() dynamic.Interface
}
// NewSingleClusterInformerManager constructs a new instance of singleClusterInformerManagerImpl.
@@ -63,6 +66,7 @@ func NewSingleClusterInformerManager(client dynamic.Interface, defaultResync tim
syncedInformers: make(map[schema.GroupVersionResource]struct{}),
ctx: ctx,
cancel: cancel,
client: client,
}
}
@@ -76,6 +80,8 @@ type singleClusterInformerManagerImpl struct {
handlers map[schema.GroupVersionResource][]cache.ResourceEventHandler
client dynamic.Interface
lock sync.RWMutex
}
@@ -164,3 +170,7 @@ func (s *singleClusterInformerManagerImpl) waitForCacheSync(ctx context.Context)
func (s *singleClusterInformerManagerImpl) Context() context.Context {
return s.ctx
}
func (s *singleClusterInformerManagerImpl) GetClient() dynamic.Interface {
return s.client
}
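
For reference, a sketch of how the estimator server drives the extended interface: register listers for the supported GVRs, start the manager, wait for the caches to sync, and rely on GetClient as the live fallback. The helper below is illustrative.

package example

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"

	"github.com/karmada-io/karmada/pkg/util/informermanager"
)

// newSyncedManager mirrors the estimator server's setup of its informer manager.
func newSyncedManager(dynamicClient dynamic.Interface, stopChan <-chan struct{}) (informermanager.SingleClusterInformerManager, error) {
	manager := informermanager.NewSingleClusterInformerManager(dynamicClient, 0, stopChan)

	// Register informers for the resources we want cached; deployments is the
	// only supported GVR so far.
	gvrs := []schema.GroupVersionResource{
		appsv1.SchemeGroupVersion.WithResource("deployments"),
	}
	for _, gvr := range gvrs {
		manager.Lister(gvr)
	}

	manager.Start()
	if synced := manager.WaitForCacheSync(); synced == nil {
		return nil, fmt.Errorf("informer factory for cluster does not exist")
	}

	// manager.GetClient() now exposes the underlying dynamic client, used by
	// GetObjectFromSingleClusterCache as a live fallback before the cache syncs.
	return manager, nil
}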


@@ -0,0 +1,151 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This code is lifted from the Kubernetes codebase in order to avoid relying on the k8s.io/kubernetes package.
// However, the code has been revised to use listers instead of the API client interface.
// For reference: https://github.com/kubernetes/kubernetes/blob/release-1.22/pkg/controller/deployment/util/deployment_util.go
package workload
import (
"sort"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
)
// PodListFunc returns the Pod slice from the Pod namespace and a selector.
type PodListFunc func(string, labels.Selector) ([]*corev1.Pod, error)
// ReplicaSetListFunc returns the ReplicaSet slice from the ReplicaSet namespace and a selector.
type ReplicaSetListFunc func(string, labels.Selector) ([]*appsv1.ReplicaSet, error)
// ReplicaSetsByCreationTimestamp sorts a list of ReplicaSet by creation timestamp, using their names as a tie breaker.
type ReplicaSetsByCreationTimestamp []*appsv1.ReplicaSet
func (o ReplicaSetsByCreationTimestamp) Len() int { return len(o) }
func (o ReplicaSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsByCreationTimestamp) Less(i, j int) bool {
if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) {
return o[i].Name < o[j].Name
}
return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp)
}
// ListReplicaSetsByDeployment returns a slice of RSes the given deployment targets.
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListReplicaSetsByDeployment(deployment *appsv1.Deployment, f ReplicaSetListFunc) ([]*appsv1.ReplicaSet, error) {
// TODO: Right now we list replica sets by their labels. We should list them by selector, i.e. the replica set's selector
// should be a superset of the deployment's selector, see https://github.com/kubernetes/kubernetes/issues/19830.
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
all, err := f(namespace, selector)
if err != nil {
return nil, err
}
// Only include those whose ControllerRef matches the Deployment.
owned := make([]*appsv1.ReplicaSet, 0, len(all))
for _, rs := range all {
if metav1.IsControlledBy(rs, deployment) {
owned = append(owned, rs)
}
}
return owned, nil
}
// ListPodsByRS returns a list of pods the given deployment targets.
// This needs a list of ReplicaSets for the Deployment,
// which can be found with ListReplicaSetsByDeployment().
// Note that this does NOT attempt to reconcile ControllerRef (adopt/orphan),
// because only the controller itself should do that.
// However, it does filter out anything whose ControllerRef doesn't match.
func ListPodsByRS(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet, f PodListFunc) ([]*corev1.Pod, error) {
namespace := deployment.Namespace
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
all, err := f(namespace, selector)
if err != nil {
return all, err
}
// Only include those whose ControllerRef points to a ReplicaSet that is in
// turn owned by this Deployment.
rsMap := make(map[types.UID]bool, len(rsList))
for _, rs := range rsList {
if rs != nil {
rsMap[rs.UID] = true
}
}
owned := make([]*corev1.Pod, 0, len(all))
for i := range all {
pod := all[i]
controllerRef := metav1.GetControllerOf(pod)
if controllerRef != nil && rsMap[controllerRef.UID] {
owned = append(owned, pod)
}
}
return owned, nil
}
// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
// We ignore pod-template-hash because:
// 1. The hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change)
// 2. The deployment template won't have hash labels
func EqualIgnoreHash(template1, template2 *corev1.PodTemplateSpec) bool {
t1Copy := template1.DeepCopy()
t2Copy := template2.DeepCopy()
// Remove hash labels from template.Labels before comparing
delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
return equality.Semantic.DeepEqual(t1Copy, t2Copy)
}
// GetNewReplicaSet returns the ReplicaSet that matches the intent of the given Deployment, listing ReplicaSets through the provided ReplicaSetListFunc.
// Returns nil if the new replica set doesn't exist yet.
func GetNewReplicaSet(deployment *appsv1.Deployment, f ReplicaSetListFunc) (*appsv1.ReplicaSet, error) {
rsList, err := ListReplicaSetsByDeployment(deployment, f)
if err != nil {
return nil, err
}
return FindNewReplicaSet(deployment, rsList), nil
}
// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template).
func FindNewReplicaSet(deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) *appsv1.ReplicaSet {
sort.Sort(ReplicaSetsByCreationTimestamp(rsList))
for i := range rsList {
if EqualIgnoreHash(&rsList[i].Spec.Template, &deployment.Spec.Template) {
// In rare cases, such as after cluster upgrades, Deployment may end up with
// having more than one new ReplicaSets that have the same template as its template,
// see https://github.com/kubernetes/kubernetes/issues/40415
// We deterministically choose the oldest new ReplicaSet.
return rsList[i]
}
}
// new ReplicaSet does not exist.
return nil
}
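
A brief sketch of driving these lifted helpers from listers, mirroring listDeploymentPods in the replica package; the wrapper function is illustrative.

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	listappsv1 "k8s.io/client-go/listers/apps/v1"
	listcorev1 "k8s.io/client-go/listers/core/v1"

	utilworkload "github.com/karmada-io/karmada/pkg/util/workload"
)

// podsOfDeployment lists the pods owned (via the newest ReplicaSet) by a Deployment,
// using listers instead of direct API calls.
func podsOfDeployment(deployment *appsv1.Deployment, rsLister listappsv1.ReplicaSetLister, podLister listcorev1.PodLister) ([]*corev1.Pod, error) {
	rsListFunc := func(namespace string, selector labels.Selector) ([]*appsv1.ReplicaSet, error) {
		return rsLister.ReplicaSets(namespace).List(selector)
	}
	rs, err := utilworkload.GetNewReplicaSet(deployment, rsListFunc)
	if err != nil {
		return nil, err
	}

	podListFunc := func(namespace string, selector labels.Selector) ([]*corev1.Pod, error) {
		return podLister.Pods(namespace).List(selector)
	}
	return utilworkload.ListPodsByRS(deployment, []*appsv1.ReplicaSet{rs}, podListFunc)
}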


@@ -0,0 +1,233 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package memory
import (
"errors"
"fmt"
"sync"
"syscall"
openapi_v2 "github.com/googleapis/gnostic/openapiv2"
errorsutil "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/discovery"
restclient "k8s.io/client-go/rest"
)
type cacheEntry struct {
resourceList *metav1.APIResourceList
err error
}
// memCacheClient can Invalidate() to stay up-to-date with discovery
// information.
//
// TODO: Switch to a watch interface. Right now it will poll after each
// Invalidate() call.
type memCacheClient struct {
delegate discovery.DiscoveryInterface
lock sync.RWMutex
groupToServerResources map[string]*cacheEntry
groupList *metav1.APIGroupList
cacheValid bool
}
// Error Constants
var (
ErrCacheNotFound = errors.New("not found")
)
var _ discovery.CachedDiscoveryInterface = &memCacheClient{}
// isTransientConnectionError checks whether given error is "Connection refused" or
// "Connection reset" error which usually means that apiserver is temporarily
// unavailable.
func isTransientConnectionError(err error) bool {
var errno syscall.Errno
if errors.As(err, &errno) {
return errno == syscall.ECONNREFUSED || errno == syscall.ECONNRESET
}
return false
}
func isTransientError(err error) bool {
if isTransientConnectionError(err) {
return true
}
if t, ok := err.(errorsutil.APIStatus); ok && t.Status().Code >= 500 {
return true
}
return errorsutil.IsTooManyRequests(err)
}
// ServerResourcesForGroupVersion returns the supported resources for a group and version.
func (d *memCacheClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
d.lock.Lock()
defer d.lock.Unlock()
if !d.cacheValid {
if err := d.refreshLocked(); err != nil {
return nil, err
}
}
cachedVal, ok := d.groupToServerResources[groupVersion]
if !ok {
return nil, ErrCacheNotFound
}
if cachedVal.err != nil && isTransientError(cachedVal.err) {
r, err := d.serverResourcesForGroupVersion(groupVersion)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", groupVersion, err))
}
cachedVal = &cacheEntry{r, err}
d.groupToServerResources[groupVersion] = cachedVal
}
return cachedVal.resourceList, cachedVal.err
}
// ServerResources returns the supported resources for all groups and versions.
// Deprecated: use ServerGroupsAndResources instead.
func (d *memCacheClient) ServerResources() ([]*metav1.APIResourceList, error) {
return discovery.ServerResources(d)
}
// ServerGroupsAndResources returns the groups and supported resources for all groups and versions.
func (d *memCacheClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) {
return discovery.ServerGroupsAndResources(d)
}
func (d *memCacheClient) ServerGroups() (*metav1.APIGroupList, error) {
d.lock.Lock()
defer d.lock.Unlock()
if !d.cacheValid {
if err := d.refreshLocked(); err != nil {
return nil, err
}
}
return d.groupList, nil
}
func (d *memCacheClient) RESTClient() restclient.Interface {
return d.delegate.RESTClient()
}
func (d *memCacheClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
return discovery.ServerPreferredResources(d)
}
func (d *memCacheClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
return discovery.ServerPreferredNamespacedResources(d)
}
func (d *memCacheClient) ServerVersion() (*version.Info, error) {
return d.delegate.ServerVersion()
}
func (d *memCacheClient) OpenAPISchema() (*openapi_v2.Document, error) {
return d.delegate.OpenAPISchema()
}
func (d *memCacheClient) Fresh() bool {
d.lock.RLock()
defer d.lock.RUnlock()
// Return whether the cache is populated at all. It is still possible that
// a single entry is missing due to transient errors and the attempt to read
// that entry will trigger retry.
return d.cacheValid
}
// Invalidate enforces that no cached data that is older than the current time
// is used.
func (d *memCacheClient) Invalidate() {
d.lock.Lock()
defer d.lock.Unlock()
d.cacheValid = false
d.groupToServerResources = nil
d.groupList = nil
}
// refreshLocked refreshes the state of cache. The caller must hold d.lock for
// writing.
func (d *memCacheClient) refreshLocked() error {
// TODO: Could this multiplicative set of calls be replaced by a single call
// to ServerResources? If it's possible for more than one resulting
// APIResourceList to have the same GroupVersion, the lists would need merged.
gl, err := d.delegate.ServerGroups()
if err != nil || len(gl.Groups) == 0 {
utilruntime.HandleError(fmt.Errorf("couldn't get current server API group list: %v", err))
return err
}
wg := &sync.WaitGroup{}
resultLock := &sync.Mutex{}
rl := map[string]*cacheEntry{}
for _, g := range gl.Groups {
for _, v := range g.Versions {
gv := v.GroupVersion
wg.Add(1)
go func() {
defer wg.Done()
defer utilruntime.HandleCrash()
r, err := d.serverResourcesForGroupVersion(gv)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get resource list for %v: %v", gv, err))
}
resultLock.Lock()
defer resultLock.Unlock()
rl[gv] = &cacheEntry{r, err}
}()
}
}
wg.Wait()
d.groupToServerResources, d.groupList = rl, gl
d.cacheValid = true
return nil
}
func (d *memCacheClient) serverResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
r, err := d.delegate.ServerResourcesForGroupVersion(groupVersion)
if err != nil {
return r, err
}
if len(r.APIResources) == 0 {
return r, fmt.Errorf("Got empty response for: %v", groupVersion)
}
return r, nil
}
// NewMemCacheClient creates a new CachedDiscoveryInterface which caches
// discovery information in memory and will stay up-to-date if Invalidate is
// called with regularity.
//
// NOTE: The client will NOT resort to live lookups on cache misses.
func NewMemCacheClient(delegate discovery.DiscoveryInterface) discovery.CachedDiscoveryInterface {
return &memCacheClient{
delegate: delegate,
groupToServerResources: map[string]*cacheEntry{},
}
}

vendor/k8s.io/client-go/dynamic/fake/simple.go (generated, vendored, new file, 493 additions)

@@ -0,0 +1,493 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/testing"
)
func NewSimpleDynamicClient(scheme *runtime.Scheme, objects ...runtime.Object) *FakeDynamicClient {
unstructuredScheme := runtime.NewScheme()
for gvk := range scheme.AllKnownTypes() {
if unstructuredScheme.Recognizes(gvk) {
continue
}
if strings.HasSuffix(gvk.Kind, "List") {
unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.UnstructuredList{})
continue
}
unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.Unstructured{})
}
objects, err := convertObjectsToUnstructured(scheme, objects)
if err != nil {
panic(err)
}
for _, obj := range objects {
gvk := obj.GetObjectKind().GroupVersionKind()
if !unstructuredScheme.Recognizes(gvk) {
unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.Unstructured{})
}
gvk.Kind += "List"
if !unstructuredScheme.Recognizes(gvk) {
unstructuredScheme.AddKnownTypeWithName(gvk, &unstructured.UnstructuredList{})
}
}
return NewSimpleDynamicClientWithCustomListKinds(unstructuredScheme, nil, objects...)
}
// NewSimpleDynamicClientWithCustomListKinds try not to use this. In general you want to have the scheme have the List types registered
// and allow the default guessing for resources match. Sometimes that doesn't work, so you can specify a custom mapping here.
func NewSimpleDynamicClientWithCustomListKinds(scheme *runtime.Scheme, gvrToListKind map[schema.GroupVersionResource]string, objects ...runtime.Object) *FakeDynamicClient {
// In order to use List with this client, you have to have your lists registered so that the object tracker will find them
// in the scheme to support the t.scheme.New(listGVK) call when it's building the return value.
// Since the base fake client needs the listGVK passed through the action (in cases where there are no instances, it
// cannot look up the actual hits), we need to know a mapping of GVR to listGVK here. For GETs and other types of calls,
// there is no return value that contains a GVK, so it doesn't have to know the mapping in advance.
// first we attempt to invert known List types from the scheme to auto guess the resource with unsafe guesses
// this covers common usage of registering types in scheme and passing them
completeGVRToListKind := map[schema.GroupVersionResource]string{}
for listGVK := range scheme.AllKnownTypes() {
if !strings.HasSuffix(listGVK.Kind, "List") {
continue
}
nonListGVK := listGVK.GroupVersion().WithKind(listGVK.Kind[:len(listGVK.Kind)-4])
plural, _ := meta.UnsafeGuessKindToResource(nonListGVK)
completeGVRToListKind[plural] = listGVK.Kind
}
for gvr, listKind := range gvrToListKind {
if !strings.HasSuffix(listKind, "List") {
panic("coding error, listGVK must end in List or this fake client doesn't work right")
}
listGVK := gvr.GroupVersion().WithKind(listKind)
// if we already have this type registered, just skip it
if _, err := scheme.New(listGVK); err == nil {
completeGVRToListKind[gvr] = listKind
continue
}
scheme.AddKnownTypeWithName(listGVK, &unstructured.UnstructuredList{})
completeGVRToListKind[gvr] = listKind
}
codecs := serializer.NewCodecFactory(scheme)
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &FakeDynamicClient{scheme: scheme, gvrToListKind: completeGVRToListKind, tracker: o}
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
ns := action.GetNamespace()
watch, err := o.Watch(gvr, ns)
if err != nil {
return false, nil, err
}
return true, watch, nil
})
return cs
}
// FakeDynamicClient implements dynamic.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the methods
// you want to test easier.
type FakeDynamicClient struct {
testing.Fake
scheme *runtime.Scheme
gvrToListKind map[schema.GroupVersionResource]string
tracker testing.ObjectTracker
}
type dynamicResourceClient struct {
client *FakeDynamicClient
namespace string
resource schema.GroupVersionResource
listKind string
}
var (
_ dynamic.Interface = &FakeDynamicClient{}
_ testing.FakeClient = &FakeDynamicClient{}
)
func (c *FakeDynamicClient) Tracker() testing.ObjectTracker {
return c.tracker
}
func (c *FakeDynamicClient) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface {
return &dynamicResourceClient{client: c, resource: resource, listKind: c.gvrToListKind[resource]}
}
func (c *dynamicResourceClient) Namespace(ns string) dynamic.ResourceInterface {
ret := *c
ret.namespace = ns
return &ret
}
func (c *dynamicResourceClient) Create(ctx context.Context, obj *unstructured.Unstructured, opts metav1.CreateOptions, subresources ...string) (*unstructured.Unstructured, error) {
var uncastRet runtime.Object
var err error
switch {
case len(c.namespace) == 0 && len(subresources) == 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewRootCreateAction(c.resource, obj), obj)
case len(c.namespace) == 0 && len(subresources) > 0:
var accessor metav1.Object // avoid shadowing err
accessor, err = meta.Accessor(obj)
if err != nil {
return nil, err
}
name := accessor.GetName()
uncastRet, err = c.client.Fake.
Invokes(testing.NewRootCreateSubresourceAction(c.resource, name, strings.Join(subresources, "/"), obj), obj)
case len(c.namespace) > 0 && len(subresources) == 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewCreateAction(c.resource, c.namespace, obj), obj)
case len(c.namespace) > 0 && len(subresources) > 0:
var accessor metav1.Object // avoid shadowing err
accessor, err = meta.Accessor(obj)
if err != nil {
return nil, err
}
name := accessor.GetName()
uncastRet, err = c.client.Fake.
Invokes(testing.NewCreateSubresourceAction(c.resource, name, strings.Join(subresources, "/"), c.namespace, obj), obj)
}
if err != nil {
return nil, err
}
if uncastRet == nil {
return nil, err
}
ret := &unstructured.Unstructured{}
if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil {
return nil, err
}
return ret, err
}
func (c *dynamicResourceClient) Update(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions, subresources ...string) (*unstructured.Unstructured, error) {
var uncastRet runtime.Object
var err error
switch {
case len(c.namespace) == 0 && len(subresources) == 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewRootUpdateAction(c.resource, obj), obj)
case len(c.namespace) == 0 && len(subresources) > 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewRootUpdateSubresourceAction(c.resource, strings.Join(subresources, "/"), obj), obj)
case len(c.namespace) > 0 && len(subresources) == 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewUpdateAction(c.resource, c.namespace, obj), obj)
case len(c.namespace) > 0 && len(subresources) > 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewUpdateSubresourceAction(c.resource, strings.Join(subresources, "/"), c.namespace, obj), obj)
}
if err != nil {
return nil, err
}
if uncastRet == nil {
return nil, err
}
ret := &unstructured.Unstructured{}
if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil {
return nil, err
}
return ret, err
}
func (c *dynamicResourceClient) UpdateStatus(ctx context.Context, obj *unstructured.Unstructured, opts metav1.UpdateOptions) (*unstructured.Unstructured, error) {
var uncastRet runtime.Object
var err error
switch {
case len(c.namespace) == 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewRootUpdateSubresourceAction(c.resource, "status", obj), obj)
case len(c.namespace) > 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewUpdateSubresourceAction(c.resource, "status", c.namespace, obj), obj)
}
if err != nil {
return nil, err
}
if uncastRet == nil {
return nil, err
}
ret := &unstructured.Unstructured{}
if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil {
return nil, err
}
return ret, err
}
func (c *dynamicResourceClient) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error {
var err error
switch {
case len(c.namespace) == 0 && len(subresources) == 0:
_, err = c.client.Fake.
Invokes(testing.NewRootDeleteAction(c.resource, name), &metav1.Status{Status: "dynamic delete fail"})
case len(c.namespace) == 0 && len(subresources) > 0:
_, err = c.client.Fake.
Invokes(testing.NewRootDeleteSubresourceAction(c.resource, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic delete fail"})
case len(c.namespace) > 0 && len(subresources) == 0:
_, err = c.client.Fake.
Invokes(testing.NewDeleteAction(c.resource, c.namespace, name), &metav1.Status{Status: "dynamic delete fail"})
case len(c.namespace) > 0 && len(subresources) > 0:
_, err = c.client.Fake.
Invokes(testing.NewDeleteSubresourceAction(c.resource, strings.Join(subresources, "/"), c.namespace, name), &metav1.Status{Status: "dynamic delete fail"})
}
return err
}
func (c *dynamicResourceClient) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error {
var err error
switch {
case len(c.namespace) == 0:
action := testing.NewRootDeleteCollectionAction(c.resource, listOptions)
_, err = c.client.Fake.Invokes(action, &metav1.Status{Status: "dynamic deletecollection fail"})
case len(c.namespace) > 0:
action := testing.NewDeleteCollectionAction(c.resource, c.namespace, listOptions)
_, err = c.client.Fake.Invokes(action, &metav1.Status{Status: "dynamic deletecollection fail"})
}
return err
}
func (c *dynamicResourceClient) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error) {
var uncastRet runtime.Object
var err error
switch {
case len(c.namespace) == 0 && len(subresources) == 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewRootGetAction(c.resource, name), &metav1.Status{Status: "dynamic get fail"})
case len(c.namespace) == 0 && len(subresources) > 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewRootGetSubresourceAction(c.resource, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic get fail"})
case len(c.namespace) > 0 && len(subresources) == 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewGetAction(c.resource, c.namespace, name), &metav1.Status{Status: "dynamic get fail"})
case len(c.namespace) > 0 && len(subresources) > 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewGetSubresourceAction(c.resource, c.namespace, strings.Join(subresources, "/"), name), &metav1.Status{Status: "dynamic get fail"})
}
if err != nil {
return nil, err
}
if uncastRet == nil {
return nil, err
}
ret := &unstructured.Unstructured{}
if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil {
return nil, err
}
return ret, err
}
func (c *dynamicResourceClient) List(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error) {
if len(c.listKind) == 0 {
panic(fmt.Sprintf("coding error: you must register resource to list kind for every resource you're going to LIST when creating the client. See NewSimpleDynamicClientWithCustomListKinds or register the list into the scheme: %v out of %v", c.resource, c.client.gvrToListKind))
}
listGVK := c.resource.GroupVersion().WithKind(c.listKind)
listForFakeClientGVK := c.resource.GroupVersion().WithKind(c.listKind[:len(c.listKind)-4]) /*base library appends List*/
var obj runtime.Object
var err error
switch {
case len(c.namespace) == 0:
obj, err = c.client.Fake.
Invokes(testing.NewRootListAction(c.resource, listForFakeClientGVK, opts), &metav1.Status{Status: "dynamic list fail"})
case len(c.namespace) > 0:
obj, err = c.client.Fake.
Invokes(testing.NewListAction(c.resource, listForFakeClientGVK, c.namespace, opts), &metav1.Status{Status: "dynamic list fail"})
}
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
retUnstructured := &unstructured.Unstructured{}
if err := c.client.scheme.Convert(obj, retUnstructured, nil); err != nil {
return nil, err
}
entireList, err := retUnstructured.ToList()
if err != nil {
return nil, err
}
list := &unstructured.UnstructuredList{}
list.SetResourceVersion(entireList.GetResourceVersion())
list.GetObjectKind().SetGroupVersionKind(listGVK)
for i := range entireList.Items {
item := &entireList.Items[i]
metadata, err := meta.Accessor(item)
if err != nil {
return nil, err
}
if label.Matches(labels.Set(metadata.GetLabels())) {
list.Items = append(list.Items, *item)
}
}
return list, nil
}
func (c *dynamicResourceClient) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
switch {
case len(c.namespace) == 0:
return c.client.Fake.
InvokesWatch(testing.NewRootWatchAction(c.resource, opts))
case len(c.namespace) > 0:
return c.client.Fake.
InvokesWatch(testing.NewWatchAction(c.resource, c.namespace, opts))
}
panic("math broke")
}
// TODO: opts are currently ignored.
func (c *dynamicResourceClient) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*unstructured.Unstructured, error) {
var uncastRet runtime.Object
var err error
switch {
case len(c.namespace) == 0 && len(subresources) == 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewRootPatchAction(c.resource, name, pt, data), &metav1.Status{Status: "dynamic patch fail"})
case len(c.namespace) == 0 && len(subresources) > 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewRootPatchSubresourceAction(c.resource, name, pt, data, subresources...), &metav1.Status{Status: "dynamic patch fail"})
case len(c.namespace) > 0 && len(subresources) == 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewPatchAction(c.resource, c.namespace, name, pt, data), &metav1.Status{Status: "dynamic patch fail"})
case len(c.namespace) > 0 && len(subresources) > 0:
uncastRet, err = c.client.Fake.
Invokes(testing.NewPatchSubresourceAction(c.resource, c.namespace, name, pt, data, subresources...), &metav1.Status{Status: "dynamic patch fail"})
}
if err != nil {
return nil, err
}
if uncastRet == nil {
return nil, err
}
ret := &unstructured.Unstructured{}
if err := c.client.scheme.Convert(uncastRet, ret, nil); err != nil {
return nil, err
}
return ret, err
}
func convertObjectsToUnstructured(s *runtime.Scheme, objs []runtime.Object) ([]runtime.Object, error) {
ul := make([]runtime.Object, 0, len(objs))
for _, obj := range objs {
u, err := convertToUnstructured(s, obj)
if err != nil {
return nil, err
}
ul = append(ul, u)
}
return ul, nil
}
func convertToUnstructured(s *runtime.Scheme, obj runtime.Object) (runtime.Object, error) {
var (
err error
u unstructured.Unstructured
)
u.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return nil, fmt.Errorf("failed to convert to unstructured: %w", err)
}
gvk := u.GroupVersionKind()
if gvk.Group == "" || gvk.Kind == "" {
gvks, _, err := s.ObjectKinds(obj)
if err != nil {
return nil, fmt.Errorf("failed to convert to unstructured - unable to get GVK %w", err)
}
apiv, k := gvks[0].ToAPIVersionAndKind()
u.SetAPIVersion(apiv)
u.SetKind(k)
}
return &u, nil
}

vendor/modules.txt (vendored, 2 additions)

@@ -1025,10 +1025,12 @@ k8s.io/client-go/applyconfigurations/storage/v1alpha1
k8s.io/client-go/applyconfigurations/storage/v1beta1
k8s.io/client-go/discovery
k8s.io/client-go/discovery/cached/disk
k8s.io/client-go/discovery/cached/memory
k8s.io/client-go/discovery/fake
k8s.io/client-go/dynamic
k8s.io/client-go/dynamic/dynamicinformer
k8s.io/client-go/dynamic/dynamiclister
k8s.io/client-go/dynamic/fake
k8s.io/client-go/informers
k8s.io/client-go/informers/admissionregistration
k8s.io/client-go/informers/admissionregistration/v1