Change from deprecated Core to CoreV1 for kube client

Sergey Lanzman 2017-09-04 22:16:21 +03:00
parent 7ae64de0af
commit 415f53cdea
8 changed files with 16 additions and 16 deletions
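The pattern applied across all eight files is a one-for-one accessor swap: in client-go of this era, Clientset.Core() is deprecated in favour of CoreV1(), and both return the same typed core/v1 interface, so the calls chained off it are unchanged. A minimal before/after sketch, assuming an in-cluster config and the pre-context-argument client-go signatures seen in the hunks below (the buildClient helper is illustrative, not part of this repository):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// buildClient creates a Clientset from the in-cluster config; the autoscaler
// passes this same kind of client around as kube_client.Interface.
func buildClient() (kubernetes.Interface, error) {
	config, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return kubernetes.NewForConfig(config)
}

func main() {
	client, err := buildClient()
	if err != nil {
		panic(err)
	}
	// Before this commit: client.Core().Nodes().List(metav1.ListOptions{})
	// After this commit (same interface, non-deprecated accessor):
	nodes, err := client.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("cluster has %d nodes\n", len(nodes.Items))
}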

View File

@@ -58,7 +58,7 @@ func NewConfigFetcher(options ConfigFetcherOptions, kubeClient kube_client.Inter
// Returns the config if it has changed since the last sync. Returns nil if it has not changed.
func (c *configFetcherImpl) FetchConfigIfUpdated() (*Config, error) {
opts := metav1.GetOptions{}
- cm, err := c.kubeClient.Core().ConfigMaps(c.namespace).Get(c.configMapName, opts)
+ cm, err := c.kubeClient.CoreV1().ConfigMaps(c.namespace).Get(c.configMapName, opts)
if err != nil {
return nil, fmt.Errorf("failed to fetch config map named %s in namespace %s. please confirm if the configmap name and the namespace are correctly spelled and you've already created the configmap: %v", c.configMapName, c.namespace, err)
}
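For reference, the same ConfigMap fetch as a standalone function against the CoreV1 accessor; it assumes the era's pre-context-argument Get signature, and the namespace/name parameters are placeholders supplied by the caller:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// fetchConfigMapData mirrors the call migrated in this hunk: read one
// ConfigMap through the non-deprecated CoreV1 client and hand back its data.
func fetchConfigMapData(client kubernetes.Interface, namespace, name string) (map[string]string, error) {
	cm, err := client.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to fetch configmap %s/%s: %v", namespace, name, err)
	}
	return cm.Data, nil
}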

View File

@@ -565,7 +565,7 @@ func evictPod(podToEvict *apiv1.Pod, client kube_client.Interface, recorder kube
GracePeriodSeconds: &maxTermination,
},
}
- lastError = client.Core().Pods(podToEvict.Namespace).Evict(eviction)
+ lastError = client.CoreV1().Pods(podToEvict.Namespace).Evict(eviction)
if lastError == nil {
return nil
}
@@ -630,7 +630,7 @@ func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface
for start := time.Now(); time.Now().Sub(start) < time.Duration(maxGracefulTerminationSec)*time.Second+PodEvictionHeadroom; time.Sleep(5 * time.Second) {
allGone = true
for _, pod := range pods {
- podreturned, err := client.Core().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+ podreturned, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err == nil {
glog.Errorf("Not deleted yet %v", podreturned)
allGone = false
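These two hunks cover the eviction call and the follow-up polling for pod deletion. A condensed sketch of that flow with the CoreV1 accessor, assuming later k8s.io/api import layouts (older trees vendor the policy types elsewhere); the 30-second headroom and 5-second poll interval are illustrative, not the autoscaler's constants:

package main

import (
	"fmt"
	"time"

	apiv1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictAndWait submits an Eviction through CoreV1().Pods() and then polls Get
// until the pod is no longer returned, the same two calls this file migrates
// away from the deprecated Core() accessor.
func evictAndWait(client kubernetes.Interface, pod *apiv1.Pod, maxGracefulTerminationSec int64) error {
	eviction := &policyv1.Eviction{
		ObjectMeta:    metav1.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name},
		DeleteOptions: &metav1.DeleteOptions{GracePeriodSeconds: &maxGracefulTerminationSec},
	}
	if err := client.CoreV1().Pods(pod.Namespace).Evict(eviction); err != nil {
		return err
	}
	deadline := time.Now().Add(time.Duration(maxGracefulTerminationSec)*time.Second + 30*time.Second)
	for time.Now().Before(deadline) {
		if _, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
			// The real code inspects the error; here any lookup failure is
			// treated as "pod gone" to keep the sketch short.
			return nil
		}
		time.Sleep(5 * time.Second)
	}
	return fmt.Errorf("pod %s/%s still present after eviction", pod.Namespace, pod.Name)
}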

View File

@@ -276,7 +276,7 @@ func main() {
kubeClient := createKubeClient()
// Validate that the client is ok.
- _, err = kubeClient.Core().Nodes().List(metav1.ListOptions{})
+ _, err = kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
glog.Fatalf("Failed to get nodes from apiserver: %v", err)
}
@@ -285,7 +285,7 @@ func main() {
leaderElection.ResourceLock,
*namespace,
"cluster-autoscaler",
- kubeClient.Core(),
+ kubeClient.CoreV1(),
resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: kube_util.CreateEventRecorder(kubeClient),
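Worth noting: kubeClient.CoreV1() is passed here as a value because the resource-lock constructor only needs the typed core/v1 client, not the whole clientset. A small sketch of that idiom with an arbitrary call; the endpoints listing is purely illustrative, and the typed package path is the one client-go uses for its generated core/v1 client:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
)

// countEndpoints accepts only the typed CoreV1Interface, so callers can hand
// it kubeClient.CoreV1() directly, e.g. countEndpoints(kubeClient.CoreV1(), "kube-system").
func countEndpoints(core v1core.CoreV1Interface, namespace string) (int, error) {
	eps, err := core.Endpoints(namespace).List(metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(eps.Items), nil
}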

View File

@@ -35,7 +35,7 @@ import (
func GetRequiredPodsForNode(nodename string, client kube_client.Interface) ([]*apiv1.Pod, errors.AutoscalerError) {
// TODO: we should change this to use informer
- podListResult, err := client.Core().Pods(apiv1.NamespaceAll).List(
+ podListResult, err := client.CoreV1().Pods(apiv1.NamespaceAll).List(
metav1.ListOptions{FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": nodename}).String()})
if err != nil {
return []*apiv1.Pod{}, errors.ToAutoscalerError(errors.ApiCallError, err)
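The same node-scoped pod listing as a standalone sketch: a spec.nodeName field selector passed through CoreV1().Pods().List, using the era's pre-context-argument signature (apiv1 here aliases the core/v1 types):

package main

import (
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// podsOnNode lists every pod bound to the given node across all namespaces,
// the same query this hunk issues.
func podsOnNode(client kubernetes.Interface, nodeName string) ([]apiv1.Pod, error) {
	selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}).String()
	podList, err := client.CoreV1().Pods(apiv1.NamespaceAll).List(metav1.ListOptions{FieldSelector: selector})
	if err != nil {
		return nil, err
	}
	return podList.Items, nil
}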

View File

@@ -36,7 +36,7 @@ const (
// MarkToBeDeleted sets a taint that makes the node unschedulable.
func MarkToBeDeleted(node *apiv1.Node, client kube_client.Interface) error {
// Get the newest version of the node.
- freshNode, err := client.Core().Nodes().Get(node.Name, metav1.GetOptions{})
+ freshNode, err := client.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil || freshNode == nil {
return fmt.Errorf("failed to get node %v: %v", node.Name, err)
}
@@ -45,7 +45,7 @@ func MarkToBeDeleted(node *apiv1.Node, client kube_client.Interface) error {
if added == false {
return err
}
- _, err = client.Core().Nodes().Update(freshNode)
+ _, err = client.CoreV1().Nodes().Update(freshNode)
if err != nil {
glog.Warningf("Error while adding taints on node %v: %v", node.Name, err)
return err
@@ -96,7 +96,7 @@ func GetToBeDeletedTime(node *apiv1.Node) (*time.Time, error) {
// CleanToBeDeleted cleans ToBeDeleted taint.
func CleanToBeDeleted(node *apiv1.Node, client kube_client.Interface) (bool, error) {
- freshNode, err := client.Core().Nodes().Get(node.Name, metav1.GetOptions{})
+ freshNode, err := client.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
if err != nil || freshNode == nil {
return false, fmt.Errorf("failed to get node %v: %v", node.Name, err)
}
@@ -111,7 +111,7 @@ func CleanToBeDeleted(node *apiv1.Node, client kube_client.Interface) (bool, err
if len(newTaints) != len(freshNode.Spec.Taints) {
freshNode.Spec.Taints = newTaints
- _, err := client.Core().Nodes().Update(freshNode)
+ _, err := client.CoreV1().Nodes().Update(freshNode)
if err != nil {
glog.Warningf("Error while releasing taints on node %v: %v", node.Name, err)
return false, err
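All four hunks in this file follow the same get/modify/update cycle on a Node through the CoreV1 nodes client. A minimal sketch of that cycle; the taint key is a placeholder, not the key the autoscaler actually sets:

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// taintNode re-reads the node to get its newest version, appends a taint and
// writes it back, mirroring the MarkToBeDeleted/CleanToBeDeleted flow above.
func taintNode(client kubernetes.Interface, nodeName string) error {
	freshNode, err := client.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
	if err != nil || freshNode == nil {
		return fmt.Errorf("failed to get node %v: %v", nodeName, err)
	}
	freshNode.Spec.Taints = append(freshNode.Spec.Taints, apiv1.Taint{
		Key:    "example.com/to-be-deleted", // placeholder key
		Effect: apiv1.TaintEffectNoSchedule,
	})
	_, err = client.CoreV1().Nodes().Update(freshNode)
	return err
}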

View File

@@ -83,7 +83,7 @@ func GetPodsForDeletionOnNodeDrain(
if refKind == "ReplicationController" {
if checkReferences {
- rc, err := client.Core().ReplicationControllers(controllerNamespace).Get(controllerRef.Name, metav1.GetOptions{})
+ rc, err := client.CoreV1().ReplicationControllers(controllerNamespace).Get(controllerRef.Name, metav1.GetOptions{})
// Assume a reason for an error is because the RC is either
// gone/missing or that the rc has too few replicas configured.
// TODO: replace the minReplica check with pod disruption budget.
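A rough standalone version of the reference check this hunk performs: fetch the owning ReplicationController through CoreV1 and confirm it still has at least some minimum number of replicas configured. minReplica is an illustrative parameter and the error text is not the project's:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// rcHasEnoughReplicas looks up the controller a pod points at and checks its
// configured replica count, roughly the safety check done before allowing a
// pod to be drained.
func rcHasEnoughReplicas(client kubernetes.Interface, namespace, name string, minReplica int32) (bool, error) {
	rc, err := client.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
	if err != nil {
		return false, fmt.Errorf("replication controller %s/%s not found: %v", namespace, name, err)
	}
	return rc.Spec.Replicas != nil && *rc.Spec.Replicas >= minReplica, nil
}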

View File

@@ -32,7 +32,7 @@ func CreateEventRecorder(kubeClient clientset.Interface) kube_record.EventRecord
eventBroadcaster := kube_record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.V(4).Infof)
if _, isfake := kubeClient.(*fake.Clientset); !isfake {
- eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
+ eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
}
return eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: "cluster-autoscaler"})
}
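The event-recorder wiring for reference, written in the same shape as CreateEventRecorder; the import paths follow later client-go layouts (older releases keep the core v1 types and scheme in different packages), so treat them as assumptions rather than what this tree vendors:

package main

import (
	"github.com/golang/glog"
	apiv1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newEventRecorder connects a broadcaster to the API server through the
// CoreV1 REST client and returns a recorder for autoscaler events.
func newEventRecorder(client kubernetes.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartLogging(glog.V(4).Infof)
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{
		Interface: v1core.New(client.CoreV1().RESTClient()).Events(""),
	})
	return broadcaster.NewRecorder(scheme.Scheme, apiv1.EventSource{Component: "cluster-autoscaler"})
}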

View File

@@ -138,7 +138,7 @@ func NewUnschedulablePodInNamespaceLister(kubeClient client.Interface, namespace
// watch unscheduled pods
selector := fields.ParseSelectorOrDie("spec.nodeName==" + "" + ",status.phase!=" +
string(apiv1.PodSucceeded) + ",status.phase!=" + string(apiv1.PodFailed))
- podListWatch := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "pods", namespace, selector)
+ podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", namespace, selector)
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
podLister := v1lister.NewPodLister(store)
podReflector := cache.NewReflector(podListWatch, &apiv1.Pod{}, store, time.Hour)
@@ -163,7 +163,7 @@ func NewScheduledPodLister(kubeClient client.Interface, stopchannel <-chan struc
// watch unscheduled pods
selector := fields.ParseSelectorOrDie("spec.nodeName!=" + "" + ",status.phase!=" +
string(apiv1.PodSucceeded) + ",status.phase!=" + string(apiv1.PodFailed))
- podListWatch := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "pods", apiv1.NamespaceAll, selector)
+ podListWatch := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "pods", apiv1.NamespaceAll, selector)
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
podLister := v1lister.NewPodLister(store)
podReflector := cache.NewReflector(podListWatch, &apiv1.Pod{}, store, time.Hour)
@@ -201,7 +201,7 @@ func (readyNodeLister *ReadyNodeLister) List() ([]*apiv1.Node, error) {
// NewReadyNodeLister builds a node lister.
func NewReadyNodeLister(kubeClient client.Interface, stopChannel <-chan struct{}) *ReadyNodeLister {
- listWatcher := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", apiv1.NamespaceAll, fields.Everything())
+ listWatcher := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "nodes", apiv1.NamespaceAll, fields.Everything())
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
nodeLister := v1lister.NewNodeLister(store)
reflector := cache.NewReflector(listWatcher, &apiv1.Node{}, store, time.Hour)
@@ -231,7 +231,7 @@ func (allNodeLister *AllNodeLister) List() ([]*apiv1.Node, error) {
// NewAllNodeLister builds a node lister that returns all nodes (ready and unready)
func NewAllNodeLister(kubeClient client.Interface, stopchannel <-chan struct{}) *AllNodeLister {
- listWatcher := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", apiv1.NamespaceAll, fields.Everything())
+ listWatcher := cache.NewListWatchFromClient(kubeClient.CoreV1().RESTClient(), "nodes", apiv1.NamespaceAll, fields.Everything())
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
nodeLister := v1lister.NewNodeLister(store)
reflector := cache.NewReflector(listWatcher, &apiv1.Node{}, store, time.Hour)
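The four hunks in this file all build the same plumbing: a list-watch against CoreV1().RESTClient(), an indexer store, a typed lister over the store, and a reflector to keep it filled. A sketch of that wiring for the scheduled-pod case; starting the reflector is left to the caller because the start method changed across client-go releases (RunUntil in this era, Run(stopCh) later):

package main

import (
	"time"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	v1lister "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
)

// newScheduledPodLister wires a pod lister to the CoreV1 REST client for pods
// that are already bound to a node and not yet finished.
func newScheduledPodLister(client kubernetes.Interface) (v1lister.PodLister, *cache.Reflector) {
	selector := fields.ParseSelectorOrDie("spec.nodeName!=,status.phase!=" +
		string(apiv1.PodSucceeded) + ",status.phase!=" + string(apiv1.PodFailed))
	listWatch := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", apiv1.NamespaceAll, selector)
	store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
	reflector := cache.NewReflector(listWatch, &apiv1.Pod{}, store, time.Hour)
	return v1lister.NewPodLister(store), reflector
}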