Cluster-autoscaler: update CA code for godep refresh

Marcin Wielgus 2017-01-19 17:10:33 +01:00
parent c57810d1b0
commit ce45c33d29
22 changed files with 78 additions and 69 deletions
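Note: the changes are mechanical and follow four recurring patterns from the godep refresh: utility packages (wait, sets, labels, runtime, runtime/schema, api/errors) move from k8s.io/kubernetes to the new k8s.io/apimachinery repository; ObjectMeta moves from pkg/api/v1 to apimachinery's metav1 package; the generated clientset moves from clientset_generated/release_1_5 to clientset_generated/clientset; and typed client Get calls gain an explicit metav1.GetOptions{} argument.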

View File

@@ -28,8 +28,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/autoscaling"
 	"github.com/golang/glog"
+	"k8s.io/apimachinery/pkg/util/wait"
 	provider_aws "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
-	"k8s.io/kubernetes/pkg/util/wait"
 )
 
 const (
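The cloud-provider changes here and in the GCE file below are import-path only; the wait utilities keep their API under the new apimachinery home. A minimal sketch of the unchanged call pattern; the interval and timeout values are illustrative, not taken from this code:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait" // was k8s.io/kubernetes/pkg/util/wait
)

func main() {
	// Poll keeps its old signature under the new import path.
	err := wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) {
		return true, nil // condition satisfied on the first poll
	})
	fmt.Println(err) // <nil>
}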

View File

@@ -29,8 +29,8 @@ import (
 	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
 	gce "google.golang.org/api/compute/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
 	provider_gce "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
-	"k8s.io/kubernetes/pkg/util/wait"
 )
 
 const (

View File

@@ -24,6 +24,7 @@ import (
 	"strings"
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/contrib/cluster-autoscaler/cloudprovider"
 	"k8s.io/contrib/cluster-autoscaler/cloudprovider/aws"
 	"k8s.io/contrib/cluster-autoscaler/cloudprovider/gce"
@@ -36,8 +37,8 @@ import (
 	"k8s.io/contrib/cluster-autoscaler/simulator"
 	kube_util "k8s.io/contrib/cluster-autoscaler/utils/kubernetes"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
-	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
+	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
 	kube_leaderelection "k8s.io/kubernetes/pkg/client/leaderelection"
 	"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
 	kube_record "k8s.io/kubernetes/pkg/client/record"
@@ -474,7 +475,7 @@ func main() {
 		kubeClient := createKubeClient()
 		kube_leaderelection.RunOrDie(kube_leaderelection.LeaderElectionConfig{
 			Lock: &resourcelock.EndpointsLock{
-				EndpointsMeta: apiv1.ObjectMeta{
+				EndpointsMeta: metav1.ObjectMeta{
 					Namespace: "kube-system",
 					Name:      "cluster-autoscaler",
 				},
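Besides imports, the only code change in this file is the leader-election lock metadata, now a metav1.ObjectMeta. A sketch of the updated construction; newCALock is an illustrative name, while the field values come from the hunk above:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
)

// newCALock builds the endpoints-based leader-election lock passed to
// kube_leaderelection.RunOrDie in main().
func newCALock() *resourcelock.EndpointsLock {
	return &resourcelock.EndpointsLock{
		EndpointsMeta: metav1.ObjectMeta{ // was apiv1.ObjectMeta
			Namespace: "kube-system",
			Name:      "cluster-autoscaler",
		},
	}
}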

View File

@@ -25,8 +25,9 @@ import (
 	"k8s.io/contrib/cluster-autoscaler/utils/deletetaint"
 	kube_util "k8s.io/contrib/cluster-autoscaler/utils/kubernetes"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	"k8s.io/kubernetes/pkg/util/sets"
 
 	"github.com/golang/glog"
 )
@@ -457,7 +458,7 @@ func getNotRegisteredNodes(allNodes []*apiv1.Node, cloudProvider cloudprovider.C
 		if !registered.Has(node) {
 			notRegistered = append(notRegistered, UnregisteredNode{
 				Node: &apiv1.Node{
-					ObjectMeta: apiv1.ObjectMeta{
+					ObjectMeta: metav1.ObjectMeta{
 						Name: node,
 					},
 					Spec: apiv1.NodeSpec{
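Two moves meet here: sets keeps its API under apimachinery, and the stub Node built for unregistered cloud instances now carries metav1 metadata. A sketch of both; buildPlaceholderNode is an illustrative name, not a helper from this file:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets" // was k8s.io/kubernetes/pkg/util/sets
	apiv1 "k8s.io/kubernetes/pkg/api/v1"
)

// registered mirrors the set that getNotRegisteredNodes checks membership against.
var registered = sets.NewString("node-1", "node-2")

// buildPlaceholderNode builds the kind of stub Node appended for every
// instance name missing from the registered set.
func buildPlaceholderNode(name string) *apiv1.Node {
	return &apiv1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: name}, // was apiv1.ObjectMeta
	}
}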

View File

@@ -20,10 +20,10 @@ import (
 	"testing"
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/contrib/cluster-autoscaler/cloudprovider/test"
 	. "k8s.io/contrib/cluster-autoscaler/utils/test"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 
 	"github.com/stretchr/testify/assert"
 )

View File

@@ -22,10 +22,10 @@ import (
 	"net/url"
 	"strconv"
 
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	kube_rest "k8s.io/kubernetes/pkg/client/restclient"
 	kube_client_cmd "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	kube_client_cmd_api "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
-	"k8s.io/kubernetes/pkg/runtime/schema"
 )
 
 // This code was borrowed from Heapster to push the work forward and contains some functionality
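runtime/schema moved wholesale as well; callers only swap the import path. A minimal sketch:

package main

import "k8s.io/apimachinery/pkg/runtime/schema" // was k8s.io/kubernetes/pkg/runtime/schema

// A GroupVersion literal of the kind used when filling in REST client configs.
var coreV1 = schema.GroupVersion{Group: "", Version: "v1"}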

View File

@@ -28,9 +28,10 @@ import (
 	"k8s.io/contrib/cluster-autoscaler/utils/deletetaint"
 	kube_util "k8s.io/contrib/cluster-autoscaler/utils/kubernetes"
-	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	kube_record "k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
@@ -341,7 +342,7 @@ func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface
 	for start := time.Now(); time.Now().Sub(start) < time.Duration(maxGratefulTerminationSec)*time.Second; time.Sleep(5 * time.Second) {
 		allGone = true
 		for _, pod := range pods {
-			podreturned, err := client.Core().Pods(pod.Namespace).Get(pod.Name)
+			podreturned, err := client.Core().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
 			if err == nil {
 				glog.Errorf("Not deleted yet %v", podreturned)
 				allGone = false
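The second hunk shows the new typed-client read contract: Get takes a trailing metav1.GetOptions value. A condensed sketch of the existence check drainNode performs while waiting for evicted pods to disappear; podStillExists is an illustrative name and the error handling is simplified relative to the real loop:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// podStillExists reports whether the pod can still be fetched.
func podStillExists(client kube_client.Interface, namespace, name string) bool {
	// The metav1.GetOptions{} argument is the new part of the call.
	_, err := client.Core().Pods(namespace).Get(name, metav1.GetOptions{})
	return err == nil
}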

View File

@@ -26,12 +26,13 @@ import (
 	"k8s.io/contrib/cluster-autoscaler/simulator"
 	. "k8s.io/contrib/cluster-autoscaler/utils/test"
-	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
 	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
 	"k8s.io/kubernetes/pkg/client/testing/core"
-	"k8s.io/kubernetes/pkg/runtime"
 
 	"github.com/stretchr/testify/assert"
 )
@@ -135,7 +136,7 @@ func TestScaleDown(t *testing.T) {
 	fakeClient := &fake.Clientset{}
 
 	job := batchv1.Job{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "job",
 			Namespace: "default",
 			SelfLink:  "/apivs/extensions/v1beta1/namespaces/default/jobs/job",
@@ -288,7 +289,7 @@ func TestScaleDownNoMove(t *testing.T) {
 	fakeClient := &fake.Clientset{}
 
 	job := batchv1.Job{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "job",
 			Namespace: "default",
 			SelfLink:  "/apivs/extensions/v1beta1/namespaces/default/jobs/job",
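Tests pick up the fake clientset from its new location. Assuming fake.NewSimpleClientset is available at this revision (it is part of the generated fake package), a minimal sketch of seeding it with a metav1-keyed object and reading it back through the new Get signature:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apiv1 "k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
)

func example() error {
	client := fake.NewSimpleClientset(&apiv1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"},
	})
	// The seeded pod is served back; err is nil.
	_, err := client.Core().Pods("default").Get("p1", metav1.GetOptions{})
	return err
}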

View File

@@ -28,10 +28,10 @@ import (
 	"k8s.io/contrib/cluster-autoscaler/simulator"
 	. "k8s.io/contrib/cluster-autoscaler/utils/test"
+	"k8s.io/apimachinery/pkg/runtime"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
 	"k8s.io/kubernetes/pkg/client/testing/core"
-	"k8s.io/kubernetes/pkg/runtime"
 
 	"github.com/stretchr/testify/assert"
 )

View File

@@ -25,7 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/resource"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 
 	"github.com/golang/glog"

View File

@@ -20,7 +20,7 @@ import (
 	"k8s.io/contrib/cluster-autoscaler/utils/drain"
 	api "k8s.io/kubernetes/pkg/api"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

View File

@@ -19,6 +19,7 @@ package simulator
 import (
 	"testing"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
@@ -30,7 +31,7 @@ func TestFastGetPodsToMove(t *testing.T) {
 
 	// Unreplicated pod
 	pod1 := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "pod1",
 			Namespace: "ns",
 		},
@@ -40,7 +41,7 @@ func TestFastGetPodsToMove(t *testing.T) {
 
 	// Replicated pod
 	pod2 := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "pod2",
 			Namespace: "ns",
 			Annotations: map[string]string{
@@ -55,7 +56,7 @@ func TestFastGetPodsToMove(t *testing.T) {
 
 	// Manifest pod
 	pod3 := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "pod3",
 			Namespace: "kube-system",
 			Annotations: map[string]string{
@@ -69,7 +70,7 @@ func TestFastGetPodsToMove(t *testing.T) {
 
 	// DeamonSet pod
 	pod4 := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "pod4",
 			Namespace: "ns",
 			Annotations: map[string]string{
@@ -84,7 +85,7 @@ func TestFastGetPodsToMove(t *testing.T) {
 
 	// Kube-system
 	pod5 := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "pod5",
 			Namespace: "kube-system",
 			Annotations: map[string]string{
@@ -97,7 +98,7 @@ func TestFastGetPodsToMove(t *testing.T) {
 
 	// Local storage
 	pod6 := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "pod6",
 			Namespace: "ns",
 			Annotations: map[string]string{
@@ -119,7 +120,7 @@ func TestFastGetPodsToMove(t *testing.T) {
 
 	// Non-local storage
 	pod7 := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "pod7",
 			Namespace: "ns",
 			Annotations: map[string]string{

View File

@@ -20,7 +20,7 @@ import (
 	"k8s.io/contrib/cluster-autoscaler/utils/drain"
 	api "k8s.io/kubernetes/pkg/api"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

View File

@@ -19,18 +19,19 @@ package simulator
 import (
 	"testing"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
 	"k8s.io/kubernetes/pkg/client/testing/core"
 	"k8s.io/kubernetes/pkg/kubelet/types"
-	"k8s.io/kubernetes/pkg/runtime"
 
 	"github.com/stretchr/testify/assert"
 )
 
 func TestRequiredPodsForNode(t *testing.T) {
 	pod1 := apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "default",
 			Name:      "pod1",
 			SelfLink:  "pod1",
@@ -38,7 +39,7 @@ func TestRequiredPodsForNode(t *testing.T) {
 	}
 	// Manifest pod.
 	pod2 := apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "pod2",
 			Namespace: "kube-system",
 			SelfLink:  "pod2",

View File

@@ -21,7 +21,7 @@ import (
 	kube_util "k8s.io/contrib/cluster-autoscaler/utils/kubernetes"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"

View File

@@ -26,9 +26,9 @@ import (
 	"k8s.io/contrib/cluster-autoscaler/expander"
 	"k8s.io/contrib/cluster-autoscaler/simulator"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
-	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	kube_record "k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"

View File

@@ -21,8 +21,9 @@ import (
 	"fmt"
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 
 	"github.com/golang/glog"
 )
@@ -35,7 +36,7 @@ const (
 // MarkToBeDeleted sets a taint that makes the node unschedulable.
 func MarkToBeDeleted(node *apiv1.Node, client kube_client.Interface) error {
 	// Get the newest version of the node.
-	freshNode, err := client.Core().Nodes().Get(node.Name)
+	freshNode, err := client.Core().Nodes().Get(node.Name, metav1.GetOptions{})
 	if err != nil || freshNode == nil {
 		return fmt.Errorf("failed to get node %v: %v", node.Name, err)
 	}
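The same two-argument form applies to every typed resource client; a fragment restating the node re-read above, with client and node as in MarkToBeDeleted:

	// Re-fetch the newest version of the node before mutating it.
	freshNode, err := client.Core().Nodes().Get(node.Name, metav1.GetOptions{})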

View File

@@ -22,11 +22,11 @@ import (
 	. "k8s.io/contrib/cluster-autoscaler/utils/test"
-	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
 	"k8s.io/kubernetes/pkg/client/testing/core"
-	"k8s.io/kubernetes/pkg/runtime"
 
 	"github.com/stretchr/testify/assert"
 )

View File

@@ -19,11 +19,12 @@ package drain
 import (
 	"fmt"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	api "k8s.io/kubernetes/pkg/api"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/kubelet/types"
-	"k8s.io/kubernetes/pkg/runtime"
 )
 
 // GetPodsForDeletionOnNodeDrain returns pods that should be deleted on node drain as well as some extra information
@@ -59,7 +60,7 @@ func GetPodsForDeletionOnNodeDrain(
 		if refKind == "ReplicationController" {
 			if checkReferences {
-				rc, err := client.Core().ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name)
+				rc, err := client.Core().ReplicationControllers(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{})
 
 				// Assume a reason for an error is because the RC is either
 				// gone/missing or that the rc has too few replicas configured.
 				// TODO: replace the minReplica check with pod disruption budget.
@@ -78,7 +79,7 @@ func GetPodsForDeletionOnNodeDrain(
 			}
 		} else if refKind == "DaemonSet" {
 			if checkReferences {
-				ds, err := client.Extensions().DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name)
+				ds, err := client.Extensions().DaemonSets(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{})
 
 				// Assume the only reason for an error is because the DaemonSet is
 				// gone/missing, not for any other cause. TODO(mml): something more
@@ -97,7 +98,7 @@ func GetPodsForDeletionOnNodeDrain(
 			}
 		} else if refKind == "Job" {
 			if checkReferences {
-				job, err := client.Batch().Jobs(sr.Reference.Namespace).Get(sr.Reference.Name)
+				job, err := client.Batch().Jobs(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{})
 
 				// Assume the only reason for an error is because the Job is
 				// gone/missing, not for any other cause. TODO(mml): something more
@@ -112,7 +113,7 @@ func GetPodsForDeletionOnNodeDrain(
 			}
 		} else if refKind == "ReplicaSet" {
 			if checkReferences {
-				rs, err := client.Extensions().ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name)
+				rs, err := client.Extensions().ReplicaSets(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{})
 
 				// Assume the only reason for an error is because the RS is
 				// gone/missing, not for any other cause. TODO(mml): something more
@@ -131,7 +132,7 @@ func GetPodsForDeletionOnNodeDrain(
 			}
 		} else if refKind == "StatefulSet" {
 			if checkReferences {
-				ss, err := client.Apps().StatefulSets(sr.Reference.Namespace).Get(sr.Reference.Name)
+				ss, err := client.Apps().StatefulSets(sr.Reference.Namespace).Get(sr.Reference.Name, metav1.GetOptions{})
 
 				// Assume the only reason for an error is because the StatefulSet is
 				// gone/missing, not for any other cause. TODO(mml): something more
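Every controller-reference branch in GetPodsForDeletionOnNodeDrain changes identically. A condensed sketch; controllerExists is an illustrative wrapper, not a helper from this file, and it folds the per-kind error comments into a single boolean:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kube_client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// controllerExists condenses the per-kind checks: each typed Get call now
// carries a trailing metav1.GetOptions{}.
func controllerExists(client kube_client.Interface, refKind, namespace, name string) bool {
	var err error
	switch refKind {
	case "ReplicationController":
		_, err = client.Core().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
	case "DaemonSet":
		_, err = client.Extensions().DaemonSets(namespace).Get(name, metav1.GetOptions{})
	case "Job":
		_, err = client.Batch().Jobs(namespace).Get(name, metav1.GetOptions{})
	case "ReplicaSet":
		_, err = client.Extensions().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
	case "StatefulSet":
		_, err = client.Apps().StatefulSets(namespace).Get(name, metav1.GetOptions{})
	}
	return err == nil
}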

View File

@@ -20,6 +20,8 @@ import (
 	"fmt"
 	"testing"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	. "k8s.io/contrib/cluster-autoscaler/utils/test"
 	api "k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
@@ -27,16 +29,15 @@ import (
 	appsv1beta1 "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
 	batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
 	"k8s.io/kubernetes/pkg/client/testing/core"
-	"k8s.io/kubernetes/pkg/runtime"
 )
 
 func TestDrain(t *testing.T) {
 	replicas := int32(5)
 
 	rc := apiv1.ReplicationController{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "rc",
 			Namespace: "default",
 			SelfLink:  testapi.Default.SelfLink("replicationcontrollers", "rc"),
@@ -47,7 +48,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	rcPod := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:        "bar",
 			Namespace:   "default",
 			Annotations: map[string]string{apiv1.CreatedByAnnotation: RefJSON(&rc)},
@@ -58,7 +59,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	ds := extensions.DaemonSet{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "ds",
 			Namespace: "default",
 			SelfLink:  "/apiv1s/extensions/v1beta1/namespaces/default/daemonsets/ds",
@@ -66,7 +67,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	dsPod := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:        "bar",
 			Namespace:   "default",
 			Annotations: map[string]string{apiv1.CreatedByAnnotation: RefJSON(&ds)},
@@ -77,7 +78,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	job := batchv1.Job{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "job",
 			Namespace: "default",
 			SelfLink:  "/apiv1s/extensions/v1beta1/namespaces/default/jobs/job",
@@ -85,7 +86,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	jobPod := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:        "bar",
 			Namespace:   "default",
 			Annotations: map[string]string{apiv1.CreatedByAnnotation: RefJSON(&job)},
@@ -93,7 +94,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	statefulset := appsv1beta1.StatefulSet{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "ss",
 			Namespace: "default",
 			SelfLink:  "/apiv1s/extensions/v1beta1/namespaces/default/statefulsets/ss",
@@ -101,7 +102,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	ssPod := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:        "bar",
 			Namespace:   "default",
 			Annotations: map[string]string{apiv1.CreatedByAnnotation: RefJSON(&statefulset)},
@@ -109,7 +110,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	rs := extensions.ReplicaSet{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "rs",
 			Namespace: "default",
 			SelfLink:  testapi.Default.SelfLink("replicasets", "rs"),
@@ -120,7 +121,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	rsPod := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:        "bar",
 			Namespace:   "default",
 			Annotations: map[string]string{apiv1.CreatedByAnnotation: RefJSON(&rs)},
@@ -131,7 +132,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	nakedPod := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "bar",
 			Namespace: "default",
 		},
@@ -141,7 +142,7 @@ func TestDrain(t *testing.T) {
 	}
 
 	emptydirPod := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:      "bar",
 			Namespace: "default",
 		},
@@ -215,7 +216,7 @@ func TestDrain(t *testing.T) {
 	for _, test := range tests {
 		fakeClient := &fake.Clientset{}
 
-		register := func(resource string, obj runtime.Object, meta apiv1.ObjectMeta) {
+		register := func(resource string, obj runtime.Object, meta metav1.ObjectMeta) {
 			fakeClient.Fake.AddReactor("get", resource, func(action core.Action) (bool, runtime.Object, error) {
 				getAction := action.(core.GetAction)
 				if getAction.GetName() == meta.GetName() && getAction.GetNamespace() == meta.GetNamespace() {
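The register helper now takes a metav1.ObjectMeta, so call sites can pass a fixture's embedded metadata directly. A plausible call shape for the objects defined earlier in this test:

	// Serve the test's ReplicationController from the fake "get" reactor.
	register("replicationcontrollers", &rc, rc.ObjectMeta)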

View File

@@ -19,11 +19,11 @@ package kubernetes
 import (
 	"time"
 
+	"k8s.io/apimachinery/pkg/labels"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
-	client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/fields"
-	"k8s.io/kubernetes/pkg/labels"
 )
 
 // UnschedulablePodLister lists unscheduled pods
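labels moves to apimachinery while fields stays in the main repo at this revision. Listers such as UnschedulablePodLister are built around selectors from these packages; a sketch of selecting unscheduled pods (the exact selector this file uses is not shown in the diff):

package main

import (
	"k8s.io/apimachinery/pkg/labels" // was k8s.io/kubernetes/pkg/labels
	"k8s.io/kubernetes/pkg/fields"
)

var (
	// Pods with an empty spec.nodeName, i.e. not yet scheduled.
	unscheduled = fields.OneTermEqualSelector("spec.nodeName", "")
	// Match every label.
	everything = labels.Everything()
)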

View File

@@ -20,18 +20,18 @@ import (
 	"fmt"
 	"time"
 
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/api/resource"
 	apiv1 "k8s.io/kubernetes/pkg/api/v1"
-	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/kubernetes/pkg/api/testapi"
-	"k8s.io/kubernetes/pkg/runtime"
 )
 
 // BuildTestPod creates a pod with specified resources.
 func BuildTestPod(name string, cpu int64, mem int64) *apiv1.Pod {
 	pod := &apiv1.Pod{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Namespace: "default",
 			Name:      name,
 			SelfLink:  fmt.Sprintf("/api/v1/namespaces/default/pods/%s", name),
@@ -60,7 +60,7 @@ func BuildTestPod(name string, cpu int64, mem int64) *apiv1.Pod {
 // BuildTestNode creates a node with specified capacity.
 func BuildTestNode(name string, cpu int64, mem int64) *apiv1.Node {
 	node := &apiv1.Node{
-		ObjectMeta: apiv1.ObjectMeta{
+		ObjectMeta: metav1.ObjectMeta{
 			Name:     name,
 			SelfLink: fmt.Sprintf("/api/v1/nodes/%s", name),
 		},
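The helpers keep their int64 signatures; only the metadata type changes. A usage sketch, assuming cpu is in millicores and mem in bytes as in the upstream helpers:

func exampleFixtures() {
	// A 500m-CPU / 512MiB pod and a 2-core / 4GiB node for a test case.
	pod := BuildTestPod("p1", 500, 512*1024*1024)
	node := BuildTestNode("n1", 2000, 4*1024*1024*1024)
	_, _ = pod, node
}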