Move leaderelection options to new packages

parent 128729bae9
commit 2546d0d97c
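Most of the hunks below replace the deprecated `kubeletapis.Label*` constants with the well-known label constants exported by `k8s.io/api/core/v1` (imported as `apiv1`); scheduler predicate types move from `pkg/scheduler/algorithm` to `pkg/scheduler/algorithm/predicates`, and the leader-election configuration type moves from `k8s.io/apiserver/pkg/apis/config` to `k8s.io/component-base/config`. For reference, a sketch of the label constants this diff switches to, as they were commonly defined in `k8s.io/api/core/v1` around this release (the string values are recalled from that package, not shown anywhere in this diff):

```go
// Sketch of the well-known label constants assumed by the hunks below, as
// defined in k8s.io/api/core/v1 (well_known_labels.go) in this era. Values
// are recalled, not quoted from this diff.
package v1

const (
	LabelHostname          = "kubernetes.io/hostname"
	LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
	LabelZoneRegion        = "failure-domain.beta.kubernetes.io/region"
	LabelInstanceType      = "beta.kubernetes.io/instance-type"
)
```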
@@ -231,10 +231,10 @@ func buildGenericLabels(template *sgTemplate, nodeName string) map[string]string
 	result[kubeletapis.LabelArch] = cloudprovider.DefaultArch
 	result[kubeletapis.LabelOS] = cloudprovider.DefaultOS
 
-	result[kubeletapis.LabelInstanceType] = template.InstanceType.instanceTypeID
+	result[apiv1.LabelInstanceType] = template.InstanceType.instanceTypeID
 
-	result[kubeletapis.LabelZoneRegion] = template.Region
-	result[kubeletapis.LabelZoneFailureDomain] = template.Zone
-	result[kubeletapis.LabelHostname] = nodeName
+	result[apiv1.LabelZoneRegion] = template.Region
+	result[apiv1.LabelZoneFailureDomain] = template.Zone
+	result[apiv1.LabelHostname] = nodeName
 	return result
 }
@@ -17,9 +17,10 @@ limitations under the License.
 package alicloud
 
 import (
-	"github.com/stretchr/testify/assert"
-	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+	apiv1 "k8s.io/api/core/v1"
 )
 
 func TestBuildGenericLabels(t *testing.T) {

@@ -35,5 +36,5 @@ func TestBuildGenericLabels(t *testing.T) {
 	}
 	nodeName := "virtual-node"
 	labels := buildGenericLabels(template, nodeName)
-	assert.Equal(t, labels[kubeletapis.LabelInstanceType], template.InstanceType.instanceTypeID)
+	assert.Equal(t, labels[apiv1.LabelInstanceType], template.InstanceType.instanceTypeID)
 }
@@ -272,11 +272,11 @@ func buildGenericLabels(template *asgTemplate, nodeName string) map[string]strin
 	result[kubeletapis.LabelArch] = cloudprovider.DefaultArch
 	result[kubeletapis.LabelOS] = cloudprovider.DefaultOS
 
-	result[kubeletapis.LabelInstanceType] = template.InstanceType.InstanceType
+	result[apiv1.LabelInstanceType] = template.InstanceType.InstanceType
 
-	result[kubeletapis.LabelZoneRegion] = template.Region
-	result[kubeletapis.LabelZoneFailureDomain] = template.Zone
-	result[kubeletapis.LabelHostname] = nodeName
+	result[apiv1.LabelZoneRegion] = template.Region
+	result[apiv1.LabelZoneFailureDomain] = template.Zone
+	result[apiv1.LabelHostname] = nodeName
 	return result
 }
 
@@ -76,9 +76,9 @@ func TestBuildGenericLabels(t *testing.T) {
 		},
 		Region: "us-east-1",
 	}, "sillyname")
-	assert.Equal(t, "us-east-1", labels[kubeletapis.LabelZoneRegion])
-	assert.Equal(t, "sillyname", labels[kubeletapis.LabelHostname])
-	assert.Equal(t, "c4.large", labels[kubeletapis.LabelInstanceType])
+	assert.Equal(t, "us-east-1", labels[apiv1.LabelZoneRegion])
+	assert.Equal(t, "sillyname", labels[apiv1.LabelHostname])
+	assert.Equal(t, "c4.large", labels[apiv1.LabelInstanceType])
 	assert.Equal(t, cloudprovider.DefaultArch, labels[kubeletapis.LabelArch])
 	assert.Equal(t, cloudprovider.DefaultOS, labels[kubeletapis.LabelOS])
 }
@@ -29,6 +29,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/config/dynamic"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
+	cloudvolume "k8s.io/cloud-provider/volume"
 	"k8s.io/klog"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@@ -395,8 +396,8 @@ func buildGenericLabels(template compute.VirtualMachineScaleSet, nodeName string
 
 	result[kubeletapis.LabelArch] = cloudprovider.DefaultArch
 	result[kubeletapis.LabelOS] = buildInstanceOS(template)
-	result[kubeletapis.LabelInstanceType] = *template.Sku.Name
-	result[kubeletapis.LabelZoneRegion] = strings.ToLower(*template.Location)
+	result[apiv1.LabelInstanceType] = *template.Sku.Name
+	result[apiv1.LabelZoneRegion] = strings.ToLower(*template.Location)
 
 	if template.Zones != nil && len(*template.Zones) > 0 {
 		failureDomains := make([]string, len(*template.Zones))
@@ -404,12 +405,12 @@ func buildGenericLabels(template compute.VirtualMachineScaleSet, nodeName string
 			failureDomains[k] = strings.ToLower(*template.Location) + "-" + v
 		}
 
-		result[kubeletapis.LabelZoneFailureDomain] = strings.Join(failureDomains[:], kubeletapis.LabelMultiZoneDelimiter)
+		result[apiv1.LabelZoneFailureDomain] = strings.Join(failureDomains[:], cloudvolume.LabelMultiZoneDelimiter)
 	} else {
-		result[kubeletapis.LabelZoneFailureDomain] = "0"
+		result[apiv1.LabelZoneFailureDomain] = "0"
 	}
 
-	result[kubeletapis.LabelHostname] = nodeName
+	result[apiv1.LabelHostname] = nodeName
 	return result
 }
 
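The multi-zone join above also swaps the delimiter constant: `kubeletapis.LabelMultiZoneDelimiter` is replaced by `cloudvolume.LabelMultiZoneDelimiter` from the newly imported `k8s.io/cloud-provider/volume` package. A minimal standalone sketch of the resulting behaviour, assuming the delimiter value is `"__"` as recalled from that package:

```go
package main

import (
	"fmt"
	"strings"
)

// LabelMultiZoneDelimiter mirrors the constant imported above from
// k8s.io/cloud-provider/volume; the "__" value is an assumption recalled
// from that package, not stated in this diff.
const LabelMultiZoneDelimiter = "__"

func main() {
	// A scale set spanning two zones yields one joined failure-domain label,
	// matching the strings.Join call in the hunk above.
	failureDomains := []string{"westeurope-1", "westeurope-2"}
	fmt.Println(strings.Join(failureDomains, LabelMultiZoneDelimiter))
	// Output: westeurope-1__westeurope-2
}
```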
@@ -23,7 +23,6 @@ import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/units"
-	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 )
 
 // GcePriceModel implements PriceModel interface for GCE.

@@ -96,7 +95,7 @@ func (model *GcePriceModel) NodePrice(node *apiv1.Node, startTime time.Time, end
 	price := 0.0
 	basePriceFound := false
 	if node.Labels != nil {
-		if machineType, found := node.Labels[kubeletapis.LabelInstanceType]; found {
+		if machineType, found := node.Labels[apiv1.LabelInstanceType]; found {
 			var priceMapToUse map[string]float64
 			if node.Labels[preemptibleLabel] == "true" {
 				priceMapToUse = preemptiblePrices
@@ -179,14 +179,14 @@ func BuildGenericLabels(ref GceRef, machineType string, nodeName string) (map[st
 	result[kubeletapis.LabelArch] = cloudprovider.DefaultArch
 	result[kubeletapis.LabelOS] = cloudprovider.DefaultOS
 
-	result[kubeletapis.LabelInstanceType] = machineType
+	result[apiv1.LabelInstanceType] = machineType
 	ix := strings.LastIndex(ref.Zone, "-")
 	if ix == -1 {
 		return nil, fmt.Errorf("unexpected zone: %s", ref.Zone)
 	}
-	result[kubeletapis.LabelZoneRegion] = ref.Zone[:ix]
-	result[kubeletapis.LabelZoneFailureDomain] = ref.Zone
-	result[kubeletapis.LabelHostname] = nodeName
+	result[apiv1.LabelZoneRegion] = ref.Zone[:ix]
+	result[apiv1.LabelZoneFailureDomain] = ref.Zone
+	result[apiv1.LabelHostname] = nodeName
 	return result, nil
 }
 
@@ -133,10 +133,10 @@ func TestBuildGenericLabels(t *testing.T) {
 		Zone:    "us-central1-b"},
 		"n1-standard-8", "sillyname")
 	assert.Nil(t, err)
-	assert.Equal(t, "us-central1", labels[kubeletapis.LabelZoneRegion])
-	assert.Equal(t, "us-central1-b", labels[kubeletapis.LabelZoneFailureDomain])
-	assert.Equal(t, "sillyname", labels[kubeletapis.LabelHostname])
-	assert.Equal(t, "n1-standard-8", labels[kubeletapis.LabelInstanceType])
+	assert.Equal(t, "us-central1", labels[apiv1.LabelZoneRegion])
+	assert.Equal(t, "us-central1-b", labels[apiv1.LabelZoneFailureDomain])
+	assert.Equal(t, "sillyname", labels[apiv1.LabelHostname])
+	assert.Equal(t, "n1-standard-8", labels[apiv1.LabelInstanceType])
 	assert.Equal(t, cloudprovider.DefaultArch, labels[kubeletapis.LabelArch])
 	assert.Equal(t, cloudprovider.DefaultOS, labels[kubeletapis.LabelOS])
 }
@@ -30,7 +30,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	"k8s.io/klog"
-	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 

@@ -126,7 +125,7 @@ func (gke *GkeCloudProvider) GetAvailableMachineTypes() ([]string, error) {
 func (gke *GkeCloudProvider) NewNodeGroup(machineType string, labels map[string]string, systemLabels map[string]string,
 	taints []apiv1.Taint, extraResources map[string]resource.Quantity) (cloudprovider.NodeGroup, error) {
 	nodePoolName := fmt.Sprintf("%s-%s-%d", nodeAutoprovisioningPrefix, machineType, time.Now().Unix())
-	zone, found := systemLabels[kubeletapis.LabelZoneFailureDomain]
+	zone, found := systemLabels[apiv1.LabelZoneFailureDomain]
 	if !found {
 		return nil, cloudprovider.ErrIllegalConfiguration
 	}
@@ -29,7 +29,6 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/gce"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
-	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 
 	apiv1 "k8s.io/api/core/v1"
 

@@ -297,7 +296,7 @@ func TestMig(t *testing.T) {
 	// Test NewNodeGroup.
 	gkeManagerMock.On("GetProjectId").Return("project1").Once()
 	gkeManagerMock.On("GetMigTemplateNode", mock.AnythingOfType("*gke.GkeMig")).Return(&apiv1.Node{}, nil).Once()
-	systemLabels := map[string]string{kubeletapis.LabelZoneFailureDomain: "us-central1-b"}
+	systemLabels := map[string]string{apiv1.LabelZoneFailureDomain: "us-central1-b"}
 	nodeGroup, err := gke.NewNodeGroup("n1-standard-1", nil, systemLabels, nil, nil)
 	assert.NoError(t, err)
 	assert.NotNil(t, nodeGroup)

@@ -465,7 +464,7 @@ func TestNewNodeGroupForGpu(t *testing.T) {
 
 	systemLabels := map[string]string{
 		gpu.GPULabel:                 gpu.DefaultGPUType,
-		kubeletapis.LabelZoneFailureDomain: "us-west1-b",
+		apiv1.LabelZoneFailureDomain: "us-west1-b",
 	}
 	extraResources := map[string]resource.Quantity{
 		gpu.ResourceNvidiaGPU: resource.MustParse("1"),
@@ -148,10 +148,10 @@ func TestBuildLabelsForAutoprovisionedMigOK(t *testing.T) {
 
 	assert.Nil(t, err)
 	assert.Equal(t, "B", labels["A"])
-	assert.Equal(t, "us-central1", labels[kubeletapis.LabelZoneRegion])
-	assert.Equal(t, "us-central1-b", labels[kubeletapis.LabelZoneFailureDomain])
-	assert.Equal(t, "sillyname", labels[kubeletapis.LabelHostname])
-	assert.Equal(t, "n1-standard-8", labels[kubeletapis.LabelInstanceType])
+	assert.Equal(t, "us-central1", labels[apiv1.LabelZoneRegion])
+	assert.Equal(t, "us-central1-b", labels[apiv1.LabelZoneFailureDomain])
+	assert.Equal(t, "sillyname", labels[apiv1.LabelHostname])
+	assert.Equal(t, "n1-standard-8", labels[apiv1.LabelInstanceType])
 	assert.Equal(t, cloudprovider.DefaultArch, labels[kubeletapis.LabelArch])
 	assert.Equal(t, cloudprovider.DefaultOS, labels[kubeletapis.LabelOS])
 }
@@ -43,7 +43,6 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
-	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 	"k8s.io/klog"

@@ -432,7 +431,7 @@ func sanitizeTemplateNode(node *apiv1.Node, nodeGroup string) (*apiv1.Node, erro
 	nodeName := fmt.Sprintf("template-node-for-%s-%d", nodeGroup, rand.Int63())
 	newNode.Labels = make(map[string]string, len(node.Labels))
 	for k, v := range node.Labels {
-		if k != kubeletapis.LabelHostname {
+		if k != apiv1.LabelHostname {
 			newNode.Labels[k] = v
 		} else {
 			newNode.Labels[k] = nodeName
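A minimal, standalone sketch of the sanitization step above: every label is copied as-is except the hostname label, which must track the freshly generated template node name. Function and variable names here are illustrative, not taken from the autoscaler source:

```go
package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
)

// sanitizeLabels restates the loop from the hunk above as a standalone
// function: copy all labels, but rewrite the hostname label to the new name.
func sanitizeLabels(labels map[string]string, nodeName string) map[string]string {
	out := make(map[string]string, len(labels))
	for k, v := range labels {
		if k != apiv1.LabelHostname {
			out[k] = v
		} else {
			out[k] = nodeName // hostname must match the generated node name
		}
	}
	return out
}

func main() {
	fmt.Println(sanitizeLabels(map[string]string{
		apiv1.LabelHostname: "abc",
		"x":                 "y",
	}, "template-node-for-ng1-42"))
}
```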
@@ -37,7 +37,6 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	kube_record "k8s.io/client-go/tools/record"
 	"k8s.io/kubernetes/pkg/api/testapi"
-	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 
 	"github.com/stretchr/testify/assert"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"

@@ -628,15 +627,15 @@ func TestSanitizeNodeInfo(t *testing.T) {
 func TestSanitizeLabels(t *testing.T) {
 	oldNode := BuildTestNode("ng1-1", 1000, 1000)
 	oldNode.Labels = map[string]string{
-		kubeletapis.LabelHostname: "abc",
+		apiv1.LabelHostname: "abc",
 		"x":                 "y",
 	}
 	node, err := sanitizeTemplateNode(oldNode, "bzium")
 	assert.NoError(t, err)
-	assert.NotEqual(t, node.Labels[kubeletapis.LabelHostname], "abc")
+	assert.NotEqual(t, node.Labels[apiv1.LabelHostname], "abc")
 	assert.Equal(t, node.Labels["x"], "y")
 	assert.NotEqual(t, node.Name, oldNode.Name)
-	assert.Equal(t, node.Labels[kubeletapis.LabelHostname], node.Name)
+	assert.Equal(t, node.Labels[apiv1.LabelHostname], node.Name)
 }
 
 func TestSanitizeTaints(t *testing.T) {
@@ -30,7 +30,6 @@ import (
 	"time"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	apiserverconfig "k8s.io/apiserver/pkg/apis/config"
 	cloudBuilder "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/builder"
 	"k8s.io/autoscaler/cluster-autoscaler/config"
 	"k8s.io/autoscaler/cluster-autoscaler/core"

@@ -48,6 +47,7 @@ import (
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
 	kube_flag "k8s.io/component-base/cli/flag"
+	componentbaseconfig "k8s.io/component-base/config"
 	"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
 
 	"github.com/prometheus/client_golang/prometheus"
@@ -369,6 +369,7 @@ func main() {
 			*namespace,
 			"cluster-autoscaler",
 			kubeClient.CoreV1(),
+			kubeClient.CoordinationV1(),
 			resourcelock.ResourceLockConfig{
 				Identity:      id,
 				EventRecorder: kube_util.CreateEventRecorder(kubeClient),
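The lock construction above now passes the CoordinationV1 client alongside CoreV1, matching the extra parameter that client-go's `resourcelock.New` gained when Lease-backed locks were introduced. A hedged sketch of such a call under that assumed signature; the lock type, namespace, and wrapper function here are illustrative, not quoted from this diff:

```go
package sketch

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	kube_record "k8s.io/client-go/tools/record"
)

// newLock sketches the updated constructor: client-go now takes a
// CoordinationV1 interface in addition to CoreV1 so the lock can be backed
// by coordination.k8s.io Lease objects. Lock type and namespace below are
// illustrative assumptions.
func newLock(kubeClient kubernetes.Interface, id string, recorder kube_record.EventRecorder) (resourcelock.Interface, error) {
	return resourcelock.New(
		resourcelock.LeasesResourceLock, // assumed constant for Lease-backed locks
		"kube-system",                   // namespace (illustrative)
		"cluster-autoscaler",            // lock name, as in the hunk above
		kubeClient.CoreV1(),
		kubeClient.CoordinationV1(),
		resourcelock.ResourceLockConfig{
			Identity:      id,
			EventRecorder: recorder,
		},
	)
}
```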
@@ -397,8 +398,8 @@ func main() {
 	}
 }
 
-func defaultLeaderElectionConfiguration() apiserverconfig.LeaderElectionConfiguration {
-	return apiserverconfig.LeaderElectionConfiguration{
+func defaultLeaderElectionConfiguration() componentbaseconfig.LeaderElectionConfiguration {
+	return componentbaseconfig.LeaderElectionConfiguration{
 		LeaderElect:   false,
 		LeaseDuration: metav1.Duration{Duration: defaultLeaseDuration},
 		RenewDeadline: metav1.Duration{Duration: defaultRenewDeadline},
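This is the change the commit title refers to: `LeaderElectionConfiguration` now comes from `k8s.io/component-base/config` rather than `k8s.io/apiserver/pkg/apis/config`. A sketch of the destination type's assumed shape in that era; only the first three fields are actually visible in this diff, the rest are recalled:

```go
// Assumed shape of LeaderElectionConfiguration in k8s.io/component-base/config
// around Kubernetes 1.14. LeaderElect, LeaseDuration and RenewDeadline appear
// in the hunk above; RetryPeriod and ResourceLock are recalled fields.
package config

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

type LeaderElectionConfiguration struct {
	LeaderElect   bool            // whether to run leader election at all
	LeaseDuration metav1.Duration // how long a lease is valid
	RenewDeadline metav1.Duration // how long the leader keeps renewing
	RetryPeriod   metav1.Duration // wait between lock acquisition attempts
	ResourceLock  string          // lock object type, e.g. "leases"
}
```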
@@ -21,7 +21,6 @@ import (
 
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 

@@ -94,9 +93,9 @@ func IsNodeInfoSimilar(n1, n2 *schedulernodeinfo.NodeInfo) bool {
 	}
 
 	ignoredLabels := map[string]bool{
-		kubeletapis.LabelHostname:             true,
-		kubeletapis.LabelZoneFailureDomain:    true,
-		kubeletapis.LabelZoneRegion:           true,
+		apiv1.LabelHostname:                   true,
+		apiv1.LabelZoneFailureDomain:          true,
+		apiv1.LabelZoneRegion:                 true,
 		"beta.kubernetes.io/fluentd-ds-ready": true, // this is internal label used for determining if fluentd should be installed as deamon set. Used for migration 1.8 to 1.9.
 	}
 
| 
						 | 
				
			
			@ -23,7 +23,6 @@ import (
 | 
			
		|||
	"k8s.io/apimachinery/pkg/api/resource"
 | 
			
		||||
	"k8s.io/autoscaler/cluster-autoscaler/utils/gpu"
 | 
			
		||||
	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
 | 
			
		||||
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 | 
			
		||||
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 | 
			
		||||
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
| 
						 | 
				
			
			@ -112,13 +111,13 @@ func TestNodesSimilarVariousLabels(t *testing.T) {
 | 
			
		|||
	checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, true)
 | 
			
		||||
 | 
			
		||||
	// Different hostname labels shouldn't matter
 | 
			
		||||
	n1.ObjectMeta.Labels[kubeletapis.LabelHostname] = "node1"
 | 
			
		||||
	n2.ObjectMeta.Labels[kubeletapis.LabelHostname] = "node2"
 | 
			
		||||
	n1.ObjectMeta.Labels[apiv1.LabelHostname] = "node1"
 | 
			
		||||
	n2.ObjectMeta.Labels[apiv1.LabelHostname] = "node2"
 | 
			
		||||
	checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, true)
 | 
			
		||||
 | 
			
		||||
	// Different zone shouldn't matter either
 | 
			
		||||
	n1.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = "mars-olympus-mons1-b"
 | 
			
		||||
	n2.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain] = "us-houston1-a"
 | 
			
		||||
	n1.ObjectMeta.Labels[apiv1.LabelZoneFailureDomain] = "mars-olympus-mons1-b"
 | 
			
		||||
	n2.ObjectMeta.Labels[apiv1.LabelZoneFailureDomain] = "us-houston1-a"
 | 
			
		||||
	checkNodesSimilar(t, n1, n2, IsNodeInfoSimilar, true)
 | 
			
		||||
 | 
			
		||||
	// Different beta.kubernetes.io/fluentd-ds-ready should not matter
 | 
			
		||||
| 
						 | 
				
			
			
 | 
			
		|||
| 
						 | 
				
			
@@ -29,11 +29,11 @@ import (
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	scheduler_util "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler"
 	"k8s.io/autoscaler/cluster-autoscaler/utils/tpu"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 
 	apiv1 "k8s.io/api/core/v1"
 	policyv1 "k8s.io/api/policy/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 	"k8s.io/klog"

@@ -207,7 +207,7 @@ func findPlaceFor(removedNode string, pods []*apiv1.Pod, nodes []*apiv1.Node, no
 
 	loggingQuota := glogx.PodsLoggingQuota()
 
-	tryNodeForPod := func(nodename string, pod *apiv1.Pod, predicateMeta algorithm.PredicateMetadata) bool {
+	tryNodeForPod := func(nodename string, pod *apiv1.Pod, predicateMeta predicates.PredicateMetadata) bool {
 		nodeInfo, found := newNodeInfos[nodename]
 		if found {
 			if nodeInfo.Node() == nil {
@@ -24,10 +24,9 @@ import (
 	kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
 	informers "k8s.io/client-go/informers"
 	kube_client "k8s.io/client-go/kubernetes"
-	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	"k8s.io/kubernetes/pkg/scheduler/factory"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 
 	// We need to import provider to initialize default scheduler.
 	"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"

@@ -43,13 +42,13 @@ const (
 
 type predicateInfo struct {
 	name      string
-	predicate algorithm.FitPredicate
+	predicate predicates.FitPredicate
 }
 
 // PredicateChecker checks whether all required predicates pass for given Pod and Node.
 type PredicateChecker struct {
 	predicates                []predicateInfo
-	predicateMetadataProducer algorithm.PredicateMetadataProducer
+	predicateMetadataProducer predicates.PredicateMetadataProducer
 	enableAffinityPredicate   bool
 }
 
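The `predicateInfo` and `PredicateChecker` fields above now reference the `predicates` package directly; the types moved from `pkg/scheduler/algorithm` without changing shape. A sketch of those signatures as recalled from Kubernetes of this era, with stub interfaces standing in for the real scheduler types so the snippet is self-contained:

```go
// Sketch of the moved scheduler types (recalled, not quoted from this diff).
// PredicateMetadata and PredicateFailureReason are stubbed here; the real
// definitions live in k8s.io/kubernetes/pkg/scheduler/algorithm/predicates.
package predicates

import (
	v1 "k8s.io/api/core/v1"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// PredicateMetadata carries precomputed state shared across predicate calls.
type PredicateMetadata interface{}

// PredicateFailureReason explains why a predicate rejected a node.
type PredicateFailureReason interface {
	GetReason() string
}

// FitPredicate decides whether a pod fits on a node.
type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)

// PredicateMetadataProducer builds PredicateMetadata for a pod against a
// snapshot of all nodes.
type PredicateMetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) PredicateMetadata
```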
@@ -132,13 +131,13 @@ func NewPredicateChecker(kubeClient kube_client.Interface, stop <-chan struct{})
 	}, nil
 }
 
-func isNodeReadyAndSchedulablePredicate(pod *apiv1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool,
-	[]algorithm.PredicateFailureReason, error) {
+func isNodeReadyAndSchedulablePredicate(pod *apiv1.Pod, meta predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool,
+	[]predicates.PredicateFailureReason, error) {
 	ready := kube_util.IsNodeReadyAndSchedulable(nodeInfo.Node())
 	if !ready {
-		return false, []algorithm.PredicateFailureReason{predicates.NewFailureReason("node is unready")}, nil
+		return false, []predicates.PredicateFailureReason{predicates.NewFailureReason("node is unready")}, nil
 	}
-	return true, []algorithm.PredicateFailureReason{}, nil
+	return true, []predicates.PredicateFailureReason{}, nil
 }
 
 // NewTestPredicateChecker builds test version of PredicateChecker.
@@ -148,7 +147,7 @@ func NewTestPredicateChecker() *PredicateChecker {
 			{name: "default", predicate: predicates.GeneralPredicates},
 			{name: "ready", predicate: isNodeReadyAndSchedulablePredicate},
 		},
-		predicateMetadataProducer: func(_ *apiv1.Pod, _ map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
+		predicateMetadataProducer: func(_ *apiv1.Pod, _ map[string]*schedulernodeinfo.NodeInfo) predicates.PredicateMetadata {
 			return nil
 		},
 	}
@@ -172,7 +171,7 @@ func (p *PredicateChecker) IsAffinityPredicateEnabled() bool {
 // improve the performance of running predicates, especially MatchInterPodAffinity predicate. However, calculating
 // predicateMetadata is also quite expensive, so it's not always the best option to run this method.
 // Please refer to https://github.com/kubernetes/autoscaler/issues/257 for more details.
-func (p *PredicateChecker) GetPredicateMetadata(pod *apiv1.Pod, nodeInfos map[string]*schedulernodeinfo.NodeInfo) algorithm.PredicateMetadata {
+func (p *PredicateChecker) GetPredicateMetadata(pod *apiv1.Pod, nodeInfos map[string]*schedulernodeinfo.NodeInfo) predicates.PredicateMetadata {
 	// Skip precomputation if affinity predicate is disabled - it's not worth it performance-wise.
 	if !p.enableAffinityPredicate {
 		return nil
@@ -197,7 +196,7 @@ func (p *PredicateChecker) FitsAny(pod *apiv1.Pod, nodeInfos map[string]*schedul
 // PredicateError implements error, preserving the original error information from scheduler predicate.
 type PredicateError struct {
 	predicateName  string
-	failureReasons []algorithm.PredicateFailureReason
+	failureReasons []predicates.PredicateFailureReason
 	err            error
 
 	reasons []string

@@ -231,7 +230,7 @@ func (pe *PredicateError) VerboseError() string {
 }
 
 // NewPredicateError creates a new predicate error from error and reasons.
-func NewPredicateError(name string, err error, reasons []string, originalReasons []algorithm.PredicateFailureReason) *PredicateError {
+func NewPredicateError(name string, err error, reasons []string, originalReasons []predicates.PredicateFailureReason) *PredicateError {
 	return &PredicateError{
 		predicateName:  name,
 		err:            err,

@@ -253,7 +252,7 @@ func (pe *PredicateError) Reasons() []string {
 }
 
 // OriginalReasons returns original failure reasons from failed predicate as a slice of PredicateFailureReason.
-func (pe *PredicateError) OriginalReasons() []algorithm.PredicateFailureReason {
+func (pe *PredicateError) OriginalReasons() []predicates.PredicateFailureReason {
 	return pe.failureReasons
 }
 

@@ -268,7 +267,7 @@ func (pe *PredicateError) PredicateName() string {
 // it was calculated using NodeInfo map representing different cluster state and the
 // performance gains of CheckPredicates won't always offset the cost of GetPredicateMetadata.
 // Alternatively you can pass nil as predicateMetadata.
-func (p *PredicateChecker) CheckPredicates(pod *apiv1.Pod, predicateMetadata algorithm.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) *PredicateError {
+func (p *PredicateChecker) CheckPredicates(pod *apiv1.Pod, predicateMetadata predicates.PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) *PredicateError {
 	for _, predInfo := range p.predicates {
 		// Skip affinity predicate if it has been disabled.
 		if !p.enableAffinityPredicate && predInfo.name == affinityPredicateName {