Use all resources to compute node reservation
commit 04a4810043
parent ececab153b
@@ -18,6 +18,7 @@ package simulator
 import (
     "fmt"
+    "math"

     kube_api "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/resource"
@@ -45,8 +46,7 @@ func FindNodeToRemove(nodes []*kube_api.Node, pods []*kube_api.Pod, client *kube
             continue
         }

-        // TODO: Use other resources as well.
-        reservation, err := calculateReservation(node, nodeInfo, kube_api.ResourceCPU)
+        reservation, err := calculateReservation(node, nodeInfo)

         if err != nil {
             glog.Warningf("Failed to calculate reservation for %s: %v", node.Name, err)
@@ -57,7 +57,8 @@ func FindNodeToRemove(nodes []*kube_api.Node, pods []*kube_api.Pod, client *kube
             glog.Infof("Node %s is not suitable for removal - reservation to big (%f)", node.Name, reservation)
             continue
         }
-        //Lets try to remove this one.
+
+        // Let's try to remove this one.
         glog.V(2).Infof("Considering %s for removal", node.Name)

         podsToRemoveList, _, _, err := cmd.GetPodsForDeletionOnNodeDrain(client, node.Name,
@@ -89,7 +90,19 @@ func FindNodeToRemove(nodes []*kube_api.Node, pods []*kube_api.Pod, client *kube
     return nil, nil
 }

-func calculateReservation(node *kube_api.Node, nodeInfo *schedulercache.NodeInfo, resourceName kube_api.ResourceName) (float64, error) {
+func calculateReservation(node *kube_api.Node, nodeInfo *schedulercache.NodeInfo) (float64, error) {
+    cpu, err := calculateReservationOfResource(node, nodeInfo, kube_api.ResourceCPU)
+    if err != nil {
+        return 0, err
+    }
+    mem, err := calculateReservationOfResource(node, nodeInfo, kube_api.ResourceMemory)
+    if err != nil {
+        return 0, err
+    }
+    return math.Max(cpu, mem), nil
+}
+
+func calculateReservationOfResource(node *kube_api.Node, nodeInfo *schedulercache.NodeInfo, resourceName kube_api.ResourceName) (float64, error) {
     nodeCapacity, found := node.Status.Capacity[resourceName]
     if !found {
         return 0, fmt.Errorf("Failed to get %v from %s", resourceName, node.Name)
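The hunk above ends before the rest of calculateReservationOfResource, so only the capacity lookup and its error path are visible. As a rough, self-contained sketch of the idea (simplified stand-ins, not the actual autoscaler code), the per-resource reservation is the fraction of the node's capacity requested by its pods, and calculateReservation keeps the larger of the CPU and memory fractions:

package main

import (
    "fmt"
    "math"
)

// reservationOfResource is a simplified stand-in for calculateReservationOfResource:
// the fraction of one resource's node capacity that the pods on the node request.
// Plain int64 values replace the Kubernetes Quantity types used in the real code.
func reservationOfResource(podRequests []int64, nodeCapacity int64) (float64, error) {
    if nodeCapacity <= 0 {
        // Mirrors the "Failed to get <resource> from <node>" error in the diff:
        // a node that does not declare capacity for a resource yields an error.
        return 0, fmt.Errorf("node reports no capacity for this resource")
    }
    var requested int64
    for _, r := range podRequests {
        requested += r
    }
    return float64(requested) / float64(nodeCapacity), nil
}

// reservation mirrors the new calculateReservation: compute the CPU and memory
// fractions separately and return the larger one.
func reservation(cpuRequests, memRequests []int64, cpuCapacity, memCapacity int64) (float64, error) {
    cpu, err := reservationOfResource(cpuRequests, cpuCapacity)
    if err != nil {
        return 0, err
    }
    mem, err := reservationOfResource(memRequests, memCapacity)
    if err != nil {
        return 0, err
    }
    return math.Max(cpu, mem), nil
}

func main() {
    // A node that declares no memory capacity fails, as node2 does in the test below.
    if _, err := reservation([]int64{100, 100}, []int64{200000, 200000}, 2000, 0); err != nil {
        fmt.Println("error:", err)
    }
}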
@@ -42,18 +42,31 @@ func TestReservation(t *testing.T) {
     nodeInfo := schedulercache.NewNodeInfo(pod, pod, pod2)

     node := &kube_api.Node{
         ObjectMeta: kube_api.ObjectMeta{
             Name: "node1",
         },
         Status: kube_api.NodeStatus{
             Capacity: kube_api.ResourceList{
                 kube_api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
+                kube_api.ResourceMemory: *resource.NewQuantity(2000000, resource.DecimalSI),
             },
         },
     }
+    reservation, err := calculateReservation(node, nodeInfo)
+    assert.NoError(t, err)
+    assert.InEpsilon(t, 2.0/10, reservation, 0.01)
+
+    node2 := &kube_api.Node{
+        ObjectMeta: kube_api.ObjectMeta{
+            Name: "node2",
+        },
+        Status: kube_api.NodeStatus{
+            Capacity: kube_api.ResourceList{
+                kube_api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
+            },
+        },
+    }
+
-    reservation, err := calculateReservation(node, nodeInfo, kube_api.ResourceCPU)
-    assert.NoError(t, err)
-    assert.InEpsilon(t, 1.0/10, reservation, 0.01)
-    _, err = calculateReservation(node, nodeInfo, kube_api.ResourceMemory)
+    _, err = calculateReservation(node2, nodeInfo)
     assert.Error(t, err)
 }
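A quick sanity check of the updated expectations in this test. The pods are created earlier in TestReservation, outside this hunk, so their exact requests are not visible here; the totals below (200m CPU, 400000 memory) are assumed purely because they reproduce the asserted ratios:

package main

import (
    "fmt"
    "math"
)

func main() {
    // node1 capacity from the test: 2000m CPU and 2000000 memory.
    cpu := 200.0 / 2000.0       // assumed CPU requests / CPU capacity = 0.1 (the old 1.0/10 expectation)
    mem := 400000.0 / 2000000.0 // assumed memory requests / memory capacity = 0.2
    fmt.Println(math.Max(cpu, mem)) // 0.2 — the new 2.0/10 expectation, since memory now dominates
    // node2 declares only CPU capacity, so looking up memory fails and
    // calculateReservation(node2, nodeInfo) is expected to return an error.
}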