From 8e3ded491663faa7f4d72e31b3d907d05e5c3ef2 Mon Sep 17 00:00:00 2001
From: Andrea Luzzardi
Date: Thu, 20 Nov 2014 16:17:55 -0800
Subject: [PATCH] Added resource overcommit support to the binpacking strategy.

Signed-off-by: Andrea Luzzardi
---
 scheduler/strategy/binpacking.go      | 11 ++++++---
 scheduler/strategy/binpacking_test.go | 33 +++++++++++++++++++++++++--
 2 files changed, 39 insertions(+), 5 deletions(-)

diff --git a/scheduler/strategy/binpacking.go b/scheduler/strategy/binpacking.go
index acb162ad43..6634341c44 100644
--- a/scheduler/strategy/binpacking.go
+++ b/scheduler/strategy/binpacking.go
@@ -13,14 +13,19 @@ var (
 )
 
 type BinPackingPlacementStrategy struct {
+	OvercommitRatio float64
 }
 
 func (p *BinPackingPlacementStrategy) PlaceContainer(config *dockerclient.ContainerConfig, nodes []*cluster.Node) (*cluster.Node, error) {
 	scores := scores{}
+	ratio := int64(p.OvercommitRatio * 100)
 
 	for _, node := range nodes {
+		nodeMemory := node.Memory + (node.Memory * ratio / 100)
+		nodeCpus := node.Cpus + (node.Cpus * ratio / 100)
+
 		// Skip nodes that are smaller than the requested resources.
-		if node.Memory < int64(config.Memory) || node.Cpus < config.CpuShares {
+		if nodeMemory < int64(config.Memory) || nodeCpus < config.CpuShares {
 			continue
 		}
 
@@ -30,10 +35,10 @@ func (p *BinPackingPlacementStrategy) PlaceContai
 		)
 
 		if config.CpuShares > 0 {
-			cpuScore = (node.ReservedCpus() + config.CpuShares) * 100 / node.Cpus
+			cpuScore = (node.ReservedCpus() + config.CpuShares) * 100 / nodeCpus
 		}
 		if config.Memory > 0 {
-			memoryScore = (node.ReservedMemory() + config.Memory) * 100 / node.Memory
+			memoryScore = (node.ReservedMemory() + config.Memory) * 100 / nodeMemory
 		}
 
 		var total = ((cpuScore + memoryScore) / 200) * 100
diff --git a/scheduler/strategy/binpacking_test.go b/scheduler/strategy/binpacking_test.go
index 9f60881573..413f962ffe 100644
--- a/scheduler/strategy/binpacking_test.go
+++ b/scheduler/strategy/binpacking_test.go
@@ -52,7 +52,6 @@ func TestPlaceContainerMemory(t *testing.T) {
 	// check that both containers ended on the same node
 	assert.Equal(t, node1.ID, node2.ID, "")
 	assert.Equal(t, len(node1.Containers()), len(node2.Containers()), "")
-
 }
 
 func TestPlaceContainerCPU(t *testing.T) {
@@ -82,7 +81,6 @@ func TestPlaceContainerCPU(t *testing.T) {
 	// check that both containers ended on the same node
 	assert.Equal(t, node1.ID, node2.ID, "")
 	assert.Equal(t, len(node1.Containers()), len(node2.Containers()), "")
-
 }
 
 func TestPlaceContainerHuge(t *testing.T) {
@@ -114,7 +112,38 @@ func TestPlaceContainerHuge(t *testing.T) {
 	// try to add another container 1G
 	_, err = s.PlaceContainer(createConfig(1, 0), nodes)
 	assert.Error(t, err)
+}
 
+func TestPlaceContainerOvercommit(t *testing.T) {
+	s := &BinPackingPlacementStrategy{OvercommitRatio: 0.05}
+
+	nodes := []*cluster.Node{createNode("node-1", 0, 1)}
+	nodes[0].Memory = 100
+
+	config := createConfig(0, 0)
+
+	// Below limit should still work.
+	config.Memory = 90
+	node, err := s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.Equal(t, node, nodes[0])
+
+	// At memory limit should still work.
+	config.Memory = 100
+	node, err = s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.Equal(t, node, nodes[0])
+
+	// Up to 105% it should still work.
+	config.Memory = 105
+	node, err = s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.Equal(t, node, nodes[0])
+
+	// Above it should return an error.
+	config.Memory = 106
+	node, err = s.PlaceContainer(config, nodes)
+	assert.Error(t, err)
 }
 
 // The demo
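
Note (commentary, not part of the patch): the overcommit math above converts the float ratio to a whole percentage before scaling, so the effective capacity is computed entirely in integer arithmetic. Below is a minimal standalone sketch of that calculation; the helper name `effectiveCapacity` is hypothetical and stands in for the inline expressions the patch applies to `node.Memory` and `node.Cpus`.

```go
package main

import "fmt"

// effectiveCapacity mirrors the patch's arithmetic: the overcommit ratio is
// truncated to a whole percentage, then applied to the raw capacity using
// integer math only.
func effectiveCapacity(capacity int64, overcommitRatio float64) int64 {
	ratio := int64(overcommitRatio * 100) // e.g. 0.05 -> 5
	return capacity + (capacity * ratio / 100)
}

func main() {
	// Matches TestPlaceContainerOvercommit: a node with 100 units of memory
	// and OvercommitRatio 0.05 accepts requests up to 105 and rejects 106.
	fmt.Println(effectiveCapacity(100, 0.05)) // 105
	fmt.Println(effectiveCapacity(100, 0))    // 100
}
```

One consequence of the integer conversion: ratios finer than 1% (e.g. 0.005) truncate to zero and have no effect, and the scaled term `capacity * ratio / 100` always rounds down.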