Add spread strategy and make it the default

Signed-off-by: Victor Vieux <vieux@docker.com>
Victor Vieux 2015-03-06 15:43:04 -08:00
parent e3e7ddb616
commit 2f19f5899a
8 changed files with 270 additions and 74 deletions

View File

@@ -81,8 +81,8 @@ var (
 	}
 	flStrategy = cli.StringFlag{
 		Name:  "strategy",
-		Usage: "placement strategy to use [binpacking, random]",
-		Value: "binpacking",
+		Usage: "placement strategy to use [spread, binpack, random]",
+		Value: "spread",
 	}
 	// hack for go vet

View File

@@ -0,0 +1,27 @@
package strategy

import (
	"sort"

	"github.com/docker/swarm/cluster"
	"github.com/samalba/dockerclient"
)

type BinpackPlacementStrategy struct {
}

func (p *BinpackPlacementStrategy) Initialize() error {
	return nil
}

func (p *BinpackPlacementStrategy) PlaceContainer(config *dockerclient.ContainerConfig, nodes []cluster.Node) (cluster.Node, error) {
	weightedNodes, err := weighNodes(config, nodes)
	if err != nil {
		return nil, err
	}

	// sort by highest weight
	sort.Sort(sort.Reverse(weightedNodes))

	return weightedNodes[0].Node, nil
}
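
A note on the ordering: binpack reverses the ascending weight sort, so the most loaded node that can still fit the container comes first, and containers get packed tightly before spilling onto the next node. A minimal runnable sketch of just that ordering, using simplified stand-in types rather than swarm's weightedNodeList:

package main

import (
	"fmt"
	"sort"
)

// fakeNode stands in for swarm's weightedNode; weight is the
// cpuScore + memoryScore value computed by weighNodes
type fakeNode struct {
	id     string
	weight int64
}

type byWeight []fakeNode

func (l byWeight) Len() int           { return len(l) }
func (l byWeight) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
func (l byWeight) Less(i, j int) bool { return l[i].weight < l[j].weight }

func main() {
	nodes := byWeight{
		{"node-0", 150}, // already fairly loaded
		{"node-1", 50},  // mostly empty
	}

	// binpack: reverse the ascending sort so the most loaded
	// node that still fits comes first
	sort.Sort(sort.Reverse(nodes))
	fmt.Println("binpack picks", nodes[0].id) // binpack picks node-0
}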

View File

@@ -29,11 +29,11 @@ func createContainer(ID string, config *dockerclient.ContainerConfig) *cluster.C
 }

 func TestPlaceContainerMemory(t *testing.T) {
-	s := &BinPackingPlacementStrategy{}
+	s := &BinpackPlacementStrategy{}
 	nodes := []cluster.Node{}
 	for i := 0; i < 2; i++ {
-		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 2, 1))
+		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 2, 0))
 	}

 	// add 1 container 1G
@@ -44,23 +44,23 @@ func TestPlaceContainerMemory(t *testing.T) {
 	assert.Equal(t, node1.UsedMemory(), 1024*1024*1024)

 	// add another container 1G
-	config = createConfig(1, 1)
+	config = createConfig(1, 0)
 	node2, err := s.PlaceContainer(config, nodes)
 	assert.NoError(t, err)
 	assert.NoError(t, AddContainer(node2, createContainer("c2", config)))
 	assert.Equal(t, node2.UsedMemory(), int64(2*1024*1024*1024))

 	// check that both containers ended on the same node
-	assert.Equal(t, node1.ID(), node2.ID(), "")
+	assert.Equal(t, node1.ID(), node2.ID())
 	assert.Equal(t, len(node1.Containers()), len(node2.Containers()), "")
 }

 func TestPlaceContainerCPU(t *testing.T) {
-	s := &BinPackingPlacementStrategy{}
+	s := &BinpackPlacementStrategy{}
 	nodes := []cluster.Node{}
 	for i := 0; i < 2; i++ {
-		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 1, 2))
+		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 0, 2))
 	}

 	// add 1 container 1CPU
@@ -78,12 +78,12 @@ func TestPlaceContainerCPU(t *testing.T) {
 	assert.Equal(t, node2.UsedCpus(), 2)

 	// check that both containers ended on the same node
-	assert.Equal(t, node1.ID, node2.ID, "")
+	assert.Equal(t, node1.ID(), node2.ID())
 	assert.Equal(t, len(node1.Containers()), len(node2.Containers()), "")
 }

 func TestPlaceContainerHuge(t *testing.T) {
-	s := &BinPackingPlacementStrategy{}
+	s := &BinpackPlacementStrategy{}
 	nodes := []cluster.Node{}
 	for i := 0; i < 100; i++ {
@@ -147,7 +147,7 @@ func TestPlaceContainerOvercommit(t *testing.T) {

 // The demo
 func TestPlaceContainerDemo(t *testing.T) {
-	s := &BinPackingPlacementStrategy{}
+	s := &BinpackPlacementStrategy{}
 	nodes := []cluster.Node{}
 	for i := 0; i < 3; i++ {
@@ -230,7 +230,7 @@ func TestPlaceContainerDemo(t *testing.T) {
 }

 func TestComplexPlacement(t *testing.T) {
-	s := &BinPackingPlacementStrategy{}
+	s := &BinpackPlacementStrategy{}
 	nodes := []cluster.Node{}
 	for i := 0; i < 2; i++ {

View File

@@ -1,58 +0,0 @@
package strategy

import (
	"errors"
	"sort"

	"github.com/docker/swarm/cluster"
	"github.com/samalba/dockerclient"
)

var (
	ErrNoResourcesAvailable = errors.New("no resources available to schedule container")
)

type BinPackingPlacementStrategy struct{}

func (p *BinPackingPlacementStrategy) Initialize() error {
	return nil
}

func (p *BinPackingPlacementStrategy) PlaceContainer(config *dockerclient.ContainerConfig, nodes []cluster.Node) (cluster.Node, error) {
	weightedNodes := weightedNodeList{}
	for _, node := range nodes {
		nodeMemory := node.TotalMemory()
		nodeCpus := node.TotalCpus()

		// Skip nodes that are smaller than the requested resources.
		if nodeMemory < int64(config.Memory) || nodeCpus < config.CpuShares {
			continue
		}

		var (
			cpuScore    int64 = 100
			memoryScore int64 = 100
		)

		if config.CpuShares > 0 {
			cpuScore = (node.UsedCpus() + config.CpuShares) * 100 / nodeCpus
		}
		if config.Memory > 0 {
			memoryScore = (node.UsedMemory() + config.Memory) * 100 / nodeMemory
		}

		if cpuScore <= 100 && memoryScore <= 100 {
			weightedNodes = append(weightedNodes, &weightedNode{Node: node, Weight: cpuScore + memoryScore})
		}
	}

	if len(weightedNodes) == 0 {
		return nil, ErrNoResourcesAvailable
	}

	// sort by highest weight
	sort.Sort(sort.Reverse(weightedNodes))

	return weightedNodes[0].Node, nil
}

View File

@@ -0,0 +1,27 @@
package strategy

import (
	"sort"

	"github.com/docker/swarm/cluster"
	"github.com/samalba/dockerclient"
)

type SpreadPlacementStrategy struct {
}

func (p *SpreadPlacementStrategy) Initialize() error {
	return nil
}

func (p *SpreadPlacementStrategy) PlaceContainer(config *dockerclient.ContainerConfig, nodes []cluster.Node) (cluster.Node, error) {
	weightedNodes, err := weighNodes(config, nodes)
	if err != nil {
		return nil, err
	}

	// sort by lowest weight, so the least loaded node comes first
	sort.Sort(weightedNodes)

	return weightedNodes[0].Node, nil
}
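
Spread is the mirror image of binpack: identical weights, but sorted ascending, so the least loaded node comes first and containers are distributed evenly across the cluster. The same stand-in sketch as above with the sort direction flipped:

package main

import (
	"fmt"
	"sort"
)

// fakeNode stands in for swarm's weightedNode, as in the binpack sketch
type fakeNode struct {
	id     string
	weight int64
}

type byWeight []fakeNode

func (l byWeight) Len() int           { return len(l) }
func (l byWeight) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
func (l byWeight) Less(i, j int) bool { return l[i].weight < l[j].weight }

func main() {
	nodes := byWeight{{"node-0", 150}, {"node-1", 50}}

	// spread: plain ascending sort, the least loaded node comes first
	sort.Sort(nodes)
	fmt.Println("spread picks", nodes[0].id) // spread picks node-1
}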

View File

@@ -0,0 +1,158 @@
package strategy

import (
	"fmt"
	"testing"

	"github.com/docker/swarm/cluster"
	"github.com/stretchr/testify/assert"
)

func TestSpreadPlaceContainerMemory(t *testing.T) {
	s := &SpreadPlacementStrategy{}

	nodes := []cluster.Node{}
	for i := 0; i < 2; i++ {
		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 2, 0))
	}

	// add 1 container 1G
	config := createConfig(1, 0)
	node1, err := s.PlaceContainer(config, nodes)
	assert.NoError(t, err)
	assert.NoError(t, AddContainer(node1, createContainer("c1", config)))
	assert.Equal(t, node1.UsedMemory(), 1024*1024*1024)

	// add another container 1G
	config = createConfig(1, 0)
	node2, err := s.PlaceContainer(config, nodes)
	assert.NoError(t, err)
	assert.NoError(t, AddContainer(node2, createContainer("c2", config)))
	assert.Equal(t, node2.UsedMemory(), int64(1024*1024*1024))

	// check that the containers ended up on different nodes
	assert.NotEqual(t, node1.ID(), node2.ID())
	assert.Equal(t, len(node1.Containers()), len(node2.Containers()), "")
}

func TestSpreadPlaceContainerCPU(t *testing.T) {
	s := &SpreadPlacementStrategy{}

	nodes := []cluster.Node{}
	for i := 0; i < 2; i++ {
		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 0, 2))
	}

	// add 1 container 1CPU
	config := createConfig(0, 1)
	node1, err := s.PlaceContainer(config, nodes)
	assert.NoError(t, err)
	assert.NoError(t, AddContainer(node1, createContainer("c1", config)))
	assert.Equal(t, node1.UsedCpus(), 1)

	// add another container 1CPU
	config = createConfig(0, 1)
	node2, err := s.PlaceContainer(config, nodes)
	assert.NoError(t, err)
	assert.NoError(t, AddContainer(node2, createContainer("c2", config)))
	assert.Equal(t, node2.UsedCpus(), 1)

	// check that the containers ended up on different nodes
	assert.NotEqual(t, node1.ID(), node2.ID())
	assert.Equal(t, len(node1.Containers()), len(node2.Containers()), "")
}

func TestSpreadPlaceContainerHuge(t *testing.T) {
	s := &SpreadPlacementStrategy{}

	nodes := []cluster.Node{}
	for i := 0; i < 100; i++ {
		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 1, 1))
	}

	// add 100 containers 1CPU
	for i := 0; i < 100; i++ {
		node, err := s.PlaceContainer(createConfig(0, 1), nodes)
		assert.NoError(t, err)
		assert.NoError(t, AddContainer(node, createContainer(fmt.Sprintf("c%d", i), createConfig(0, 1))))
	}

	// try to add another container 1CPU
	_, err := s.PlaceContainer(createConfig(0, 1), nodes)
	assert.Error(t, err)

	// add 100 containers 1G
	for i := 100; i < 200; i++ {
		node, err := s.PlaceContainer(createConfig(1, 0), nodes)
		assert.NoError(t, err)
		assert.NoError(t, AddContainer(node, createContainer(fmt.Sprintf("c%d", i), createConfig(1, 0))))
	}

	// try to add another container 1G
	_, err = s.PlaceContainer(createConfig(1, 0), nodes)
	assert.Error(t, err)
}

func TestSpreadPlaceContainerOvercommit(t *testing.T) {
	s := &SpreadPlacementStrategy{}

	nodes := []cluster.Node{createNode("node-1", 100, 1)}

	config := createConfig(0, 0)

	// Below limit should still work.
	config.Memory = 90 * 1024 * 1024 * 1024
	node, err := s.PlaceContainer(config, nodes)
	assert.NoError(t, err)
	assert.Equal(t, node, nodes[0])

	// At memory limit should still work.
	config.Memory = 100 * 1024 * 1024 * 1024
	node, err = s.PlaceContainer(config, nodes)
	assert.NoError(t, err)
	assert.Equal(t, node, nodes[0])

	// Up to 105% it should still work.
	config.Memory = 105 * 1024 * 1024 * 1024
	node, err = s.PlaceContainer(config, nodes)
	assert.NoError(t, err)
	assert.Equal(t, node, nodes[0])

	// Above it should return an error.
	config.Memory = 106 * 1024 * 1024 * 1024
	node, err = s.PlaceContainer(config, nodes)
	assert.Error(t, err)
}

func TestSpreadComplexPlacement(t *testing.T) {
	s := &SpreadPlacementStrategy{}

	nodes := []cluster.Node{}
	for i := 0; i < 2; i++ {
		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 4, 4))
	}

	// add one container 2G
	config := createConfig(2, 0)
	node1, err := s.PlaceContainer(config, nodes)
	assert.NoError(t, err)
	assert.NoError(t, AddContainer(node1, createContainer("c1", config)))

	// add one container 3G
	config = createConfig(3, 0)
	node2, err := s.PlaceContainer(config, nodes)
	assert.NoError(t, err)
	assert.NoError(t, AddContainer(node2, createContainer("c2", config)))

	// check that they end up on separate nodes
	assert.NotEqual(t, node1.ID(), node2.ID())

	// add one container 1G
	config = createConfig(1, 0)
	node3, err := s.PlaceContainer(config, nodes)
	assert.NoError(t, err)
	assert.NoError(t, AddContainer(node3, createContainer("c3", config)))

	// check that it ends up on the same node as the 2G container
	assert.Equal(t, node1.ID(), node3.ID())
}
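
The final assertion follows directly from the scoring in weighNodes (added in the last hunk of this commit): no CPU is requested, so cpuScore stays at its default of 100 on both nodes and only the memory score differs. Checking the numbers:

package main

import "fmt"

const g = int64(1024 * 1024 * 1024)

// memoryScore as computed in weighNodes
func memoryScore(used, requested, total int64) int64 {
	return (used + requested) * 100 / total
}

func main() {
	// state when c3 (1G) is placed: node-0 holds c1 (2G), node-1 holds c2 (3G)
	fmt.Println(memoryScore(2*g, 1*g, 4*g)) // 75  -> node-0, total weight 175
	fmt.Println(memoryScore(3*g, 1*g, 4*g)) // 100 -> node-1, total weight 200
	// spread sorts ascending, so c3 joins the 2G container on node-0
}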

View File

@@ -18,11 +18,14 @@ type PlacementStrategy interface {
 var (
 	strategies map[string]PlacementStrategy

 	ErrNotSupported = errors.New("strategy not supported")
+	ErrNoResourcesAvailable = errors.New("no resources available to schedule container")
 )

 func init() {
 	strategies = map[string]PlacementStrategy{
-		"binpacking": &BinPackingPlacementStrategy{},
+		"binpacking": &BinpackPlacementStrategy{}, //compat
+		"binpack":    &BinpackPlacementStrategy{},
+		"spread":     &SpreadPlacementStrategy{},
 		"random":     &RandomPlacementStrategy{},
 	}
 }
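
Nothing in this diff shows the map being consumed, but presumably the --strategy flag value is resolved against it by name elsewhere in the package. A hypothetical lookup helper, shown only to illustrate how strategies and ErrNotSupported fit together (not part of this commit):

// hypothetical: resolves a strategy by name from the registry above
func New(name string) (PlacementStrategy, error) {
	if strategy, exists := strategies[name]; exists {
		return strategy, strategy.Initialize()
	}
	return nil, ErrNotSupported
}

With the new default flag value, such a lookup would return the spread strategy, while the "binpacking" compat alias keeps existing deployments working.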

View File

@@ -1,6 +1,9 @@
 package strategy

-import "github.com/docker/swarm/cluster"
+import (
+	"github.com/docker/swarm/cluster"
+	"github.com/samalba/dockerclient"
+)

 // WeightedNode represents a node in the cluster with a given weight, typically used for sorting
 // purposes.
@@ -28,3 +31,39 @@ func (n weightedNodeList) Less(i, j int) bool {
 	return ip.Weight < jp.Weight
 }
+
+func weighNodes(config *dockerclient.ContainerConfig, nodes []cluster.Node) (weightedNodeList, error) {
+	weightedNodes := weightedNodeList{}
+
+	for _, node := range nodes {
+		nodeMemory := node.TotalMemory()
+		nodeCpus := node.TotalCpus()
+
+		// Skip nodes that are smaller than the requested resources.
+		if nodeMemory < int64(config.Memory) || nodeCpus < config.CpuShares {
+			continue
+		}
+
+		var (
+			cpuScore    int64 = 100
+			memoryScore int64 = 100
+		)
+
+		if config.CpuShares > 0 {
+			cpuScore = (node.UsedCpus() + config.CpuShares) * 100 / nodeCpus
+		}
+		if config.Memory > 0 {
+			memoryScore = (node.UsedMemory() + config.Memory) * 100 / nodeMemory
+		}
+
+		if cpuScore <= 100 && memoryScore <= 100 {
+			weightedNodes = append(weightedNodes, &weightedNode{Node: node, Weight: cpuScore + memoryScore})
+		}
+	}
+
+	if len(weightedNodes) == 0 {
+		return nil, ErrNoResourcesAvailable
+	}
+
+	return weightedNodes, nil
+}