diff --git a/flags.go b/flags.go
index 4a4f971f0f..1a327aee61 100644
--- a/flags.go
+++ b/flags.go
@@ -81,8 +81,8 @@ var (
 	}
 	flStrategy = cli.StringFlag{
 		Name:  "strategy",
-		Usage: "placement strategy to use [binpacking, random]",
-		Value: "binpacking",
+		Usage: "placement strategy to use [spread, binpack, random]",
+		Value: "spread",
 	}
 
 	// hack for go vet
diff --git a/scheduler/strategy/binpack.go b/scheduler/strategy/binpack.go
new file mode 100644
index 0000000000..558074b5c0
--- /dev/null
+++ b/scheduler/strategy/binpack.go
@@ -0,0 +1,27 @@
+package strategy
+
+import (
+	"sort"
+
+	"github.com/docker/swarm/cluster"
+	"github.com/samalba/dockerclient"
+)
+
+type BinpackPlacementStrategy struct {
+}
+
+func (p *BinpackPlacementStrategy) Initialize() error {
+	return nil
+}
+
+func (p *BinpackPlacementStrategy) PlaceContainer(config *dockerclient.ContainerConfig, nodes []cluster.Node) (cluster.Node, error) {
+	weightedNodes, err := weighNodes(config, nodes)
+	if err != nil {
+		return nil, err
+	}
+
+	// sort by highest weight
+	sort.Sort(sort.Reverse(weightedNodes))
+
+	return weightedNodes[0].Node, nil
+}
diff --git a/scheduler/strategy/binpacking_test.go b/scheduler/strategy/binpack_test.go
similarity index 95%
rename from scheduler/strategy/binpacking_test.go
rename to scheduler/strategy/binpack_test.go
index d1f82edf5e..c346200d0a 100644
--- a/scheduler/strategy/binpacking_test.go
+++ b/scheduler/strategy/binpack_test.go
@@ -29,11 +29,11 @@ func createContainer(ID string, config *dockerclient.ContainerConfig) *cluster.C
 }
 
 func TestPlaceContainerMemory(t *testing.T) {
-	s := &BinPackingPlacementStrategy{}
+	s := &BinpackPlacementStrategy{}
 
 	nodes := []cluster.Node{}
 	for i := 0; i < 2; i++ {
-		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 2, 1))
+		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 2, 0))
 	}
 
 	// add 1 container 1G
@@ -44,23 +44,23 @@ func TestPlaceContainerMemory(t *testing.T) {
 	assert.Equal(t, node1.UsedMemory(), 1024*1024*1024)
 
 	// add another container 1G
-	config = createConfig(1, 1)
+	config = createConfig(1, 0)
 	node2, err := s.PlaceContainer(config, nodes)
 	assert.NoError(t, err)
 	assert.NoError(t, AddContainer(node2, createContainer("c2", config)))
 	assert.Equal(t, node2.UsedMemory(), int64(2*1024*1024*1024))
 
 	// check that both containers ended on the same node
-	assert.Equal(t, node1.ID(), node2.ID(), "")
+	assert.Equal(t, node1.ID(), node2.ID())
 	assert.Equal(t, len(node1.Containers()), len(node2.Containers()), "")
 }
 
 func TestPlaceContainerCPU(t *testing.T) {
-	s := &BinPackingPlacementStrategy{}
+	s := &BinpackPlacementStrategy{}
 
 	nodes := []cluster.Node{}
 	for i := 0; i < 2; i++ {
-		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 1, 2))
+		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 0, 2))
 	}
 
 	// add 1 container 1CPU
@@ -78,12 +78,12 @@ func TestPlaceContainerCPU(t *testing.T) {
 	assert.Equal(t, node2.UsedCpus(), 2)
 
 	// check that both containers ended on the same node
-	assert.Equal(t, node1.ID, node2.ID, "")
+	assert.Equal(t, node1.ID(), node2.ID())
 	assert.Equal(t, len(node1.Containers()), len(node2.Containers()), "")
 }
 
 func TestPlaceContainerHuge(t *testing.T) {
-	s := &BinPackingPlacementStrategy{}
+	s := &BinpackPlacementStrategy{}
 
 	nodes := []cluster.Node{}
 	for i := 0; i < 100; i++ {
@@ -147,7 +147,7 @@ func TestPlaceContainerOvercommit(t *testing.T) {
 
 // The demo
 func TestPlaceContainerDemo(t *testing.T) {
-	s := &BinPackingPlacementStrategy{}
+	s := &BinpackPlacementStrategy{}
 
 	nodes := []cluster.Node{}
 	for i := 0; i < 3; i++ {
@@ -230,7 +230,7 @@ func TestPlaceContainerDemo(t *testing.T) {
 }
 
 func TestComplexPlacement(t *testing.T) {
-	s := &BinPackingPlacementStrategy{}
+	s := &BinpackPlacementStrategy{}
 
 	nodes := []cluster.Node{}
 	for i := 0; i < 2; i++ {
diff --git a/scheduler/strategy/binpacking.go b/scheduler/strategy/binpacking.go
deleted file mode 100644
index d22028fb32..0000000000
--- a/scheduler/strategy/binpacking.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package strategy
-
-import (
-	"errors"
-	"sort"
-
-	"github.com/docker/swarm/cluster"
-	"github.com/samalba/dockerclient"
-)
-
-var (
-	ErrNoResourcesAvailable = errors.New("no resources available to schedule container")
-)
-
-type BinPackingPlacementStrategy struct{}
-
-func (p *BinPackingPlacementStrategy) Initialize() error {
-	return nil
-}
-
-func (p *BinPackingPlacementStrategy) PlaceContainer(config *dockerclient.ContainerConfig, nodes []cluster.Node) (cluster.Node, error) {
-	weightedNodes := weightedNodeList{}
-
-	for _, node := range nodes {
-		nodeMemory := node.TotalMemory()
-		nodeCpus := node.TotalCpus()
-
-		// Skip nodes that are smaller than the requested resources.
-		if nodeMemory < int64(config.Memory) || nodeCpus < config.CpuShares {
-			continue
-		}
-
-		var (
-			cpuScore    int64 = 100
-			memoryScore int64 = 100
-		)
-
-		if config.CpuShares > 0 {
-			cpuScore = (node.UsedCpus() + config.CpuShares) * 100 / nodeCpus
-		}
-		if config.Memory > 0 {
-			memoryScore = (node.UsedMemory() + config.Memory) * 100 / nodeMemory
-		}
-
-		if cpuScore <= 100 && memoryScore <= 100 {
-			weightedNodes = append(weightedNodes, &weightedNode{Node: node, Weight: cpuScore + memoryScore})
-		}
-	}
-
-	if len(weightedNodes) == 0 {
-		return nil, ErrNoResourcesAvailable
-	}
-
-	// sort by highest weight
-	sort.Sort(sort.Reverse(weightedNodes))
-
-	return weightedNodes[0].Node, nil
-}
diff --git a/scheduler/strategy/spread.go b/scheduler/strategy/spread.go
new file mode 100644
index 0000000000..2c0d622f95
--- /dev/null
+++ b/scheduler/strategy/spread.go
@@ -0,0 +1,27 @@
+package strategy
+
+import (
+	"sort"
+
+	"github.com/docker/swarm/cluster"
+	"github.com/samalba/dockerclient"
+)
+
+type SpreadPlacementStrategy struct {
+}
+
+func (p *SpreadPlacementStrategy) Initialize() error {
+	return nil
+}
+
+func (p *SpreadPlacementStrategy) PlaceContainer(config *dockerclient.ContainerConfig, nodes []cluster.Node) (cluster.Node, error) {
+	weightedNodes, err := weighNodes(config, nodes)
+	if err != nil {
+		return nil, err
+	}
+
+	// sort by lowest weight
+	sort.Sort(weightedNodes)
+
+	return weightedNodes[0].Node, nil
+}
diff --git a/scheduler/strategy/spread_test.go b/scheduler/strategy/spread_test.go
new file mode 100644
index 0000000000..e89f199506
--- /dev/null
+++ b/scheduler/strategy/spread_test.go
@@ -0,0 +1,158 @@
+package strategy
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/docker/swarm/cluster"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestSpreadPlaceContainerMemory(t *testing.T) {
+	s := &SpreadPlacementStrategy{}
+
+	nodes := []cluster.Node{}
+	for i := 0; i < 2; i++ {
+		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 2, 0))
+	}
+
+	// add 1 container 1G
+	config := createConfig(1, 0)
+	node1, err := s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.NoError(t, AddContainer(node1, createContainer("c1", config)))
+	assert.Equal(t, node1.UsedMemory(), 1024*1024*1024)
+
+	// add another container 1G
+	config = createConfig(1, 0)
+	node2, err := s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.NoError(t, AddContainer(node2, createContainer("c2", config)))
+	assert.Equal(t, node2.UsedMemory(), int64(1024*1024*1024))
+
+	// check that both containers ended on different node
+	assert.NotEqual(t, node1.ID(), node2.ID())
+	assert.Equal(t, len(node1.Containers()), len(node2.Containers()), "")
+}
+
+func TestSpreadPlaceContainerCPU(t *testing.T) {
+	s := &SpreadPlacementStrategy{}
+
+	nodes := []cluster.Node{}
+	for i := 0; i < 2; i++ {
+		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 0, 2))
+	}
+
+	// add 1 container 1CPU
+	config := createConfig(0, 1)
+	node1, err := s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.NoError(t, AddContainer(node1, createContainer("c1", config)))
+	assert.Equal(t, node1.UsedCpus(), 1)
+
+	// add another container 1CPU
+	config = createConfig(0, 1)
+	node2, err := s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.NoError(t, AddContainer(node2, createContainer("c2", config)))
+	assert.Equal(t, node2.UsedCpus(), 1)
+
+	// check that both containers ended on different node
+	assert.NotEqual(t, node1.ID(), node2.ID())
+	assert.Equal(t, len(node1.Containers()), len(node2.Containers()), "")
+}
+
+func TestSpreadPlaceContainerHuge(t *testing.T) {
+	s := &SpreadPlacementStrategy{}
+
+	nodes := []cluster.Node{}
+	for i := 0; i < 100; i++ {
+		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 1, 1))
+	}
+
+	// add 100 container 1CPU
+	for i := 0; i < 100; i++ {
+		node, err := s.PlaceContainer(createConfig(0, 1), nodes)
+		assert.NoError(t, err)
+		assert.NoError(t, AddContainer(node, createContainer(fmt.Sprintf("c%d", i), createConfig(0, 1))))
+	}
+
+	// try to add another container 1CPU
+	_, err := s.PlaceContainer(createConfig(0, 1), nodes)
+	assert.Error(t, err)
+
+	// add 100 container 1G
+	for i := 100; i < 200; i++ {
+		node, err := s.PlaceContainer(createConfig(1, 0), nodes)
+		assert.NoError(t, err)
+		assert.NoError(t, AddContainer(node, createContainer(fmt.Sprintf("c%d", i), createConfig(1, 0))))
+	}
+
+	// try to add another container 1G
+	_, err = s.PlaceContainer(createConfig(1, 0), nodes)
+	assert.Error(t, err)
+}
+
+func TestSpreadPlaceContainerOvercommit(t *testing.T) {
+	s := &SpreadPlacementStrategy{}
+
+	nodes := []cluster.Node{createNode("node-1", 100, 1)}
+
+	config := createConfig(0, 0)
+
+	// Below limit should still work.
+	config.Memory = 90 * 1024 * 1024 * 1024
+	node, err := s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.Equal(t, node, nodes[0])
+
+	// At memory limit should still work.
+	config.Memory = 100 * 1024 * 1024 * 1024
+	node, err = s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.Equal(t, node, nodes[0])
+
+	// Up to 105% it should still work.
+	config.Memory = 105 * 1024 * 1024 * 1024
+	node, err = s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.Equal(t, node, nodes[0])
+
+	// Above it should return an error.
+	config.Memory = 106 * 1024 * 1024 * 1024
+	node, err = s.PlaceContainer(config, nodes)
+	assert.Error(t, err)
+}
+
+func TestSpreadComplexPlacement(t *testing.T) {
+	s := &SpreadPlacementStrategy{}
+
+	nodes := []cluster.Node{}
+	for i := 0; i < 2; i++ {
+		nodes = append(nodes, createNode(fmt.Sprintf("node-%d", i), 4, 4))
+	}
+
+	// add one container 2G
+	config := createConfig(2, 0)
+	node1, err := s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.NoError(t, AddContainer(node1, createContainer("c1", config)))
+
+	// add one container 3G
+	config = createConfig(3, 0)
+	node2, err := s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.NoError(t, AddContainer(node2, createContainer("c2", config)))
+
+	// check that they end up on separate nodes
+	assert.NotEqual(t, node1.ID(), node2.ID())
+
+	// add one container 1G
+	config = createConfig(1, 0)
+	node3, err := s.PlaceContainer(config, nodes)
+	assert.NoError(t, err)
+	assert.NoError(t, AddContainer(node3, createContainer("c3", config)))
+
+	// check that it ends up on the same node as the 2G
+	assert.Equal(t, node1.ID(), node3.ID())
+}
diff --git a/scheduler/strategy/strategy.go b/scheduler/strategy/strategy.go
index faa31ee683..b39eede669 100644
--- a/scheduler/strategy/strategy.go
+++ b/scheduler/strategy/strategy.go
@@ -16,13 +16,16 @@ type PlacementStrategy interface {
 }
 
 var (
-	strategies      map[string]PlacementStrategy
-	ErrNotSupported = errors.New("strategy not supported")
+	strategies              map[string]PlacementStrategy
+	ErrNotSupported         = errors.New("strategy not supported")
+	ErrNoResourcesAvailable = errors.New("no resources available to schedule container")
 )
 
 func init() {
 	strategies = map[string]PlacementStrategy{
-		"binpacking": &BinPackingPlacementStrategy{},
+		"binpacking": &BinpackPlacementStrategy{}, //compat
+		"binpack":    &BinpackPlacementStrategy{},
+		"spread":     &SpreadPlacementStrategy{},
 		"random":     &RandomPlacementStrategy{},
 	}
 }
diff --git a/scheduler/strategy/weighted_node.go b/scheduler/strategy/weighted_node.go
index 74abec165d..276a99ec19 100644
--- a/scheduler/strategy/weighted_node.go
+++ b/scheduler/strategy/weighted_node.go
@@ -1,6 +1,9 @@
 package strategy
 
-import "github.com/docker/swarm/cluster"
+import (
+	"github.com/docker/swarm/cluster"
+	"github.com/samalba/dockerclient"
+)
 
 // WeightedNode represents a node in the cluster with a given weight, typically used for sorting
 // purposes.
@@ -28,3 +31,39 @@ func (n weightedNodeList) Less(i, j int) bool {
 
 	return ip.Weight < jp.Weight
 }
+
+func weighNodes(config *dockerclient.ContainerConfig, nodes []cluster.Node) (weightedNodeList, error) {
+	weightedNodes := weightedNodeList{}
+
+	for _, node := range nodes {
+		nodeMemory := node.TotalMemory()
+		nodeCpus := node.TotalCpus()
+
+		// Skip nodes that are smaller than the requested resources.
+		if nodeMemory < int64(config.Memory) || nodeCpus < config.CpuShares {
+			continue
+		}
+
+		var (
+			cpuScore    int64 = 100
+			memoryScore int64 = 100
+		)
+
+		if config.CpuShares > 0 {
+			cpuScore = (node.UsedCpus() + config.CpuShares) * 100 / nodeCpus
+		}
+		if config.Memory > 0 {
+			memoryScore = (node.UsedMemory() + config.Memory) * 100 / nodeMemory
+		}
+
+		if cpuScore <= 100 && memoryScore <= 100 {
+			weightedNodes = append(weightedNodes, &weightedNode{Node: node, Weight: cpuScore + memoryScore})
+		}
+	}
+
+	if len(weightedNodes) == 0 {
+		return nil, ErrNoResourcesAvailable
+	}
+
+	return weightedNodes, nil
+}
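For review context, here is a minimal, runnable sketch (not part of the diff) of what the refactor boils down to: both strategies now share the `weighNodes` scoring, and differ only in the sort direction applied to the resulting weights. The `score` helper mirrors the formula in `weighNodes`; the `weightedNode`/`weightedNodeList` types are simplified stand-ins for the ones in `weighted_node.go`, and the node names and resource figures are made up for illustration.

```go
package main

import (
	"fmt"
	"sort"
)

// Simplified stand-ins for weightedNode/weightedNodeList in weighted_node.go.
type weightedNode struct {
	ID     string
	Weight int64
}

type weightedNodeList []*weightedNode

func (n weightedNodeList) Len() int           { return len(n) }
func (n weightedNodeList) Swap(i, j int)      { n[i], n[j] = n[j], n[i] }
func (n weightedNodeList) Less(i, j int) bool { return n[i].Weight < n[j].Weight }

// score mirrors the weighNodes formula: projected CPU and memory utilization
// percentages after placement, summed into a single weight (lower = emptier).
func score(usedCpus, totalCpus, usedMem, totalMem, reqCpus, reqMem int64) int64 {
	cpuScore, memoryScore := int64(100), int64(100)
	if reqCpus > 0 {
		cpuScore = (usedCpus + reqCpus) * 100 / totalCpus
	}
	if reqMem > 0 {
		memoryScore = (usedMem + reqMem) * 100 / totalMem
	}
	return cpuScore + memoryScore
}

func main() {
	const G = int64(1 << 30)

	// Two hypothetical 4-CPU/4G nodes; the request is 1 CPU and 1G.
	nodes := weightedNodeList{
		{ID: "node-0", Weight: score(1, 4, 1*G, 4*G, 1, 1*G)}, // 50 + 50 = 100
		{ID: "node-1", Weight: score(3, 4, 3*G, 4*G, 1, 1*G)}, // 100 + 100 = 200
	}

	// spread: ascending sort, so the least loaded node wins.
	sort.Sort(nodes)
	fmt.Println("spread picks:", nodes[0].ID) // node-0

	// binpack: descending sort, so the fullest node that still fits wins.
	sort.Sort(sort.Reverse(nodes))
	fmt.Println("binpack picks:", nodes[0].ID) // node-1
}
```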
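Two notes on the surrounding changes: the `flags.go` hunk switches the `--strategy` default from `binpacking` to `spread`, and the registry in `strategy.go` keeps `"binpacking"` as a backward-compatible alias for the renamed `binpack` strategy, so existing invocations passing the old name still resolve to the same implementation.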