diff --git a/cluster/config.go b/cluster/config.go
index 2cc384ade6..ba276d7f13 100644
--- a/cluster/config.go
+++ b/cluster/config.go
@@ -16,10 +16,9 @@ const SwarmLabelNamespace = "com.docker.swarm"
 // ContainerConfig is exported
 // TODO store affinities and constraints in their own fields
 type ContainerConfig struct {
-	// dockerclient.ContainerConfig
 	container.Config
-	container.HostConfig
-	network.NetworkingConfig
+	HostConfig       container.HostConfig
+	NetworkingConfig network.NetworkingConfig
 }
 
 func parseEnv(e string) (bool, string, string) {
diff --git a/cluster/engine.go b/cluster/engine.go
index 02619ebb32..20abb306d4 100644
--- a/cluster/engine.go
+++ b/cluster/engine.go
@@ -736,7 +736,7 @@ func (e *Engine) updateContainer(c types.Container, containers map[string]*Conta
 	}
 	container.Config = BuildContainerConfig(*info.Config, *info.HostConfig, networkingConfig)
 	// FIXME remove "duplicate" line and move this to cluster/config.go
-	container.Config.CPUShares = container.Config.CPUShares * int64(e.Cpus) / 1024.0
+	container.Config.HostConfig.CPUShares = container.Config.HostConfig.CPUShares * int64(e.Cpus) / 1024.0
 
 	// Save the entire inspect back into the container.
 	container.Info = info
@@ -832,7 +832,7 @@ func (e *Engine) UsedMemory() int64 {
 	var r int64
 	e.RLock()
 	for _, c := range e.containers {
-		r += c.Config.Memory
+		r += c.Config.HostConfig.Memory
 	}
 	e.RUnlock()
 	return r
@@ -843,7 +843,7 @@ func (e *Engine) UsedCpus() int64 {
 	var r int64
 	e.RLock()
 	for _, c := range e.containers {
-		r += c.Config.CPUShares
+		r += c.Config.HostConfig.CPUShares
 	}
 	e.RUnlock()
 	return r
@@ -869,12 +869,12 @@ func (e *Engine) Create(config *ContainerConfig, name string, pullImage bool, au
 	// Convert our internal ContainerConfig into something Docker will
 	// understand. Start by making a copy of the internal ContainerConfig as
 	// we don't want to mess with the original.
-	dockerConfig := config
+	dockerConfig := *config
 
 	// nb of CPUs -> real CpuShares
 	// FIXME remove "duplicate" lines and move this to cluster/config.go
-	dockerConfig.CPUShares = int64(math.Ceil(float64(config.CPUShares*1024) / float64(e.Cpus)))
+	dockerConfig.HostConfig.CPUShares = int64(math.Ceil(float64(config.HostConfig.CPUShares*1024) / float64(e.Cpus)))
 
 	createResp, err = e.apiClient.ContainerCreate(context.TODO(), &dockerConfig.Config, &dockerConfig.HostConfig, &dockerConfig.NetworkingConfig, name)
 	e.CheckConnectionErr(err)
diff --git a/cluster/mesos/cluster.go b/cluster/mesos/cluster.go
index 4e168f66ba..11222c6093 100644
--- a/cluster/mesos/cluster.go
+++ b/cluster/mesos/cluster.go
@@ -192,7 +192,7 @@ func (c *Cluster) StartContainer(container *cluster.Container, hostConfig *docke
 
 // CreateContainer for container creation in Mesos task
 func (c *Cluster) CreateContainer(config *cluster.ContainerConfig, name string, authConfig *types.AuthConfig) (*cluster.Container, error) {
-	if config.Memory == 0 && config.CPUShares == 0 {
+	if config.HostConfig.Memory == 0 && config.HostConfig.CPUShares == 0 {
 		return nil, errResourcesNeeded
 	}
 
diff --git a/cluster/mesos/task/task.go b/cluster/mesos/task/task.go
index 3131f3e2ce..2e695875ed 100644
--- a/cluster/mesos/task/task.go
+++ b/cluster/mesos/task/task.go
@@ -135,11 +135,11 @@ func (t *Task) Build(slaveID string, offers map[string]*mesosproto.Offer) {
 		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
 	}
 
-	if cpus := t.config.CPUShares; cpus > 0 {
+	if cpus := t.config.HostConfig.CPUShares; cpus > 0 {
 		t.Resources = append(t.Resources, mesosutil.NewScalarResource("cpus", float64(cpus)))
 	}
 
-	if mem := t.config.Memory; mem > 0 {
+	if mem := t.config.HostConfig.Memory; mem > 0 {
 		t.Resources = append(t.Resources, mesosutil.NewScalarResource("mem", float64(mem/1024/1024)))
 	}
 
diff --git a/scheduler/node/node.go b/scheduler/node/node.go
index 0fec4065b2..a592581fd1 100644
--- a/scheduler/node/node.go
+++ b/scheduler/node/node.go
@@ -55,8 +55,8 @@ func (n *Node) Container(IDOrName string) *cluster.Container {
 // AddContainer injects a container into the internal state.
 func (n *Node) AddContainer(container *cluster.Container) error {
 	if container.Config != nil {
-		memory := container.Config.Memory
-		cpus := container.Config.CPUShares
+		memory := container.Config.HostConfig.Memory
+		cpus := container.Config.HostConfig.CPUShares
 		if n.TotalMemory-memory < 0 || n.TotalCpus-cpus < 0 {
 			return errors.New("not enough resources")
 		}
diff --git a/scheduler/strategy/weighted_node.go b/scheduler/strategy/weighted_node.go
index 2bda25a5c4..53aa18cc02 100644
--- a/scheduler/strategy/weighted_node.go
+++ b/scheduler/strategy/weighted_node.go
@@ -44,7 +44,7 @@ func weighNodes(config *cluster.ContainerConfig, nodes []*node.Node, healthiness
 		nodeCpus := node.TotalCpus
 
 		// Skip nodes that are smaller than the requested resources.
-		if nodeMemory < int64(config.Memory) || nodeCpus < config.CPUShares {
+		if nodeMemory < int64(config.HostConfig.Memory) || nodeCpus < config.HostConfig.CPUShares {
 			continue
 		}
 
@@ -53,11 +53,11 @@ func weighNodes(config *cluster.ContainerConfig, nodes []*node.Node, healthiness
 			memoryScore int64 = 100
 		)
 
-		if config.CPUShares > 0 {
-			cpuScore = (node.UsedCpus + config.CPUShares) * 100 / nodeCpus
+		if config.HostConfig.CPUShares > 0 {
+			cpuScore = (node.UsedCpus + config.HostConfig.CPUShares) * 100 / nodeCpus
 		}
-		if config.Memory > 0 {
-			memoryScore = (node.UsedMemory + config.Memory) * 100 / nodeMemory
+		if config.HostConfig.Memory > 0 {
+			memoryScore = (node.UsedMemory + config.HostConfig.Memory) * 100 / nodeMemory
 		}
 
 		if cpuScore <= 100 && memoryScore <= 100 {
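Note for reviewers: below is a rough, self-contained sketch of the access pattern this diff introduces and of the CPU-share scaling done in Engine.Create. The stub types (ConfigStub, HostConfigStub, NetworkingConfigStub) and the scaleCPUShares helper are illustrative assumptions standing in for the real engine-api structs, not code from this repository.

package main

import (
	"fmt"
	"math"
)

// Minimal stand-ins for container.Config, container.HostConfig and
// network.NetworkingConfig; only the fields this diff touches.
type ConfigStub struct{ Image string }

type HostConfigStub struct {
	CPUShares int64 // treated by Swarm as "number of CPUs" before scaling
	Memory    int64 // bytes
}

type NetworkingConfigStub struct{}

// ContainerConfig mirrors the new shape: Config stays embedded, while
// HostConfig and NetworkingConfig become named fields, so resource
// fields are reached as cfg.HostConfig.CPUShares / cfg.HostConfig.Memory.
type ContainerConfig struct {
	ConfigStub
	HostConfig       HostConfigStub
	NetworkingConfig NetworkingConfigStub
}

// scaleCPUShares reproduces the arithmetic used in Engine.Create:
// requested CPUs are converted to real Docker CPU shares relative to
// the engine's total CPU count.
func scaleCPUShares(cfg ContainerConfig, engineCpus int64) int64 {
	return int64(math.Ceil(float64(cfg.HostConfig.CPUShares*1024) / float64(engineCpus)))
}

func main() {
	cfg := ContainerConfig{
		ConfigStub: ConfigStub{Image: "nginx"},
		HostConfig: HostConfigStub{CPUShares: 2, Memory: 512 * 1024 * 1024},
	}
	fmt.Println(cfg.HostConfig.Memory)  // 536870912
	fmt.Println(scaleCPUShares(cfg, 8)) // 256
}

The rest of the diff is the mechanical fallout of that struct change: every config.CPUShares and config.Memory read in the engine, the Mesos cluster/task code, and the scheduler now goes through config.HostConfig.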