Updating ContainerConfig to embed HostConfig and NetworkingConfig

Signed-off-by: Nishant Totla <nishanttotla@gmail.com>
This commit is contained in:
Nishant Totla 2016-04-06 15:11:05 -07:00
parent ff0c79e08d
commit f5e578188f
No known key found for this signature in database
GPG Key ID: 7EA5781C9B3D0C19
6 changed files with 17 additions and 18 deletions

View File

@@ -16,10 +16,9 @@ const SwarmLabelNamespace = "com.docker.swarm"
 // ContainerConfig is exported
 // TODO store affinities and constraints in their own fields
 type ContainerConfig struct {
-	// dockerclient.ContainerConfig
 	container.Config
-	container.HostConfig
+	HostConfig       container.HostConfig
-	network.NetworkingConfig
+	NetworkingConfig network.NetworkingConfig
 }
 func parseEnv(e string) (bool, string, string) {

View File

@@ -736,7 +736,7 @@ func (e *Engine) updateContainer(c types.Container, containers map[string]*Conta
 	}
 	container.Config = BuildContainerConfig(*info.Config, *info.HostConfig, networkingConfig)
 	// FIXME remove "duplicate" line and move this to cluster/config.go
-	container.Config.CPUShares = container.Config.CPUShares * int64(e.Cpus) / 1024.0
+	container.Config.HostConfig.CPUShares = container.Config.HostConfig.CPUShares * int64(e.Cpus) / 1024.0
 	// Save the entire inspect back into the container.
 	container.Info = info
@@ -832,7 +832,7 @@ func (e *Engine) UsedMemory() int64 {
 	var r int64
 	e.RLock()
 	for _, c := range e.containers {
-		r += c.Config.Memory
+		r += c.Config.HostConfig.Memory
 	}
 	e.RUnlock()
 	return r
@@ -843,7 +843,7 @@ func (e *Engine) UsedCpus() int64 {
 	var r int64
 	e.RLock()
 	for _, c := range e.containers {
-		r += c.Config.CPUShares
+		r += c.Config.HostConfig.CPUShares
 	}
 	e.RUnlock()
 	return r
@@ -869,12 +869,12 @@ func (e *Engine) Create(config *ContainerConfig, name string, pullImage bool, au
 	// Convert our internal ContainerConfig into something Docker will
 	// understand. Start by making a copy of the internal ContainerConfig as
 	// we don't want to mess with the original.
-	dockerConfig := config
+	dockerConfig := *config
 	// nb of CPUs -> real CpuShares
 	// FIXME remove "duplicate" lines and move this to cluster/config.go
-	dockerConfig.CPUShares = int64(math.Ceil(float64(config.CPUShares*1024) / float64(e.Cpus)))
+	dockerConfig.HostConfig.CPUShares = int64(math.Ceil(float64(config.HostConfig.CPUShares*1024) / float64(e.Cpus)))
 	createResp, err = e.apiClient.ContainerCreate(context.TODO(), &dockerConfig.Config, &dockerConfig.HostConfig, &dockerConfig.NetworkingConfig, name)
 	e.CheckConnectionErr(err)

View File

@@ -192,7 +192,7 @@ func (c *Cluster) StartContainer(container *cluster.Container, hostConfig *docke
 // CreateContainer for container creation in Mesos task
 func (c *Cluster) CreateContainer(config *cluster.ContainerConfig, name string, authConfig *types.AuthConfig) (*cluster.Container, error) {
-	if config.Memory == 0 && config.CPUShares == 0 {
+	if config.HostConfig.Memory == 0 && config.HostConfig.CPUShares == 0 {
 		return nil, errResourcesNeeded
 	}

View File

@@ -135,11 +135,11 @@ func (t *Task) Build(slaveID string, offers map[string]*mesosproto.Offer) {
 		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
 	}
-	if cpus := t.config.CPUShares; cpus > 0 {
+	if cpus := t.config.HostConfig.CPUShares; cpus > 0 {
 		t.Resources = append(t.Resources, mesosutil.NewScalarResource("cpus", float64(cpus)))
 	}
-	if mem := t.config.Memory; mem > 0 {
+	if mem := t.config.HostConfig.Memory; mem > 0 {
 		t.Resources = append(t.Resources, mesosutil.NewScalarResource("mem", float64(mem/1024/1024)))
 	}

View File

@@ -55,8 +55,8 @@ func (n *Node) Container(IDOrName string) *cluster.Container {
 // AddContainer injects a container into the internal state.
 func (n *Node) AddContainer(container *cluster.Container) error {
 	if container.Config != nil {
-		memory := container.Config.Memory
-		cpus := container.Config.CPUShares
+		memory := container.Config.HostConfig.Memory
+		cpus := container.Config.HostConfig.CPUShares
 		if n.TotalMemory-memory < 0 || n.TotalCpus-cpus < 0 {
 			return errors.New("not enough resources")
 		}

View File

@@ -44,7 +44,7 @@ func weighNodes(config *cluster.ContainerConfig, nodes []*node.Node, healthiness
 		nodeCpus := node.TotalCpus
 		// Skip nodes that are smaller than the requested resources.
-		if nodeMemory < int64(config.Memory) || nodeCpus < config.CPUShares {
+		if nodeMemory < int64(config.HostConfig.Memory) || nodeCpus < config.HostConfig.CPUShares {
 			continue
 		}
@@ -53,11 +53,11 @@ func weighNodes(config *cluster.ContainerConfig, nodes []*node.Node, healthiness
 			memoryScore int64 = 100
 		)
-		if config.CPUShares > 0 {
+		if config.HostConfig.CPUShares > 0 {
-			cpuScore = (node.UsedCpus + config.CPUShares) * 100 / nodeCpus
+			cpuScore = (node.UsedCpus + config.HostConfig.CPUShares) * 100 / nodeCpus
 		}
-		if config.Memory > 0 {
+		if config.HostConfig.Memory > 0 {
-			memoryScore = (node.UsedMemory + config.Memory) * 100 / nodeMemory
+			memoryScore = (node.UsedMemory + config.HostConfig.Memory) * 100 / nodeMemory
 		}
 		if cpuScore <= 100 && memoryScore <= 100 {