Updating ContainerConfig to embed HostConfig and NetworkingConfig
Signed-off-by: Nishant Totla <nishanttotla@gmail.com>
parent ff0c79e08d
commit f5e578188f
@@ -16,10 +16,9 @@ const SwarmLabelNamespace = "com.docker.swarm"
 // ContainerConfig is exported
 // TODO store affinities and constraints in their own fields
 type ContainerConfig struct {
-	// dockerclient.ContainerConfig
 	container.Config
-	container.HostConfig
-	network.NetworkingConfig
+	HostConfig       container.HostConfig
+	NetworkingConfig network.NetworkingConfig
 }

 func parseEnv(e string) (bool, string, string) {
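The practical effect of the hunk above: container.Config stays embedded, so its fields remain promoted, while HostConfig and NetworkingConfig become named fields whose members must now be reached through the field name. A minimal sketch, using stand-in types rather than the real engine-api definitions:

package main

import "fmt"

// Stand-ins for engine-api's container.Config and container.HostConfig,
// reduced to the fields this commit touches.
type Config struct{ Image string }
type HostConfig struct{ Memory, CPUShares int64 }

type ContainerConfig struct {
	Config                      // still embedded: cfg.Image keeps working
	HostConfig       HostConfig // named: resources are now cfg.HostConfig.*
	NetworkingConfig struct{}   // placeholder for network.NetworkingConfig
}

func main() {
	cfg := ContainerConfig{
		Config:     Config{Image: "nginx"},
		HostConfig: HostConfig{Memory: 1 << 30, CPUShares: 512},
	}
	fmt.Println(cfg.Image)                // promoted from the embedded Config
	fmt.Println(cfg.HostConfig.CPUShares) // no longer promoted; the rest of the diff is this rename
}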
@@ -736,7 +736,7 @@ func (e *Engine) updateContainer(c types.Container, containers map[string]*Conta
 	}
 	container.Config = BuildContainerConfig(*info.Config, *info.HostConfig, networkingConfig)
 	// FIXME remove "duplicate" line and move this to cluster/config.go
-	container.Config.CPUShares = container.Config.CPUShares * int64(e.Cpus) / 1024.0
+	container.Config.HostConfig.CPUShares = container.Config.HostConfig.CPUShares * int64(e.Cpus) / 1024.0

 	// Save the entire inspect back into the container.
 	container.Info = info
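The rescale in updateContainer reads back the engine-native share value and converts it to swarm's scale; from the arithmetic (and its inverse in the Create hunk below), 1024 native shares appear to correspond to one full engine. A worked example with assumed numbers:

package main

import "fmt"

func main() {
	// Assumed: a 4-CPU engine and the 512 native shares that Create would
	// write for a 2-CPU request (see the Create hunk further down).
	var engineShares, engineCpus int64 = 512, 4
	swarmShares := engineShares * engineCpus / 1024
	fmt.Println(swarmShares) // 2 — back to the value the user asked for
}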
@@ -832,7 +832,7 @@ func (e *Engine) UsedMemory() int64 {
 	var r int64
 	e.RLock()
 	for _, c := range e.containers {
-		r += c.Config.Memory
+		r += c.Config.HostConfig.Memory
 	}
 	e.RUnlock()
 	return r
@@ -843,7 +843,7 @@ func (e *Engine) UsedCpus() int64 {
 	var r int64
 	e.RLock()
 	for _, c := range e.containers {
-		r += c.Config.CPUShares
+		r += c.Config.HostConfig.CPUShares
 	}
 	e.RUnlock()
 	return r
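UsedMemory and UsedCpus are the same loop over e.containers with the read moved to the named HostConfig field. A compressed sketch of that aggregation, with stand-in types and without the engine's RLock/RUnlock:

package main

import "fmt"

type HostConfig struct{ Memory, CPUShares int64 }
type ContainerConfig struct{ HostConfig HostConfig }
type Container struct{ Config *ContainerConfig }

func usedMemory(containers map[string]*Container) int64 {
	var r int64
	for _, c := range containers {
		r += c.Config.HostConfig.Memory // was c.Config.Memory before this commit
	}
	return r
}

func main() {
	cs := map[string]*Container{
		"a": {Config: &ContainerConfig{HostConfig: HostConfig{Memory: 1 << 29}}},
		"b": {Config: &ContainerConfig{HostConfig: HostConfig{Memory: 1 << 29}}},
	}
	fmt.Println(usedMemory(cs)) // 1073741824 — 1 GiB reserved in total
}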
@@ -869,12 +869,12 @@ func (e *Engine) Create(config *ContainerConfig, name string, pullImage bool, au
 	// Convert our internal ContainerConfig into something Docker will
 	// understand. Start by making a copy of the internal ContainerConfig as
 	// we don't want to mess with the original.
-	dockerConfig := config
+	dockerConfig := *config

 	// nb of CPUs -> real CpuShares

 	// FIXME remove "duplicate" lines and move this to cluster/config.go
-	dockerConfig.CPUShares = int64(math.Ceil(float64(config.CPUShares*1024) / float64(e.Cpus)))
+	dockerConfig.HostConfig.CPUShares = int64(math.Ceil(float64(config.HostConfig.CPUShares*1024) / float64(e.Cpus)))

 	createResp, err = e.apiClient.ContainerCreate(context.TODO(), &dockerConfig.Config, &dockerConfig.HostConfig, &dockerConfig.NetworkingConfig, name)
 	e.CheckConnectionErr(err)
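Two things change in Create. The Ceil conversion is the inverse of the rescale in updateContainer: ceil(2*1024/4) = 512 native shares for a 2-CPU request on a 4-CPU engine. Separately, dockerConfig := config copied only the pointer, so the rescaled shares were written back into the caller's config; := *config copies the struct. A minimal sketch of the difference, with a cut-down ContainerConfig:

package main

import "fmt"

type ContainerConfig struct{ CPUShares int64 }

func main() {
	config := &ContainerConfig{CPUShares: 2}

	alias := config    // old code: a second pointer to the same struct
	byValue := *config // new code: an independent copy

	alias.CPUShares = 512   // reaches the caller's config
	byValue.CPUShares = 256 // does not

	fmt.Println(config.CPUShares) // 512 — only the aliased write leaked out
}

Note that *config is a shallow copy; any maps or slices inside HostConfig would still be shared, which seems acceptable here since only scalar share values are rewritten.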
@@ -192,7 +192,7 @@ func (c *Cluster) StartContainer(container *cluster.Container, hostConfig *docke

 // CreateContainer for container creation in Mesos task
 func (c *Cluster) CreateContainer(config *cluster.ContainerConfig, name string, authConfig *types.AuthConfig) (*cluster.Container, error) {
-	if config.Memory == 0 && config.CPUShares == 0 {
+	if config.HostConfig.Memory == 0 && config.HostConfig.CPUShares == 0 {
 		return nil, errResourcesNeeded
 	}

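On Mesos, offers are matched against explicit reservations, so a config that asks for neither memory nor CPU is rejected up front — now reading both values from HostConfig. A minimal sketch of the guard; the error message and plain int64 parameters are stand-ins, not the real swarm API:

package main

import (
	"errors"
	"fmt"
)

// Stand-in for swarm's errResourcesNeeded; the real message may differ.
var errResourcesNeeded = errors.New("resource constraints are required by Mesos")

func checkResources(memory, cpuShares int64) error {
	if memory == 0 && cpuShares == 0 {
		return errResourcesNeeded
	}
	return nil
}

func main() {
	fmt.Println(checkResources(0, 0))       // rejected: nothing to match offers against
	fmt.Println(checkResources(1<<30, 512)) // <nil>
}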
@@ -135,11 +135,11 @@ func (t *Task) Build(slaveID string, offers map[string]*mesosproto.Offer) {
 		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
 	}

-	if cpus := t.config.CPUShares; cpus > 0 {
+	if cpus := t.config.HostConfig.CPUShares; cpus > 0 {
 		t.Resources = append(t.Resources, mesosutil.NewScalarResource("cpus", float64(cpus)))
 	}

-	if mem := t.config.Memory; mem > 0 {
+	if mem := t.config.HostConfig.Memory; mem > 0 {
 		t.Resources = append(t.Resources, mesosutil.NewScalarResource("mem", float64(mem/1024/1024)))
 	}

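Task.Build then turns those reservations into Mesos scalar resources: CPU shares as a float count, and memory converted from swarm's bytes to the MB scale Mesos expects, hence mem/1024/1024. A worked example with an assumed 512 MB reservation:

package main

import "fmt"

func main() {
	var mem int64 = 512 * 1024 * 1024       // reservation stored in bytes
	fmt.Println(float64(mem / 1024 / 1024)) // 512 — the scalar passed to NewScalarResource("mem", ...)
}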
@@ -55,8 +55,8 @@ func (n *Node) Container(IDOrName string) *cluster.Container {
 // AddContainer injects a container into the internal state.
 func (n *Node) AddContainer(container *cluster.Container) error {
 	if container.Config != nil {
-		memory := container.Config.Memory
-		cpus := container.Config.CPUShares
+		memory := container.Config.HostConfig.Memory
+		cpus := container.Config.HostConfig.CPUShares
 		if n.TotalMemory-memory < 0 || n.TotalCpus-cpus < 0 {
 			return errors.New("not enough resources")
 		}
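The scheduler node applies the same rename when admitting a container: the reservation is read from HostConfig and checked against the node's totals. A minimal sketch with a stand-in Node; the Used* bookkeeping after the check is an assumption based on surrounding code not shown in this hunk:

package main

import (
	"errors"
	"fmt"
)

type Node struct {
	TotalMemory, UsedMemory int64
	TotalCpus, UsedCpus     int64
}

func (n *Node) addContainer(memory, cpus int64) error {
	if n.TotalMemory-memory < 0 || n.TotalCpus-cpus < 0 {
		return errors.New("not enough resources")
	}
	n.UsedMemory += memory // assumed follow-up bookkeeping
	n.UsedCpus += cpus
	return nil
}

func main() {
	n := &Node{TotalMemory: 2 << 30, TotalCpus: 2}
	fmt.Println(n.addContainer(1<<30, 1)) // <nil>
	fmt.Println(n.addContainer(4<<30, 1)) // not enough resources
}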
@@ -44,7 +44,7 @@ func weighNodes(config *cluster.ContainerConfig, nodes []*node.Node, healthiness
 		nodeCpus := node.TotalCpus

 		// Skip nodes that are smaller than the requested resources.
-		if nodeMemory < int64(config.Memory) || nodeCpus < config.CPUShares {
+		if nodeMemory < int64(config.HostConfig.Memory) || nodeCpus < config.HostConfig.CPUShares {
 			continue
 		}

@@ -53,11 +53,11 @@ func weighNodes(config *cluster.ContainerConfig, nodes []*node.Node, healthiness
 			memoryScore int64 = 100
 		)

-		if config.CPUShares > 0 {
-			cpuScore = (node.UsedCpus + config.CPUShares) * 100 / nodeCpus
+		if config.HostConfig.CPUShares > 0 {
+			cpuScore = (node.UsedCpus + config.HostConfig.CPUShares) * 100 / nodeCpus
 		}
-		if config.Memory > 0 {
-			memoryScore = (node.UsedMemory + config.Memory) * 100 / nodeMemory
+		if config.HostConfig.Memory > 0 {
+			memoryScore = (node.UsedMemory + config.HostConfig.Memory) * 100 / nodeMemory
 		}

 		if cpuScore <= 100 && memoryScore <= 100 {
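weighNodes first skips nodes that cannot hold the request at all, then scores the rest by projected utilization: (used + requested) * 100 / total, per resource, keeping only nodes where both scores stay at or below 100. A worked example with assumed numbers:

package main

import "fmt"

func main() {
	// A node with 4 CPUs, 1 already reserved, weighing a 2-CPU request.
	var usedCpus, requested, nodeCpus int64 = 1, 2, 4
	cpuScore := (usedCpus + requested) * 100 / nodeCpus
	fmt.Println(cpuScore) // 75 — under 100, so the node stays a candidate
}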