mirror of https://github.com/containers/podman.git
api: show the memory limit specified for the container

Use the memory limit specified for the container instead of reading it from the cgroup. Reading the limit from the cgroup is not reliable: the container could have been moved to a different cgroup, and in general the OCI runtime might create a sub-cgroup (as crun does).

Closes: https://github.com/containers/podman/issues/14676

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
commit 1f539a2641
parent fe8e536328
@@ -132,6 +132,12 @@ streamLabel: // A label to flatten the scope
 			InstanceID: "",
 		}
 
+		cfg := ctnr.Config()
+		memoryLimit := cgroupStat.Memory.Usage.Limit
+		if cfg.Spec.Linux != nil && cfg.Spec.Linux.Resources != nil && cfg.Spec.Linux.Resources.Memory != nil && *cfg.Spec.Linux.Resources.Memory.Limit > 0 {
+			memoryLimit = uint64(*cfg.Spec.Linux.Resources.Memory.Limit)
+		}
+
 		systemUsage, _ := cgroups.GetSystemCPUUsage()
 		s := StatsJSON{
 			Stats: Stats{
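The guard above nil-checks Spec.Linux, Resources, and Memory, but then dereferences Memory.Limit without checking it for nil, so it assumes the runtime spec always sets Limit whenever Memory is present. A minimal, nil-safe sketch of the same fallback logic, written as a hypothetical standalone helper against the opencontainers runtime-spec types (the function name and example values are illustrative, not part of this commit):

package main

import (
	"fmt"

	spec "github.com/opencontainers/runtime-spec/specs-go"
)

// memoryLimitFromSpec (hypothetical helper) returns the memory limit recorded
// in the container's OCI spec, falling back to the value read from the cgroup
// when the spec does not set one.
func memoryLimitFromSpec(s *spec.Spec, cgroupLimit uint64) uint64 {
	if s != nil && s.Linux != nil && s.Linux.Resources != nil &&
		s.Linux.Resources.Memory != nil && s.Linux.Resources.Memory.Limit != nil &&
		*s.Linux.Resources.Memory.Limit > 0 {
		return uint64(*s.Linux.Resources.Memory.Limit)
	}
	return cgroupLimit
}

func main() {
	limit := int64(512 * 1024 * 1024) // what `podman run -m 512m` records in the spec
	s := &spec.Spec{Linux: &spec.Linux{Resources: &spec.LinuxResources{
		Memory: &spec.LinuxMemory{Limit: &limit},
	}}}
	fmt.Println(memoryLimitFromSpec(s, 0)) // prints 536870912
}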
@@ -173,7 +179,7 @@ streamLabel: // A label to flatten the scope
 				MaxUsage:          cgroupStat.Memory.Usage.Limit,
 				Stats:             nil,
 				Failcnt:           0,
-				Limit:             cgroupStat.Memory.Usage.Limit,
+				Limit:             memoryLimit,
 				Commit:            0,
 				CommitPeak:        0,
 				PrivateWorkingSet: 0,
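Note that only the Limit field switches to the spec'd value here; MaxUsage a few lines above is still populated from cgroupStat.Memory.Usage.Limit, so it keeps whatever value the cgroup reports.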
@@ -95,6 +95,17 @@ fi
 
 t DELETE libpod/containers/$cid 200 .[0].Id=$cid
 
+# Issue #14676: make sure the stats show the memory limit specified for the container
+if root; then
+    CTRNAME=ctr-with-limit
+    podman run --name $CTRNAME -d -m 512m -v /tmp:/tmp $IMAGE top
+
+    t GET libpod/containers/$CTRNAME/stats?stream=false 200 \
+      .memory_stats.limit=536870912
+
+    podman rm -f $CTRNAME
+fi
+
 # Issue #6799: it should be possible to start a container, even w/o args.
 t POST libpod/containers/create?name=test_noargs Image=${IMAGE} 201 \
   .Id~[0-9a-f]\\{64\\}
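The expected value follows from the flag: -m 512m is 512 * 1024 * 1024 = 536870912 bytes. The `if root` guard runs the check only as root, presumably because rootless setups cannot always apply memory limits. For the same check outside the test harness, here is a hedged sketch that queries the endpoint used above from Go over podman's API socket (the socket path and the unversioned route are assumptions about the local setup, not something this commit defines):

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	// Dial podman's API over its UNIX socket; the host in the URL is a dummy.
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return net.Dial("unix", "/run/podman/podman.sock") // assumed root socket path
		},
	}}

	resp, err := client.Get("http://d/libpod/containers/ctr-with-limit/stats?stream=false")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the field the test above asserts on.
	var stats struct {
		MemoryStats struct {
			Limit uint64 `json:"limit"`
		} `json:"memory_stats"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		panic(err)
	}
	fmt.Println(stats.MemoryStats.Limit) // 536870912 for a container started with -m 512m
}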