Merge pull request #7974 from andylibrian/kube-generate-support-resource-limits-7855
Add support for resource cpu limit to generate kube
commit 2694fb85f4
@@ -307,18 +307,40 @@ func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
 	kubeContainer.StdinOnce = false
 	kubeContainer.TTY = c.config.Spec.Process.Terminal
 
-	// TODO add CPU limit support.
 	if c.config.Spec.Linux != nil &&
-		c.config.Spec.Linux.Resources != nil &&
-		c.config.Spec.Linux.Resources.Memory != nil &&
-		c.config.Spec.Linux.Resources.Memory.Limit != nil {
-		if kubeContainer.Resources.Limits == nil {
-			kubeContainer.Resources.Limits = v1.ResourceList{}
-		}
+		c.config.Spec.Linux.Resources != nil {
+		if c.config.Spec.Linux.Resources.Memory != nil &&
+			c.config.Spec.Linux.Resources.Memory.Limit != nil {
+			if kubeContainer.Resources.Limits == nil {
+				kubeContainer.Resources.Limits = v1.ResourceList{}
+			}
 
-		qty := kubeContainer.Resources.Limits.Memory()
-		qty.Set(*c.config.Spec.Linux.Resources.Memory.Limit)
-		kubeContainer.Resources.Limits[v1.ResourceMemory] = *qty
+			qty := kubeContainer.Resources.Limits.Memory()
+			qty.Set(*c.config.Spec.Linux.Resources.Memory.Limit)
+			kubeContainer.Resources.Limits[v1.ResourceMemory] = *qty
+		}
+
+		if c.config.Spec.Linux.Resources.CPU != nil &&
+			c.config.Spec.Linux.Resources.CPU.Quota != nil &&
+			c.config.Spec.Linux.Resources.CPU.Period != nil {
+			quota := *c.config.Spec.Linux.Resources.CPU.Quota
+			period := *c.config.Spec.Linux.Resources.CPU.Period
+
+			if quota > 0 && period > 0 {
+				cpuLimitMilli := int64(1000 * float64(quota) / float64(period))
+
+				// Kubernetes: precision finer than 1m is not allowed
+				if cpuLimitMilli >= 1 {
+					if kubeContainer.Resources.Limits == nil {
+						kubeContainer.Resources.Limits = v1.ResourceList{}
+					}
+
+					qty := kubeContainer.Resources.Limits.Cpu()
+					qty.SetMilli(cpuLimitMilli)
+					kubeContainer.Resources.Limits[v1.ResourceCPU] = *qty
+				}
+			}
+		}
 	}
 
 	return kubeContainer, kubeVolumes, nil
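For readers who want to check the arithmetic: the new code converts a CFS quota/period pair into Kubernetes millicpu as 1000 * quota / period, and skips the limit when the result would be finer than 1m. The standalone snippet below is a minimal sketch of that conversion; the helper name and example values are illustrative, not part of the PR.

package main

import "fmt"

// cpuQuotaToMilli mirrors the conversion added in containerToV1Container:
// CFS quota/period (microseconds) -> Kubernetes millicpu.
// Standalone sketch for illustration only.
func cpuQuotaToMilli(quota, period int64) int64 {
	if quota <= 0 || period <= 0 {
		return 0
	}
	return int64(1000 * float64(quota) / float64(period))
}

func main() {
	// quota=50000, period=100000 is the pair the e2e test below passes explicitly;
	// it comes out to 500 millicpu, i.e. "500m" in the generated YAML.
	fmt.Println(cpuQuotaToMilli(50000, 100000)) // 500
}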
@@ -260,6 +260,38 @@ var _ = Describe("Podman generate kube", func() {
 		}
 	})
 
+	It("podman generate kube on pod with cpu limit", func() {
+		podName := "testCpuLimit"
+		podSession := podmanTest.Podman([]string{"pod", "create", "--name", podName})
+		podSession.WaitWithDefaultTimeout()
+		Expect(podSession.ExitCode()).To(Equal(0))
+
+		ctr1Name := "ctr1"
+		ctr1Session := podmanTest.Podman([]string{"create", "--name", ctr1Name, "--pod", podName,
+			"--cpus", "0.5", ALPINE, "top"})
+		ctr1Session.WaitWithDefaultTimeout()
+		Expect(ctr1Session.ExitCode()).To(Equal(0))
+
+		ctr2Name := "ctr2"
+		ctr2Session := podmanTest.Podman([]string{"create", "--name", ctr2Name, "--pod", podName,
+			"--cpu-period", "100000", "--cpu-quota", "50000", ALPINE, "top"})
+		ctr2Session.WaitWithDefaultTimeout()
+		Expect(ctr2Session.ExitCode()).To(Equal(0))
+
+		kube := podmanTest.Podman([]string{"generate", "kube", podName})
+		kube.WaitWithDefaultTimeout()
+		Expect(kube.ExitCode()).To(Equal(0))
+
+		pod := new(v1.Pod)
+		err := yaml.Unmarshal(kube.Out.Contents(), pod)
+		Expect(err).To(BeNil())
+
+		for _, ctr := range pod.Spec.Containers {
+			cpuLimit := ctr.Resources.Limits.Cpu().MilliValue()
+			Expect(cpuLimit).To(Equal(int64(500)))
+		}
+	})
+
 	It("podman generate kube on pod with ports", func() {
 		podName := "test"
 		podSession := podmanTest.Podman([]string{"pod", "create", "--name", podName, "-p", "4000:4000", "-p", "5000:5000"})
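The test expects both containers to come out at 500m: ctr2 passes quota=50000/period=100000 explicitly, while ctr1's --cpus 0.5 is translated by the engine into an equivalent quota against the default CFS period. The sketch below assumes the common 100000µs default period; that default is an assumption here, not something stated in this PR.

package main

import "fmt"

func main() {
	// Assumption: the engine applies the usual 100000µs default CFS period for --cpus.
	const defaultPeriod = 100000
	cpus := 0.5 // value given to ctr1 via --cpus

	quota := int64(cpus * defaultPeriod) // 50000, matching ctr2's explicit --cpu-quota
	milli := int64(1000 * float64(quota) / float64(defaultPeriod))
	fmt.Println(quota, milli) // 50000 500 -> rendered as "500m" in the generated spec
}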