Merge pull request #3472 from haircommander/generate-volumes
generate kube with volumes
commit f7407f2eb5
@@ -370,58 +370,41 @@ func (c *Container) getInspectMounts(ctrSpec *spec.Spec) ([]InspectMount, error)
         return inspectMounts, nil
     }
 
-    // We need to parse all named volumes and mounts into maps, so we don't
-    // end up with repeated lookups for each user volume.
-    // Map destination to struct, as destination is what is stored in
-    // UserVolumes.
-    namedVolumes := make(map[string]*ContainerNamedVolume)
-    mounts := make(map[string]spec.Mount)
-    for _, namedVol := range c.config.NamedVolumes {
-        namedVolumes[namedVol.Dest] = namedVol
-    }
-    for _, mount := range ctrSpec.Mounts {
-        mounts[mount.Destination] = mount
-    }
-    for _, vol := range c.config.UserVolumes {
-        // We need to look up the volumes.
-        // First: is it a named volume?
-        if volume, ok := namedVolumes[vol]; ok {
-            mountStruct := InspectMount{}
-            mountStruct.Type = "volume"
-            mountStruct.Destination = volume.Dest
-            mountStruct.Name = volume.Name
-
-            // For src and driver, we need to look up the named
-            // volume.
-            volFromDB, err := c.runtime.state.Volume(volume.Name)
-            if err != nil {
-                return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
-            }
-            mountStruct.Driver = volFromDB.Driver()
-            mountStruct.Source = volFromDB.MountPoint()
-
-            parseMountOptionsForInspect(volume.Options, &mountStruct)
-
-            inspectMounts = append(inspectMounts, mountStruct)
-        } else if mount, ok := mounts[vol]; ok {
-            // It's a mount.
-            // Is it a tmpfs? If so, discard.
-            if mount.Type == "tmpfs" {
-                continue
-            }
-
-            mountStruct := InspectMount{}
-            mountStruct.Type = "bind"
-            mountStruct.Source = mount.Source
-            mountStruct.Destination = mount.Destination
-
-            parseMountOptionsForInspect(mount.Options, &mountStruct)
-
-            inspectMounts = append(inspectMounts, mountStruct)
-        } else {
-            // We couldn't find a mount. Log a warning.
-            logrus.Warnf("Could not find mount at destination %q when building inspect output for container %s", vol, c.ID())
-        }
+    namedVolumes, mounts := c.sortUserVolumes(ctrSpec)
+    for _, volume := range namedVolumes {
+        mountStruct := InspectMount{}
+        mountStruct.Type = "volume"
+        mountStruct.Destination = volume.Dest
+        mountStruct.Name = volume.Name
+
+        // For src and driver, we need to look up the named
+        // volume.
+        volFromDB, err := c.runtime.state.Volume(volume.Name)
+        if err != nil {
+            return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
+        }
+        mountStruct.Driver = volFromDB.Driver()
+        mountStruct.Source = volFromDB.MountPoint()
+
+        parseMountOptionsForInspect(volume.Options, &mountStruct)
+
+        inspectMounts = append(inspectMounts, mountStruct)
+    }
+    for _, mount := range mounts {
+        // It's a mount.
+        // Is it a tmpfs? If so, discard.
+        if mount.Type == "tmpfs" {
+            continue
+        }
+
+        mountStruct := InspectMount{}
+        mountStruct.Type = "bind"
+        mountStruct.Source = mount.Source
+        mountStruct.Destination = mount.Destination
+
+        parseMountOptionsForInspect(mount.Options, &mountStruct)
+
+        inspectMounts = append(inspectMounts, mountStruct)
     }
 
     return inspectMounts, nil
@@ -1537,3 +1537,34 @@ func (c *Container) prepareCheckpointExport() (err error) {
 
     return nil
 }
+
+// sortUserVolumes sorts the volumes specified for a container
+// between named and normal volumes
+func (c *Container) sortUserVolumes(ctrSpec *spec.Spec) ([]*ContainerNamedVolume, []spec.Mount) {
+    namedUserVolumes := []*ContainerNamedVolume{}
+    userMounts := []spec.Mount{}
+
+    // We need to parse all named volumes and mounts into maps, so we don't
+    // end up with repeated lookups for each user volume.
+    // Map destination to struct, as destination is what is stored in
+    // UserVolumes.
+    namedVolumes := make(map[string]*ContainerNamedVolume)
+    mounts := make(map[string]spec.Mount)
+    for _, namedVol := range c.config.NamedVolumes {
+        namedVolumes[namedVol.Dest] = namedVol
+    }
+    for _, mount := range ctrSpec.Mounts {
+        mounts[mount.Destination] = mount
+    }
+
+    for _, vol := range c.config.UserVolumes {
+        if volume, ok := namedVolumes[vol]; ok {
+            namedUserVolumes = append(namedUserVolumes, volume)
+        } else if mount, ok := mounts[vol]; ok {
+            userMounts = append(userMounts, mount)
+        } else {
+            logrus.Warnf("Could not find mount at destination %q when parsing user volumes for container %s", vol, c.ID())
+        }
+    }
+    return namedUserVolumes, userMounts
+}
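Reviewer note: the new sortUserVolumes helper is what both getInspectMounts (above) and libpodMountsToKubeVolumeMounts (below, in libpod/kube.go) now call to split c.config.UserVolumes into named volumes and plain OCI mounts. A minimal, self-contained sketch of that split, using a stand-in namedVolume type rather than libpod's ContainerNamedVolume, which is not imported here:

package main

import (
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// namedVolume is a stand-in for libpod's ContainerNamedVolume; only the
// fields the split cares about are modeled here.
type namedVolume struct {
	Name string
	Dest string
}

// splitUserVolumes mirrors the shape of sortUserVolumes: index named volumes
// and spec mounts by destination, then walk the user-requested destinations
// and place each one in the bucket it resolves to.
func splitUserVolumes(userVolumes []string, named []namedVolume, specMounts []specs.Mount) ([]namedVolume, []specs.Mount) {
	namedByDest := make(map[string]namedVolume, len(named))
	mountsByDest := make(map[string]specs.Mount, len(specMounts))
	for _, nv := range named {
		namedByDest[nv.Dest] = nv
	}
	for _, m := range specMounts {
		mountsByDest[m.Destination] = m
	}

	var outNamed []namedVolume
	var outMounts []specs.Mount
	for _, dest := range userVolumes {
		if nv, ok := namedByDest[dest]; ok {
			outNamed = append(outNamed, nv)
		} else if m, ok := mountsByDest[dest]; ok {
			outMounts = append(outMounts, m)
		}
		// Destinations found in neither map are only warned about in the real helper.
	}
	return outNamed, outMounts
}

func main() {
	named := []namedVolume{{Name: "dbdata", Dest: "/var/lib/db"}}
	mounts := []specs.Mount{{Destination: "/volume", Type: "bind", Source: "/host/vol-test1", Options: []string{"rw"}}}
	n, m := splitUserVolumes([]string{"/var/lib/db", "/volume"}, named, mounts)
	fmt.Printf("%d named volume(s), %d bind mount(s)\n", len(n), len(m))
}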
149  libpod/kube.go
@@ -3,6 +3,7 @@ package libpod
 import (
     "fmt"
     "math/rand"
+    "os"
     "strconv"
     "strings"
     "time"
@@ -132,32 +133,43 @@ func (p *Pod) podWithContainers(containers []*Container, ports []v1.ContainerPort)
     var (
         podContainers []v1.Container
     )
+    deDupPodVolumes := make(map[string]*v1.Volume)
     first := true
     for _, ctr := range containers {
         if !ctr.IsInfra() {
-            result, err := containerToV1Container(ctr)
+            ctr, volumes, err := containerToV1Container(ctr)
             if err != nil {
                 return nil, err
             }
 
             // Since port bindings for the pod are handled by the
             // infra container, wipe them here.
-            result.Ports = nil
+            ctr.Ports = nil
 
             // We add the original port declarations from the libpod infra container
             // to the first kubernetes container description because otherwise we loose
             // the original container/port bindings.
             if first && len(ports) > 0 {
-                result.Ports = ports
+                ctr.Ports = ports
                 first = false
             }
-            podContainers = append(podContainers, result)
+            podContainers = append(podContainers, ctr)
+            // Deduplicate volumes, so if containers in the pod share a volume, it's only
+            // listed in the volumes section once
+            for _, vol := range volumes {
+                deDupPodVolumes[vol.Name] = &vol
+            }
         }
     }
-    return addContainersToPodObject(podContainers, p.Name()), nil
+    podVolumes := make([]v1.Volume, 0, len(deDupPodVolumes))
+    for _, vol := range deDupPodVolumes {
+        podVolumes = append(podVolumes, *vol)
+    }
+
+    return addContainersAndVolumesToPodObject(podContainers, podVolumes, p.Name()), nil
 }
 
-func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod {
+func addContainersAndVolumesToPodObject(containers []v1.Container, volumes []v1.Volume, podName string) *v1.Pod {
     tm := v12.TypeMeta{
         Kind:       "Pod",
         APIVersion: "v1",
@@ -177,6 +189,7 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod
     }
     ps := v1.PodSpec{
         Containers: containers,
+        Volumes:    volumes,
     }
     p := v1.Pod{
         TypeMeta: tm,
@@ -190,56 +203,58 @@ func addContainersToPodObject(containers []v1.Container, podName string) *v1.Pod
 // for a single container. we "insert" that container description in a pod.
 func simplePodWithV1Container(ctr *Container) (*v1.Pod, error) {
     var containers []v1.Container
-    result, err := containerToV1Container(ctr)
+    kubeCtr, kubeVols, err := containerToV1Container(ctr)
     if err != nil {
         return nil, err
     }
-    containers = append(containers, result)
-    return addContainersToPodObject(containers, ctr.Name()), nil
+    containers = append(containers, kubeCtr)
+    return addContainersAndVolumesToPodObject(containers, kubeVols, ctr.Name()), nil
 
 }
 
 // containerToV1Container converts information we know about a libpod container
 // to a V1.Container specification.
-func containerToV1Container(c *Container) (v1.Container, error) {
+func containerToV1Container(c *Container) (v1.Container, []v1.Volume, error) {
     kubeContainer := v1.Container{}
+    kubeVolumes := []v1.Volume{}
     kubeSec, err := generateKubeSecurityContext(c)
     if err != nil {
-        return kubeContainer, err
+        return kubeContainer, kubeVolumes, err
     }
 
     if len(c.config.Spec.Linux.Devices) > 0 {
         // TODO Enable when we can support devices and their names
         devices, err := generateKubeVolumeDeviceFromLinuxDevice(c.Spec().Linux.Devices)
         if err != nil {
-            return kubeContainer, err
+            return kubeContainer, kubeVolumes, err
         }
         kubeContainer.VolumeDevices = devices
-        return kubeContainer, errors.Wrapf(define.ErrNotImplemented, "linux devices")
+        return kubeContainer, kubeVolumes, errors.Wrapf(define.ErrNotImplemented, "linux devices")
     }
 
     if len(c.config.UserVolumes) > 0 {
         // TODO When we until we can resolve what the volume name should be, this is disabled
         // Volume names need to be coordinated "globally" in the kube files.
-        volumes, err := libpodMountsToKubeVolumeMounts(c)
+        volumeMounts, volumes, err := libpodMountsToKubeVolumeMounts(c)
         if err != nil {
-            return kubeContainer, err
+            return kubeContainer, kubeVolumes, err
         }
-        kubeContainer.VolumeMounts = volumes
+        kubeContainer.VolumeMounts = volumeMounts
+        kubeVolumes = append(kubeVolumes, volumes...)
     }
 
     envVariables, err := libpodEnvVarsToKubeEnvVars(c.config.Spec.Process.Env)
     if err != nil {
-        return kubeContainer, err
+        return kubeContainer, kubeVolumes, err
     }
 
     portmappings, err := c.PortMappings()
     if err != nil {
-        return kubeContainer, err
+        return kubeContainer, kubeVolumes, err
     }
     ports, err := ocicniPortMappingToContainerPort(portmappings)
     if err != nil {
-        return kubeContainer, err
+        return kubeContainer, kubeVolumes, err
     }
 
     containerCommands := c.Command()
@@ -263,7 +278,7 @@ func containerToV1Container(c *Container) (v1.Container, error) {
     kubeContainer.StdinOnce = false
     kubeContainer.TTY = c.config.Spec.Process.Terminal
 
-    return kubeContainer, nil
+    return kubeContainer, kubeVolumes, nil
 }
 
 // ocicniPortMappingToContainerPort takes an ocicni portmapping and converts
@@ -325,36 +340,82 @@ func libpodMaxAndMinToResourceList(c *Container) (v1.ResourceList, v1.ResourceList)
     return maxResources, minResources
 }
 
-func generateKubeVolumeMount(hostSourcePath string, mounts []specs.Mount) (v1.VolumeMount, error) {
-    vm := v1.VolumeMount{}
-    for _, m := range mounts {
-        if m.Source == hostSourcePath {
-            // TODO Name is not provided and is required by Kube; therefore, this is disabled earlier
-            //vm.Name =
-            vm.MountPath = m.Source
-            vm.SubPath = m.Destination
-            if util.StringInSlice("ro", m.Options) {
-                vm.ReadOnly = true
-            }
-            return vm, nil
-        }
-    }
-    return vm, errors.New("unable to find mount source")
-}
-
 // libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands
-func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, error) {
-    // At this point, I dont think we can distinguish between the default
-    // volume mounts and user added ones. For now, we pass them all.
+func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume, error) {
     var vms []v1.VolumeMount
-    for _, hostSourcePath := range c.config.UserVolumes {
-        vm, err := generateKubeVolumeMount(hostSourcePath, c.config.Spec.Mounts)
+    var vos []v1.Volume
+
+    // TODO when named volumes are supported in play kube, also parse named volumes here
+    _, mounts := c.sortUserVolumes(c.config.Spec)
+    for _, m := range mounts {
+        vm, vo, err := generateKubeVolumeMount(m)
         if err != nil {
-            continue
+            return vms, vos, err
         }
         vms = append(vms, vm)
+        vos = append(vos, vo)
     }
-    return vms, nil
+    return vms, vos, nil
 }
 
+// generateKubeVolumeMount takes a user specfied mount and returns
+// a kubernetes VolumeMount (to be added to the container) and a kubernetes Volume
+// (to be added to the pod)
+func generateKubeVolumeMount(m specs.Mount) (v1.VolumeMount, v1.Volume, error) {
+    vm := v1.VolumeMount{}
+    vo := v1.Volume{}
+
+    name, err := convertVolumePathToName(m.Source)
+    if err != nil {
+        return vm, vo, err
+    }
+    vm.Name = name
+    vm.MountPath = m.Destination
+    if util.StringInSlice("ro", m.Options) {
+        vm.ReadOnly = true
+    }
+
+    vo.Name = name
+    vo.HostPath = &v1.HostPathVolumeSource{}
+    vo.HostPath.Path = m.Source
+    isDir, err := isHostPathDirectory(m.Source)
+    // neither a directory or a file lives here, default to creating a directory
+    // TODO should this be an error instead?
+    var hostPathType v1.HostPathType
+    if err != nil {
+        hostPathType = v1.HostPathDirectoryOrCreate
+    } else if isDir {
+        hostPathType = v1.HostPathDirectory
+    } else {
+        hostPathType = v1.HostPathFile
+    }
+    vo.HostPath.Type = &hostPathType
+
+    return vm, vo, nil
+}
+
+func isHostPathDirectory(hostPathSource string) (bool, error) {
+    info, err := os.Stat(hostPathSource)
+    if err != nil {
+        return false, err
+    }
+    return info.Mode().IsDir(), nil
+}
+
+func convertVolumePathToName(hostSourcePath string) (string, error) {
+    if len(hostSourcePath) == 0 {
+        return "", errors.Errorf("hostSourcePath must be specified to generate volume name")
+    }
+    if len(hostSourcePath) == 1 {
+        if hostSourcePath != "/" {
+            return "", errors.Errorf("hostSourcePath malformatted: %s", hostSourcePath)
+        }
+        // add special case name
+        return "root", nil
+    }
+    // First, trim trailing slashes, then replace slashes with dashes.
+    // Thus, /mnt/data/ will become mnt-data
+    return strings.Replace(strings.Trim(hostSourcePath, "/"), "/", "-", -1), nil
+}
+
 func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v1.Capabilities {
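Reviewer note: the volume name that ends up in both the container's VolumeMount and the pod-level Volume is derived purely from the host source path. A small standalone sketch of that naming scheme; pathToVolumeName is a hypothetical helper that mirrors the trim-and-replace expression in convertVolumePathToName above:

package main

import (
	"fmt"
	"strings"
)

// pathToVolumeName mirrors convertVolumePathToName from the diff above:
// "/" becomes "root"; any other path is trimmed of surrounding slashes and
// the remaining slashes become dashes, e.g. "/mnt/data/" -> "mnt-data".
func pathToVolumeName(hostSourcePath string) string {
	if hostSourcePath == "/" {
		return "root"
	}
	return strings.Replace(strings.Trim(hostSourcePath, "/"), "/", "-", -1)
}

func main() {
	for _, p := range []string{"/", "/mnt/data/", "/tmp/vol-test1"} {
		fmt.Printf("%-15s -> %s\n", p, pathToVolumeName(p))
	}
}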
@@ -207,4 +207,35 @@ var _ = Describe("Podman generate kube", func() {
         Expect(psOut).To(ContainSubstring("test1"))
         Expect(psOut).To(ContainSubstring("test2"))
     })
+
+    It("podman generate kube with volume", func() {
+        vol1 := filepath.Join(podmanTest.TempDir, "vol-test1")
+        err := os.MkdirAll(vol1, 0755)
+        Expect(err).To(BeNil())
+
+        // we need a container name because IDs don't persist after rm/play
+        ctrName := "test-ctr"
+
+        session1 := podmanTest.Podman([]string{"run", "-d", "--pod", "new:test1", "--name", ctrName, "-v", vol1 + ":/volume/:z", "alpine", "top"})
+        session1.WaitWithDefaultTimeout()
+        Expect(session1.ExitCode()).To(Equal(0))
+
+        outputFile := filepath.Join(podmanTest.RunRoot, "pod.yaml")
+        kube := podmanTest.Podman([]string{"generate", "kube", "test1", "-f", outputFile})
+        kube.WaitWithDefaultTimeout()
+        Expect(kube.ExitCode()).To(Equal(0))
+
+        rm := podmanTest.Podman([]string{"pod", "rm", "-f", "test1"})
+        rm.WaitWithDefaultTimeout()
+        Expect(rm.ExitCode()).To(Equal(0))
+
+        play := podmanTest.Podman([]string{"play", "kube", outputFile})
+        play.WaitWithDefaultTimeout()
+        Expect(play.ExitCode()).To(Equal(0))
+
+        inspect := podmanTest.Podman([]string{"inspect", ctrName})
+        inspect.WaitWithDefaultTimeout()
+        Expect(inspect.ExitCode()).To(Equal(0))
+        Expect(inspect.OutputToString()).To(ContainSubstring(vol1))
+    })
 })
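Reviewer note: for the test above, the generated YAML should carry a hostPath volume for vol1 plus a matching volumeMount in the container. A rough sketch of the corresponding core/v1 objects, with hypothetical values standing in for a container run with "-v /host/vol-test1:/volume"; the name follows the path-to-name scheme from convertVolumePathToName:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Hypothetical host path and mount point; "/host/vol-test1" maps to the
	// volume name "host-vol-test1" under the scheme used in this PR.
	hostPathType := v1.HostPathDirectory
	vol := v1.Volume{
		Name: "host-vol-test1",
		VolumeSource: v1.VolumeSource{
			HostPath: &v1.HostPathVolumeSource{
				Path: "/host/vol-test1",
				Type: &hostPathType,
			},
		},
	}
	mount := v1.VolumeMount{
		Name:      "host-vol-test1",
		MountPath: "/volume",
	}
	fmt.Printf("pod-level volume: %+v\n", vol)
	fmt.Printf("container volumeMount: %+v\n", mount)
}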