libpod removal from main (phase 2)

this is phase 2 for the removal of libpod from main: container status, version, and info types move to libpod/define, log handling moves to libpod/logs, small helpers move to pkg/util, and commands such as exec, play kube, and restore reach the runtime through pkg/adapter instead of libpodruntime.

Signed-off-by: baude <bbaude@redhat.com>
baude 2019-06-25 08:40:19 -05:00
parent 58a1777f51
commit 8561b99644
66 changed files with 918 additions and 858 deletions
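
For illustration, the caller-side pattern this phase applies throughout cmd/podman, as a minimal sketch (isRunningOrPaused is hypothetical, not a helper from this commit):

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/define"
)

// With this commit, frontend status checks compare against
// define.ContainerState* constants instead of libpod.ContainerState*,
// so package main no longer needs all of libpod for simple state tests.
func isRunningOrPaused(state define.ContainerStatus) bool {
	return state == define.ContainerStateRunning ||
		state == define.ContainerStatePaused
}

func main() {
	fmt.Println(isRunningOrPaused(define.ContainerStateRunning)) // true
}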


@ -1,3 +1,5 @@
//+build !remoteclient
package main
import (


@ -8,6 +8,7 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
)
@ -19,7 +20,7 @@ import (
// is desired a -1 can be used to get all containers. For a better
// error message, if the filter fails, a corresponding verb can be
// specified which will then appear in the error message.
func getAllOrLatestContainers(c *cliconfig.PodmanCommand, runtime *libpod.Runtime, filterState libpod.ContainerStatus, verb string) ([]*libpod.Container, error) {
func getAllOrLatestContainers(c *cliconfig.PodmanCommand, runtime *libpod.Runtime, filterState define.ContainerStatus, verb string) ([]*libpod.Container, error) {
var containers []*libpod.Container
var lastError error
var err error

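For illustration, a hypothetical call site for the new signature (stopAll is not from this commit; it assumes the same package main as the function above):

// stopAll shows the define-based filter in use: define.ContainerStateRunning
// selects running containers, and the verb "stop" appears in any filter
// error message. A filterState of -1 would select all containers.
func stopAll(c *cliconfig.PodmanCommand, runtime *libpod.Runtime) ([]*libpod.Container, error) {
	return getAllOrLatestContainers(c, runtime, define.ContainerStateRunning, "stop")
}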

@ -103,7 +103,7 @@ func copyBetweenHostAndContainer(runtime *libpod.Runtime, src string, dest strin
if err != nil {
return err
}
if state == libpod.ContainerStateRunning {
if state == define.ContainerStateRunning {
return errors.Errorf("cannot copy into running rootless container with pause set - pass --pause=false to force copying")
}
}


@ -1,16 +1,9 @@
package main
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared/parse"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@ -56,8 +49,6 @@ func init() {
func execCmd(c *cliconfig.ExecValues) error {
args := c.InputArgs
var ctr *libpod.Container
var err error
argStart := 1
if len(args) < 1 && !c.Latest {
return errors.Errorf("you must provide one container name or id")
@ -69,67 +60,15 @@ func execCmd(c *cliconfig.ExecValues) error {
argStart = 0
}
cmd := args[argStart:]
runtime, err := libpodruntime.GetRuntime(getContext(), &c.PodmanCommand)
runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
if c.Latest {
ctr, err = runtime.GetLatestContainer()
} else {
ctr, err = runtime.LookupContainer(args[0])
}
if err != nil {
return errors.Wrapf(err, "unable to exec into %s", args[0])
}
if c.PreserveFDs > 0 {
entries, err := ioutil.ReadDir("/proc/self/fd")
if err != nil {
return errors.Wrapf(err, "unable to read /proc/self/fd")
}
m := make(map[int]bool)
for _, e := range entries {
i, err := strconv.Atoi(e.Name())
if err != nil {
return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
}
m[i] = true
}
for i := 3; i < 3+c.PreserveFDs; i++ {
if _, found := m[i]; !found {
return errors.New("invalid --preserve-fds=N specified. Not enough FDs available")
}
}
}
// ENVIRONMENT VARIABLES
env := map[string]string{}
if err := parse.ReadKVStrings(env, []string{}, c.Env); err != nil {
return errors.Wrapf(err, "unable to process environment variables")
}
envs := []string{}
for k, v := range env {
envs = append(envs, fmt.Sprintf("%s=%s", k, v))
}
streams := new(libpod.AttachStreams)
streams.OutputStream = os.Stdout
streams.ErrorStream = os.Stderr
streams.InputStream = os.Stdin
streams.AttachOutput = true
streams.AttachError = true
streams.AttachInput = true
err = ctr.Exec(c.Tty, c.Privileged, envs, cmd, c.User, c.Workdir, streams, c.PreserveFDs)
err = runtime.Exec(c, cmd)
if errors.Cause(err) == define.ErrCtrStateInvalid {
exitCode = 126
}
return err
}
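
The --preserve-fds validation that leaves this file (and presumably now runs behind the adapter boundary) is worth keeping in view; a standalone sketch of the removed logic:

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"

	"github.com/pkg/errors"
)

// checkPreserveFDs reproduces the removed check: file descriptors
// 3 through 3+n-1 must already be open in /proc/self/fd.
func checkPreserveFDs(n int) error {
	entries, err := ioutil.ReadDir("/proc/self/fd")
	if err != nil {
		return errors.Wrapf(err, "unable to read /proc/self/fd")
	}
	open := make(map[int]bool)
	for _, e := range entries {
		i, err := strconv.Atoi(e.Name())
		if err != nil {
			return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
		}
		open[i] = true
	}
	for i := 3; i < 3+n; i++ {
		if !open[i] {
			return errors.New("invalid --preserve-fds=N specified. Not enough FDs available")
		}
	}
	return nil
}

func main() {
	fmt.Println(checkPreserveFDs(0)) // <nil>: no extra FDs requested
}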


@ -2,8 +2,8 @@ package main
import (
"fmt"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@ -43,12 +43,6 @@ func healthCheckCmd(c *cliconfig.HealthCheckValues) error {
return errors.Wrap(err, "could not get runtime")
}
status, err := runtime.HealthCheck(c)
if err != nil {
if status == libpod.HealthCheckFailure {
fmt.Println("\nunhealthy")
}
return err
}
fmt.Println("healthy")
return nil
fmt.Println(status)
return err
}


@ -6,7 +6,7 @@ import (
"github.com/containers/buildah/pkg/formats"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/version"
"github.com/pkg/errors"
@ -74,12 +74,12 @@ func infoCmd(c *cliconfig.InfoValues) error {
remoteClientInfo["RemoteAPI Version"] = version.RemoteAPIVersion
remoteClientInfo["Podman Version"] = version.Version
remoteClientInfo["OS Arch"] = fmt.Sprintf("%s/%s", rt.GOOS, rt.GOARCH)
infoArr = append(infoArr, libpod.InfoData{Type: "client", Data: remoteClientInfo})
infoArr = append(infoArr, define.InfoData{Type: "client", Data: remoteClientInfo})
}
if !runtime.Remote && c.Debug {
debugInfo := debugInfo(c)
infoArr = append(infoArr, libpod.InfoData{Type: "debug", Data: debugInfo})
infoArr = append(infoArr, define.InfoData{Type: "debug", Data: debugInfo})
}
for _, currInfo := range infoArr {
@ -108,7 +108,7 @@ func debugInfo(c *cliconfig.InfoValues) map[string]interface{} {
info["compiler"] = rt.Compiler
info["go version"] = rt.Version()
info["podman version"] = version.Version
version, _ := libpod.GetVersion()
version, _ := define.GetVersion()
info["git commit"] = version.GitCommit
return info
}


@ -4,7 +4,7 @@ import (
"time"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/logs"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
@ -80,13 +80,12 @@ func logsCmd(c *cliconfig.LogsValues) error {
sinceTime = since
}
opts := &libpod.LogOptions{
options := &logs.LogOptions{
Details: c.Details,
Follow: c.Follow,
Since: sinceTime,
Tail: c.Tail,
Timestamps: c.Timestamps,
}
return runtime.Log(c, opts)
return runtime.Log(c, options)
}


@ -2,11 +2,11 @@ package main
import (
"context"
"github.com/containers/libpod/libpod"
"io"
"os"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
_ "github.com/containers/libpod/pkg/hooks/0.1.0"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/version"


@ -1,35 +1,12 @@
package main
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
ns "github.com/containers/libpod/pkg/namespaces"
"github.com/containers/libpod/pkg/spec"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/ghodss/yaml"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/api/core/v1"
)
const (
// https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
createDirectoryPermission = 0755
// https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
createFilePermission = 0644
)
var (
@ -81,289 +58,12 @@ func playKubeCmd(c *cliconfig.KubePlayValues) error {
}
ctx := getContext()
runtime, err := libpodruntime.GetRuntime(ctx, &c.PodmanCommand)
runtime, err := adapter.GetRuntime(ctx, &c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
pod, err := playKubeYAMLCmd(c, ctx, runtime, args[0])
if err != nil && pod != nil {
if err2 := runtime.RemovePod(ctx, pod, true, true); err2 != nil {
logrus.Errorf("unable to remove pod %s after failing to play kube", pod.ID())
}
}
_, err = runtime.PlayKubeYAML(ctx, c, args[0])
return err
}
func playKubeYAMLCmd(c *cliconfig.KubePlayValues, ctx context.Context, runtime *libpod.Runtime, yamlFile string) (*libpod.Pod, error) {
var (
containers []*libpod.Container
pod *libpod.Pod
podOptions []libpod.PodCreateOption
podYAML v1.Pod
registryCreds *types.DockerAuthConfig
writer io.Writer
)
content, err := ioutil.ReadFile(yamlFile)
if err != nil {
return nil, err
}
if err := yaml.Unmarshal(content, &podYAML); err != nil {
return nil, errors.Wrapf(err, "unable to read %s as YAML", yamlFile)
}
// check for name collision between pod and container
podName := podYAML.ObjectMeta.Name
for _, n := range podYAML.Spec.Containers {
if n.Name == podName {
fmt.Printf("a container exists with the same name (%s) as the pod in your YAML file; changing pod name to %s_pod\n", podName, podName)
podName = fmt.Sprintf("%s_pod", podName)
}
}
podOptions = append(podOptions, libpod.WithInfraContainer())
podOptions = append(podOptions, libpod.WithPodName(podName))
// TODO for now we just use the default kernel namespaces; we need to add/subtract this from yaml
nsOptions, err := shared.GetNamespaceOptions(strings.Split(shared.DefaultKernelNamespaces, ","))
if err != nil {
return nil, err
}
podOptions = append(podOptions, nsOptions...)
podPorts := getPodPorts(podYAML.Spec.Containers)
podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts))
// Create the Pod
pod, err = runtime.NewPod(ctx, podOptions...)
if err != nil {
return pod, err
}
podInfraID, err := pod.InfraContainerID()
if err != nil {
return pod, err
}
namespaces := map[string]string{
// Disabled during code review per mheon
//"pid": fmt.Sprintf("container:%s", podInfraID),
"net": fmt.Sprintf("container:%s", podInfraID),
"user": fmt.Sprintf("container:%s", podInfraID),
"ipc": fmt.Sprintf("container:%s", podInfraID),
"uts": fmt.Sprintf("container:%s", podInfraID),
}
if !c.Quiet {
writer = os.Stderr
}
dockerRegistryOptions := image.DockerRegistryOptions{
DockerRegistryCreds: registryCreds,
DockerCertPath: c.CertDir,
}
if c.Flag("tls-verify").Changed {
dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
}
// map from name to mount point
volumes := make(map[string]string)
for _, volume := range podYAML.Spec.Volumes {
hostPath := volume.VolumeSource.HostPath
if hostPath == nil {
return pod, errors.Errorf("HostPath is currently the only supported VolumeSource")
}
if hostPath.Type != nil {
switch *hostPath.Type {
case v1.HostPathDirectoryOrCreate:
if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
if err := os.Mkdir(hostPath.Path, createDirectoryPermission); err != nil {
return pod, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
}
}
// unconditionally label a newly created volume as private
if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
return pod, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
}
break
case v1.HostPathFileOrCreate:
if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, createFilePermission)
if err != nil {
return pod, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
}
if err := f.Close(); err != nil {
logrus.Warnf("Error in closing newly created HostPath file: %v", err)
}
}
// unconditionally label a newly created volume as private
if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
return pod, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
}
break
case v1.HostPathDirectory:
case v1.HostPathFile:
case v1.HostPathUnset:
// do nothing here because we will verify the path exists in validateVolumeHostDir
break
default:
return pod, errors.Errorf("Directories are the only supported HostPath type")
}
}
if err := createconfig.ValidateVolumeHostDir(hostPath.Path); err != nil {
return pod, errors.Wrapf(err, "Error in parsing HostPath in YAML")
}
volumes[volume.Name] = hostPath.Path
}
for _, container := range podYAML.Spec.Containers {
newImage, err := runtime.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, false, nil)
if err != nil {
return pod, err
}
createConfig, err := kubeContainerToCreateConfig(ctx, container, runtime, newImage, namespaces, volumes, pod.ID())
if err != nil {
return pod, err
}
ctr, err := shared.CreateContainerFromCreateConfig(runtime, createConfig, ctx, pod)
if err != nil {
return pod, err
}
containers = append(containers, ctr)
}
// start the containers
for _, ctr := range containers {
if err := ctr.Start(ctx, true); err != nil {
// Make this a hard failure here to avoid a mess, since
// the other containers would be left in created status
return pod, err
}
}
// We've now successfully converted this YAML into a pod
// print our pod and containers, signifying we succeeded
fmt.Printf("Pod:\n%s\n", pod.ID())
if len(containers) == 1 {
fmt.Printf("Container:\n")
}
if len(containers) > 1 {
fmt.Printf("Containers:\n")
}
for _, ctr := range containers {
fmt.Println(ctr.ID())
}
return pod, nil
}
// getPodPorts converts a slice of kube container descriptions to an
// array of ocicni portmapping descriptions usable in libpod
func getPodPorts(containers []v1.Container) []ocicni.PortMapping {
var infraPorts []ocicni.PortMapping
for _, container := range containers {
for _, p := range container.Ports {
portBinding := ocicni.PortMapping{
HostPort: p.HostPort,
ContainerPort: p.ContainerPort,
Protocol: strings.ToLower(string(p.Protocol)),
}
if p.HostIP != "" {
logrus.Debug("HostIP on port bindings is not supported")
}
infraPorts = append(infraPorts, portBinding)
}
}
return infraPorts
}
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID string) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
)
// The default for MemorySwappiness is -1, not 0
containerConfig.Resources.MemorySwappiness = -1
containerConfig.Image = containerYAML.Image
containerConfig.ImageID = newImage.ID()
containerConfig.Name = containerYAML.Name
containerConfig.Tty = containerYAML.TTY
containerConfig.WorkDir = containerYAML.WorkingDir
containerConfig.Pod = podID
imageData, _ := newImage.Inspect(ctx)
containerConfig.User = "0"
if imageData != nil {
containerConfig.User = imageData.Config.User
}
if containerConfig.SecurityOpts != nil {
if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
}
if containerYAML.SecurityContext.Privileged != nil {
containerConfig.Privileged = *containerYAML.SecurityContext.Privileged
}
if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
}
}
containerConfig.Command = []string{}
if imageData != nil && imageData.Config != nil {
containerConfig.Command = append(containerConfig.Command, imageData.Config.Entrypoint...)
}
if len(containerConfig.Command) != 0 {
containerConfig.Command = append(containerConfig.Command, containerYAML.Command...)
} else if imageData != nil && imageData.Config != nil {
containerConfig.Command = append(containerConfig.Command, imageData.Config.Cmd...)
}
if imageData != nil && len(containerConfig.Command) == 0 {
return nil, errors.Errorf("No command specified in container YAML or as CMD or ENTRYPOINT in this image for %s", containerConfig.Name)
}
containerConfig.StopSignal = 15
// If the user does not pass in ID mappings, just set to basics
if containerConfig.IDMappings == nil {
containerConfig.IDMappings = &storage.IDMappingOptions{}
}
containerConfig.NetMode = ns.NetworkMode(namespaces["net"])
containerConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
containerConfig.UtsMode = ns.UTSMode(namespaces["uts"])
// disabled in code review per mheon
//containerConfig.PidMode = ns.PidMode(namespaces["pid"])
containerConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
if len(containerConfig.WorkDir) == 0 {
containerConfig.WorkDir = "/"
}
// Set default environment variables and incorporate data from image, if necessary
envs := shared.EnvVariablesFromData(imageData)
// Environment Variables
for _, e := range containerYAML.Env {
envs[e.Name] = e.Value
}
containerConfig.Env = envs
for _, volume := range containerYAML.VolumeMounts {
host_path, exists := volumes[volume.Name]
if !exists {
return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
}
if err := createconfig.ValidateVolumeCtrDir(volume.MountPath); err != nil {
return nil, errors.Wrapf(err, "error in parsing MountPath")
}
containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s", host_path, volume.MountPath))
}
return &containerConfig, nil
}


@ -6,8 +6,9 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -45,8 +46,8 @@ func init() {
flags.StringVar(&podCreateCommand.CgroupParent, "cgroup-parent", "", "Set parent cgroup for the pod")
flags.BoolVar(&podCreateCommand.Infra, "infra", true, "Create an infra container associated with the pod to share namespaces with")
flags.StringVar(&podCreateCommand.InfraImage, "infra-image", libpod.DefaultInfraImage, "The image of the infra container to associate with the pod")
flags.StringVar(&podCreateCommand.InfraCommand, "infra-command", libpod.DefaultInfraCommand, "The command to run on the infra container when the pod is started")
flags.StringVar(&podCreateCommand.InfraImage, "infra-image", define.DefaultInfraImage, "The image of the infra container to associate with the pod")
flags.StringVar(&podCreateCommand.InfraCommand, "infra-command", define.DefaultInfraCommand, "The command to run on the infra container when the pod is started")
flags.StringSliceVar(&podCreateCommand.LabelFile, "label-file", []string{}, "Read in a line delimited file of labels")
flags.StringSliceVarP(&podCreateCommand.Labels, "label", "l", []string{}, "Set metadata on pod (default [])")
flags.StringVarP(&podCreateCommand.Name, "name", "n", "", "Assign a name to the pod")
@ -78,7 +79,7 @@ func podCreateCmd(c *cliconfig.PodCreateValues) error {
return errors.Errorf("You cannot share kernel namespaces on the pod level without an infra container")
}
if c.Flag("pod-id-file").Changed && os.Geteuid() == 0 {
podIdFile, err = libpod.OpenExclusiveFile(c.PodIDFile)
podIdFile, err = util.OpenExclusiveFile(c.PodIDFile)
if err != nil && os.IsExist(err) {
return errors.Errorf("pod id file exists. Ensure another pod is not using it or delete %s", c.PodIDFile)
}


@ -11,7 +11,7 @@ import (
"github.com/containers/buildah/pkg/formats"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/util"
"github.com/docker/go-units"
@ -282,7 +282,7 @@ func generatePodFilterFuncs(filter, filterValue string) (func(pod *adapter.Pod)
}
for _, ctr_status := range ctr_statuses {
state := ctr_status.String()
if ctr_status == libpod.ContainerStateConfigured {
if ctr_status == define.ContainerStateConfigured {
state = "created"
}
if state == filterValue {
@ -504,15 +504,15 @@ func getAndSortPodJSONParams(pods []*adapter.Pod, opts podPsOptions) ([]podPsJSO
}
var status string
switch batchInfo.ConState {
case libpod.ContainerStateExited:
case define.ContainerStateExited:
fallthrough
case libpod.ContainerStateStopped:
case define.ContainerStateStopped:
status = EXITED
case libpod.ContainerStateRunning:
case define.ContainerStateRunning:
status = RUNNING
case libpod.ContainerStatePaused:
case define.ContainerStatePaused:
status = PAUSED
case libpod.ContainerStateCreated, libpod.ContainerStateConfigured:
case define.ContainerStateCreated, define.ContainerStateConfigured:
status = CREATED
default:
status = ERROR


@ -2,13 +2,13 @@ package main
import (
"fmt"
"github.com/containers/libpod/pkg/adapter"
"os"
"strings"
"text/tabwriter"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@ -55,7 +55,7 @@ func podTopCmd(c *cliconfig.PodTopValues) error {
args := c.InputArgs
if c.ListDescriptors {
descriptors, err := libpod.GetContainerPidInformationDescriptors()
descriptors, err := util.GetContainerPidInformationDescriptors()
if err != nil {
return err
}


@ -2,7 +2,6 @@ package main
import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
@ -61,13 +60,6 @@ func restoreCmd(c *cliconfig.RestoreValues, cmd *cobra.Command) error {
}
defer runtime.Shutdown(false)
options := libpod.ContainerCheckpointOptions{
Keep: c.Keep,
TCPEstablished: c.TcpEstablished,
TargetFile: c.Import,
Name: c.Name,
}
if c.Import == "" && c.Name != "" {
return errors.Errorf("--name can only used with --import")
}
@ -93,5 +85,5 @@ func restoreCmd(c *cliconfig.RestoreValues, cmd *cobra.Command) error {
return errors.Errorf("you must provide at least one name or id")
}
return runtime.Restore(getContext(), c, options)
return runtime.Restore(getContext(), c)
}


@ -51,7 +51,7 @@ type PsOptions struct {
// container related information
type BatchContainerStruct struct {
ConConfig *libpod.ContainerConfig
ConState libpod.ContainerStatus
ConState define.ContainerStatus
ExitCode int32
Exited bool
Pid int
@ -71,7 +71,7 @@ type PsContainerOutput struct {
Names string
IsInfra bool
Status string
State libpod.ContainerStatus
State define.ContainerStatus
Pid int
Size *ContainerSize
Pod string
@ -113,7 +113,7 @@ type ContainerSize struct {
// be called in PBatch
func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput, error) {
var (
conState libpod.ContainerStatus
conState define.ContainerStatus
command string
created string
status string
@ -184,16 +184,16 @@ func NewBatchContainer(ctr *libpod.Container, opts PsOptions) (PsContainerOutput
}
switch conState.String() {
case libpod.ContainerStateExited.String():
case define.ContainerStateExited.String():
fallthrough
case libpod.ContainerStateStopped.String():
case define.ContainerStateStopped.String():
exitedSince := units.HumanDuration(time.Since(exitedAt))
status = fmt.Sprintf("Exited (%d) %s ago", exitCode, exitedSince)
case libpod.ContainerStateRunning.String():
case define.ContainerStateRunning.String():
status = "Up " + units.HumanDuration(time.Since(startedAt)) + " ago"
case libpod.ContainerStatePaused.String():
case define.ContainerStatePaused.String():
status = "Paused"
case libpod.ContainerStateCreated.String(), libpod.ContainerStateConfigured.String():
case define.ContainerStateCreated.String(), define.ContainerStateConfigured.String():
status = "Created"
default:
status = "Error"
@ -323,9 +323,9 @@ func generateContainerFilterFuncs(filter, filterValue string, r *libpod.Runtime)
filterValue = "exited"
}
state := status.String()
if status == libpod.ContainerStateConfigured {
if status == define.ContainerStateConfigured {
state = "created"
} else if status == libpod.ContainerStateStopped {
} else if status == define.ContainerStateStopped {
state = "exited"
}
return state == filterValue
@ -490,7 +490,7 @@ func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsCon
// We sort out running vs non-running here to save lots of copying
// later.
if !opts.All && !opts.Latest && opts.Last < 1 {
if !res.IsInfra && res.State == libpod.ContainerStateRunning {
if !res.IsInfra && res.State == define.ContainerStateRunning {
psResults = append(psResults, res)
}
} else {
@ -505,7 +505,7 @@ func PBatch(containers []*libpod.Container, workers int, opts PsOptions) []PsCon
func BatchContainerOp(ctr *libpod.Container, opts PsOptions) (BatchContainerStruct, error) {
var (
conConfig *libpod.ContainerConfig
conState libpod.ContainerStatus
conState define.ContainerStatus
err error
exitCode int32
exited bool


@ -56,7 +56,7 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
}
if c.IsSet("cidfile") && os.Geteuid() == 0 {
cidFile, err = libpod.OpenExclusiveFile(c.String("cidfile"))
cidFile, err = util.OpenExclusiveFile(c.String("cidfile"))
if err != nil && os.IsExist(err) {
return nil, nil, errors.Errorf("container id file exists. Ensure another container is not using it or delete %s", c.String("cidfile"))
}


@ -4,6 +4,7 @@ import (
"strconv"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
@ -29,7 +30,7 @@ func GetPodStatus(pod *libpod.Pod) (string, error) {
return CreatePodStatusResults(ctrStatuses)
}
func CreatePodStatusResults(ctrStatuses map[string]libpod.ContainerStatus) (string, error) {
func CreatePodStatusResults(ctrStatuses map[string]define.ContainerStatus) (string, error) {
ctrNum := len(ctrStatuses)
if ctrNum == 0 {
return PodStateCreated, nil
@ -43,15 +44,15 @@ func CreatePodStatusResults(ctrStatuses map[string]libpod.ContainerStatus) (stri
}
for _, ctrStatus := range ctrStatuses {
switch ctrStatus {
case libpod.ContainerStateExited:
case define.ContainerStateExited:
fallthrough
case libpod.ContainerStateStopped:
case define.ContainerStateStopped:
statuses[PodStateStopped]++
case libpod.ContainerStateRunning:
case define.ContainerStateRunning:
statuses[PodStateRunning]++
case libpod.ContainerStatePaused:
case define.ContainerStatePaused:
statuses[PodStatePaused]++
case libpod.ContainerStateCreated, libpod.ContainerStateConfigured:
case define.ContainerStateCreated, define.ContainerStateConfigured:
statuses[PodStateCreated]++
default:
statuses[PodStateErrored]++

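For illustration, a hypothetical use of the updated helper (the expected output assumes the aggregation logic, only partially shown above, prefers a running state):

package main

import (
	"fmt"

	"github.com/containers/libpod/cmd/podman/shared"
	"github.com/containers/libpod/libpod/define"
)

func main() {
	// Per-container statuses now use define.ContainerStatus values.
	statuses := map[string]define.ContainerStatus{
		"ctr1": define.ContainerStateRunning,
		"ctr2": define.ContainerStateExited,
	}
	status, err := shared.CreatePodStatusResults(statuses)
	if err != nil {
		panic(err)
	}
	fmt.Println(status) // presumably "Running", since one container runs
}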

@ -1,3 +1,5 @@
//+build !remoteclient
package main
import (
@ -12,8 +14,9 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
units "github.com/docker/go-units"
"github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -358,7 +361,7 @@ func ctrIsActive(ctr *libpod.Container) (bool, error) {
if err != nil {
return false, err
}
return state == libpod.ContainerStatePaused || state == libpod.ContainerStateRunning, nil
return state == define.ContainerStatePaused || state == define.ContainerStateRunning, nil
}
func activeContainers(containers []*libpod.Container) (map[string]*libpod.Container, error) {


@ -7,14 +7,14 @@ import (
"text/tabwriter"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
func getDescriptorString() string {
descriptors, err := libpod.GetContainerPidInformationDescriptors()
descriptors, err := util.GetContainerPidInformationDescriptors()
if err == nil {
return fmt.Sprintf(`
Format Descriptors:
@ -67,7 +67,7 @@ func topCmd(c *cliconfig.TopValues) error {
args := c.InputArgs
if c.ListDescriptors {
descriptors, err := libpod.GetContainerPidInformationDescriptors()
descriptors, err := util.GetContainerPidInformationDescriptors()
if err != nil {
return err
}


@ -1,4 +1,4 @@
// +build linux
// +build !remoteclient
package main
@ -8,8 +8,7 @@ import (
"os/exec"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@ -41,10 +40,10 @@ func init() {
flags.SetInterspersed(false)
}
func unshareEnv(config *libpod.RuntimeConfig) []string {
func unshareEnv(graphroot, runroot string) []string {
return append(os.Environ(), "_CONTAINERS_USERNS_CONFIGURED=done",
fmt.Sprintf("CONTAINERS_GRAPHROOT=%s", config.StorageConfig.GraphRoot),
fmt.Sprintf("CONTAINERS_RUNROOT=%s", config.StorageConfig.RunRoot))
fmt.Sprintf("CONTAINERS_GRAPHROOT=%s", graphroot),
fmt.Sprintf("CONTAINERS_RUNROOT=%s", runroot))
}
// unshareCmd execs whatever using the ID mappings that we want to use for ourselves
@ -63,7 +62,7 @@ func unshareCmd(c *cliconfig.PodmanCommand) error {
c.InputArgs = []string{shell}
}
runtime, err := libpodruntime.GetRuntime(getContext(), c)
runtime, err := adapter.GetRuntime(getContext(), c)
if err != nil {
return err
}
@ -73,7 +72,7 @@ func unshareCmd(c *cliconfig.PodmanCommand) error {
}
cmd := exec.Command(c.InputArgs[0], c.InputArgs[1:]...)
cmd.Env = unshareEnv(runtimeConfig)
cmd.Env = unshareEnv(runtimeConfig.StorageConfig.GraphRoot, runtimeConfig.StorageConfig.RunRoot)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr


@ -10,7 +10,7 @@ import (
"github.com/containers/buildah/pkg/formats"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@ -40,7 +40,7 @@ func init() {
// versionCmd gets and prints version info for version command
func versionCmd(c *cliconfig.VersionValues) error {
clientVersion, err := libpod.GetVersion()
clientVersion, err := define.GetVersion()
if err != nil {
return errors.Wrapf(err, "unable to determine version")
}
@ -85,7 +85,7 @@ func versionCmd(c *cliconfig.VersionValues) error {
return nil
}
func formatVersion(writer io.Writer, version libpod.Version) {
func formatVersion(writer io.Writer, version define.Version) {
fmt.Fprintf(writer, "Version:\t%s\n", version.Version)
fmt.Fprintf(writer, "RemoteAPI Version:\t%d\n", version.RemoteAPIVersion)
fmt.Fprintf(writer, "Go Version:\t%s\n", version.GoVersion)

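A minimal sketch of the relocated version API, assuming define.GetVersion and define.Version behave as the calls above indicate:

package main

import (
	"fmt"
	"os"

	"github.com/containers/libpod/libpod/define"
)

func main() {
	// libpod.GetVersion/libpod.Version are now define.GetVersion/define.Version.
	v, err := define.GetVersion()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("Version: %s\nGo Version: %s\n", v.Version, v.GoVersion)
}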

@ -7,6 +7,7 @@ import (
"testing"
"time"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/opencontainers/runtime-tools/generate"
@ -49,7 +50,7 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
},
},
state: &ContainerState{
State: ContainerStateRunning,
State: define.ContainerStateRunning,
ConfigPath: "/does/not/exist/specs/" + id,
RunDir: "/does/not/exist/tmp/",
Mounted: true,


@ -21,31 +21,6 @@ import (
"github.com/pkg/errors"
)
// ContainerStatus represents the current state of a container
type ContainerStatus int
const (
// ContainerStateUnknown indicates that the container is in an error
// state where information about it cannot be retrieved
ContainerStateUnknown ContainerStatus = iota
// ContainerStateConfigured indicates that the container has had its
// storage configured but it has not been created in the OCI runtime
ContainerStateConfigured ContainerStatus = iota
// ContainerStateCreated indicates the container has been created in
// the OCI runtime but not started
ContainerStateCreated ContainerStatus = iota
// ContainerStateRunning indicates the container is currently executing
ContainerStateRunning ContainerStatus = iota
// ContainerStateStopped indicates that the container was running but has
// exited
ContainerStateStopped ContainerStatus = iota
// ContainerStatePaused indicates that the container has been paused
ContainerStatePaused ContainerStatus = iota
// ContainerStateExited indicates that the container has stopped and been
// cleaned up
ContainerStateExited ContainerStatus = iota
)
// CgroupfsDefaultCgroupParent is the cgroup parent for CGroupFS in libpod
const CgroupfsDefaultCgroupParent = "/libpod_parent"
@ -169,7 +144,7 @@ type Container struct {
// It is stored on disk in a tmpfs and recreated on reboot
type ContainerState struct {
// The current state of the running container
State ContainerStatus `json:"state"`
State define.ContainerStatus `json:"state"`
// The path to the JSON OCI runtime spec for this container
ConfigPath string `json:"configPath,omitempty"`
// RunDir is a per-boot directory for container content
@ -428,51 +403,6 @@ type ContainerNamedVolume struct {
Options []string `json:"options,omitempty"`
}
// ContainerStatus returns a string representation for users
// of a container state
func (t ContainerStatus) String() string {
switch t {
case ContainerStateUnknown:
return "unknown"
case ContainerStateConfigured:
return "configured"
case ContainerStateCreated:
return "created"
case ContainerStateRunning:
return "running"
case ContainerStateStopped:
return "stopped"
case ContainerStatePaused:
return "paused"
case ContainerStateExited:
return "exited"
}
return "bad state"
}
// StringToContainerStatus converts a string representation of a containers
// status into an actual container status type
func StringToContainerStatus(status string) (ContainerStatus, error) {
switch status {
case ContainerStateUnknown.String():
return ContainerStateUnknown, nil
case ContainerStateConfigured.String():
return ContainerStateConfigured, nil
case ContainerStateCreated.String():
return ContainerStateCreated, nil
case ContainerStateRunning.String():
return ContainerStateRunning, nil
case ContainerStateStopped.String():
return ContainerStateStopped, nil
case ContainerStatePaused.String():
return ContainerStatePaused, nil
case ContainerStateExited.String():
return ContainerStateExited, nil
default:
return ContainerStateUnknown, errors.Wrapf(define.ErrInvalidArg, "unknown container state: %s", status)
}
}
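These definitions presumably land in libpod/define essentially unchanged, given the define.ContainerStatus and define.ContainerState* references throughout this commit; a condensed sketch:

package define

// ContainerStatus represents the current state of a container.
type ContainerStatus int

const (
	ContainerStateUnknown    ContainerStatus = iota // state cannot be retrieved
	ContainerStateConfigured                        // storage configured, not created in the OCI runtime
	ContainerStateCreated                           // created in the OCI runtime, not started
	ContainerStateRunning                           // currently executing
	ContainerStateStopped                           // was running, has exited
	ContainerStatePaused                            // paused
	ContainerStateExited                            // stopped and cleaned up
)

(String() and StringToContainerStatus presumably move alongside the type.)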
// Config accessors
// Unlocked
@ -823,13 +753,13 @@ func (c *Container) WorkingDir() string {
// Require locking
// State returns the current state of the container
func (c *Container) State() (ContainerStatus, error) {
func (c *Container) State() (define.ContainerStatus, error) {
if !c.batched {
c.lock.Lock()
defer c.lock.Unlock()
if err := c.syncContainer(); err != nil {
return ContainerStateUnknown, err
return define.ContainerStateUnknown, err
}
}
return c.state.State, nil
@ -1097,7 +1027,7 @@ func (c *Container) NamespacePath(ns LinuxNS) (string, error) {
}
}
if c.state.State != ContainerStateRunning && c.state.State != ContainerStatePaused {
if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID())
}

libpod/container.log.go (new file, 73 lines)

@ -0,0 +1,73 @@
package libpod
import (
"os"
"github.com/containers/libpod/libpod/logs"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Log is a runtime function that can read one or more container logs.
func (r *Runtime) Log(containers []*Container, options *logs.LogOptions, logChannel chan *logs.LogLine) error {
for _, ctr := range containers {
if err := ctr.ReadLog(options, logChannel); err != nil {
return err
}
}
return nil
}
// ReadLog reads a container's log based on the input options and returns log lines over a channel
func (c *Container) ReadLog(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
// TODO Skip sending logs until journald logs can be read
// TODO make this not a magic string
if c.LogDriver() == JournaldLogging {
return c.readFromJournal(options, logChannel)
}
return c.readFromLogFile(options, logChannel)
}
func (c *Container) readFromLogFile(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
t, tailLog, err := logs.GetLogFile(c.LogPath(), options)
if err != nil {
// If the log file does not exist, this is not fatal.
if os.IsNotExist(errors.Cause(err)) {
return nil
}
return errors.Wrapf(err, "unable to read log file %s for %s ", c.ID(), c.LogPath())
}
options.WaitGroup.Add(1)
if len(tailLog) > 0 {
for _, nll := range tailLog {
nll.CID = c.ID()
if nll.Since(options.Since) {
logChannel <- nll
}
}
}
go func() {
var partial string
for line := range t.Lines {
nll, err := logs.NewLogLine(line.Text)
if err != nil {
logrus.Error(err)
continue
}
if nll.Partial() {
partial = partial + nll.Msg
continue
} else if !nll.Partial() && len(partial) > 1 {
nll.Msg = partial + nll.Msg
partial = ""
}
nll.CID = c.ID()
if nll.Since(options.Since) {
logChannel <- nll
}
}
options.WaitGroup.Done()
}()
return nil
}
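
A hypothetical consumer of the new Runtime.Log API, inferred from the signatures above (the WaitGroup on LogOptions is incremented by readFromLogFile, so callers wait on it before closing the channel):

package main

import (
	"fmt"
	"sync"

	"github.com/containers/libpod/libpod"
	"github.com/containers/libpod/libpod/logs"
)

// tailContainers drains log lines from one or more containers. This is a
// sketch: it assumes non-follow mode lets each per-container reader finish
// and call Done on the shared WaitGroup.
func tailContainers(rt *libpod.Runtime, ctrs []*libpod.Container) error {
	opts := &logs.LogOptions{
		Follow:    false,
		WaitGroup: &sync.WaitGroup{},
	}
	logChannel := make(chan *logs.LogLine)
	go func() {
		for line := range logChannel {
			fmt.Println(line.CID, line.Msg)
		}
	}()
	if err := rt.Log(ctrs, opts, logChannel); err != nil {
		return err
	}
	opts.WaitGroup.Wait() // all readers have sent their lines
	close(logChannel)
	return nil
}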


@ -37,9 +37,9 @@ func (c *Container) Init(ctx context.Context) (err error) {
}
}
if !(c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateStopped ||
c.state.State == ContainerStateExited) {
if !(c.state.State == define.ContainerStateConfigured ||
c.state.State == define.ContainerStateStopped ||
c.state.State == define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
}
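The same guard, as a small free-standing predicate (canInit is illustrative, not from this commit), for readers tracing the lifecycle rules:

// canInit mirrors the check above: only configured, stopped, or exited
// containers may be (re)initialized in the OCI runtime.
// (Assumes: import "github.com/containers/libpod/libpod/define")
func canInit(s define.ContainerStatus) bool {
	return s == define.ContainerStateConfigured ||
		s == define.ContainerStateStopped ||
		s == define.ContainerStateExited
}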
@ -55,7 +55,7 @@ func (c *Container) Init(ctx context.Context) (err error) {
return err
}
if c.state.State == ContainerStateStopped {
if c.state.State == define.ContainerStateStopped {
// Reinitialize the container
return c.reinit(ctx, false)
}
@ -178,14 +178,14 @@ func (c *Container) StopWithTimeout(timeout uint) error {
}
}
if c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateUnknown ||
c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStateConfigured ||
c.state.State == define.ContainerStateUnknown ||
c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created, running, or stopped containers. %s is in state %s", c.ID(), c.state.State.String())
}
if c.state.State == ContainerStateStopped ||
c.state.State == ContainerStateExited {
if c.state.State == define.ContainerStateStopped ||
c.state.State == define.ContainerStateExited {
return define.ErrCtrStopped
}
defer c.newContainerEvent(events.Stop)
@ -203,7 +203,7 @@ func (c *Container) Kill(signal uint) error {
}
}
if c.state.State != ContainerStateRunning {
if c.state.State != define.ContainerStateRunning {
return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
}
@ -241,7 +241,7 @@ func (c *Container) Exec(tty, privileged bool, env, cmd []string, user, workDir
conState := c.state.State
// TODO can probably relax this once we track exec sessions
if conState != ContainerStateRunning {
if conState != define.ContainerStateRunning {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot exec into container that is not running")
}
if privileged || c.config.Privileged {
@ -399,9 +399,9 @@ func (c *Container) Attach(streams *AttachStreams, keys string, resize <-chan re
c.lock.Unlock()
}
if c.state.State != ContainerStateCreated &&
c.state.State != ContainerStateRunning &&
c.state.State != ContainerStateExited {
if c.state.State != define.ContainerStateCreated &&
c.state.State != define.ContainerStateRunning &&
c.state.State != define.ContainerStateExited {
return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
}
defer c.newContainerEvent(events.Attach)
@ -440,7 +440,7 @@ func (c *Container) Unmount(force bool) error {
return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID())
}
if mounted == 1 {
if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
}
if len(c.state.ExecSessions) != 0 {
@ -464,10 +464,10 @@ func (c *Container) Pause() error {
}
}
if c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is already paused", c.ID())
}
if c.state.State != ContainerStateRunning {
if c.state.State != define.ContainerStateRunning {
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
}
defer c.newContainerEvent(events.Pause)
@ -485,7 +485,7 @@ func (c *Container) Unpause() error {
}
}
if c.state.State != ContainerStatePaused {
if c.state.State != define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
}
defer c.newContainerEvent(events.Unpause)
@ -578,7 +578,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
}
// Check if state is good
if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
}
@ -656,9 +656,9 @@ func (c *Container) Sync() error {
// If runtime knows about the container, update its status in runtime
// And then save back to disk
if (c.state.State != ContainerStateUnknown) &&
(c.state.State != ContainerStateConfigured) &&
(c.state.State != ContainerStateExited) {
if (c.state.State != define.ContainerStateUnknown) &&
(c.state.State != define.ContainerStateConfigured) &&
(c.state.State != define.ContainerStateExited) {
oldState := c.state.State
if err := c.ociRuntime.updateContainerStatus(c, true); err != nil {
return err
@ -687,27 +687,27 @@ func (c *Container) Refresh(ctx context.Context) error {
}
wasCreated := false
if c.state.State == ContainerStateCreated {
if c.state.State == define.ContainerStateCreated {
wasCreated = true
}
wasRunning := false
if c.state.State == ContainerStateRunning {
if c.state.State == define.ContainerStateRunning {
wasRunning = true
}
wasPaused := false
if c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStatePaused {
wasPaused = true
}
// First, unpause the container if it's paused
if c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStatePaused {
if err := c.unpause(); err != nil {
return err
}
}
// Next, if the container is running, stop it
if c.state.State == ContainerStateRunning {
if c.state.State == define.ContainerStateRunning {
if err := c.stop(c.config.StopTimeout); err != nil {
return err
}
@ -724,7 +724,7 @@ func (c *Container) Refresh(ctx context.Context) error {
// If the container is in ContainerStateStopped, we need to delete it
// from the runtime and clear conmon state
if c.state.State == ContainerStateStopped {
if c.state.State == define.ContainerStateStopped {
if err := c.delete(ctx); err != nil {
return err
}


@ -9,6 +9,7 @@ import (
"github.com/containers/buildah"
"github.com/containers/buildah/util"
is "github.com/containers/image/storage"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
"github.com/pkg/errors"
@ -48,7 +49,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
}
}
if c.state.State == ContainerStateRunning && options.Pause {
if c.state.State == define.ContainerStateRunning && options.Pause {
if err := c.ociRuntime.pauseContainer(c); err != nil {
return nil, errors.Wrapf(err, "error pausing container %q", c.ID())
}


@ -244,13 +244,13 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
// Start the container (only if it is not running)
if !ctrErrored {
if !restart && node.container.state.State != ContainerStateRunning {
if !restart && node.container.state.State != define.ContainerStateRunning {
if err := node.container.initAndStart(ctx); err != nil {
ctrErrored = true
ctrErrors[node.id] = err
}
}
if restart && node.container.state.State != ContainerStatePaused && node.container.state.State != ContainerStateUnknown {
if restart && node.container.state.State != define.ContainerStatePaused && node.container.state.State != define.ContainerStateUnknown {
if err := node.container.restartWithTimeout(ctx, node.container.config.StopTimeout); err != nil {
ctrErrored = true
ctrErrors[node.id] = err


@ -5,6 +5,7 @@ import (
"time"
"github.com/containers/image/manifest"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/driver"
"github.com/cri-o/ocicni/pkg/ocicni"
spec "github.com/opencontainers/runtime-spec/specs-go"
@ -255,8 +256,8 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
State: &InspectContainerState{
OciVersion: spec.Version,
Status: runtimeInfo.State.String(),
Running: runtimeInfo.State == ContainerStateRunning,
Paused: runtimeInfo.State == ContainerStatePaused,
Running: runtimeInfo.State == define.ContainerStateRunning,
Paused: runtimeInfo.State == define.ContainerStatePaused,
OOMKilled: runtimeInfo.OOMKilled,
Dead: runtimeInfo.State.String() == "bad state",
Pid: runtimeInfo.PID,


@ -156,7 +156,7 @@ func (c *Container) waitForExitFileAndSync() error {
// Reset our state
c.state.ExitCode = -1
c.state.FinishedTime = time.Now()
c.state.State = ContainerStateStopped
c.state.State = define.ContainerStateStopped
if err2 := c.save(); err2 != nil {
logrus.Errorf("Error saving container %s state: %v", c.ID(), err2)
@ -241,9 +241,9 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
// Is the container running again?
// If so, we don't have to do anything
if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
return false, nil
} else if c.state.State == ContainerStateUnknown {
} else if c.state.State == define.ContainerStateUnknown {
return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt!")
}
@ -267,13 +267,13 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (restarted bool, er
return false, err
}
if c.state.State == ContainerStateStopped {
if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, true); err != nil {
return false, err
}
} else if c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateExited {
} else if c.state.State == define.ContainerStateConfigured ||
c.state.State == define.ContainerStateExited {
// Initialize the container
if err := c.init(ctx, true); err != nil {
return false, err
@ -295,9 +295,9 @@ func (c *Container) syncContainer() error {
}
// If runtime knows about the container, update its status in runtime
// And then save back to disk
if (c.state.State != ContainerStateUnknown) &&
(c.state.State != ContainerStateConfigured) &&
(c.state.State != ContainerStateExited) {
if (c.state.State != define.ContainerStateUnknown) &&
(c.state.State != define.ContainerStateConfigured) &&
(c.state.State != define.ContainerStateExited) {
oldState := c.state.State
// TODO: optionally replace this with a stat for the exit file
if err := c.ociRuntime.updateContainerStatus(c, false); err != nil {
@ -307,8 +307,8 @@ func (c *Container) syncContainer() error {
if c.state.State != oldState {
// Check for a restart policy match
if c.config.RestartPolicy != RestartPolicyNone && c.config.RestartPolicy != RestartPolicyNo &&
(oldState == ContainerStateRunning || oldState == ContainerStatePaused) &&
(c.state.State == ContainerStateStopped || c.state.State == ContainerStateExited) &&
(oldState == define.ContainerStateRunning || oldState == define.ContainerStatePaused) &&
(c.state.State == define.ContainerStateStopped || c.state.State == define.ContainerStateExited) &&
!c.state.StoppedByUser {
c.state.RestartPolicyMatch = true
}
@ -336,7 +336,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
}
if c.state.State != ContainerStateConfigured {
if c.state.State != define.ContainerStateConfigured {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
}
@ -418,7 +418,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
// Tear down a container's storage prior to removal
func (c *Container) teardownStorage() error {
if c.state.State == ContainerStateRunning || c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStateRunning || c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
}
@ -454,8 +454,8 @@ func resetState(state *ContainerState) error {
state.PID = 0
state.Mountpoint = ""
state.Mounted = false
if state.State != ContainerStateExited {
state.State = ContainerStateConfigured
if state.State != define.ContainerStateExited {
state.State = define.ContainerStateConfigured
}
state.ExecSessions = make(map[string]*ExecSession)
state.NetworkStatus = nil
@ -609,7 +609,7 @@ func (c *Container) isStopped() (bool, error) {
if err != nil {
return true, err
}
return (c.state.State != ContainerStateRunning && c.state.State != ContainerStatePaused), nil
return (c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused), nil
}
// save container state to the database
@ -625,10 +625,10 @@ func (c *Container) save() error {
// Otherwise, this function will return with error if there are dependencies of this container that aren't running.
func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err error) {
// Container must be created or stopped to be started
if !(c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateCreated ||
c.state.State == ContainerStateStopped ||
c.state.State == ContainerStateExited) {
if !(c.state.State == define.ContainerStateConfigured ||
c.state.State == define.ContainerStateCreated ||
c.state.State == define.ContainerStateStopped ||
c.state.State == define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
}
@ -654,13 +654,13 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (err err
return err
}
if c.state.State == ContainerStateStopped {
if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, false); err != nil {
return err
}
} else if c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateExited {
} else if c.state.State == define.ContainerStateConfigured ||
c.state.State == define.ContainerStateExited {
// Or initialize it if necessary
if err := c.init(ctx, false); err != nil {
return err
@ -763,7 +763,7 @@ func (c *Container) getAllDependencies(visited map[string]*Container) error {
}
// if the dependency is already running, we can assume its dependencies are also running
// so no need to add them to those we need to start
if status != ContainerStateRunning {
if status != define.ContainerStateRunning {
visited[depID] = dep
if err := dep.getAllDependencies(visited); err != nil {
return err
@ -795,7 +795,7 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
if err != nil {
return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID())
}
if state != ContainerStateRunning {
if state != define.ContainerStateRunning {
notRunning = append(notRunning, dep)
}
depCtrs[dep] = depCtr
@ -824,7 +824,7 @@ func (c *Container) checkDependenciesRunningLocked(depCtrs map[string]*Container
return nil, err
}
if depCtr.state.State != ContainerStateRunning {
if depCtr.state.State != define.ContainerStateRunning {
notRunning = append(notRunning, dep)
}
}
@ -875,7 +875,7 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
c.state.ExitCode = 0
c.state.Exited = false
c.state.State = ContainerStateCreated
c.state.State = define.ContainerStateCreated
c.state.StoppedByUser = false
c.state.RestartPolicyMatch = false
@ -906,7 +906,7 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// If the container is not ContainerStateStopped or
// ContainerStateCreated, do nothing.
if c.state.State != ContainerStateStopped && c.state.State != ContainerStateCreated {
if c.state.State != define.ContainerStateStopped && c.state.State != define.ContainerStateCreated {
return nil
}
@ -922,10 +922,10 @@ func (c *Container) cleanupRuntime(ctx context.Context) error {
// If we were Stopped, we are now Exited, as we've removed ourself
// from the runtime.
// If we were Created, we are now Configured.
if c.state.State == ContainerStateStopped {
c.state.State = ContainerStateExited
} else if c.state.State == ContainerStateCreated {
c.state.State = ContainerStateConfigured
if c.state.State == define.ContainerStateStopped {
c.state.State = define.ContainerStateExited
} else if c.state.State == define.ContainerStateCreated {
c.state.State = define.ContainerStateConfigured
}
if c.valid {
@ -964,16 +964,16 @@ func (c *Container) reinit(ctx context.Context, retainRetries bool) error {
// Does not lock or check validity
func (c *Container) initAndStart(ctx context.Context) (err error) {
// If we are ContainerStateUnknown, throw an error
if c.state.State == ContainerStateUnknown {
if c.state.State == define.ContainerStateUnknown {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
}
// If we are running, do nothing
if c.state.State == ContainerStateRunning {
if c.state.State == define.ContainerStateRunning {
return nil
}
// If we are paused, throw an error
if c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
}
@ -991,14 +991,14 @@ func (c *Container) initAndStart(ctx context.Context) (err error) {
// If we are ContainerStateStopped we need to remove from runtime
// And reset to ContainerStateConfigured
if c.state.State == ContainerStateStopped {
if c.state.State == define.ContainerStateStopped {
logrus.Debugf("Recreating container %s in OCI runtime", c.ID())
if err := c.reinit(ctx, false); err != nil {
return err
}
} else if c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateExited {
} else if c.state.State == define.ContainerStateConfigured ||
c.state.State == define.ContainerStateExited {
if err := c.init(ctx, false); err != nil {
return err
}
@ -1019,7 +1019,7 @@ func (c *Container) start() error {
}
logrus.Debugf("Started container %s", c.ID())
c.state.State = ContainerStateRunning
c.state.State = define.ContainerStateRunning
if c.config.HealthCheckConfig != nil {
if err := c.updateHealthStatus(HealthCheckStarting); err != nil {
@ -1060,7 +1060,7 @@ func (c *Container) pause() error {
logrus.Debugf("Paused container %s", c.ID())
c.state.State = ContainerStatePaused
c.state.State = define.ContainerStatePaused
return c.save()
}
@ -1073,20 +1073,20 @@ func (c *Container) unpause() error {
logrus.Debugf("Unpaused container %s", c.ID())
c.state.State = ContainerStateRunning
c.state.State = define.ContainerStateRunning
return c.save()
}
// Internal, non-locking function to restart a container
func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err error) {
if c.state.State == ContainerStateUnknown || c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStateUnknown || c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
}
c.newContainerEvent(events.Restart)
if c.state.State == ContainerStateRunning {
if c.state.State == define.ContainerStateRunning {
if err := c.stop(timeout); err != nil {
return err
}
@ -1102,13 +1102,13 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (err e
return err
}
if c.state.State == ContainerStateStopped {
if c.state.State == define.ContainerStateStopped {
// Reinitialize the container if we need to
if err := c.reinit(ctx, false); err != nil {
return err
}
} else if c.state.State == ContainerStateConfigured ||
c.state.State == ContainerStateExited {
} else if c.state.State == define.ContainerStateConfigured ||
c.state.State == define.ContainerStateExited {
// Initialize the container
if err := c.init(ctx, false); err != nil {
return err
@ -1482,12 +1482,12 @@ func (c *Container) copyWithTarFromImage(src, dest string) error {
// If it is, we'll remove the container anyways.
// Returns nil if safe to remove, or an error describing why it's unsafe if not.
func (c *Container) checkReadyForRemoval() error {
if c.state.State == ContainerStateUnknown {
if c.state.State == define.ContainerStateUnknown {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
}
if c.state.State == ContainerStateRunning ||
c.state.State == ContainerStatePaused {
if c.state.State == define.ContainerStateRunning ||
c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed", c.ID(), c.state.State.String())
}

View File

@ -573,7 +573,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
return err
}
if c.state.State != ContainerStateRunning {
if c.state.State != define.ContainerStateRunning {
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
}
@ -605,7 +605,7 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
logrus.Debugf("Checkpointed container %s", c.ID())
if !options.KeepRunning {
c.state.State = ContainerStateStopped
c.state.State = define.ContainerStateStopped
// Cleanup Storage and Network
if err := c.cleanup(ctx); err != nil {
@ -664,7 +664,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
return err
}
if (c.state.State != ContainerStateConfigured) && (c.state.State != ContainerStateExited) {
if (c.state.State != define.ContainerStateConfigured) && (c.state.State != define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
}
@ -781,7 +781,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
logrus.Debugf("Restored container %s", c.ID())
c.state.State = ContainerStateRunning
c.state.State = define.ContainerStateRunning
if !options.Keep {
// Delete all checkpoint related files. At this point, in theory, all files

View File

@ -9,6 +9,7 @@ import (
"strings"
"time"
"github.com/containers/libpod/libpod/logs"
journal "github.com/coreos/go-systemd/sdjournal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -27,7 +28,7 @@ const (
bufLen = 16384
)
func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLine) error {
func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
var config journal.JournalReaderConfig
config.NumFromTail = options.Tail
config.Formatter = journalFormatter
@ -79,7 +80,7 @@ func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLin
// because we are reusing bytes, we need to make
// sure the old data doesn't get into the new line
bytestr := string(bytes[:ec])
logLine, err2 := newLogLine(bytestr)
logLine, err2 := logs.NewLogLine(bytestr)
if err2 != nil {
logrus.Error(err2)
continue
@ -98,7 +99,7 @@ func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLin
func journalFormatter(entry *journal.JournalEntry) (string, error) {
usec := entry.RealtimeTimestamp
tsString := time.Unix(0, int64(usec)*int64(time.Microsecond)).Format(logTimeFormat)
tsString := time.Unix(0, int64(usec)*int64(time.Microsecond)).Format(logs.LogTimeFormat)
output := fmt.Sprintf("%s ", tsString)
priority, ok := entry.Fields["PRIORITY"]
if !ok {
@ -114,9 +115,9 @@ func journalFormatter(entry *journal.JournalEntry) (string, error) {
// if CONTAINER_PARTIAL_MESSAGE is defined, the log type is "P"
if _, ok := entry.Fields["CONTAINER_PARTIAL_MESSAGE"]; ok {
output += fmt.Sprintf("%s ", partialLogType)
output += fmt.Sprintf("%s ", logs.PartialLogType)
} else {
output += fmt.Sprintf("%s ", fullLogType)
output += fmt.Sprintf("%s ", logs.FullLogType)
}
// Finally, append the message
@ -129,12 +130,12 @@ func journalFormatter(entry *journal.JournalEntry) (string, error) {
}
type FollowBuffer struct {
logChannel chan *LogLine
logChannel chan *logs.LogLine
}
func (f FollowBuffer) Write(p []byte) (int, error) {
bytestr := string(p)
logLine, err := newLogLine(bytestr)
logLine, err := logs.NewLogLine(bytestr)
if err != nil {
return -1, err
}

View File

@ -4,9 +4,10 @@ package libpod
import (
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/logs"
"github.com/pkg/errors"
)
func (c *Container) readFromJournal(options *LogOptions, logChannel chan *LogLine) error {
func (c *Container) readFromJournal(options *logs.LogOptions, logChannel chan *logs.LogLine) error {
return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux")
}

View File

@ -6,6 +6,7 @@ import (
"strconv"
"strings"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/psgo"
"github.com/pkg/errors"
@ -18,7 +19,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
if err != nil {
return nil, errors.Wrapf(err, "unable to look up state for %s", c.ID())
}
if conStat != ContainerStateRunning {
if conStat != define.ContainerStateRunning {
return nil, errors.Errorf("top can only be used on running containers")
}
@ -60,9 +61,3 @@ func (c *Container) GetContainerPidInformation(descriptors []string) ([]string,
}
return res, nil
}
// GetContainerPidInformationDescriptors returns a string slice of all supported
// format descriptors of GetContainerPidInformation.
func GetContainerPidInformationDescriptors() ([]string, error) {
return psgo.ListDescriptors(), nil
}

View File

@ -15,9 +15,3 @@ import "github.com/containers/libpod/libpod/define"
func (c *Container) GetContainerPidInformation(descriptors []string) ([]string, error) {
return nil, define.ErrNotImplemented
}
// GetContainerPidInformationDescriptors returns a string slice of all supported
// format descriptors of GetContainerPidInformation.
func GetContainerPidInformationDescriptors() ([]string, error) {
return nil, define.ErrNotImplemented
}

View File

@ -3,8 +3,18 @@ package define
var (
// DefaultInitPath is the default path to the container-init binary
DefaultInitPath = "/usr/libexec/podman/catatonit"
// DefaultInfraImage to use for infra container
DefaultInfraImage = "k8s.gcr.io/pause:3.1"
// DefaultInfraCommand to be run in an infra container
DefaultInfraCommand = "/pause"
)
// CtrRemoveTimeout is the default number of seconds to wait after stopping a container
// before sending the kill signal
const CtrRemoveTimeout = 10
// InfoData holds the info type, i.e. store, host, etc., and the data for each type
type InfoData struct {
Type string
Data map[string]interface{}
}

View File

@ -0,0 +1,73 @@
package define
import "github.com/pkg/errors"
// ContainerStatus represents the current state of a container
type ContainerStatus int
const (
// ContainerStateUnknown indicates that the container is in an error
// state where information about it cannot be retrieved
ContainerStateUnknown ContainerStatus = iota
// ContainerStateConfigured indicates that the container has had its
// storage configured but it has not been created in the OCI runtime
ContainerStateConfigured ContainerStatus = iota
// ContainerStateCreated indicates the container has been created in
// the OCI runtime but not started
ContainerStateCreated ContainerStatus = iota
// ContainerStateRunning indicates the container is currently executing
ContainerStateRunning ContainerStatus = iota
// ContainerStateStopped indicates that the container was running but has
// exited
ContainerStateStopped ContainerStatus = iota
// ContainerStatePaused indicates that the container has been paused
ContainerStatePaused ContainerStatus = iota
// ContainerStateExited indicates that the container has stopped and been
// cleaned up
ContainerStateExited ContainerStatus = iota
)
// String returns a user-facing string representation
// of a container state
func (t ContainerStatus) String() string {
switch t {
case ContainerStateUnknown:
return "unknown"
case ContainerStateConfigured:
return "configured"
case ContainerStateCreated:
return "created"
case ContainerStateRunning:
return "running"
case ContainerStateStopped:
return "stopped"
case ContainerStatePaused:
return "paused"
case ContainerStateExited:
return "exited"
}
return "bad state"
}
// StringToContainerStatus converts a string representation of a container's
// status into an actual container status type
func StringToContainerStatus(status string) (ContainerStatus, error) {
switch status {
case ContainerStateUnknown.String():
return ContainerStateUnknown, nil
case ContainerStateConfigured.String():
return ContainerStateConfigured, nil
case ContainerStateCreated.String():
return ContainerStateCreated, nil
case ContainerStateRunning.String():
return ContainerStateRunning, nil
case ContainerStateStopped.String():
return ContainerStateStopped, nil
case ContainerStatePaused.String():
return ContainerStatePaused, nil
case ContainerStateExited.String():
return ContainerStateExited, nil
default:
return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
}
}
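For callers, the two relocated helpers are symmetric, so a status survives a round trip through its string form. A minimal sketch, assuming only the define package introduced above:
package main
import (
    "fmt"
    "github.com/containers/libpod/libpod/define"
)
func main() {
    // "running" -> ContainerStateRunning and back again.
    s := define.ContainerStateRunning.String()
    st, err := define.StringToContainerStatus(s)
    if err != nil {
        fmt.Println("unexpected:", err)
        return
    }
    fmt.Println(st == define.ContainerStateRunning) // true
}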

View File

@ -1,4 +1,4 @@
package libpod
package define
import (
"runtime"

View File

@ -9,6 +9,7 @@ import (
"strings"
"time"
"github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -169,7 +170,7 @@ func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) {
if err != nil {
return HealthCheckInternalError, err
}
if cstate != ContainerStateRunning {
if cstate != define.ContainerStateRunning {
return HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
}
if !c.HasHealthCheck() {

View File

@ -19,12 +19,6 @@ import (
"github.com/pkg/errors"
)
// InfoData holds the info type, i.e store, host etc and the data for each type
type InfoData struct {
Type string
Data map[string]interface{}
}
// top-level "host" info
func (r *Runtime) hostInfo() (map[string]interface{}, error) {
// let's say OS, arch, number of cpus, amount of memory, maybe os distribution/version, hostname, kernel version, uptime

View File

@ -1,31 +1,29 @@
package libpod
package logs
import (
"fmt"
"io/ioutil"
"os"
"strings"
"sync"
"time"
"github.com/hpcloud/tail"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const (
// logTimeFormat is the time format used in the log.
// LogTimeFormat is the time format used in the log.
// It is a modified version of RFC3339Nano that guarantees trailing
// zeroes are not trimmed, taken from
// https://github.com/golang/go/issues/19635
logTimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
LogTimeFormat = "2006-01-02T15:04:05.000000000Z07:00"
// partialLogType signifies a log line that exceeded the buffer
// PartialLogType signifies a log line that exceeded the buffer
// length and needed to spill into a new line
partialLogType = "P"
PartialLogType = "P"
// fullLogType signifies a log line is full
fullLogType = "F"
// FullLogType signifies a log line is full
FullLogType = "F"
)
// LogOptions is the options you can use for logs
@ -48,72 +46,8 @@ type LogLine struct {
CID string
}
// Log is a runtime function that can read one or more container logs.
func (r *Runtime) Log(containers []*Container, options *LogOptions, logChannel chan *LogLine) error {
for _, ctr := range containers {
if err := ctr.ReadLog(options, logChannel); err != nil {
return err
}
}
return nil
}
// ReadLog reads a containers log based on the input options and returns loglines over a channel
func (c *Container) ReadLog(options *LogOptions, logChannel chan *LogLine) error {
// TODO Skip sending logs until journald logs can be read
// TODO make this not a magic string
if c.LogDriver() == JournaldLogging {
return c.readFromJournal(options, logChannel)
}
return c.readFromLogFile(options, logChannel)
}
func (c *Container) readFromLogFile(options *LogOptions, logChannel chan *LogLine) error {
t, tailLog, err := getLogFile(c.LogPath(), options)
if err != nil {
// If the log file does not exist, this is not fatal.
if os.IsNotExist(errors.Cause(err)) {
return nil
}
return errors.Wrapf(err, "unable to read log file %s for %s ", c.ID(), c.LogPath())
}
options.WaitGroup.Add(1)
if len(tailLog) > 0 {
for _, nll := range tailLog {
nll.CID = c.ID()
if nll.Since(options.Since) {
logChannel <- nll
}
}
}
go func() {
var partial string
for line := range t.Lines {
nll, err := newLogLine(line.Text)
if err != nil {
logrus.Error(err)
continue
}
if nll.Partial() {
partial = partial + nll.Msg
continue
} else if !nll.Partial() && len(partial) > 1 {
nll.Msg = partial
partial = ""
}
nll.CID = c.ID()
if nll.Since(options.Since) {
logChannel <- nll
}
}
options.WaitGroup.Done()
}()
return nil
}
// getLogFile returns an hp tail for a container given options
func getLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error) {
// GetLogFile returns an hp tail for a container given options
func GetLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error) {
var (
whence int
err error
@ -154,7 +88,7 @@ func getTailLog(path string, tail int) ([]*LogLine, error) {
if len(splitContent[i]) == 0 {
continue
}
nll, err := newLogLine(splitContent[i])
nll, err := NewLogLine(splitContent[i])
if err != nil {
return nil, err
}
@ -191,7 +125,7 @@ func (l *LogLine) String(options *LogOptions) string {
out = fmt.Sprintf("%s ", cid)
}
if options.Timestamps {
out = out + fmt.Sprintf("%s ", l.Time.Format(logTimeFormat))
out = out + fmt.Sprintf("%s ", l.Time.Format(LogTimeFormat))
}
return out + l.Msg
}
@ -201,13 +135,13 @@ func (l *LogLine) Since(since time.Time) bool {
return l.Time.After(since)
}
// newLogLine creates a logLine struct from a container log string
func newLogLine(line string) (*LogLine, error) {
// NewLogLine creates a logLine struct from a container log string
func NewLogLine(line string) (*LogLine, error) {
splitLine := strings.Split(line, " ")
if len(splitLine) < 4 {
return nil, errors.Errorf("'%s' is not a valid container log line", line)
}
logTime, err := time.Parse(logTimeFormat, splitLine[0])
logTime, err := time.Parse(LogTimeFormat, splitLine[0])
if err != nil {
return nil, errors.Wrapf(err, "unable to convert time %s from container log", splitLine[0])
}
@ -222,7 +156,7 @@ func newLogLine(line string) (*LogLine, error) {
// Partial returns a bool if the log line is a partial log type
func (l *LogLine) Partial() bool {
if l.ParseLogType == partialLogType {
if l.ParseLogType == PartialLogType {
return true
}
return false
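With newLogLine now exported from the logs package, other packages can parse container log lines directly. A minimal sketch, assuming the usual conmon k8s-file layout of "timestamp stream partial/full-flag message":
package main
import (
    "fmt"
    "github.com/containers/libpod/libpod/logs"
)
func main() {
    line := "2019-06-25T08:40:19.000000000-05:00 stdout F hello world"
    ll, err := logs.NewLogLine(line)
    if err != nil {
        fmt.Println("parse failed:", err)
        return
    }
    fmt.Println(ll.Msg, ll.Partial()) // hello world false
}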

View File

@ -217,7 +217,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
// If not using the OCI runtime, we don't need to do most of this.
if !useRuntime {
// If the container's not running, nothing to do.
if ctr.state.State != ContainerStateRunning && ctr.state.State != ContainerStatePaused {
if ctr.state.State != define.ContainerStateRunning && ctr.state.State != define.ContainerStatePaused {
return nil
}
@ -233,7 +233,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
}
// Alright, it exists. Transition to Stopped state.
ctr.state.State = ContainerStateStopped
ctr.state.State = define.ContainerStateStopped
// Read the exit file to get our stopped time and exit code.
return ctr.handleExitFile(exitFile, info)
@ -264,7 +264,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
ctr.removeConmonFiles()
ctr.state.ExitCode = -1
ctr.state.FinishedTime = time.Now()
ctr.state.State = ContainerStateExited
ctr.state.State = define.ContainerStateExited
return nil
}
return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out)
@ -283,13 +283,13 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
switch state.Status {
case "created":
ctr.state.State = ContainerStateCreated
ctr.state.State = define.ContainerStateCreated
case "paused":
ctr.state.State = ContainerStatePaused
ctr.state.State = define.ContainerStatePaused
case "running":
ctr.state.State = ContainerStateRunning
ctr.state.State = define.ContainerStateRunning
case "stopped":
ctr.state.State = ContainerStateStopped
ctr.state.State = define.ContainerStateStopped
default:
return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s",
ctr.ID(), state.Status)
@ -297,7 +297,7 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
// Only grab exit status if we were not already stopped
// If we were, it should already be in the database
if ctr.state.State == ContainerStateStopped && oldState != ContainerStateStopped {
if ctr.state.State == define.ContainerStateStopped && oldState != define.ContainerStateStopped {
var fi os.FileInfo
chWait := make(chan error)
defer close(chWait)

View File

@ -113,7 +113,7 @@ func (p *Pod) StopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
}
// Ignore containers that are not running
if ctr.state.State != ContainerStateRunning {
if ctr.state.State != define.ContainerStateRunning {
ctr.lock.Unlock()
continue
}
@ -181,7 +181,7 @@ func (p *Pod) Pause() (map[string]error, error) {
}
// Ignore containers that are not running
if ctr.state.State != ContainerStateRunning {
if ctr.state.State != define.ContainerStateRunning {
ctr.lock.Unlock()
continue
}
@ -240,7 +240,7 @@ func (p *Pod) Unpause() (map[string]error, error) {
}
// Ignore containers that are not paused
if ctr.state.State != ContainerStatePaused {
if ctr.state.State != define.ContainerStatePaused {
ctr.lock.Unlock()
continue
}
@ -353,7 +353,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) {
}
// Ignore containers that are not running
if ctr.state.State != ContainerStateRunning {
if ctr.state.State != define.ContainerStateRunning {
ctr.lock.Unlock()
continue
}
@ -383,7 +383,7 @@ func (p *Pod) Kill(signal uint) (map[string]error, error) {
// Status gets the status of all containers in the pod
// Returns a map of Container ID to Container Status
func (p *Pod) Status() (map[string]ContainerStatus, error) {
func (p *Pod) Status() (map[string]define.ContainerStatus, error) {
p.lock.Lock()
defer p.lock.Unlock()
@ -403,7 +403,7 @@ func (p *Pod) Status() (map[string]ContainerStatus, error) {
}
// Now that all containers are locked, get their status
status := make(map[string]ContainerStatus, len(allCtrs))
status := make(map[string]define.ContainerStatus, len(allCtrs))
for _, ctr := range allCtrs {
if err := ctr.syncContainer(); err != nil {
return nil, err

View File

@ -6,6 +6,7 @@ import (
"strconv"
"strings"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/psgo"
)
@ -34,7 +35,7 @@ func (p *Pod) GetPodPidInformation(descriptors []string) ([]string, error) {
c.lock.Unlock()
return nil, err
}
if c.state.State == ContainerStateRunning {
if c.state.State == define.ContainerStateRunning {
pid := strconv.Itoa(c.state.PID)
pids = append(pids, pid)
}

View File

@ -73,9 +73,8 @@ var (
OverrideConfigPath = etcDir + "/containers/libpod.conf"
// DefaultInfraImage to use for infra container
DefaultInfraImage = "k8s.gcr.io/pause:3.1"
// DefaultInfraCommand to be run in an infra container
DefaultInfraCommand = "/pause"
// DefaultSHMLockPath is the default path for SHM locks
DefaultSHMLockPath = "/libpod_lock"
@ -305,8 +304,8 @@ func defaultRuntimeConfig() (RuntimeConfig, error) {
NoPivotRoot: false,
CNIConfigDir: etcDir + "/cni/net.d/",
CNIPluginDir: []string{"/usr/libexec/cni", "/usr/lib/cni", "/usr/local/lib/cni", "/opt/cni/bin"},
InfraCommand: DefaultInfraCommand,
InfraImage: DefaultInfraImage,
InfraCommand: define.DefaultInfraCommand,
InfraImage: define.DefaultInfraImage,
EnablePortReservation: true,
EnableLabeling: true,
NumLocks: 2048,
@ -314,6 +313,25 @@ func defaultRuntimeConfig() (RuntimeConfig, error) {
}, nil
}
// SetXdgRuntimeDir ensures the XDG_RUNTIME_DIR env variable is set
// containers/image uses XDG_RUNTIME_DIR to locate the auth file.
func SetXdgRuntimeDir(val string) error {
if !rootless.IsRootless() {
return nil
}
if val == "" {
var err error
val, err = util.GetRootlessRuntimeDir()
if err != nil {
return err
}
}
if err := os.Setenv("XDG_RUNTIME_DIR", val); err != nil {
return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
}
return nil
}
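SetXdgRuntimeDir stays exported from the libpod package, so rootless callers can still invoke it before any containers/image operation; an empty value asks the helper to derive the rootless runtime directory itself. A minimal usage sketch (a no-op when not rootless):
package main
import (
    "fmt"
    "os"
    "github.com/containers/libpod/libpod"
)
func main() {
    if err := libpod.SetXdgRuntimeDir(""); err != nil {
        fmt.Println("could not set XDG_RUNTIME_DIR:", err)
        return
    }
    fmt.Println(os.Getenv("XDG_RUNTIME_DIR"))
}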
func getDefaultTmpDir() (string, error) {
if !rootless.IsRootless() {
return "/var/run/libpod", nil
@ -336,25 +354,6 @@ func getDefaultTmpDir() (string, error) {
return filepath.Join(libpodRuntimeDir, "tmp"), nil
}
// SetXdgRuntimeDir ensures the XDG_RUNTIME_DIR env variable is set
// containers/image uses XDG_RUNTIME_DIR to locate the auth file.
func SetXdgRuntimeDir(val string) error {
if !rootless.IsRootless() {
return nil
}
if val == "" {
var err error
val, err = util.GetRootlessRuntimeDir()
if err != nil {
return err
}
}
if err := os.Setenv("XDG_RUNTIME_DIR", val); err != nil {
return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
}
return nil
}
// NewRuntime creates a new container runtime
// Options can be passed to override the default configuration for the runtime
func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime, err error) {
@ -1194,21 +1193,21 @@ func (r *Runtime) refresh(alivePath string) error {
}
// Info returns the store and host information
func (r *Runtime) Info() ([]InfoData, error) {
info := []InfoData{}
func (r *Runtime) Info() ([]define.InfoData, error) {
info := []define.InfoData{}
// get host information
hostInfo, err := r.hostInfo()
if err != nil {
return nil, errors.Wrapf(err, "error getting host info")
}
info = append(info, InfoData{Type: "host", Data: hostInfo})
info = append(info, define.InfoData{Type: "host", Data: hostInfo})
// get store information
storeInfo, err := r.storeInfo()
if err != nil {
return nil, errors.Wrapf(err, "error getting store info")
}
info = append(info, InfoData{Type: "store", Data: storeInfo})
info = append(info, define.InfoData{Type: "store", Data: storeInfo})
reg, err := sysreg.GetRegistries()
if err != nil {
@ -1228,7 +1227,7 @@ func (r *Runtime) Info() ([]InfoData, error) {
return nil, errors.Wrapf(err, "error getting registries")
}
registries["blocked"] = breg
info = append(info, InfoData{Type: "registries", Data: registries})
info = append(info, define.InfoData{Type: "registries", Data: registries})
return info, nil
}
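Info now returns the define-level type, so consumers only need the define package to walk the "host", "store", and "registries" sections. A hypothetical consumer sketch (printInfo is not part of this change):
package main
import (
    "fmt"
    "github.com/containers/libpod/libpod/define"
)
// printInfo walks the InfoData slice returned by Runtime.Info and
// prints each section name with its key/value pairs.
func printInfo(info []define.InfoData) {
    for _, d := range info {
        fmt.Printf("%s:\n", d.Type)
        for k, v := range d.Data {
            fmt.Printf("  %s: %v\n", k, v)
        }
    }
}
func main() {
    printInfo([]define.InfoData{{Type: "host", Data: map[string]interface{}{"os": "linux"}}})
}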

View File

@ -133,7 +133,7 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container, restore bo
logrus.Debugf("Allocated lock %d for container %s", ctr.lock.ID(), ctr.ID())
ctr.valid = true
ctr.state.State = ContainerStateConfigured
ctr.state.State = config2.ContainerStateConfigured
ctr.runtime = r
if ctr.config.OCIRuntime == "" {
@ -370,7 +370,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
}
if c.state.State == ContainerStatePaused {
if c.state.State == config2.ContainerStatePaused {
if err := c.ociRuntime.killContainer(c, 9); err != nil {
return err
}
@ -384,7 +384,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
}
// Check that the container's in a good state to be removed
if c.state.State == ContainerStateRunning {
if c.state.State == config2.ContainerStateRunning {
if err := c.ociRuntime.stopContainer(c, c.StopTimeout()); err != nil {
return errors.Wrapf(err, "cannot remove container %s as it could not be stopped", c.ID())
}
@ -464,8 +464,8 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
// Delete the container.
// Not needed in Configured and Exited states, where the container
// doesn't exist in the runtime
if c.state.State != ContainerStateConfigured &&
c.state.State != ContainerStateExited {
if c.state.State != config2.ContainerStateConfigured &&
c.state.State != config2.ContainerStateExited {
if err := c.delete(ctx); err != nil {
if cleanupErr == nil {
cleanupErr = err
@ -582,7 +582,7 @@ func (r *Runtime) GetAllContainers() ([]*Container, error) {
func (r *Runtime) GetRunningContainers() ([]*Container, error) {
running := func(c *Container) bool {
state, _ := c.State()
return state == ContainerStateRunning
return state == config2.ContainerStateRunning
}
return r.GetContainers(running)
}

View File

@ -5,6 +5,7 @@ package libpod
import (
"context"
"fmt"
"github.com/containers/libpod/pkg/util"
"io/ioutil"
"os"
"path/filepath"
@ -12,7 +13,6 @@ import (
"syscall"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

View File

@ -8,6 +8,7 @@ import (
"testing"
"time"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/lock"
"github.com/containers/storage"
"github.com/stretchr/testify/assert"
@ -700,7 +701,7 @@ func TestSaveAndUpdateContainer(t *testing.T) {
retrievedCtr, err := state.Container(testCtr.ID())
require.NoError(t, err)
retrievedCtr.state.State = ContainerStateStopped
retrievedCtr.state.State = define.ContainerStateStopped
retrievedCtr.state.ExitCode = 127
retrievedCtr.state.FinishedTime = time.Now()
@ -729,7 +730,7 @@ func TestSaveAndUpdateContainerSameNamespaceSucceeds(t *testing.T) {
retrievedCtr, err := state.Container(testCtr.ID())
assert.NoError(t, err)
retrievedCtr.state.State = ContainerStateStopped
retrievedCtr.state.State = define.ContainerStateStopped
retrievedCtr.state.ExitCode = 127
retrievedCtr.state.FinishedTime = time.Now()

View File

@ -26,7 +26,7 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
}
}
if c.state.State != ContainerStateRunning {
if c.state.State != define.ContainerStateRunning {
return stats, define.ErrCtrStateInvalid
}
@ -61,7 +61,7 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
stats.MemLimit = getMemLimit(cgroupStats.Memory.Usage.Limit)
stats.MemPerc = (float64(stats.MemUsage) / float64(stats.MemLimit)) * 100
stats.PIDs = 0
if conState == ContainerStateRunning {
if conState == define.ContainerStateRunning {
stats.PIDs = cgroupStats.Pids.Current
}
stats.BlockInput, stats.BlockOutput = calculateBlockIO(cgroupStats)

View File

@ -24,17 +24,6 @@ const (
DefaultTransport = "docker://"
)
// OpenExclusiveFile opens a file for writing and ensure it doesn't already exist
func OpenExclusiveFile(path string) (*os.File, error) {
baseDir := filepath.Dir(path)
if baseDir != "" {
if _, err := os.Stat(baseDir); err != nil {
return nil, err
}
}
return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
}
// FuncTimer helps measure the execution time of a function
// For debug purposes, do not leave in code
// used like defer FuncTimer("foo")

View File

@ -20,9 +20,11 @@ import (
"github.com/containers/image/manifest"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/cmd/podman/shared/parse"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/libpod/logs"
"github.com/containers/libpod/pkg/adapter/shortcuts"
"github.com/containers/libpod/pkg/systemdgen"
"github.com/containers/psgo"
@ -242,7 +244,7 @@ func (r *LocalRuntime) UmountRootFilesystems(ctx context.Context, cli *cliconfig
logrus.Debugf("Error umounting container %s state: %s", ctr.ID(), err.Error())
continue
}
if state == libpod.ContainerStateRunning {
if state == define.ContainerStateRunning {
logrus.Debugf("Error umounting container %s, is running", ctr.ID())
continue
}
@ -283,13 +285,14 @@ func (r *LocalRuntime) WaitOnContainers(ctx context.Context, cli *cliconfig.Wait
}
// Log logs one or more containers
func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions) error {
func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) error {
var wg sync.WaitGroup
options.WaitGroup = &wg
if len(c.InputArgs) > 1 {
options.Multi = true
}
logChannel := make(chan *libpod.LogLine, int(c.Tail)*len(c.InputArgs)+1)
logChannel := make(chan *logs.LogLine, int(c.Tail)*len(c.InputArgs)+1)
containers, err := shortcuts.GetContainersByContext(false, c.Latest, c.InputArgs, r.Runtime)
if err != nil {
return err
@ -488,7 +491,7 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
if err != nil {
return errors.Wrapf(err, "unable to determine state of %s", ctr.ID())
}
if conState != libpod.ContainerStateRunning {
if conState != define.ContainerStateRunning {
return errors.Errorf("you can only attach to running containers")
}
@ -539,16 +542,23 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error {
}
// Restore one or more containers
func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues, options libpod.ContainerCheckpointOptions) error {
func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) error {
var (
containers []*libpod.Container
err, lastError error
filterFuncs []libpod.ContainerFilter
)
options := libpod.ContainerCheckpointOptions{
Keep: c.Keep,
TCPEstablished: c.TcpEstablished,
TargetFile: c.Import,
Name: c.Name,
}
filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
state, _ := c.State()
return state == libpod.ContainerStateExited
return state == define.ContainerStateExited
})
if c.Import != "" {
@ -606,7 +616,7 @@ func (r *LocalRuntime) Start(ctx context.Context, c *cliconfig.StartValues, sigP
return exitCode, errors.Wrapf(err, "unable to get container state")
}
ctrRunning := ctrState == libpod.ContainerStateRunning
ctrRunning := ctrState == define.ContainerStateRunning
if c.Attach {
inputStream := os.Stdin
@ -732,7 +742,7 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
var filterFuncs []libpod.ContainerFilter
filterFuncs = append(filterFuncs, func(c *libpod.Container) bool {
state, _ := c.State()
return state == libpod.ContainerStatePaused
return state == define.ContainerStatePaused
})
ctrs, err = r.GetContainers(filterFuncs...)
} else {
@ -929,7 +939,7 @@ func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([
if c.PodID() != "" {
return false
}
if state == libpod.ContainerStateStopped || state == libpod.ContainerStateExited {
if state == define.ContainerStateStopped || state == define.ContainerStateExited {
return true
}
return false
@ -1020,7 +1030,7 @@ func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) {
//Convert libpod containers to adapter Containers
for _, con := range containers {
if state, _ := con.State(); state != libpod.ContainerStateRunning {
if state, _ := con.State(); state != define.ContainerStateRunning {
continue
}
portContainers = append(portContainers, &Container{con})
@ -1101,3 +1111,61 @@ func (r *LocalRuntime) Commit(ctx context.Context, c *cliconfig.CommitValues, co
}
return newImage.ID(), nil
}
// Exec executes a command in a running container
func (r *LocalRuntime) Exec(c *cliconfig.ExecValues, cmd []string) error {
var ctr *Container
var err error
if c.Latest {
ctr, err = r.GetLatestContainer()
} else {
ctr, err = r.LookupContainer(c.InputArgs[0])
}
if err != nil {
return errors.Wrapf(err, "unable to exec into %s", c.InputArgs[0])
}
if c.PreserveFDs > 0 {
entries, err := ioutil.ReadDir("/proc/self/fd")
if err != nil {
return errors.Wrapf(err, "unable to read /proc/self/fd")
}
m := make(map[int]bool)
for _, e := range entries {
i, err := strconv.Atoi(e.Name())
if err != nil {
return errors.Wrapf(err, "cannot parse %s in /proc/self/fd", e.Name())
}
m[i] = true
}
for i := 3; i < 3+c.PreserveFDs; i++ {
if _, found := m[i]; !found {
return errors.New("invalid --preserve-fds=N specified. Not enough FDs available")
}
}
}
// ENVIRONMENT VARIABLES
env := map[string]string{}
if err := parse.ReadKVStrings(env, []string{}, c.Env); err != nil {
return errors.Wrapf(err, "unable to process environment variables")
}
envs := []string{}
for k, v := range env {
envs = append(envs, fmt.Sprintf("%s=%s", k, v))
}
streams := new(libpod.AttachStreams)
streams.OutputStream = os.Stdout
streams.ErrorStream = os.Stderr
streams.InputStream = os.Stdin
streams.AttachOutput = true
streams.AttachError = true
streams.AttachInput = true
return ctr.Exec(c.Tty, c.Privileged, envs, cmd, c.User, c.Workdir, streams, c.PreserveFDs)
}
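The /proc/self/fd scan above amounts to checking that descriptors 3..3+N-1 were actually inherited. A minimal Linux-only sketch of the same idea (hasInheritedFDs is a hypothetical helper, not part of this change):
package main
import (
    "fmt"
    "io/ioutil"
    "strconv"
)
// hasInheritedFDs reports whether file descriptors 3..3+n-1 are
// currently open, by listing /proc/self/fd (Linux-specific).
func hasInheritedFDs(n int) (bool, error) {
    entries, err := ioutil.ReadDir("/proc/self/fd")
    if err != nil {
        return false, err
    }
    open := make(map[int]bool)
    for _, e := range entries {
        if i, err := strconv.Atoi(e.Name()); err == nil {
            open[i] = true
        }
    }
    for i := 3; i < 3+n; i++ {
        if !open[i] {
            return false, nil
        }
    }
    return true, nil
}
func main() {
    ok, err := hasInheritedFDs(2)
    fmt.Println(ok, err)
}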

View File

@ -17,6 +17,7 @@ import (
iopodman "github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/logs"
"github.com/containers/libpod/pkg/varlinkapi/virtwriter"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/docker/pkg/term"
@ -411,8 +412,8 @@ func BatchContainerOp(ctr *Container, opts shared.PsOptions) (shared.BatchContai
return bcs, nil
}
// Logs one or more containers over a varlink connection
func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions) error {
// Log logs one or more containers over a varlink connection
func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *logs.LogOptions) error {
// GetContainersLogs
reply, err := iopodman.GetContainersLogs().Send(r.Conn, uint64(varlink.More), c.InputArgs, c.Follow, c.Latest, options.Since.Format(time.RFC3339Nano), int64(c.Tail), c.Timestamps)
if err != nil {
@ -434,7 +435,7 @@ func (r *LocalRuntime) Log(c *cliconfig.LogsValues, options *libpod.LogOptions)
if err != nil {
return errors.Wrapf(err, "unable to parse time of log %s", log.Time)
}
logLine := libpod.LogLine{
logLine := logs.LogLine{
Device: log.Device,
ParseLogType: log.ParseLogType,
Time: lTime,
@ -516,7 +517,7 @@ func (r *LocalRuntime) Ps(c *cliconfig.PsValues, opts shared.PsOptions) ([]share
RootFsSize: ctr.RootFsSize,
RwSize: ctr.RwSize,
}
state, err := libpod.StringToContainerStatus(ctr.State)
state, err := define.StringToContainerStatus(ctr.State)
if err != nil {
return nil, err
}
@ -645,7 +646,7 @@ func (r *LocalRuntime) Attach(ctx context.Context, c *cliconfig.AttachValues) er
if err != nil {
return nil
}
if ctr.state.State != libpod.ContainerStateRunning {
if ctr.state.State != define.ContainerStateRunning {
return errors.New("you can only attach to running containers")
}
inputStream := os.Stdin
@ -682,7 +683,7 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error {
if err != nil {
return err
}
if ctr.state.State == libpod.ContainerStateRunning {
if ctr.state.State == define.ContainerStateRunning {
runningIds = append(runningIds, id)
}
}
@ -703,7 +704,7 @@ func (r *LocalRuntime) Checkpoint(c *cliconfig.CheckpointValues) error {
}
// Restore one or more containers
func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues, options libpod.ContainerCheckpointOptions) error {
func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues) error {
if c.Import != "" {
return errors.New("the remote client does not support importing checkpoints")
}
@ -722,7 +723,7 @@ func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues,
if err != nil {
return err
}
if ctr.state.State != libpod.ContainerStateRunning {
if ctr.state.State != define.ContainerStateRunning {
exitedIDs = append(exitedIDs, id)
}
}
@ -730,7 +731,7 @@ func (r *LocalRuntime) Restore(ctx context.Context, c *cliconfig.RestoreValues,
}
for _, id := range ids {
if _, err := iopodman.ContainerRestore().Call(r.Conn, id, options.Keep, options.TCPEstablished); err != nil {
if _, err := iopodman.ContainerRestore().Call(r.Conn, id, c.Keep, c.TcpEstablished); err != nil {
if lastError != nil {
fmt.Fprintln(os.Stderr, lastError)
}
@ -797,7 +798,7 @@ func (r *LocalRuntime) PauseContainers(ctx context.Context, cli *cliconfig.Pause
)
if cli.All {
filters := []string{libpod.ContainerStateRunning.String()}
filters := []string{define.ContainerStateRunning.String()}
ctrs, err = r.LookupContainersWithStatus(filters)
} else {
ctrs, err = r.LookupContainers(cli.InputArgs)
@ -834,7 +835,7 @@ func (r *LocalRuntime) UnpauseContainers(ctx context.Context, cli *cliconfig.Unp
logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
if cli.All {
filters := []string{libpod.ContainerStatePaused.String()}
filters := []string{define.ContainerStatePaused.String()}
ctrs, err = r.LookupContainersWithStatus(filters)
} else {
ctrs, err = r.LookupContainers(cli.InputArgs)
@ -873,7 +874,7 @@ func (r *LocalRuntime) Restart(ctx context.Context, c *cliconfig.RestartValues)
}
restartContainers = append(restartContainers, lastCtr)
} else if c.Running {
containers, err = r.LookupContainersWithStatus([]string{libpod.ContainerStateRunning.String()})
containers, err = r.LookupContainersWithStatus([]string{define.ContainerStateRunning.String()})
if err != nil {
return nil, nil, err
}
@ -941,7 +942,7 @@ func (r *LocalRuntime) Prune(ctx context.Context, maxWorkers int, force bool) ([
)
logrus.Debugf("Setting maximum rm workers to %d", maxWorkers)
filters := []string{libpod.ContainerStateExited.String()}
filters := []string{define.ContainerStateExited.String()}
ctrs, err = r.LookupContainersWithStatus(filters)
if err != nil {
return ok, failures, err
@ -974,7 +975,7 @@ func (r *LocalRuntime) Port(c *cliconfig.PortValues) ([]*Container, error) {
containers, err = r.GetContainersByContext(false, c.Latest, c.InputArgs)
} else {
// we need to only use running containers if all
filters := []string{libpod.ContainerStateRunning.String()}
filters := []string{define.ContainerStateRunning.String()}
containers, err = r.LookupContainersWithStatus(filters)
}
if err != nil {
@ -1025,3 +1026,8 @@ func (r *LocalRuntime) Commit(ctx context.Context, c *cliconfig.CommitValues, co
}
return iid, nil
}
// Exec executes a command in a running container
func (r *LocalRuntime) Exec(c *cliconfig.ExecValues, cmd []string) error {
return define.ErrNotImplemented
}

View File

@ -4,16 +4,16 @@ package adapter
import (
"encoding/json"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
)
// Info returns information for the host system and its components
func (r RemoteRuntime) Info() ([]libpod.InfoData, error) {
func (r RemoteRuntime) Info() ([]define.InfoData, error) {
// TODO the varlink implementation for info should be updated to match the output for regular info
var (
reply []libpod.InfoData
reply []define.InfoData
hostInfo map[string]interface{}
store map[string]interface{}
)
@ -43,9 +43,9 @@ func (r RemoteRuntime) Info() ([]libpod.InfoData, error) {
insecureRegistries["registries"] = info.Insecure_registries
// Add everything to the reply
reply = append(reply, libpod.InfoData{Type: "host", Data: hostInfo})
reply = append(reply, libpod.InfoData{Type: "registries", Data: registries})
reply = append(reply, libpod.InfoData{Type: "insecure registries", Data: insecureRegistries})
reply = append(reply, libpod.InfoData{Type: "store", Data: store})
reply = append(reply, define.InfoData{Type: "host", Data: hostInfo})
reply = append(reply, define.InfoData{Type: "registries", Data: registries})
reply = append(reply, define.InfoData{Type: "insecure registries", Data: insecureRegistries})
reply = append(reply, define.InfoData{Type: "store", Data: store})
return reply, nil
}

View File

@ -4,14 +4,33 @@ package adapter
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/adapter/shortcuts"
ns "github.com/containers/libpod/pkg/namespaces"
createconfig "github.com/containers/libpod/pkg/spec"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/ghodss/yaml"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
)
const (
// https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
createDirectoryPermission = 0755
// https://kubernetes.io/docs/concepts/storage/volumes/#hostpath
createFilePermission = 0644
)
// PodContainerStats is struct containing an adapter Pod and a libpod
@ -420,3 +439,286 @@ func (r *LocalRuntime) GetStatPods(c *cliconfig.PodStatsValues) ([]*Pod, error)
}
return adapterPods, nil
}
// PlayKubeYAML creates pods and containers from a kube YAML file
func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayValues, yamlFile string) (*Pod, error) {
var (
containers []*libpod.Container
pod *libpod.Pod
podOptions []libpod.PodCreateOption
podYAML v1.Pod
registryCreds *types.DockerAuthConfig
writer io.Writer
)
content, err := ioutil.ReadFile(yamlFile)
if err != nil {
return nil, err
}
if err := yaml.Unmarshal(content, &podYAML); err != nil {
return nil, errors.Wrapf(err, "unable to read %s as YAML", yamlFile)
}
// check for name collision between pod and container
podName := podYAML.ObjectMeta.Name
for _, n := range podYAML.Spec.Containers {
if n.Name == podName {
fmt.Printf("a container exists with the same name (%s) as the pod in your YAML file; changing pod name to %s_pod\n", podName, podName)
podName = fmt.Sprintf("%s_pod", podName)
}
}
podOptions = append(podOptions, libpod.WithInfraContainer())
podOptions = append(podOptions, libpod.WithPodName(podName))
// TODO for now we just used the default kernel namespaces; we need to add/subtract this from yaml
nsOptions, err := shared.GetNamespaceOptions(strings.Split(shared.DefaultKernelNamespaces, ","))
if err != nil {
return nil, err
}
podOptions = append(podOptions, nsOptions...)
podPorts := getPodPorts(podYAML.Spec.Containers)
podOptions = append(podOptions, libpod.WithInfraContainerPorts(podPorts))
// Create the Pod
pod, err = r.NewPod(ctx, podOptions...)
if err != nil {
return nil, err
}
podInfraID, err := pod.InfraContainerID()
if err != nil {
return nil, err
}
namespaces := map[string]string{
// Disabled during code review per mheon
//"pid": fmt.Sprintf("container:%s", podInfraID),
"net": fmt.Sprintf("container:%s", podInfraID),
"user": fmt.Sprintf("container:%s", podInfraID),
"ipc": fmt.Sprintf("container:%s", podInfraID),
"uts": fmt.Sprintf("container:%s", podInfraID),
}
if !c.Quiet {
writer = os.Stderr
}
dockerRegistryOptions := image.DockerRegistryOptions{
DockerRegistryCreds: registryCreds,
DockerCertPath: c.CertDir,
}
if c.Flag("tls-verify").Changed {
dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
}
// map from name to mount point
volumes := make(map[string]string)
for _, volume := range podYAML.Spec.Volumes {
hostPath := volume.VolumeSource.HostPath
if hostPath == nil {
return nil, errors.Errorf("HostPath is currently the only supported VolumeSource")
}
if hostPath.Type != nil {
switch *hostPath.Type {
case v1.HostPathDirectoryOrCreate:
if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
if err := os.Mkdir(hostPath.Path, createDirectoryPermission); err != nil {
return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
}
}
// unconditionally label a newly created volume as private
if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
}
break
case v1.HostPathFileOrCreate:
if _, err := os.Stat(hostPath.Path); os.IsNotExist(err) {
f, err := os.OpenFile(hostPath.Path, os.O_RDONLY|os.O_CREATE, createFilePermission)
if err != nil {
return nil, errors.Errorf("Error creating HostPath %s at %s", volume.Name, hostPath.Path)
}
if err := f.Close(); err != nil {
logrus.Warnf("Error in closing newly created HostPath file: %v", err)
}
}
// unconditionally label a newly created volume as private
if err := libpod.LabelVolumePath(hostPath.Path, false); err != nil {
return nil, errors.Wrapf(err, "Error giving %s a label", hostPath.Path)
}
break
case v1.HostPathDirectory:
case v1.HostPathFile:
case v1.HostPathUnset:
// do nothing here because we will verify the path exists in validateVolumeHostDir
break
default:
return nil, errors.Errorf("Directories are the only supported HostPath type")
}
}
if err := createconfig.ValidateVolumeHostDir(hostPath.Path); err != nil {
return nil, errors.Wrapf(err, "Error in parsing HostPath in YAML")
}
volumes[volume.Name] = hostPath.Path
}
for _, container := range podYAML.Spec.Containers {
newImage, err := r.ImageRuntime().New(ctx, container.Image, c.SignaturePolicy, c.Authfile, writer, &dockerRegistryOptions, image.SigningOptions{}, false, nil)
if err != nil {
return nil, err
}
createConfig, err := kubeContainerToCreateConfig(ctx, container, r.Runtime, newImage, namespaces, volumes, pod.ID())
if err != nil {
return nil, err
}
ctr, err := shared.CreateContainerFromCreateConfig(r.Runtime, createConfig, ctx, pod)
if err != nil {
return nil, err
}
containers = append(containers, ctr)
}
// start the containers
for _, ctr := range containers {
if err := ctr.Start(ctx, true); err != nil {
// Making this a hard failure here to avoid a mess
// the other containers are in created status
return nil, err
}
}
// We've now successfully converted this YAML into a pod
// print our pod and containers, signifying we succeeded
fmt.Printf("Pod:\n%s\n", pod.ID())
if len(containers) == 1 {
fmt.Printf("Container:\n")
}
if len(containers) > 1 {
fmt.Printf("Containers:\n")
}
for _, ctr := range containers {
fmt.Println(ctr.ID())
}
if err := playcleanup(ctx, r, pod, nil); err != nil {
logrus.Errorf("unable to remove pod %s after failing to play kube", pod.ID())
}
return nil, nil
}
func playcleanup(ctx context.Context, runtime *LocalRuntime, pod *libpod.Pod, err error) error {
if err != nil && pod != nil {
return runtime.RemovePod(ctx, pod, true, true)
}
return nil
}
// getPodPorts converts a slice of kube container descriptions to an
// array of ocicni portmapping descriptions usable in libpod
func getPodPorts(containers []v1.Container) []ocicni.PortMapping {
var infraPorts []ocicni.PortMapping
for _, container := range containers {
for _, p := range container.Ports {
portBinding := ocicni.PortMapping{
HostPort: p.HostPort,
ContainerPort: p.ContainerPort,
Protocol: strings.ToLower(string(p.Protocol)),
}
if p.HostIP != "" {
logrus.Debug("HostIP on port bindings is not supported")
}
infraPorts = append(infraPorts, portBinding)
}
}
return infraPorts
}
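A package-internal test sketch for the conversion above (test name hypothetical); note the protocol string is lower-cased on the way to ocicni:
package adapter
import (
    "testing"
    v1 "k8s.io/api/core/v1"
)
func TestGetPodPorts(t *testing.T) {
    ctrs := []v1.Container{{
        Ports: []v1.ContainerPort{{HostPort: 8080, ContainerPort: 80, Protocol: v1.ProtocolTCP}},
    }}
    got := getPodPorts(ctrs)
    if len(got) != 1 || got[0].Protocol != "tcp" || got[0].HostPort != 8080 {
        t.Fatalf("unexpected mapping: %+v", got)
    }
}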
// kubeContainerToCreateConfig takes a v1.Container and returns a createconfig describing a container
func kubeContainerToCreateConfig(ctx context.Context, containerYAML v1.Container, runtime *libpod.Runtime, newImage *image.Image, namespaces map[string]string, volumes map[string]string, podID string) (*createconfig.CreateConfig, error) {
var (
containerConfig createconfig.CreateConfig
)
// The default for MemorySwappiness is -1, not 0
containerConfig.Resources.MemorySwappiness = -1
containerConfig.Image = containerYAML.Image
containerConfig.ImageID = newImage.ID()
containerConfig.Name = containerYAML.Name
containerConfig.Tty = containerYAML.TTY
containerConfig.WorkDir = containerYAML.WorkingDir
containerConfig.Pod = podID
imageData, _ := newImage.Inspect(ctx)
containerConfig.User = "0"
if imageData != nil {
containerConfig.User = imageData.Config.User
}
if containerYAML.SecurityContext != nil {
if containerYAML.SecurityContext.ReadOnlyRootFilesystem != nil {
containerConfig.ReadOnlyRootfs = *containerYAML.SecurityContext.ReadOnlyRootFilesystem
}
if containerYAML.SecurityContext.Privileged != nil {
containerConfig.Privileged = *containerYAML.SecurityContext.Privileged
}
if containerYAML.SecurityContext.AllowPrivilegeEscalation != nil {
containerConfig.NoNewPrivs = !*containerYAML.SecurityContext.AllowPrivilegeEscalation
}
}
containerConfig.Command = []string{}
if imageData != nil && imageData.Config != nil {
containerConfig.Command = append(containerConfig.Command, imageData.Config.Entrypoint...)
}
if len(containerConfig.Command) != 0 {
containerConfig.Command = append(containerConfig.Command, containerYAML.Command...)
} else if imageData != nil && imageData.Config != nil {
containerConfig.Command = append(containerConfig.Command, imageData.Config.Cmd...)
}
if imageData != nil && len(containerConfig.Command) == 0 {
return nil, errors.Errorf("No command specified in container YAML or as CMD or ENTRYPOINT in this image for %s", containerConfig.Name)
}
containerConfig.StopSignal = 15
// If the user does not pass in ID mappings, just set to basics
if containerConfig.IDMappings == nil {
containerConfig.IDMappings = &storage.IDMappingOptions{}
}
containerConfig.NetMode = ns.NetworkMode(namespaces["net"])
containerConfig.IpcMode = ns.IpcMode(namespaces["ipc"])
containerConfig.UtsMode = ns.UTSMode(namespaces["uts"])
// disabled in code review per mheon
//containerConfig.PidMode = ns.PidMode(namespaces["pid"])
containerConfig.UsernsMode = ns.UsernsMode(namespaces["user"])
if len(containerConfig.WorkDir) == 0 {
containerConfig.WorkDir = "/"
}
// Set default environment variables and incorporate data from image, if necessary
envs := shared.EnvVariablesFromData(imageData)
// Environment Variables
for _, e := range containerYAML.Env {
envs[e.Name] = e.Value
}
containerConfig.Env = envs
for _, volume := range containerYAML.VolumeMounts {
hostPath, exists := volumes[volume.Name]
if !exists {
return nil, errors.Errorf("Volume mount %s specified for container but not configured in volumes", volume.Name)
}
if err := createconfig.ValidateVolumeCtrDir(volume.MountPath); err != nil {
return nil, errors.Wrapf(err, "error in parsing MountPath")
}
containerConfig.Volumes = append(containerConfig.Volumes, fmt.Sprintf("%s:%s", hostPath, volume.MountPath))
}
return &containerConfig, nil
}

View File

@ -258,25 +258,25 @@ func (p *Pod) AllContainers() ([]*Container, error) {
}
// Status ...
func (p *Pod) Status() (map[string]libpod.ContainerStatus, error) {
ctrs := make(map[string]libpod.ContainerStatus)
func (p *Pod) Status() (map[string]define.ContainerStatus, error) {
ctrs := make(map[string]define.ContainerStatus)
for _, i := range p.containers {
var status libpod.ContainerStatus
var status define.ContainerStatus
switch i.State {
case "exited":
status = libpod.ContainerStateExited
status = define.ContainerStateExited
case "stopped":
status = libpod.ContainerStateStopped
status = define.ContainerStateStopped
case "running":
status = libpod.ContainerStateRunning
status = define.ContainerStateRunning
case "paused":
status = libpod.ContainerStatePaused
status = define.ContainerStatePaused
case "created":
status = libpod.ContainerStateCreated
case "configured":
status = libpod.ContainerStateConfigured
status = define.ContainerStateCreated
case "define.red":
status = define.ContainerStateConfigured
default:
status = libpod.ContainerStateUnknown
status = define.ContainerStateUnknown
}
ctrs[i.ID] = status
}
@ -564,3 +564,8 @@ func (r *LocalRuntime) PrunePods(ctx context.Context, cli *cliconfig.PodPruneVal
}
return ok, failures, nil
}
// PlayKubeYAML creates pods and containers from a kube YAML file
func (r *LocalRuntime) PlayKubeYAML(ctx context.Context, c *cliconfig.KubePlayValues, yamlFile string) (*Pod, error) {
return nil, define.ErrNotImplemented
}

View File

@ -5,6 +5,7 @@ package adapter
import (
"bufio"
"context"
"github.com/containers/libpod/libpod/define"
"io"
"io/ioutil"
"os"
@ -313,8 +314,13 @@ func IsImageNotFound(err error) bool {
}
// HealthCheck is a wrapper around the same-named function in libpod
func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (libpod.HealthCheckStatus, error) {
return r.Runtime.HealthCheck(c.InputArgs[0])
func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) {
output := "unhealthy"
status, err := r.Runtime.HealthCheck(c.InputArgs[0])
if status == libpod.HealthCheckSuccess {
output = "healthy"
}
return output, err
}
// Events is a wrapper to libpod to obtain libpod/podman events
@ -395,8 +401,8 @@ func (r *LocalRuntime) GetPodsByStatus(statuses []string) ([]*libpod.Pod, error)
}
// GetVersion is an alias to satisfy interface{}
func (r *LocalRuntime) GetVersion() (libpod.Version, error) {
return libpod.GetVersion()
func (r *LocalRuntime) GetVersion() (define.Version, error) {
return define.GetVersion()
}
// RemoteEndpoint resolve interface requirement

View File

@ -771,8 +771,8 @@ func IsImageNotFound(err error) bool {
}
// HealthCheck executes a container's healthcheck over a varlink connection
func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (libpod.HealthCheckStatus, error) {
return -1, define.ErrNotImplemented
func (r *LocalRuntime) HealthCheck(c *cliconfig.HealthCheckValues) (string, error) {
return "", define.ErrNotImplemented
}
// Events monitors libpod/podman events over a varlink connection
@ -907,22 +907,22 @@ func (r *LocalRuntime) GetContainersByContext(all bool, latest bool, namesOrIDs
}
// GetVersion returns version information from service
func (r *LocalRuntime) GetVersion() (libpod.Version, error) {
func (r *LocalRuntime) GetVersion() (define.Version, error) {
version, goVersion, gitCommit, built, osArch, apiVersion, err := iopodman.GetVersion().Call(r.Conn)
if err != nil {
return libpod.Version{}, errors.Wrapf(err, "Unable to obtain server version information")
return define.Version{}, errors.Wrapf(err, "Unable to obtain server version information")
}
var buildTime int64
if built != "" {
t, err := time.Parse(time.RFC3339, built)
if err != nil {
return libpod.Version{}, nil
return define.Version{}, nil
}
buildTime = t.Unix()
}
return libpod.Version{
return define.Version{
RemoteAPIVersion: apiVersion,
Version: version,
GoVersion: goVersion,

View File

@ -29,6 +29,7 @@ import (
"time"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -209,7 +210,7 @@ func followLog(reader *bufio.Reader, writer *logWriter, opts *LogOptions, ctr *l
if err != nil {
return err
}
if state != libpod.ContainerStateRunning && state != libpod.ContainerStatePaused {
if state != define.ContainerStateRunning && state != define.ContainerStatePaused {
break
}
continue

View File

@ -337,3 +337,14 @@ func GetGlobalOpts(c *cliconfig.RunlabelValues) string {
})
return strings.Join(optsCommand, " ")
}
// OpenExclusiveFile opens a file for writing and ensure it doesn't already exist
func OpenExclusiveFile(path string) (*os.File, error) {
baseDir := filepath.Dir(path)
if baseDir != "" {
if _, err := os.Stat(baseDir); err != nil {
return nil, err
}
}
return os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
}

pkg/util/utils_darwin.go Normal file
View File

@ -0,0 +1,11 @@
//+build darwin
package util
import (
"github.com/pkg/errors"
)
func GetContainerPidInformationDescriptors() ([]string, error) {
return []string{}, errors.New("this function is not supported on darwin")
}

pkg/util/utils_linux.go Normal file
View File

@ -0,0 +1,11 @@
package util
import (
"github.com/containers/psgo"
)
// GetContainerPidInformationDescriptors returns a string slice of all supported
// format descriptors of GetContainerPidInformation.
func GetContainerPidInformationDescriptors() ([]string, error) {
return psgo.ListDescriptors(), nil
}
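Callers now reach the descriptors through pkg/util rather than libpod; a minimal sketch:
package main
import (
    "fmt"
    "github.com/containers/libpod/pkg/util"
)
func main() {
    descs, err := util.GetContainerPidInformationDescriptors()
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(descs) // psgo descriptors, e.g. "user", "pid", "args"
}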

View File

@ -7,11 +7,12 @@ package util
import (
"fmt"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
"os"
"path/filepath"
"syscall"
"github.com/containers/libpod/pkg/rootless"
"github.com/pkg/errors"
)
// GetRootlessRuntimeDir returns the runtime directory when running as non root

View File

@ -6,18 +6,24 @@ import (
"github.com/pkg/errors"
)
// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir() (string, error) {
return "", errors.New("this function is not implemented for windows")
}
// IsCgroup2UnifiedMode returns whether we are running in cgroup 2 unified mode.
func IsCgroup2UnifiedMode() (bool, error) {
return false, errors.New("this function is not implemented for windows")
}
// GetContainerPidInformationDescriptors returns a string slice of all supported
// format descriptors of GetContainerPidInformation.
func GetContainerPidInformationDescriptors() ([]string, error) {
return nil, errors.New("this function is not implemented for windows")
}
// GetRootlessPauseProcessPidPath returns the path to the file that holds the pid for
// the pause process
func GetRootlessPauseProcessPidPath() (string, error) {
return "", errors.New("this function is not implemented for windows")
}
// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir() (string, error) {
return "", errors.New("this function is not implemented for windows")
}

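Together with utils_linux.go and utils_darwin.go above, this file completes Go's per-platform convention: the _linux/_darwin/_windows filename suffixes (or a //+build tag) select one implementation per GOOS, all sharing a signature, with unsupported platforms returning a uniform error. A caller degrades gracefully; a minimal sketch, assuming the libpod module at this commit is on the module path:

package main

import (
	"fmt"

	"github.com/containers/libpod/pkg/util"
)

func main() {
	descriptors, err := util.GetContainerPidInformationDescriptors()
	if err != nil {
		// On darwin and windows the stubs above land here.
		fmt.Println("ps descriptors unavailable:", err)
		return
	}
	fmt.Println(descriptors)
}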
View File

@ -58,7 +58,7 @@ func (i *LibpodAPI) Attach(call iopodman.VarlinkCall, name string, detachKeys st
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
if !start && state != libpod.ContainerStateRunning {
if !start && state != define.ContainerStateRunning {
return call.ReplyErrorOccurred("container must be running to attach")
}
@ -73,7 +73,7 @@ func (i *LibpodAPI) Attach(call iopodman.VarlinkCall, name string, detachKeys st
}
}()
if state == libpod.ContainerStateRunning {
if state == define.ContainerStateRunning {
finalErr = attach(ctr, streams, detachKeys, resize, errChan)
} else {
finalErr = startAndAttach(ctr, streams, detachKeys, resize, errChan)

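Attach only makes sense against a live process, hence the Running check; anything else is started and attached in one step. A standalone sketch of that dispatch (attach and startAndAttach are placeholders here, not libpod's implementations):

package main

import "fmt"

type status int

const (
	configured status = iota
	running
)

func attach(name string) error         { fmt.Println("attaching to", name); return nil }
func startAndAttach(name string) error { fmt.Println("starting then attaching to", name); return nil }

// dispatch mirrors the branch above: refuse a plain attach to a
// non-running container, attach directly if running, start otherwise.
func dispatch(name string, start bool, state status) error {
	if !start && state != running {
		return fmt.Errorf("container must be running to attach")
	}
	if state == running {
		return attach(name)
	}
	return startAndAttach(name)
}

func main() {
	fmt.Println(dispatch("demo", true, configured))
	fmt.Println(dispatch("demo", false, configured))
}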
View File

@ -17,6 +17,7 @@ import (
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/logs"
"github.com/containers/libpod/pkg/adapter/shortcuts"
cc "github.com/containers/libpod/pkg/spec"
"github.com/containers/storage/pkg/archive"
@ -139,7 +140,7 @@ func (i *LibpodAPI) GetContainersByStatus(call iopodman.VarlinkCall, statuses []
containers []iopodman.Container
)
for _, status := range statuses {
lpstatus, err := libpod.StringToContainerStatus(status)
lpstatus, err := define.StringToContainerStatus(status)
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
@ -199,7 +200,7 @@ func (i *LibpodAPI) ListContainerProcesses(call iopodman.VarlinkCall, name strin
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
if containerState != libpod.ContainerStateRunning {
if containerState != define.ContainerStateRunning {
return call.ReplyErrorOccurred(fmt.Sprintf("container %s is not running", name))
}
var psArgs []string
@ -230,7 +231,7 @@ func (i *LibpodAPI) GetContainerLogs(call iopodman.VarlinkCall, name string) err
return call.ReplyErrorOccurred(err.Error())
}
if _, err := os.Stat(logPath); err != nil {
if containerState == libpod.ContainerStateConfigured {
if containerState == define.ContainerStateConfigured {
return call.ReplyGetContainerLogs(logs)
}
}
@ -260,7 +261,7 @@ func (i *LibpodAPI) GetContainerLogs(call iopodman.VarlinkCall, name string) err
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
if state != libpod.ContainerStateRunning && state != libpod.ContainerStatePaused {
if state != define.ContainerStateRunning && state != define.ContainerStatePaused {
return call.ReplyErrorOccurred(fmt.Sprintf("%s is no longer running", ctr.ID()))
}
@ -360,7 +361,7 @@ func (i *LibpodAPI) StartContainer(call iopodman.VarlinkCall, name string) error
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
if state == libpod.ContainerStateRunning || state == libpod.ContainerStatePaused {
if state == define.ContainerStateRunning || state == define.ContainerStatePaused {
return call.ReplyErrorOccurred("container is already running or paused")
}
recursive := false
@ -511,7 +512,7 @@ func (i *LibpodAPI) DeleteStoppedContainers(call iopodman.VarlinkCall) error {
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
if state != libpod.ContainerStateRunning {
if state != define.ContainerStateRunning {
if err := i.Runtime.RemoveContainer(ctx, ctr, false, false); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
@ -535,7 +536,7 @@ func (i *LibpodAPI) GetAttachSockets(call iopodman.VarlinkCall, name string) err
// If the container hasn't been run, we need to run init
// so the conmon sockets get created.
if status == libpod.ContainerStateConfigured || status == libpod.ContainerStateStopped {
if status == define.ContainerStateConfigured || status == define.ContainerStateStopped {
if err := ctr.Init(getContext()); err != nil {
return call.ReplyErrorOccurred(err.Error())
}
@ -720,7 +721,7 @@ func (i *LibpodAPI) GetContainersLogs(call iopodman.VarlinkCall, names []string,
if err != nil {
return call.ReplyErrorOccurred(err.Error())
}
options := libpod.LogOptions{
options := logs.LogOptions{
Follow: follow,
Since: sinceTime,
Tail: uint64(tail),
@ -731,7 +732,7 @@ func (i *LibpodAPI) GetContainersLogs(call iopodman.VarlinkCall, names []string,
if len(names) > 1 {
options.Multi = true
}
logChannel := make(chan *libpod.LogLine, int(tail)*len(names)+1)
logChannel := make(chan *logs.LogLine, int(tail)*len(names)+1)
containers, err := shortcuts.GetContainersByContext(false, latest, names, i.Runtime)
if err != nil {
return call.ReplyErrorOccurred(err.Error())
@ -753,7 +754,7 @@ func (i *LibpodAPI) GetContainersLogs(call iopodman.VarlinkCall, names []string,
return call.ReplyGetContainersLogs(iopodman.LogLine{})
}
func newPodmanLogLine(line *libpod.LogLine) iopodman.LogLine {
func newPodmanLogLine(line *logs.LogLine) iopodman.LogLine {
return iopodman.LogLine{
Device: line.Device,
ParseLogType: line.ParseLogType,

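Note the channel sizing above: tail lines per container, across len(names) containers, plus one, so the producer goroutines never block before the varlink reply loop drains them. A self-contained sketch of that fan-in shape (LogLine is reduced to a string here; the real type carries device, time, and message fields):

package main

import (
	"fmt"
	"sync"
)

func main() {
	names := []string{"ctr1", "ctr2"}
	tail := 2
	// Sized exactly as in the hunk above: every tailed line fits
	// in the buffer without blocking any producer goroutine.
	logChannel := make(chan string, tail*len(names)+1)

	var wg sync.WaitGroup
	for _, name := range names {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			for i := 0; i < tail; i++ {
				logChannel <- fmt.Sprintf("%s: line %d", name, i)
			}
		}(name)
	}
	go func() {
		wg.Wait()
		close(logChannel)
	}()

	for line := range logChannel {
		fmt.Println(line)
	}
}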
View File

@ -3,17 +3,17 @@
package varlinkapi
import (
"github.com/containers/libpod/libpod/define"
goruntime "runtime"
"strings"
"time"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
)
// GetVersion ...
func (i *LibpodAPI) GetVersion(call iopodman.VarlinkCall) error {
versionInfo, err := libpod.GetVersion()
versionInfo, err := define.GetVersion()
if err != nil {
return err
}
@ -30,7 +30,7 @@ func (i *LibpodAPI) GetVersion(call iopodman.VarlinkCall) error {
// GetInfo returns details about the podman host and its stores
func (i *LibpodAPI) GetInfo(call iopodman.VarlinkCall) error {
versionInfo, err := libpod.GetVersion()
versionInfo, err := define.GetVersion()
if err != nil {
return err
}

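After this commit, version information is owned entirely by libpod/define, so even the varlink backend above needs only the new import. A minimal caller, assuming the libpod module at this commit (field names taken from the struct literal earlier in the diff):

package main

import (
	"fmt"

	"github.com/containers/libpod/libpod/define"
)

func main() {
	v, err := define.GetVersion()
	if err != nil {
		panic(err)
	}
	fmt.Println("version:", v.Version, "go:", v.GoVersion)
}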
View File

@ -12,6 +12,7 @@ import (
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/cmd/podman/varlink"
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/storage/pkg/archive"
)
@ -73,7 +74,7 @@ func makeListContainer(containerID string, batchInfo shared.BatchContainerStruct
Names: batchInfo.ConConfig.Name,
Labels: batchInfo.ConConfig.Labels,
Mounts: mounts,
Containerrunning: batchInfo.ConState == libpod.ContainerStateRunning,
Containerrunning: batchInfo.ConState == define.ContainerStateRunning,
Namespaces: namespace,
}
if batchInfo.Size != nil {

vendor/modules.txt vendored (2 changes)
View File

@ -542,11 +542,11 @@ k8s.io/apimachinery/pkg/api/resource
k8s.io/apimachinery/pkg/apis/meta/v1
k8s.io/apimachinery/pkg/util/wait
k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/selection
k8s.io/apimachinery/pkg/runtime
k8s.io/apimachinery/pkg/runtime/schema
k8s.io/apimachinery/pkg/types
k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/selection
k8s.io/apimachinery/pkg/conversion
k8s.io/apimachinery/pkg/labels
k8s.io/apimachinery/pkg/watch