mirror of https://github.com/containers/podman.git
code cleanup
clean up code identified as problematic by GoLand's inspection

Signed-off-by: baude <bbaude@redhat.com>
This commit is contained in:
parent f7407f2eb5
commit 1d36501f96

Makefile | 2
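Most of the hunks below follow a single pattern: error returns that were silently discarded are now checked (propagated or logged), shadowed or predeclared identifiers are renamed, and dead code is removed. As a rough illustration of the error-checking pattern only (this is not code from the commit; the package and function names here are made up):

package example

import (
    "log"
    "os"
)

// writeFile sketches the before/after shape of the cleanup: previously the
// results of Write and Close would have been dropped; now every error is
// either propagated or at least logged.
func writeFile(path string, data []byte) error {
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    if _, err := f.Write(data); err != nil {
        return err
    }
    if err := f.Close(); err != nil {
        log.Printf("unable to close %s: %v", path, err)
    }
    return nil
}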
@@ -384,7 +384,7 @@ install.libseccomp.sudo:

 cmd/podman/varlink/iopodman.go: cmd/podman/varlink/io.podman.varlink
-    $(GO) generate ./cmd/podman/varlink/...
+    GO111MODULE=off $(GO) generate ./cmd/podman/varlink/...

 API.md: cmd/podman/varlink/io.podman.varlink
     $(GO) generate ./docs/...

@@ -339,7 +339,6 @@ func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
 }

 func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.Bucket) error {
-    valid := true
     ctrBkt := ctrsBkt.Bucket(id)
     if ctrBkt == nil {
         return errors.Wrapf(define.ErrNoSuchCtr, "container %s not found in DB", string(id))

@@ -386,7 +385,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
     }

     ctr.runtime = s.runtime
-    ctr.valid = valid
+    ctr.valid = true

     return nil
 }

@@ -639,7 +638,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
     }

     // Add ctr to pod
-    if pod != nil {
+    if pod != nil && podCtrs != nil {
         if err := podCtrs.Put(ctrID, ctrName); err != nil {
             return errors.Wrapf(err, "error adding container %s to pod %s", ctr.ID(), pod.ID())
         }

@@ -737,7 +736,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
         }
     }

-    if podDB != nil {
+    if podDB != nil && pod != nil {
         // Check if the container is in the pod, remove it if it is
         podCtrs := podDB.Bucket(containersBkt)
         if podCtrs == nil {

@@ -89,13 +89,13 @@ func getTestContainer(id, name string, manager lock.Manager) (*Container, error)
     ctr.config.Labels["test"] = "testing"

-    // Allocate a lock for the container
-    lock, err := manager.AllocateLock()
+    // Allocate a containerLock for the container
+    containerLock, err := manager.AllocateLock()
     if err != nil {
         return nil, err
     }
-    ctr.lock = lock
-    ctr.config.LockID = lock.ID()
+    ctr.lock = containerLock
+    ctr.config.LockID = containerLock.ID()

     return ctr, nil
 }

@@ -114,13 +114,13 @@ func getTestPod(id, name string, manager lock.Manager) (*Pod, error) {
         valid: true,
     }

-    // Allocate a lock for the pod
-    lock, err := manager.AllocateLock()
+    // Allocate a podLock for the pod
+    podLock, err := manager.AllocateLock()
     if err != nil {
         return nil, err
     }
-    pod.lock = lock
-    pod.config.LockID = lock.ID()
+    pod.lock = podLock
+    pod.config.LockID = podLock.ID()

     return pod, nil
 }

@@ -145,7 +145,9 @@ func redirectResponseToOutputStreams(outputStream, errorStream io.Writer, writeO
     default:
         logrus.Infof("Received unexpected attach type %+d", buf[0])
     }

+    if dst == nil {
+        return errors.New("output destination cannot be nil")
+    }
     if doWrite {
         nw, ew := dst.Write(buf[1:nr])
         if ew != nil {

@@ -206,12 +206,12 @@ func (c *Container) Inspect(size bool) (*InspectContainerData, error) {
 func (c *Container) getContainerInspectData(size bool, driverData *driver.Data) (*InspectContainerData, error) {
     config := c.config
     runtimeInfo := c.state
-    spec, err := c.specFromState()
+    stateSpec, err := c.specFromState()
     if err != nil {
         return nil, err
     }

-    // Process is allowed to be nil in the spec
+    // Process is allowed to be nil in the stateSpec
     args := []string{}
     if config.Spec.Process != nil {
         args = config.Spec.Process.Args

@@ -244,7 +244,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
         }
     }

-    mounts, err := c.getInspectMounts(spec)
+    mounts, err := c.getInspectMounts(stateSpec)
     if err != nil {
         return nil, err
     }

@@ -255,7 +255,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
         Path: path,
         Args: args,
         State: &InspectContainerState{
-            OciVersion: spec.Version,
+            OciVersion: stateSpec.Version,
             Status: runtimeInfo.State.String(),
             Running: runtimeInfo.State == define.ContainerStateRunning,
             Paused: runtimeInfo.State == define.ContainerStatePaused,

@@ -285,9 +285,9 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
         Driver: driverData.Name,
         MountLabel: config.MountLabel,
         ProcessLabel: config.ProcessLabel,
-        EffectiveCaps: spec.Process.Capabilities.Effective,
-        BoundingCaps: spec.Process.Capabilities.Bounding,
-        AppArmorProfile: spec.Process.ApparmorProfile,
+        EffectiveCaps: stateSpec.Process.Capabilities.Effective,
+        BoundingCaps: stateSpec.Process.Capabilities.Bounding,
+        AppArmorProfile: stateSpec.Process.ApparmorProfile,
         ExecIDs: execIDs,
         GraphDriver: driverData,
         Mounts: mounts,

@@ -338,7 +338,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *driver.Data)
     // Get information on the container's network namespace (if present)
     data = c.getContainerNetworkInfo(data)

-    inspectConfig, err := c.generateInspectContainerConfig(spec)
+    inspectConfig, err := c.generateInspectContainerConfig(stateSpec)
     if err != nil {
         return nil, err
     }

@@ -555,7 +555,7 @@ func (c *Container) removeConmonFiles() error {
         if !os.IsNotExist(err) {
             return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
         }
-    } else if err == nil {
+    } else {
         // Rename should replace the old exit file (if it exists)
         if err := os.Rename(exitFile, oldExitFile); err != nil {
             return errors.Wrapf(err, "error renaming container %s exit file", c.ID())

@@ -568,11 +568,11 @@
 func (c *Container) export(path string) error {
     mountPoint := c.state.Mountpoint
     if !c.state.Mounted {
-        mount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
+        containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
         if err != nil {
             return errors.Wrapf(err, "error mounting container %q", c.ID())
         }
-        mountPoint = mount
+        mountPoint = containerMount
         defer func() {
             if _, err := c.runtime.store.Unmount(c.ID(), false); err != nil {
                 logrus.Errorf("error unmounting container %q: %v", c.ID(), err)

@@ -856,18 +856,18 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error {
     span.SetTag("struct", "container")
     defer span.Finish()

-    // Generate the OCI spec
-    spec, err := c.generateSpec(ctx)
+    // Generate the OCI newSpec
+    newSpec, err := c.generateSpec(ctx)
     if err != nil {
         return err
     }

-    // Save the OCI spec to disk
-    if err := c.saveSpec(spec); err != nil {
+    // Save the OCI newSpec to disk
+    if err := c.saveSpec(newSpec); err != nil {
         return err
     }

-    // With the spec complete, do an OCI create
+    // With the newSpec complete, do an OCI create
     if err := c.ociRuntime.createContainer(c, c.config.CgroupParent, nil); err != nil {
         return err
     }

@@ -1167,8 +1167,8 @@ func (c *Container) cleanupStorage() error {
         return nil
     }

-    for _, mount := range c.config.Mounts {
-        if err := c.unmountSHM(mount); err != nil {
+    for _, containerMount := range c.config.Mounts {
+        if err := c.unmountSHM(containerMount); err != nil {
             return err
         }
     }

@@ -1399,14 +1399,14 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (exten
         }
         return nil, err
     }
-    hooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
+    ociHooks, err := manager.Hooks(config, c.Spec().Annotations, len(c.config.UserVolumes) > 0)
     if err != nil {
         return nil, err
     }
-    if len(hooks) > 0 || config.Hooks != nil {
-        logrus.Warnf("implicit hook directories are deprecated; set --hooks-dir=%q explicitly to continue to load hooks from this directory", hDir)
+    if len(ociHooks) > 0 || config.Hooks != nil {
+        logrus.Warnf("implicit hook directories are deprecated; set --ociHooks-dir=%q explicitly to continue to load ociHooks from this directory", hDir)
     }
-    for i, hook := range hooks {
+    for i, hook := range ociHooks {
         allHooks[i] = hook
     }
 }

@@ -185,9 +185,13 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
     // If network namespace was requested, add it now
     if c.config.CreateNetNS {
         if c.config.PostConfigureNetNS {
-            g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, "")
+            if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, ""); err != nil {
+                return nil, err
+            }
         } else {
-            g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
+            if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()); err != nil {
+                return nil, err
+            }
         }
     }

@@ -415,7 +419,9 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {

     if rootPropagation != "" {
         logrus.Debugf("set root propagation to %q", rootPropagation)
-        g.SetLinuxRootPropagation(rootPropagation)
+        if err := g.SetLinuxRootPropagation(rootPropagation); err != nil {
+            return nil, err
+        }
     }

     // Warning: precreate hooks may alter g.Config in place.

@@ -561,7 +567,9 @@ func (c *Container) checkpointRestoreLabelLog(fileName string) (err error) {
     if err != nil {
         return errors.Wrapf(err, "failed to create CRIU log file %q", dumpLog)
     }
-    logFile.Close()
+    if err := logFile.Close(); err != nil {
+        logrus.Errorf("unable to close log file: %q", err)
+    }
     if err = label.SetFileLabel(dumpLog, c.MountLabel()); err != nil {
         return errors.Wrapf(err, "failed to label CRIU log file %q", dumpLog)
     }

@@ -620,9 +628,11 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
             "config.dump",
             "spec.dump",
         }
-        for _, delete := range cleanup {
-            file := filepath.Join(c.bundlePath(), delete)
-            os.Remove(file)
+        for _, del := range cleanup {
+            file := filepath.Join(c.bundlePath(), del)
+            if err := os.Remove(file); err != nil {
+                logrus.Debugf("unable to remove file %s", file)
+            }
         }
     }

@@ -702,7 +712,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
         if err != nil {
             return err
         }
-        json.Unmarshal(networkJSON, &networkStatus)
+        if err := json.Unmarshal(networkJSON, &networkStatus); err != nil {
+            return err
+        }
         // Take the first IP address
         var IP net.IP
         if len(networkStatus) > 0 {

@@ -744,7 +756,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti

     // We want to have the same network namespace as before.
     if c.config.CreateNetNS {
-        g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path())
+        if err := g.AddOrReplaceLinuxNamespace(spec.NetworkNamespace, c.state.NetNS.Path()); err != nil {
+            return err
+        }
     }

     if err := c.makeBindMounts(); err != nil {

@@ -769,7 +783,9 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
     }

     // Cleanup for a working restore.
-    c.removeConmonFiles()
+    if err := c.removeConmonFiles(); err != nil {
+        return err
+    }

     // Save the OCI spec to disk
     if err := c.saveSpec(g.Spec()); err != nil {

@@ -793,8 +809,8 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
         logrus.Debugf("Non-fatal: removal of checkpoint directory (%s) failed: %v", c.CheckpointPath(), err)
     }
     cleanup := [...]string{"restore.log", "dump.log", "stats-dump", "stats-restore", "network.status"}
-    for _, delete := range cleanup {
-        file := filepath.Join(c.bundlePath(), delete)
+    for _, del := range cleanup {
+        file := filepath.Join(c.bundlePath(), del)
         err = os.Remove(file)
         if err != nil {
             logrus.Debugf("Non-fatal: removal of checkpoint file (%s) failed: %v", file, err)

@@ -824,14 +840,14 @@ func (c *Container) makeBindMounts() error {
         // will recreate. Only do this if we aren't sharing them with
         // another container.
         if c.config.NetNsCtr == "" {
-            if path, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
-                if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+            if resolvePath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
+                if err := os.Remove(resolvePath); err != nil && !os.IsNotExist(err) {
                     return errors.Wrapf(err, "error removing container %s resolv.conf", c.ID())
                 }
                 delete(c.state.BindMounts, "/etc/resolv.conf")
             }
-            if path, ok := c.state.BindMounts["/etc/hosts"]; ok {
-                if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+            if hostsPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
+                if err := os.Remove(hostsPath); err != nil && !os.IsNotExist(err) {
                     return errors.Wrapf(err, "error removing container %s hosts", c.ID())
                 }
                 delete(c.state.BindMounts, "/etc/hosts")

@@ -968,10 +984,10 @@ func (c *Container) makeBindMounts() error {
 // generateResolvConf generates a containers resolv.conf
 func (c *Container) generateResolvConf() (string, error) {
     resolvConf := "/etc/resolv.conf"
-    for _, ns := range c.config.Spec.Linux.Namespaces {
-        if ns.Type == spec.NetworkNamespace {
-            if ns.Path != "" && !strings.HasPrefix(ns.Path, "/proc/") {
-                definedPath := filepath.Join("/etc/netns", filepath.Base(ns.Path), "resolv.conf")
+    for _, namespace := range c.config.Spec.Linux.Namespaces {
+        if namespace.Type == spec.NetworkNamespace {
+            if namespace.Path != "" && !strings.HasPrefix(namespace.Path, "/proc/") {
+                definedPath := filepath.Join("/etc/netns", filepath.Base(namespace.Path), "resolv.conf")
                 _, err := os.Stat(definedPath)
                 if err == nil {
                     resolvConf = definedPath

@@ -1096,10 +1112,10 @@ func (c *Container) generatePasswd() (string, error) {
     if c.config.User == "" {
         return "", nil
     }
-    spec := strings.SplitN(c.config.User, ":", 2)
-    userspec := spec[0]
-    if len(spec) > 1 {
-        groupspec = spec[1]
+    splitSpec := strings.SplitN(c.config.User, ":", 2)
+    userspec := splitSpec[0]
+    if len(splitSpec) > 1 {
+        groupspec = splitSpec[1]
     }
     // If a non numeric User, then don't generate passwd
     uid, err := strconv.ParseUint(userspec, 10, 32)

@@ -1137,7 +1153,7 @@ func (c *Container) generatePasswd() (string, error) {
     if err != nil {
         return "", errors.Wrapf(err, "failed to create temporary passwd file")
     }
-    if os.Chmod(passwdFile, 0644); err != nil {
+    if err := os.Chmod(passwdFile, 0644); err != nil {
         return "", err
     }
     return passwdFile, nil

@@ -17,7 +17,6 @@ import (
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
     "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
     v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -324,22 +323,6 @@ func libpodEnvVarsToKubeEnvVars(envs []string) ([]v1.EnvVar, error) {
     return envVars, nil
 }

-// Is this worth it?
-func libpodMaxAndMinToResourceList(c *Container) (v1.ResourceList, v1.ResourceList) { //nolint
-    // It does not appear we can properly calculate CPU resources from the information
-    // we know in libpod. Libpod knows CPUs by time, shares, etc.
-
-    // We also only know about a memory limit; no memory minimum
-    maxResources := make(map[v1.ResourceName]resource.Quantity)
-    minResources := make(map[v1.ResourceName]resource.Quantity)
-    config := c.Config()
-    maxMem := config.Spec.Linux.Resources.Memory.Limit
-
-    _ = maxMem
-
-    return maxResources, minResources
-}
-
 // libpodMountsToKubeVolumeMounts converts the containers mounts to a struct kube understands
 func libpodMountsToKubeVolumeMounts(c *Container) ([]v1.VolumeMount, []v1.Volume, error) {
     var vms []v1.VolumeMount

@@ -427,16 +410,14 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v
     // those indicate a dropped cap
     for _, capability := range defaultCaps {
         if !util.StringInSlice(capability, containerCaps) {
-            cap := v1.Capability(capability)
-            drop = append(drop, cap)
+            drop = append(drop, v1.Capability(capability))
         }
     }
     // Find caps in the container but not in the defaults; those indicate
     // an added cap
     for _, capability := range containerCaps {
         if !util.StringInSlice(capability, defaultCaps) {
-            cap := v1.Capability(capability)
-            add = append(add, cap)
+            add = append(add, v1.Capability(capability))
         }
     }

@@ -294,14 +294,14 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
         return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket)
     }
     buf := make([]byte, 2048)
-    len, err := conn.Read(buf)
+    readLength, err := conn.Read(buf)
     if err != nil {
         return errors.Wrapf(err, "cannot read from control socket %s", apiSocket)
     }
     // if there is no 'error' key in the received JSON data, then the operation was
     // successful.
     var y map[string]interface{}
-    if err := json.Unmarshal(buf[0:len], &y); err != nil {
+    if err := json.Unmarshal(buf[0:readLength], &y); err != nil {
         return errors.Wrapf(err, "error parsing error status from slirp4netns")
     }
     if e, found := y["error"]; found {

@@ -332,7 +332,9 @@ func (r *Runtime) setupNetNS(ctr *Container) (err error) {
     if err != nil {
         return errors.Wrapf(err, "cannot open %s", nsPath)
     }
-    mountPointFd.Close()
+    if err := mountPointFd.Close(); err != nil {
+        return err
+    }

     if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil {
         return errors.Wrapf(err, "cannot mount %s", nsPath)

@@ -352,12 +354,12 @@

 // Join an existing network namespace
 func joinNetNS(path string) (ns.NetNS, error) {
-    ns, err := ns.GetNS(path)
+    netNS, err := ns.GetNS(path)
     if err != nil {
         return nil, errors.Wrapf(err, "error retrieving network namespace at %s", path)
     }

-    return ns, nil
+    return netNS, nil
 }

 // Close a network namespace.

@@ -263,7 +263,9 @@ func (r *OCIRuntime) updateContainerStatus(ctr *Container, useRuntime bool) erro
         return errors.Wrapf(err, "error getting container %s state", ctr.ID())
     }
     if strings.Contains(string(out), "does not exist") {
-        ctr.removeConmonFiles()
+        if err := ctr.removeConmonFiles(); err != nil {
+            logrus.Debugf("unable to remove conmon files for container %s", ctr.ID())
+        }
         ctr.state.ExitCode = -1
         ctr.state.FinishedTime = time.Now()
         ctr.state.State = define.ContainerStateExited

@@ -273,7 +275,9 @@
     }
     defer cmd.Wait()

-    errPipe.Close()
+    if err := errPipe.Close(); err != nil {
+        return err
+    }
     out, err := ioutil.ReadAll(outPipe)
     if err != nil {
         return errors.Wrapf(err, "error reading stdout: %s", ctr.ID())

@@ -433,8 +437,8 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
     args = append(args, "--no-new-privs")
     }

-    for _, cap := range capAdd {
-        args = append(args, "--cap", cap)
+    for _, capabilityAdd := range capAdd {
+        args = append(args, "--cap", capabilityAdd)
     }

     for _, envVar := range env {

@@ -475,7 +479,9 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty
     for fd := 3; fd < 3+preserveFDs; fd++ {
         // These fds were passed down to the runtime. Close them
         // and not interfere
-        os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close()
+        if err := os.NewFile(uintptr(fd), fmt.Sprintf("fd-%d", fd)).Close(); err != nil {
+            logrus.Debugf("unable to close file fd-%d", fd)
+        }
     }
 }

@@ -484,7 +490,9 @@ func (r *OCIRuntime) execContainer(c *Container, cmd, capAdd, env []string, tty

 // checkpointContainer checkpoints the given container
 func (r *OCIRuntime) checkpointContainer(ctr *Container, options ContainerCheckpointOptions) error {
-    label.SetSocketLabel(ctr.ProcessLabel())
+    if err := label.SetSocketLabel(ctr.ProcessLabel()); err != nil {
+        return err
+    }
     // imagePath is used by CRIU to store the actual checkpoint files
     imagePath := ctr.CheckpointPath()
     // workPath will be used to store dump.log and stats-dump

@@ -342,7 +342,9 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
     )
     plabel, err = selinux.CurrentLabel()
     if err != nil {
-        childPipe.Close()
+        if err := childPipe.Close(); err != nil {
+            logrus.Errorf("failed to close child pipe: %q", err)
+        }
         return errors.Wrapf(err, "Failed to get current SELinux label")
     }

@@ -325,7 +325,7 @@ func WithMaxLogSize(limit int64) RuntimeOption {

 // WithNoPivotRoot sets the runtime to use MS_MOVE instead of PIVOT_ROOT when
 // starting containers.
-func WithNoPivotRoot(noPivot bool) RuntimeOption {
+func WithNoPivotRoot() RuntimeOption {
     return func(rt *Runtime) error {
         if rt.valid {
             return config2.ErrRuntimeFinalized

@@ -245,7 +245,7 @@ type RuntimeConfig struct {
     // EventsLogger determines where events should be logged
     EventsLogger string `toml:"events_logger"`
     // EventsLogFilePath is where the events log is stored.
-    EventsLogFilePath string `toml:-"events_logfile_path"`
+    EventsLogFilePath string `toml:"-events_logfile_path"`
     //DetachKeys is the sequence of keys used to detach a container
     DetachKeys string `toml:"detach_keys"`
 }

@@ -643,7 +643,9 @@ func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ..
     }

     if configPath != "" {
-        os.MkdirAll(filepath.Dir(configPath), 0755)
+        if err := os.MkdirAll(filepath.Dir(configPath), 0755); err != nil {
+            return nil, err
+        }
         file, err := os.OpenFile(configPath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
         if err != nil && !os.IsExist(err) {
             return nil, errors.Wrapf(err, "cannot open file %s", configPath)

@@ -652,7 +654,9 @@
             defer file.Close()
             enc := toml.NewEncoder(file)
             if err := enc.Encode(runtime.config); err != nil {
-                os.Remove(configPath)
+                if removeErr := os.Remove(configPath); removeErr != nil {
+                    logrus.Debugf("unable to remove %s: %q", configPath, err)
+                }
             }
         }
     }

@@ -430,22 +430,17 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, force bool,
     // If we're removing the pod, the container will be evicted
     // from the state elsewhere
     if !removePod {
         if err := r.state.RemoveContainerFromPod(pod, c); err != nil {
             if cleanupErr == nil {
                 cleanupErr = err
             } else {
                 logrus.Errorf("removing container from pod: %v", err)
             }
         }
     }
 } else {
     if err := r.state.RemoveContainer(c); err != nil {
-        if cleanupErr == nil {
-            cleanupErr = err
-        } else {
-            logrus.Errorf("removing container: %v", err)
-        }
+        if cleanupErr == nil {
+            cleanupErr = err
+        }
+        logrus.Errorf("removing container: %v", err)
     }

 // Set container as invalid so it can no longer be used

@@ -37,7 +37,9 @@ func stopPauseProcess() error {
         if err := os.Remove(pausePidPath); err != nil {
             return errors.Wrapf(err, "cannot delete pause pid file %s", pausePidPath)
         }
-        syscall.Kill(pausePid, syscall.SIGKILL)
+        if err := syscall.Kill(pausePid, syscall.SIGKILL); err != nil {
+            return err
+        }
     }
     return nil
 }

@@ -46,10 +46,6 @@ func (c *Container) GetContainerStats(previousStats *ContainerStats) (*Container
         return stats, errors.Wrapf(err, "unable to obtain cgroup stats")
     }
     conState := c.state.State
-    if err != nil {
-        return stats, errors.Wrapf(err, "unable to determine container state")
-    }
-
     netStats, err := getContainerNetIO(c)
     if err != nil {
         return nil, err

@@ -9,8 +9,6 @@ import (
     "strings"
     "time"

-    "github.com/containers/image/signature"
-    "github.com/containers/image/types"
     "github.com/containers/libpod/libpod/define"
     "github.com/fsnotify/fsnotify"
     spec "github.com/opencontainers/runtime-spec/specs-go"

@@ -32,24 +30,6 @@ func FuncTimer(funcName string) {
     fmt.Printf("%s executed in %d ms\n", funcName, elapsed)
 }

-// CopyStringStringMap deep copies a map[string]string and returns the result
-func CopyStringStringMap(m map[string]string) map[string]string {
-    n := map[string]string{}
-    for k, v := range m {
-        n[k] = v
-    }
-    return n
-}
-
-// GetPolicyContext creates a signature policy context for the given signature policy path
-func GetPolicyContext(path string) (*signature.PolicyContext, error) {
-    policy, err := signature.DefaultPolicy(&types.SystemContext{SignaturePolicyPath: path})
-    if err != nil {
-        return nil, err
-    }
-    return signature.NewPolicyContext(policy)
-}
-
 // RemoveScientificNotationFromFloat returns a float without any
 // scientific notation if the number has any.
 // golang does not handle conversion of float64s that have scientific

@@ -48,11 +48,6 @@ func ExecCmdWithStdStreams(stdin io.Reader, stdout, stderr io.Writer, env []stri
     return nil
 }

-// StatusToExitCode converts wait status code to an exit code
-func StatusToExitCode(status int) int {
-    return ((status) & 0xff00) >> 8
-}
-
 // ErrDetach is an error indicating that the user manually detached from the
 // container.
 var ErrDetach = errors.New("detached from container")

@@ -538,11 +538,11 @@ gopkg.in/yaml.v2
 k8s.io/api/core/v1
 # k8s.io/apimachinery v0.0.0-20190624085041-961b39a1baa0
 k8s.io/apimachinery/pkg/fields
-k8s.io/apimachinery/pkg/api/resource
 k8s.io/apimachinery/pkg/apis/meta/v1
 k8s.io/apimachinery/pkg/util/wait
 k8s.io/apimachinery/pkg/util/runtime
 k8s.io/apimachinery/pkg/selection
+k8s.io/apimachinery/pkg/api/resource
 k8s.io/apimachinery/pkg/runtime
 k8s.io/apimachinery/pkg/runtime/schema
 k8s.io/apimachinery/pkg/types