Merge pull request #3521 from baude/golangcilint1

first pass of corrections for golangci-lint
OpenShift Merge Robot 2019-07-11 01:22:30 +02:00 committed by GitHub
commit e2e8477f83
105 changed files with 456 additions and 406 deletions

View File

@ -51,6 +51,6 @@ func attachCmd(c *cliconfig.AttachValues) error {
if err != nil {
return errors.Wrapf(err, "error creating runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
return runtime.Attach(getContext(), c)
}
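The swap from runtime.Shutdown(false) to runtime.DeferredShutdown(false) above recurs across nearly every command in this commit: errcheck reports the error returned by Shutdown being discarded inside a defer, and a plain defer statement cannot inspect a return value. DeferredShutdown itself is not shown in this diff; presumably it is a log-and-continue wrapper along the lines of this self-contained sketch (Runtime and the log wording are stand-ins, not the actual adapter code):

package main

import "log"

// Runtime stands in for the libpod runtime; only the fact that Shutdown
// returns an error matters for the pattern.
type Runtime struct{}

func (r *Runtime) Shutdown(force bool) error { return nil }

// DeferredShutdown logs the Shutdown error instead of dropping it, so the
// call can sit directly in a defer without tripping errcheck.
func (r *Runtime) DeferredShutdown(force bool) {
    if err := r.Shutdown(force); err != nil {
        log.Printf("unable to shut down runtime: %v", err)
    }
}

func main() {
    r := &Runtime{}
    defer r.DeferredShutdown(false)
}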

View File

@ -57,14 +57,20 @@ func init() {
budFlags := buildahcli.GetBudFlags(&budFlagsValues)
flag := budFlags.Lookup("pull")
flag.Value.Set("true")
if err := flag.Value.Set("true"); err != nil {
logrus.Error("unable to set pull flag to true")
}
flag.DefValue = "true"
layerFlags := buildahcli.GetLayerFlags(&layerValues)
flag = layerFlags.Lookup("layers")
flag.Value.Set(useLayers())
if err := flag.Value.Set(useLayers()); err != nil {
logrus.Error("unable to set uselayers")
}
flag.DefValue = useLayers()
flag = layerFlags.Lookup("force-rm")
flag.Value.Set("true")
if err := flag.Value.Set("true"); err != nil {
logrus.Error("unable to set force-rm flag to true")
}
flag.DefValue = "true"
fromAndBugFlags := buildahcli.GetFromAndBudFlags(&fromAndBudValues, &userNSValues, &namespaceValues)
@ -72,7 +78,7 @@ func init() {
flags.AddFlagSet(&budFlags)
flags.AddFlagSet(&layerFlags)
flags.AddFlagSet(&fromAndBugFlags)
flags.MarkHidden("signature-policy")
markFlagHidden(flags, "signature-policy")
}
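The init() changes above exist because spf13/pflag's Value.Set(string) and FlagSet.MarkHidden(name string) both return an error, and errcheck flags every call site that ignores it. The commit's answer is to check and log rather than propagate, since a flag that fails to be set or hidden should not abort the CLI; the repeated MarkHidden check is what the new markFlagHidden helper (added to cmd/podman/utils.go later in this diff) wraps. A minimal illustration of the two calls and the idiom (the flag set here is invented for the example):

package main

import (
    "log"

    "github.com/spf13/pflag"
)

func main() {
    flags := pflag.NewFlagSet("build", pflag.ContinueOnError)
    flags.Bool("pull", false, "pull the base image before building")

    // Both calls return an error that errcheck insists on seeing handled.
    if err := flags.Lookup("pull").Value.Set("true"); err != nil {
        log.Printf("unable to set pull flag to true: %v", err)
    }
    if err := flags.MarkHidden("pull"); err != nil {
        log.Printf("unable to mark pull flag as hidden: %v", err)
    }
}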
func getDockerfiles(files []string) []string {
@ -177,7 +183,6 @@ func buildCmd(c *cliconfig.BuildValues) error {
}
contextDir = absDir
}
cliArgs = Tail(cliArgs)
} else {
// No context directory or URL was specified. Try to use the
// home of the first locally-available Dockerfile.
@ -218,7 +223,7 @@ func buildCmd(c *cliconfig.BuildValues) error {
}
// end from buildah
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
var stdout, stderr, reporter *os.File
stdout = os.Stdout

View File

@ -59,6 +59,6 @@ func checkpointCmd(c *cliconfig.CheckpointValues) error {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
return runtime.Checkpoint(c)
}

View File

@ -52,7 +52,7 @@ func cleanupCmd(c *cliconfig.CleanupValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
ok, failures, err := runtime.CleanupContainers(getContext(), c)
if err != nil {

View File

@ -53,7 +53,7 @@ func commitCmd(c *cliconfig.CommitValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
args := c.InputArgs
if len(args) != 2 {

View File

@ -11,7 +11,6 @@ import (
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/storage"
"github.com/fatih/camelcase"
jsoniter "github.com/json-iterator/go"
"github.com/pkg/errors"
@ -19,8 +18,7 @@ import (
)
var (
stores = make(map[storage.Store]struct{})
json = jsoniter.ConfigCompatibleWithStandardLibrary
json = jsoniter.ConfigCompatibleWithStandardLibrary
)
const (

View File

@ -43,7 +43,7 @@ func pruneContainersCmd(c *cliconfig.PruneContainersValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
maxWorkers := shared.DefaultPoolSize("prune")
if c.GlobalIsSet("max-workers") {

View File

@ -68,7 +68,7 @@ func cpCmd(c *cliconfig.CpValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
return copyBetweenHostAndContainer(runtime, args[0], args[1], c.Extract, c.Pause)
}
@ -95,7 +95,11 @@ func copyBetweenHostAndContainer(runtime *libpod.Runtime, src string, dest strin
if err != nil {
return err
}
defer ctr.Unmount(false)
defer func() {
if err := ctr.Unmount(false); err != nil {
logrus.Errorf("unable to umount container '%s': %q", ctr.ID(), err)
}
}()
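The rewrite above from defer ctr.Unmount(false) to a deferred closure is the shape errcheck forces whenever a cleanup call returns an error: defer takes a single call expression, so the only way to look at the error is to wrap the call in an anonymous function. A standalone sketch of the idiom (unmount is a stand-in for ctr.Unmount):

package main

import "log"

// unmount is a stand-in for ctr.Unmount; all that matters is that it returns an error.
func unmount(force bool) error { return nil }

func main() {
    // A bare `defer unmount(false)` would discard the error; wrapping the
    // call in a deferred closure lets us log it instead.
    defer func() {
        if err := unmount(false); err != nil {
            log.Printf("unable to unmount container: %v", err)
        }
    }()
}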
// We can't pause rootless containers.
if pause && rootless.IsRootless() {

View File

@ -57,7 +57,7 @@ func createCmd(c *cliconfig.CreateValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
cid, err := runtime.CreateContainer(getContext(), c)
if err != nil {

View File

@ -61,8 +61,7 @@ func init() {
flags.BoolVar(&diffCommand.Archive, "archive", true, "Save the diff as a tar archive")
flags.StringVar(&diffCommand.Format, "format", "", "Change the output format")
flags.BoolVarP(&diffCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
flags.MarkHidden("archive")
markFlagHidden(flags, "archive")
markFlagHiddenForRemoteClient("latest", flags)
}
@ -93,7 +92,7 @@ func diffCmd(c *cliconfig.DiffValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
var to string
if c.Latest {
@ -137,7 +136,5 @@ func diffCmd(c *cliconfig.DiffValues) error {
} else {
out = stdoutStruct{output: diffOutput}
}
formats.Writer(out).Out()
return nil
return formats.Writer(out).Out()
}

View File

@ -36,7 +36,7 @@ func init() {
flags.BoolVar(&eventsCommand.Stream, "stream", true, "stream new events; for testing only")
flags.StringVar(&eventsCommand.Since, "since", "", "show all events created since timestamp")
flags.StringVar(&eventsCommand.Until, "until", "", "show all events until timestamp")
flags.MarkHidden("stream")
markFlagHidden(flags, "stream")
}
func eventsCmd(c *cliconfig.EventValues) error {
@ -44,7 +44,7 @@ func eventsCmd(c *cliconfig.EventValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
return runtime.Events(c)
}

View File

@ -64,7 +64,7 @@ func execCmd(c *cliconfig.ExecValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
err = runtime.Exec(c, cmd)
if errors.Cause(err) == define.ErrCtrStateInvalid {

View File

@ -90,7 +90,7 @@ func imageExistsCmd(c *cliconfig.ImageExistsValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if _, err := runtime.NewImageFromLocal(args[0]); err != nil {
//TODO we need to ask about having varlink defined errors exposed
//so we can reuse them
@ -111,7 +111,7 @@ func containerExistsCmd(c *cliconfig.ContainerExistsValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if _, err := runtime.LookupContainer(args[0]); err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr || err.Error() == "io.podman.ContainerNotFound" {
os.Exit(1)
@ -130,7 +130,7 @@ func podExistsCmd(c *cliconfig.PodExistsValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if _, err := runtime.LookupPod(args[0]); err != nil {
if errors.Cause(err) == define.ErrNoSuchPod || err.Error() == "io.podman.PodNotFound" {

View File

@ -45,7 +45,7 @@ func exportCmd(c *cliconfig.ExportValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
args := c.InputArgs
if len(args) == 0 {

View File

@ -62,7 +62,7 @@ func generateKubeYAMLCmd(c *cliconfig.GenerateKubeValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
podYAML, serviceYAML, err := runtime.GenerateKube(c)
if err != nil {

View File

@ -50,7 +50,7 @@ func generateSystemdCmd(c *cliconfig.GenerateSystemdValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
// User input stop timeout must be 0 or greater
if c.Flag("timeout").Changed && c.StopTimeout < 0 {

View File

@ -42,6 +42,7 @@ func healthCheckCmd(c *cliconfig.HealthCheckValues) error {
if err != nil {
return errors.Wrap(err, "could not get runtime")
}
defer runtime.DeferredShutdown(false)
status, err := runtime.HealthCheck(c)
fmt.Println(status)
return err

View File

@ -71,7 +71,7 @@ func historyCmd(c *cliconfig.HistoryValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
format := genHistoryFormat(c.Format, c.Quiet)

View File

@ -138,7 +138,7 @@ func imagesCmd(c *cliconfig.ImagesValues) error {
if err != nil {
return errors.Wrapf(err, "Could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if len(c.InputArgs) == 1 {
image = c.InputArgs[0]
}

View File

@ -41,7 +41,7 @@ func pruneImagesCmd(c *cliconfig.PruneImagesValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
// Call prune; if any cids are returned, print them and then
// return err in case an error also came up

View File

@ -49,7 +49,7 @@ func importCmd(c *cliconfig.ImportValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
var (
source string

View File

@ -55,7 +55,7 @@ func infoCmd(c *cliconfig.InfoValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
infoArr, err := runtime.Info()
if err != nil {
@ -97,9 +97,7 @@ func infoCmd(c *cliconfig.InfoValues) error {
out = formats.StdoutTemplate{Output: info, Template: infoOutputFormat}
}
formats.Writer(out).Out()
return nil
return formats.Writer(out).Out()
}
// top-level "debug" info

View File

@ -54,7 +54,7 @@ func initCmd(c *cliconfig.InitValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
ok, failures, err := runtime.InitContainers(ctx, c)
if err != nil {

View File

@ -88,7 +88,7 @@ func inspectCmd(c *cliconfig.InspectValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if !util.StringInSlice(inspectType, []string{inspectTypeContainer, inspectTypeImage, inspectAll}) {
return errors.Errorf("the only recognized types are %q, %q, and %q", inspectTypeContainer, inspectTypeImage, inspectAll)
@ -193,8 +193,8 @@ func iterateInput(ctx context.Context, size bool, args []string, runtime *adapte
inspectError = errors.Wrapf(err, "error getting libpod container inspect data %s", ctr.ID())
break
}
artifact, inspectError := getArtifact(ctr)
if inspectError != nil {
artifact, err := getArtifact(ctr)
if err != nil {
inspectError = err
break
}
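The change above is a shadowing fix rather than pure style: because the short declaration sits inside a nested block, artifact, inspectError := getArtifact(ctr) declared a new inspectError that shadowed the one at the top of iterateInput, so the error never escaped the loop. Assigning to a fresh err and copying it into inspectError keeps the outer variable live. A compact reproduction of the failure mode (getArtifact here is a dummy):

package main

import (
    "errors"
    "fmt"
)

func getArtifact() (string, error) { return "", errors.New("boom") }

func main() {
    var inspectError error
    for i := 0; i < 1; i++ {
        // := in a nested block declares a NEW inspectError, shadowing the
        // outer one, so the assignment below is invisible after the loop.
        artifact, inspectError := getArtifact()
        _, _ = artifact, inspectError
        break
    }
    fmt.Println(inspectError) // prints <nil>: the error was lost to shadowing
}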

View File

@ -63,7 +63,7 @@ func killCmd(c *cliconfig.KillValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
ok, failures, err := runtime.KillContainers(getContext(), c, killSignal)
if err != nil {

View File

@ -43,7 +43,7 @@ func init() {
// Disabled flags for the remote client
if !remote {
flags.StringVar(&loadCommand.SignaturePolicy, "signature-policy", "", "Pathname of signature policy file (not usually used)")
flags.MarkHidden("signature-policy")
markFlagHidden(flags, "signature-policy")
}
}
@ -65,7 +65,7 @@ func loadCmd(c *cliconfig.LoadValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if len(c.Input) > 0 {
if err := parse.ValidateFileName(c.Input); err != nil {

View File

@ -54,8 +54,7 @@ func init() {
flags.StringVar(&logsCommand.Since, "since", "", "Show logs since TIMESTAMP")
flags.Uint64Var(&logsCommand.Tail, "tail", 0, "Output the specified number of LINES at the end of the logs. Defaults to 0, which prints all lines")
flags.BoolVarP(&logsCommand.Timestamps, "timestamps", "t", false, "Output the timestamps in the log")
flags.MarkHidden("details")
markFlagHidden(flags, "details")
flags.SetInterspersed(false)
markFlagHiddenForRemoteClient("latest", flags)
@ -68,7 +67,7 @@ func logsCmd(c *cliconfig.LogsValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
sinceTime := time.Time{}
if c.Flag("since").Changed {

View File

@ -45,14 +45,18 @@ func init() {
rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.NetworkCmdPath, "network-cmd-path", "", "Path to the command for configuring the network")
rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.CniConfigDir, "cni-config-dir", "", "Path of the configuration directory for CNI networks")
rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.DefaultMountsFile, "default-mounts-file", "", "Path to default mounts file")
rootCmd.PersistentFlags().MarkHidden("defaults-mount-file")
if err := rootCmd.PersistentFlags().MarkHidden("default-mounts-file"); err != nil {
logrus.Error("unable to mark default-mounts-file flag as hidden")
}
// Override default --help information of `--help` global flag
var dummyHelp bool
rootCmd.PersistentFlags().BoolVar(&dummyHelp, "help", false, "Help for podman")
rootCmd.PersistentFlags().StringSliceVar(&MainGlobalOpts.HooksDir, "hooks-dir", []string{}, "Set the OCI hooks directory path (may be set multiple times)")
rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.LogLevel, "log-level", "error", "Log messages above specified level: debug, info, warn, error, fatal or panic")
rootCmd.PersistentFlags().IntVar(&MainGlobalOpts.MaxWorks, "max-workers", 0, "The maximum number of workers for parallel operations")
rootCmd.PersistentFlags().MarkHidden("max-workers")
if err := rootCmd.PersistentFlags().MarkHidden("max-workers"); err != nil {
logrus.Error("unable to mark max-workers flag as hidden")
}
rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.Namespace, "namespace", "", "Set the libpod namespace, used to create separate views of the containers and pods on the system")
rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.Root, "root", "", "Path to the root directory in which data, including images, is stored")
rootCmd.PersistentFlags().StringVar(&MainGlobalOpts.Runroot, "runroot", "", "Path to the 'run directory' where all state information is stored")
@ -118,10 +122,10 @@ func setupRootless(cmd *cobra.Command, args []string) error {
return nil
}
podmanCmd := cliconfig.PodmanCommand{
cmd,
args,
MainGlobalOpts,
remoteclient,
Command: cmd,
InputArgs: args,
GlobalFlags: MainGlobalOpts,
Remote: remoteclient,
}
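The PodmanCommand literal above moves from positional to keyed fields. The vet "composites" check that golangci-lint can enable reports unkeyed fields in composite literals of structs defined in another package, because positional initialization silently breaks when the upstream struct gains or reorders fields. A small self-contained comparison (Options stands in for cliconfig.PodmanCommand):

package main

import "fmt"

// Options stands in for a struct imported from another package,
// e.g. cliconfig.PodmanCommand in this commit.
type Options struct {
    Name   string
    Args   []string
    Remote bool
}

func main() {
    // Unkeyed: order-dependent, and flagged by vet for external structs.
    unkeyed := Options{"podman", []string{"ps"}, false}

    // Keyed: what the commit switches to; robust to field reordering.
    keyed := Options{
        Name:   "podman",
        Args:   []string{"ps"},
        Remote: false,
    }
    fmt.Println(unkeyed, keyed)
}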
pausePidPath, err := util.GetRootlessPauseProcessPidPath()
@ -148,7 +152,7 @@ func setupRootless(cmd *cobra.Command, args []string) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
ctrs, err := runtime.GetRunningContainers()
if err != nil {

View File

@ -65,7 +65,7 @@ func mountCmd(c *cliconfig.MountValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if os.Geteuid() != 0 {
rtc, err := runtime.GetConfig()

View File

@ -46,7 +46,7 @@ func pauseCmd(c *cliconfig.PauseValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
args := c.InputArgs
if len(args) < 1 && !c.All {

View File

@ -2,7 +2,6 @@ package main
import (
"fmt"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/pkg/adapter"
@ -45,7 +44,7 @@ func init() {
flags.StringVar(&playKubeCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
flags.StringVar(&playKubeCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.BoolVar(&playKubeCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
flags.MarkHidden("signature-policy")
markFlagHidden(flags, "signature-policy")
}
}
@ -63,7 +62,7 @@ func playKubeCmd(c *cliconfig.KubePlayValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
_, err = runtime.PlayKubeYAML(ctx, c, args[0])
return err

View File

@ -8,6 +8,7 @@ import (
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/containers/libpod/pkg/errorhandling"
"github.com/containers/libpod/pkg/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -56,7 +57,6 @@ func init() {
flags.StringVar(&podCreateCommand.Share, "share", shared.DefaultKernelNamespaces, "A comma delimited list of kernel namespaces the pod will share")
}
func podCreateCmd(c *cliconfig.PodCreateValues) error {
var (
err error
@ -67,7 +67,7 @@ func podCreateCmd(c *cliconfig.PodCreateValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if len(c.Publish) > 0 {
if !c.Infra {
@ -86,8 +86,8 @@ func podCreateCmd(c *cliconfig.PodCreateValues) error {
if err != nil {
return errors.Errorf("error opening pod-id-file %s", c.PodIDFile)
}
defer podIdFile.Close()
defer podIdFile.Sync()
defer errorhandling.CloseQuiet(podIdFile)
defer errorhandling.SyncQuiet(podIdFile)
}
labels, err := shared.GetAllLabels(c.LabelFile, c.Labels)
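The old defer podIdFile.Close() and defer podIdFile.Sync() lines both dropped error returns, so the commit routes them through errorhandling.CloseQuiet and errorhandling.SyncQuiet. Those helpers are not part of this diff; they are presumably defer-friendly wrappers roughly like the sketch below (the log wording is invented):

package main

import (
    "log"
    "os"
)

// CloseQuiet and SyncQuiet sketch what pkg/errorhandling presumably provides:
// cleanup wrappers that log the error instead of returning it.
func CloseQuiet(f *os.File) {
    if err := f.Close(); err != nil {
        log.Printf("unable to close %s: %v", f.Name(), err)
    }
}

func SyncQuiet(f *os.File) {
    if err := f.Sync(); err != nil {
        log.Printf("unable to sync %s: %v", f.Name(), err)
    }
}

func main() {
    f, err := os.CreateTemp("", "pod-id-file")
    if err != nil {
        log.Fatal(err)
    }
    // Deferred calls run LIFO: Sync runs before Close, matching the order above.
    defer CloseQuiet(f)
    defer SyncQuiet(f)
}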

View File

@ -53,7 +53,7 @@ func podInspectCmd(c *cliconfig.PodInspectValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if c.Latest {
pod, err = runtime.GetLatestPod()

View File

@ -53,7 +53,7 @@ func podKillCmd(c *cliconfig.PodKillValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
killSignal := uint(syscall.SIGTERM)

View File

@ -49,7 +49,7 @@ func podPauseCmd(c *cliconfig.PodPauseValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
pauseIDs, conErrors, pauseErrors := runtime.PausePods(c)

View File

@ -20,7 +20,7 @@ import (
)
const (
STOPPED = "Stopped"
STOPPED = "Stopped" //nolint
RUNNING = "Running"
PAUSED = "Paused"
EXITED = "Exited"
@ -36,9 +36,9 @@ var (
)
type podPsCtrInfo struct {
Name string `"json:name,omitempty"`
Id string `"json:id,omitempty"`
Status string `"json:status,omitempty"`
Name string `json:"name,omitempty"`
Id string `json:"id,omitempty"`
Status string `json:"status,omitempty"`
}
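The old struct tags above were wrapped in an extra pair of quotes, so the whole tag was one opaque string instead of the conventional key:"value" form reflect.StructTag understands; encoding/json therefore ignored the tag and fell back to the Go field names, and vet's structtag check reports the bad syntax. The difference in output:

package main

import (
    "encoding/json"
    "fmt"
)

type bad struct {
    Name string `"json:name,omitempty"` // malformed: the entire tag is one quoted blob
}

type good struct {
    Name string `json:"name,omitempty"` // conventional key:"value" tag
}

func main() {
    b1, _ := json.Marshal(bad{Name: "infra"})
    b2, _ := json.Marshal(good{Name: "infra"})
    fmt.Println(string(b1)) // {"Name":"infra"} -- the malformed tag is ignored
    fmt.Println(string(b2)) // {"name":"infra"}
}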
type podPsOptions struct {
@ -161,7 +161,7 @@ func podPsCmd(c *cliconfig.PodPsValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
opts := podPsOptions{
NoTrunc: c.NoTrunc,

View File

@ -51,7 +51,7 @@ func podRestartCmd(c *cliconfig.PodRestartValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
restartIDs, conErrors, restartErrors := runtime.RestartPods(getContext(), c)

View File

@ -51,7 +51,7 @@ func podRmCmd(c *cliconfig.PodRmValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
podRmIds, podRmErrors := runtime.RemovePods(getContext(), c)
for _, p := range podRmIds {

View File

@ -49,7 +49,7 @@ func podStartCmd(c *cliconfig.PodStartValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
podStartIDs, podStartErrors := runtime.StartPods(getContext(), c)
for _, p := range podStartIDs {

View File

@ -74,16 +74,13 @@ func podStatsCmd(c *cliconfig.PodStatsValues) error {
if ctr > 1 {
return errors.Errorf("--all, --latest and containers cannot be used together")
} else if ctr == 0 {
// If user didn't specify, imply --all
all = true
}
runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
times := -1
if c.NoStream {
@ -173,7 +170,9 @@ func podStatsCmd(c *cliconfig.PodStatsValues) error {
tm.Flush()
}
if strings.ToLower(format) == formats.JSONString {
outputJson(newStats)
if err := outputJson(newStats); err != nil {
return err
}
} else {
results := podContainerStatsToPodStatOut(newStats)
@ -300,17 +299,3 @@ func outputJson(stats []*adapter.PodContainerStats) error {
fmt.Println(string(b))
return nil
}
func getPodsByList(podList []string, r *libpod.Runtime) ([]*libpod.Pod, error) {
var (
pods []*libpod.Pod
)
for _, p := range podList {
pod, err := r.LookupPod(p)
if err != nil {
return nil, err
}
pods = append(pods, pod)
}
return pods, nil
}

View File

@ -51,7 +51,7 @@ func podStopCmd(c *cliconfig.PodStopValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
podStopIds, podStopErrors := runtime.StopPods(getContext(), c)
for _, p := range podStopIds {

View File

@ -44,8 +44,7 @@ func init() {
flags := podTopCommand.Flags()
flags.BoolVarP(&podTopCommand.Latest, "latest,", "l", false, "Act on the latest pod podman is aware of")
flags.BoolVar(&podTopCommand.ListDescriptors, "list-descriptors", false, "")
flags.MarkHidden("list-descriptors")
markFlagHidden(flags, "list-descriptors")
}
func podTopCmd(c *cliconfig.PodTopValues) error {
@ -71,7 +70,7 @@ func podTopCmd(c *cliconfig.PodTopValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if c.Latest {
descriptors = args
@ -85,8 +84,9 @@ func podTopCmd(c *cliconfig.PodTopValues) error {
return err
}
for _, proc := range psOutput {
fmt.Fprintln(w, proc)
if _, err := fmt.Fprintln(w, proc); err != nil {
return err
}
}
w.Flush()
return nil
return w.Flush()
}

View File

@ -50,7 +50,7 @@ func podUnpauseCmd(c *cliconfig.PodUnpauseValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
unpauseIDs, conErrors, unpauseErrors := runtime.UnpausePods(c)

View File

@ -40,8 +40,11 @@ func podPruneCmd(c *cliconfig.PodPruneValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
ok, failures, err := runtime.PrunePods(getContext(), c)
if err != nil {
return err
}
return printCmdResults(ok, failures)
}

View File

@ -95,9 +95,12 @@ func portCmd(c *cliconfig.PortValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
containers, err := runtime.Port(c)
if err != nil {
return err
}
for _, con := range containers {
portmappings, err := con.PortMappings()
if err != nil {

View File

@ -15,87 +15,32 @@ import (
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/pkg/adapter"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/fields"
)
const (
mountTruncLength = 12
hid = "CONTAINER ID"
himage = "IMAGE"
hcommand = "COMMAND"
hcreated = "CREATED"
hstatus = "STATUS"
hports = "PORTS"
hnames = "NAMES"
hsize = "SIZE"
hinfra = "IS INFRA"
hpod = "POD"
nspid = "PID"
nscgroup = "CGROUPNS"
nsipc = "IPC"
nsmnt = "MNT"
nsnet = "NET"
nspidns = "PIDNS"
nsuserns = "USERNS"
nsuts = "UTS"
hid = "CONTAINER ID"
himage = "IMAGE"
hcommand = "COMMAND"
hcreated = "CREATED"
hstatus = "STATUS"
hports = "PORTS"
hnames = "NAMES"
hsize = "SIZE"
hinfra = "IS INFRA" //nolint
hpod = "POD"
nspid = "PID"
nscgroup = "CGROUPNS"
nsipc = "IPC"
nsmnt = "MNT"
nsnet = "NET"
nspidns = "PIDNS"
nsuserns = "USERNS"
nsuts = "UTS"
)
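The //nolint directives added on hinfra above and on STOPPED in pod_ps.go mark identifiers that are intentionally kept even though nothing references them yet, which linters such as deadcode or varcheck would otherwise report. A bare //nolint suppresses all golangci-lint findings for that line; a narrower //nolint:<linter> form also exists. Illustration only:

package main

import "fmt"

const (
    // RUNNING is referenced below, so no linter objects to it.
    RUNNING = "Running"
    // STOPPED is currently unreferenced; the trailing directive tells
    // golangci-lint to skip any finding on this line.
    STOPPED = "Stopped" //nolint
)

func main() {
    fmt.Println(RUNNING)
}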
type psTemplateParams struct {
ID string
Image string
Command string
CreatedAtTime time.Time
Created string
Status string
Ports string
Size string
Names string
Labels string
Mounts string
PID int
CGROUPNS string
IPC string
MNT string
NET string
PIDNS string
USERNS string
UTS string
Pod string
IsInfra bool
}
// psJSONParams is used as a base structure for the psParams
// If template output is requested, psJSONParams will be converted to
// psTemplateParams.
// psJSONParams will be populated by data from libpod.Container,
// the members of the struct are the same data types as their sources.

type psJSONParams struct {
ID string `json:"id"`
Image string `json:"image"`
ImageID string `json:"image_id"`
Command []string `json:"command"`
ExitCode int32 `json:"exitCode"`
Exited bool `json:"exited"`
CreatedAt time.Time `json:"createdAt"`
StartedAt time.Time `json:"startedAt"`
ExitedAt time.Time `json:"exitedAt"`
Status string `json:"status"`
PID int `json:"PID"`
Ports []ocicni.PortMapping `json:"ports"`
Size *shared.ContainerSize `json:"size,omitempty"`
Names string `json:"names"`
Labels fields.Set `json:"labels"`
Mounts []string `json:"mounts"`
ContainerRunning bool `json:"ctrRunning"`
Namespaces *shared.Namespace `json:"namespace,omitempty"`
Pod string `json:"pod,omitempty"`
IsInfra bool `json:"infra"`
}
// Type declaration and functions for sorting the PS output
type psSorted []shared.PsContainerOutput
@ -223,7 +168,7 @@ func psCmd(c *cliconfig.PsValues) error {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if !watch {
if err := psDisplay(c, runtime); err != nil {
@ -273,22 +218,6 @@ func checkFlagsPassed(c *cliconfig.PsValues) error {
return nil
}
// generate the accurate header based on template given
func (p *psTemplateParams) headerMap() map[string]string {
v := reflect.Indirect(reflect.ValueOf(p))
values := make(map[string]string)
for i := 0; i < v.NumField(); i++ {
key := v.Type().Field(i).Name
value := key
if value == "ID" {
value = "Container" + value
}
values[key] = strings.ToUpper(splitCamelCase(value))
}
return values
}
func sortPsOutput(sortBy string, psOutput psSorted) (psSorted, error) {
switch sortBy {
case "id":

View File

@ -60,7 +60,7 @@ func init() {
flags.StringVar(&pullCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
flags.StringVar(&pullCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.BoolVar(&pullCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
flags.MarkHidden("signature-policy")
markFlagHidden(flags, "signature-policy")
}
}
@ -82,7 +82,7 @@ func pullCmd(c *cliconfig.PullValues) (retError error) {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
args := c.InputArgs
if len(args) == 0 {

View File

@ -63,7 +63,7 @@ func init() {
flags.BoolVar(&pushCommand.Compress, "compress", false, "Compress tarball image layers when pushing to a directory using the 'dir' transport. (default is same compression type as source)")
flags.StringVar(&pushCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.BoolVar(&pushCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
flags.MarkHidden("signature-policy")
markFlagHidden(flags, "signature-policy")
}
}
@ -109,7 +109,7 @@ func pushCmd(c *cliconfig.PushValues) error {
if err != nil {
return errors.Wrapf(err, "could not create runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
var writer io.Writer
if !c.Quiet {

View File

@ -42,7 +42,7 @@ func refreshCmd(c *cliconfig.RefreshValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
allCtrs, err := runtime.GetAllContainers()
if err != nil {

View File

@ -55,7 +55,7 @@ func restartCmd(c *cliconfig.RestartValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
ok, failures, err := runtime.Restart(getContext(), c)
if err != nil {

View File

@ -58,7 +58,7 @@ func restoreCmd(c *cliconfig.RestoreValues, cmd *cobra.Command) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if c.Import == "" && c.Name != "" {
return errors.Errorf("--name can only used with --import")

View File

@ -54,7 +54,7 @@ func rmCmd(c *cliconfig.RmValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
// Storage conflicts with --all/--latest/--volumes
if c.Storage {

View File

@ -55,7 +55,7 @@ func rmiCmd(c *cliconfig.RmiValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
args := c.InputArgs
if len(args) == 0 && !removeAll {

View File

@ -37,7 +37,6 @@ func init() {
flags.Bool("sig-proxy", true, "Proxy received signals to the process")
getCreateFlags(&runCommand.PodmanCommand)
markFlagHiddenForRemoteClient("authfile", flags)
flags.MarkHidden("signature-policy")
}
func runCmd(c *cliconfig.RunValues) error {
@ -54,7 +53,7 @@ func runCmd(c *cliconfig.RunValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
exitCode, err = runtime.Run(getContext(), c, exitCode)
return err

View File

@ -53,10 +53,9 @@ func init() {
flags.StringVar(&runlabelCommand.Opt1, "opt1", "", "Optional parameter to pass for install")
flags.StringVar(&runlabelCommand.Opt2, "opt2", "", "Optional parameter to pass for install")
flags.StringVar(&runlabelCommand.Opt3, "opt3", "", "Optional parameter to pass for install")
flags.MarkHidden("opt1")
flags.MarkHidden("opt2")
flags.MarkHidden("opt3")
markFlagHidden(flags, "opt1")
markFlagHidden(flags, "opt2")
markFlagHidden(flags, "opt3")
flags.BoolP("pull", "p", false, "Pull the image if it does not exist locally prior to executing the label contents")
flags.BoolVarP(&runlabelCommand.Quiet, "quiet", "q", false, "Suppress output information when installing images")
// Disabled flags for the remote client
@ -66,10 +65,11 @@ func init() {
flags.StringVar(&runlabelCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.BoolVar(&runlabelCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
flags.MarkDeprecated("pull", "podman will pull if not found in local storage")
flags.MarkHidden("signature-policy")
if err := flags.MarkDeprecated("pull", "podman will pull if not found in local storage"); err != nil {
logrus.Error("unable to mark pull flag deprecated")
}
markFlagHidden(flags, "signature-policy")
}
markFlagHiddenForRemoteClient("authfile", flags)
}
// installCmd gets the data from the command line and calls installImage
@ -95,7 +95,7 @@ func runlabelCmd(c *cliconfig.RunlabelValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
args := c.InputArgs
if len(args) < 2 {

View File

@ -74,7 +74,7 @@ func saveCmd(c *cliconfig.SaveValues) error {
if err != nil {
return errors.Wrapf(err, "could not create runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if c.Flag("compress").Changed && (c.Format != ociManifestDir && c.Format != v2s2ManifestDir && c.Format == "") {
return errors.Errorf("--compress can only be set when --format is either 'oci-dir' or 'docker-dir'")

View File

@ -13,11 +13,6 @@ import (
"github.com/spf13/cobra"
)
const (
descriptionTruncLength = 44
maxQueries = 25
)
var (
searchCommand cliconfig.SearchValues
searchDescription = `Search registries for a given image. Can search all the default registries or a specific registry.
@ -89,8 +84,7 @@ func searchCmd(c *cliconfig.SearchValues) error {
return nil
}
out := formats.StdoutTemplateArray{Output: searchToGeneric(results), Template: format, Fields: searchHeaderMap()}
formats.Writer(out).Out()
return nil
return formats.Writer(out).Out()
}
// searchHeaderMap returns the headers of a SearchResult.

View File

@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/containers/libpod/pkg/errorhandling"
"io"
"os"
"path/filepath"
@ -63,8 +64,8 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
if err != nil {
return nil, nil, errors.Errorf("error opening cidfile %s", c.String("cidfile"))
}
defer cidFile.Close()
defer cidFile.Sync()
defer errorhandling.CloseQuiet(cidFile)
defer errorhandling.SyncQuiet(cidFile)
}
imageName := ""
@ -82,6 +83,9 @@ func CreateContainer(ctx context.Context, c *GenericCLIResults, runtime *libpod.
return nil, nil, err
}
data, err = newImage.Inspect(ctx)
if err != nil {
return nil, nil, err
}
names := newImage.Names()
if len(names) > 0 {
imageName = names[0]

View File

@ -60,7 +60,7 @@ func signCmd(c *cliconfig.SignValues) error {
if err != nil {
return errors.Wrapf(err, "could not create runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
signby := c.SignBy
if signby == "" {

View File

@ -69,7 +69,7 @@ func startCmd(c *cliconfig.StartValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
exitCode, err = runtime.Start(getContext(), c, sigProxy)
return err
}

View File

@ -93,7 +93,7 @@ func statsCmd(c *cliconfig.StatsValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
times := -1
if c.NoStream {
@ -175,7 +175,9 @@ func statsCmd(c *cliconfig.StatsValues) error {
tm.MoveCursor(1, 1)
tm.Flush()
}
outputStats(reportStats, format)
if err := outputStats(reportStats, format); err != nil {
return err
}
time.Sleep(time.Second)
}
return nil

View File

@ -60,7 +60,7 @@ func stopCmd(c *cliconfig.StopValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
ok, failures, err := runtime.StopContainers(getContext(), c)
if err != nil {

View File

@ -106,7 +106,7 @@ func dfSystemCmd(c *cliconfig.SystemDfValues) error {
if err != nil {
return errors.Wrapf(err, "Could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
ctx := getContext()
@ -131,11 +131,10 @@ func dfSystemCmd(c *cliconfig.SystemDfValues) error {
if c.Format != "" {
format = strings.Replace(c.Format, `\t`, "\t", -1)
}
generateSysDfOutput(systemDfDiskUsages, format)
return nil
return generateSysDfOutput(systemDfDiskUsages, format)
}
func generateSysDfOutput(systemDfDiskUsages []systemDfDiskUsage, format string) {
func generateSysDfOutput(systemDfDiskUsages []systemDfDiskUsage, format string) error {
var systemDfHeader = map[string]string{
"Type": "TYPE",
"Total": "TOTAL",
@ -144,7 +143,7 @@ func generateSysDfOutput(systemDfDiskUsages []systemDfDiskUsage, format string)
"Reclaimable": "RECLAIMABLE",
}
out := formats.StdoutTemplateArray{Output: systemDfDiskUsageToGeneric(systemDfDiskUsages), Template: format, Fields: systemDfHeader}
formats.Writer(out).Out()
return formats.Writer(out).Out()
}
func getDiskUsage(ctx context.Context, runtime *libpod.Runtime, metaData dfMetaData) ([]systemDfDiskUsage, error) {
@ -554,10 +553,11 @@ func imagesVerboseOutput(ctx context.Context, metaData dfMetaData) error {
if err != nil {
return errors.Wrapf(err, "error getting verbose output of images")
}
os.Stderr.WriteString("Images space usage:\n\n")
if _, err := os.Stderr.WriteString("Images space usage:\n\n"); err != nil {
return err
}
out := formats.StdoutTemplateArray{Output: systemDfImageVerboseDiskUsageToGeneric(imagesVerboseDiskUsage), Template: imageVerboseFormat, Fields: imageVerboseHeader}
formats.Writer(out).Out()
return nil
return formats.Writer(out).Out()
}
func containersVerboseOutput(ctx context.Context, metaData dfMetaData) error {
@ -575,10 +575,12 @@ func containersVerboseOutput(ctx context.Context, metaData dfMetaData) error {
if err != nil {
return errors.Wrapf(err, "error getting verbose output of containers")
}
os.Stderr.WriteString("\nContainers space usage:\n\n")
if _, err := os.Stderr.WriteString("\nContainers space usage:\n\n"); err != nil {
return err
}
out := formats.StdoutTemplateArray{Output: systemDfContainerVerboseDiskUsageToGeneric(containersVerboseDiskUsage), Template: containerVerboseFormat, Fields: containerVerboseHeader}
formats.Writer(out).Out()
return nil
return formats.Writer(out).Out()
}
func volumesVerboseOutput(ctx context.Context, metaData dfMetaData) error {
@ -591,10 +593,11 @@ func volumesVerboseOutput(ctx context.Context, metaData dfMetaData) error {
if err != nil {
return errors.Wrapf(err, "error getting verbose output of volumes")
}
os.Stderr.WriteString("\nLocal Volumes space usage:\n\n")
if _, err := os.Stderr.WriteString("\nLocal Volumes space usage:\n\n"); err != nil {
return err
}
out := formats.StdoutTemplateArray{Output: systemDfVolumeVerboseDiskUsageToGeneric(volumesVerboseDiskUsage), Template: volumeVerboseFormat, Fields: volumeVerboseHeader}
formats.Writer(out).Out()
return nil
return formats.Writer(out).Out()
}
func verboseOutput(ctx context.Context, metaData dfMetaData) error {

View File

@ -76,27 +76,33 @@ Are you sure you want to continue? [y/N] `, volumeString)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
rmWorkers := shared.Parallelize("rm")
ctx := getContext()
fmt.Println("Deleted Containers")
ok, failures, lasterr := runtime.Prune(ctx, rmWorkers, false)
printCmdResults(ok, failures)
defer runtime.DeferredShutdown(false)
// We must clean out pods first because they may have infra containers
fmt.Println("Deleted Pods")
pruneValues := cliconfig.PodPruneValues{
PodmanCommand: c.PodmanCommand,
Force: c.Force,
}
ok, failures, err = runtime.PrunePods(ctx, &pruneValues)
ctx := getContext()
ok, failures, lasterr := runtime.PrunePods(ctx, &pruneValues)
if err := printCmdResults(ok, failures); err != nil {
return err
}
rmWorkers := shared.Parallelize("rm")
fmt.Println("Deleted Containers")
ok, failures, err = runtime.Prune(ctx, rmWorkers, false)
if err != nil {
if lasterr != nil {
logrus.Errorf("%q", lasterr)
logrus.Errorf("%q", err)
}
lasterr = err
}
printCmdResults(ok, failures)
if err := printCmdResults(ok, failures); err != nil {
return err
}
if c.Bool("volumes") {
fmt.Println("Deleted Volumes")

View File

@ -42,7 +42,7 @@ func tagCmd(c *cliconfig.TagValues) error {
if err != nil {
return errors.Wrapf(err, "could not create runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
newImage, err := runtime.NewImageFromLocal(args[0])
if err != nil {

View File

@ -57,7 +57,7 @@ func init() {
flags := topCommand.Flags()
flags.SetInterspersed(false)
flags.BoolVar(&topCommand.ListDescriptors, "list-descriptors", false, "")
flags.MarkHidden("list-descriptors")
markFlagHidden(flags, "list-descriptors")
flags.BoolVarP(&topCommand.Latest, "latest", "l", false, "Act on the latest container podman is aware of")
markFlagHiddenForRemoteClient("latest", flags)
}
@ -83,7 +83,7 @@ func topCmd(c *cliconfig.TopValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
psOutput, err := runtime.Top(c)
if err != nil {
@ -91,8 +91,9 @@ func topCmd(c *cliconfig.TopValues) error {
}
w := tabwriter.NewWriter(os.Stdout, 5, 1, 3, ' ', 0)
for _, proc := range psOutput {
fmt.Fprintln(w, proc)
if _, err := fmt.Fprintln(w, proc); err != nil {
return err
}
}
w.Flush()
return nil
return w.Flush()
}

View File

@ -55,7 +55,7 @@ func treeCmd(c *cliconfig.TreeValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
imageInfo, layerInfoMap, img, err := runtime.Tree(c)
if err != nil {
return err

View File

@ -7,7 +7,6 @@ import (
"strings"
"github.com/containers/buildah/pkg/formats"
"github.com/containers/image/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/libpod/image"
@ -57,7 +56,7 @@ func init() {
showTrustCommand.SetUsageTemplate(UsageTemplate())
setFlags := setTrustCommand.Flags()
setFlags.StringVar(&setTrustCommand.PolicyPath, "policypath", "", "")
setFlags.MarkHidden("policypath")
markFlagHidden(setFlags, "policypath")
setFlags.StringSliceVarP(&setTrustCommand.PubKeysFile, "pubkeysfile", "f", []string{}, `Path of installed public key(s) to trust for TARGET.
Absolute path to keys is added to policy.json. May
used multiple times to define multiple public keys.
@ -68,9 +67,9 @@ File(s) must exist before using this command`)
showFlags.BoolVarP(&showTrustCommand.Json, "json", "j", false, "Output as json")
showFlags.StringVar(&showTrustCommand.PolicyPath, "policypath", "", "")
showFlags.BoolVar(&showTrustCommand.Raw, "raw", false, "Output raw policy file")
showFlags.MarkHidden("policypath")
markFlagHidden(showFlags, "policypath")
showFlags.StringVar(&showTrustCommand.RegistryPath, "registrypath", "", "")
showFlags.MarkHidden("registrypath")
markFlagHidden(showFlags, "registrypath")
}
func showTrustCmd(c *cliconfig.ShowTrustValues) error {
@ -238,10 +237,6 @@ func isValidTrustType(t string) bool {
return false
}
func getDefaultPolicyPath() string {
return trust.DefaultPolicyPath(&types.SystemContext{})
}
func getPolicyJSON(policyContentStruct trust.PolicyContent, systemRegistriesDirPath string) (map[string]map[string]interface{}, error) {
registryConfigs, err := trust.LoadAndMergeConfig(systemRegistriesDirPath)
if err != nil {

View File

@ -52,7 +52,7 @@ func umountCmd(c *cliconfig.UmountValues) error {
if err != nil {
return errors.Wrapf(err, "error creating runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
ok, failures, err := runtime.UmountRootFilesystems(getContext(), c)
if err != nil {

View File

@ -45,7 +45,7 @@ func unpauseCmd(c *cliconfig.UnpauseValues) error {
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
args := c.InputArgs
if len(args) < 1 && !c.All {

View File

@ -3,27 +3,12 @@ package main
import (
"fmt"
"reflect"
"runtime/debug"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
// printParallelOutput takes the map of parallel worker results and outputs them
// to stdout
func printParallelOutput(m map[string]error, errCount int) error {
var lastError error
for cid, result := range m {
if result != nil {
if errCount > 1 {
fmt.Println(result.Error())
}
lastError = result
continue
}
fmt.Println(cid)
}
return lastError
}
// print results from CLI command
func printCmdResults(ok []string, failures map[string]error) error {
for _, id := range ok {
@ -48,6 +33,17 @@ func printCmdResults(ok []string, failures map[string]error) error {
// on the remote-client
func markFlagHiddenForRemoteClient(flagName string, flags *pflag.FlagSet) {
if remoteclient {
flags.MarkHidden(flagName)
if err := flags.MarkHidden(flagName); err != nil {
debug.PrintStack()
logrus.Errorf("unable to mark %s as hidden in the remote-client", flagName)
}
}
}
// markFlagHidden is a helper function to log an error if marking
// a flag as hidden happens to fail
func markFlagHidden(flags *pflag.FlagSet, flag string) {
if err := flags.MarkHidden(flag); err != nil {
logrus.Errorf("unable to mark flag '%s' as hidden: %q", flag, err)
}
}

View File

@ -83,7 +83,7 @@ func varlinkCmd(c *cliconfig.VarlinkValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
var varlinkInterfaces = []*iopodman.VarlinkInterface{varlinkapi.New(&c.PodmanCommand, runtime)}
// Register varlink service. The metadata can be retrieved with:

View File

@ -42,7 +42,7 @@ func init() {
func versionCmd(c *cliconfig.VersionValues) error {
clientVersion, err := define.GetVersion()
if err != nil {
errors.Wrapf(err, "unable to determine version")
return errors.Wrapf(err, "unable to determine version")
}
versionOutputFormat := c.Format
@ -63,18 +63,22 @@ func versionCmd(c *cliconfig.VersionValues) error {
defer w.Flush()
if remote {
fmt.Fprintf(w, "Client:\n")
if _, err := fmt.Fprintf(w, "Client:\n"); err != nil {
return err
}
}
formatVersion(w, clientVersion)
if remote {
fmt.Fprintf(w, "\nService:\n")
if _, err := fmt.Fprintf(w, "\nService:\n"); err != nil {
return err
}
runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
serviceVersion, err := runtime.GetVersion()
if err != nil {

View File

@ -46,7 +46,7 @@ func volumeCreateCmd(c *cliconfig.VolumeCreateValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
if len(c.InputArgs) > 1 {
return errors.Errorf("too many arguments, create takes at most 1 argument")

View File

@ -47,7 +47,7 @@ func volumeInspectCmd(c *cliconfig.VolumeInspectValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
vols, err := runtime.InspectVolumes(getContext(), c)
if err != nil {

View File

@ -76,7 +76,7 @@ func volumeLsCmd(c *cliconfig.VolumeLsValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
opts := volumeLsOptions{
Quiet: c.Quiet,

View File

@ -67,7 +67,7 @@ func volumePruneCmd(c *cliconfig.VolumePruneValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
// Prompt for confirmation if --force is not set
if !c.Force {

View File

@ -51,7 +51,7 @@ func volumeRmCmd(c *cliconfig.VolumeRmValues) error {
if err != nil {
return errors.Wrapf(err, "error creating libpod runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
deletedVolumeNames, err := runtime.RemoveVolumes(getContext(), c)
if err != nil {
if len(deletedVolumeNames) > 0 {

View File

@ -55,7 +55,7 @@ func waitCmd(c *cliconfig.WaitValues) error {
if err != nil {
return errors.Wrapf(err, "error creating runtime")
}
defer runtime.Shutdown(false)
defer runtime.DeferredShutdown(false)
ok, failures, err := runtime.WaitOnContainers(getContext(), c, interval)
if err != nil {

View File

@ -39,7 +39,7 @@ Valid placeholders for the Go template are listed below:
| **Placeholder** | **Description** |
| --------------- | --------------- |
| .Pod | Pod ID |
| .CID | Container ID |
| .ID | Container ID |
| .Name | Container Name |
| .CPU | CPU percentage |
| .MemUsage | Memory usage |

View File

@ -66,7 +66,7 @@ func NewBoltState(path string, runtime *Runtime) (State, error) {
if err != nil {
return nil, errors.Wrapf(err, "error opening database %s", path)
}
// Everywhere else, we use s.closeDBCon(db) to ensure the state's DB
// Everywhere else, we use s.deferredCloseDBCon(db) to ensure the state's DB
// mutex is also unlocked.
// However, here, the mutex has not been locked, since we just created
// the DB connection, and it hasn't left this function yet - no risk of
@ -141,7 +141,7 @@ func (s *BoltState) Refresh() error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
idBucket, err := getIDBucket(tx)
@ -253,7 +253,7 @@ func (s *BoltState) GetDBConfig() (*DBConfig, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
configBucket, err := getRuntimeConfigBucket(tx)
@ -298,7 +298,7 @@ func (s *BoltState) ValidateDBConfig(runtime *Runtime) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
// Check runtime configuration
if err := checkRuntimeConfig(db, runtime); err != nil {
@ -342,7 +342,7 @@ func (s *BoltState) Container(id string) (*Container, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
ctrBucket, err := getCtrBucket(tx)
@ -378,7 +378,7 @@ func (s *BoltState) LookupContainer(idOrName string) (*Container, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
ctrBucket, err := getCtrBucket(tx)
@ -484,7 +484,7 @@ func (s *BoltState) HasContainer(id string) (bool, error) {
if err != nil {
return false, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
exists := false
@ -549,7 +549,7 @@ func (s *BoltState) RemoveContainer(ctr *Container) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
return s.removeContainer(ctr, nil, tx)
@ -580,7 +580,7 @@ func (s *BoltState) UpdateContainer(ctr *Container) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
ctrBucket, err := getCtrBucket(tx)
@ -651,7 +651,7 @@ func (s *BoltState) SaveContainer(ctr *Container) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
ctrBucket, err := getCtrBucket(tx)
@ -708,7 +708,7 @@ func (s *BoltState) ContainerInUse(ctr *Container) ([]string, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
ctrBucket, err := getCtrBucket(tx)
@ -759,7 +759,7 @@ func (s *BoltState) AllContainers() ([]*Container, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
allCtrsBucket, err := getAllCtrsBucket(tx)
@ -833,7 +833,7 @@ func (s *BoltState) RewriteContainerConfig(ctr *Container, newCfg *ContainerConf
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
ctrBkt, err := getCtrBucket(tx)
@ -877,7 +877,7 @@ func (s *BoltState) RewritePodConfig(pod *Pod, newCfg *PodConfig) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@ -920,7 +920,7 @@ func (s *BoltState) Pod(id string) (*Pod, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@ -955,7 +955,7 @@ func (s *BoltState) LookupPod(idOrName string) (*Pod, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@ -1062,7 +1062,7 @@ func (s *BoltState) HasPod(id string) (bool, error) {
if err != nil {
return false, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@ -1118,7 +1118,7 @@ func (s *BoltState) PodHasContainer(pod *Pod, id string) (bool, error) {
if err != nil {
return false, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@ -1180,7 +1180,7 @@ func (s *BoltState) PodContainersByID(pod *Pod) ([]string, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@ -1242,7 +1242,7 @@ func (s *BoltState) PodContainers(pod *Pod) ([]*Container, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@ -1312,7 +1312,7 @@ func (s *BoltState) AddVolume(volume *Volume) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
volBkt, err := getVolBucket(tx)
@ -1369,7 +1369,7 @@ func (s *BoltState) RemoveVolume(volume *Volume) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
volBkt, err := getVolBucket(tx)
@ -1451,7 +1451,7 @@ func (s *BoltState) AllVolumes() ([]*Volume, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
allVolsBucket, err := getAllVolsBucket(tx)
@ -1512,7 +1512,7 @@ func (s *BoltState) Volume(name string) (*Volume, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
volBkt, err := getVolBucket(tx)
@ -1547,7 +1547,7 @@ func (s *BoltState) HasVolume(name string) (bool, error) {
if err != nil {
return false, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
volBkt, err := getVolBucket(tx)
@ -1587,7 +1587,7 @@ func (s *BoltState) VolumeInUse(volume *Volume) ([]string, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
volBucket, err := getVolBucket(tx)
@ -1673,7 +1673,7 @@ func (s *BoltState) AddPod(pod *Pod) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@ -1782,7 +1782,7 @@ func (s *BoltState) RemovePod(pod *Pod) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@ -1877,7 +1877,7 @@ func (s *BoltState) RemovePodContainers(pod *Pod) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
podBkt, err := getPodBucket(tx)
@ -2038,7 +2038,7 @@ func (s *BoltState) RemoveContainerFromPod(pod *Pod, ctr *Container) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
return s.removeContainer(ctr, pod, tx)
@ -2066,7 +2066,7 @@ func (s *BoltState) UpdatePod(pod *Pod) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
podID := []byte(pod.ID())
@ -2126,7 +2126,7 @@ func (s *BoltState) SavePod(pod *Pod) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
podID := []byte(pod.ID())
@ -2168,7 +2168,7 @@ func (s *BoltState) AllPods() ([]*Pod, error) {
if err != nil {
return nil, err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.View(func(tx *bolt.Tx) error {
allPodsBucket, err := getAllPodsBucket(tx)

View File

@ -247,6 +247,15 @@ func (s *BoltState) getDBCon() (*bolt.DB, error) {
return db, nil
}
// deferredCloseDBCon closes the bolt db but instead of returning an
// error it logs the error. it is meant to be used within the confines
// of a defer statement only
func (s *BoltState) deferredCloseDBCon(db *bolt.DB) {
if err := s.closeDBCon(db); err != nil {
logrus.Errorf("failed to close libpod db: %q", err)
}
}
// Close a connection to the database.
// MUST be used in place of `db.Close()` to ensure proper unlocking of the
// state.
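For context, a minimal, self-contained sketch of the pattern this hunk introduces: a wrapper around a close call that logs the error instead of returning it, so the call is safe to place in a defer. The types and the temp file here are illustrative, not libpod code.

package main

import (
	"log"
	"os"
)

// state stands in for a struct like BoltState that owns the close logic.
type state struct{}

// closeFile returns the close error, like closeDBCon does for the db handle.
func (s *state) closeFile(f *os.File) error {
	return f.Close()
}

// deferredCloseFile is the defer-friendly wrapper: the error has nowhere to
// go inside a defer, so it is logged instead of being silently dropped.
func (s *state) deferredCloseFile(f *os.File) {
	if err := s.closeFile(f); err != nil {
		log.Printf("failed to close %s: %v", f.Name(), err)
	}
}

func main() {
	s := &state{}
	f, err := os.CreateTemp("", "deferred-close-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer s.deferredCloseFile(f)

	if _, err := f.WriteString("hello\n"); err != nil {
		log.Fatal(err)
	}
}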
@ -479,7 +488,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
if err != nil {
return err
}
defer s.closeDBCon(db)
defer s.deferredCloseDBCon(db)
err = db.Update(func(tx *bolt.Tx) error {
idsBucket, err := getIDBucket(tx)

View File

@ -10,6 +10,7 @@ import (
"path/filepath"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/errorhandling"
"github.com/containers/libpod/pkg/kubeutils"
"github.com/containers/libpod/utils"
"github.com/docker/docker/pkg/term"
@ -66,7 +67,7 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
logrus.Debugf("Could not open ctl file: %v", err)
return
}
defer controlFile.Close()
defer errorhandling.CloseQuiet(controlFile)
logrus.Debugf("Received a resize event: %+v", size)
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, size.Height, size.Width); err != nil {
@ -108,7 +109,9 @@ func (c *Container) attachContainerSocket(resize <-chan remotecommand.TerminalSi
var err error
if streams.AttachInput {
_, err = utils.CopyDetachable(conn, streams.InputStream, detachKeys)
conn.CloseWrite()
if err := conn.CloseWrite(); err != nil {
logrus.Error("failed to close write in attach")
}
}
stdinDone <- err
}()
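CloseWrite half-closes the attach connection: the remote side sees EOF on its read end while the reply direction stays open, and its error is now reported instead of ignored. A self-contained sketch of the same idea over a loopback TCP connection (the echo server and addresses are illustrative, not the conmon attach socket):

package main

import (
	"io"
	"log"
	"net"
)

func main() {
	// Throwaway echo server on a loopback port.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		c, err := ln.Accept()
		if err != nil {
			return
		}
		// Echo everything back, then close.
		_, _ = io.Copy(c, c)
		_ = c.Close()
	}()

	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	if _, err := conn.Write([]byte("stdin stream\n")); err != nil {
		log.Fatal(err)
	}

	// Half-close the write side so the server's io.Copy sees EOF; the
	// error is checked instead of being thrown away.
	if err := conn.(*net.TCPConn).CloseWrite(); err != nil {
		log.Printf("failed to close write side: %v", err)
	}

	// The read side is still open: drain the echoed reply.
	reply, err := io.ReadAll(conn)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("echoed: %q", reply)
}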

View File

@ -323,7 +323,7 @@ func (i *Image) Names() []string {
// RepoDigests returns a string array of repodigests associated with the image
func (i *Image) RepoDigests() ([]string, error) {
var repoDigests []string
digest := i.Digest()
imageDigest := i.Digest()
for _, name := range i.Names() {
named, err := reference.ParseNormalizedNamed(name)
@ -331,7 +331,7 @@ func (i *Image) RepoDigests() ([]string, error) {
return nil, err
}
canonical, err := reference.WithDigest(reference.TrimNamed(named), digest)
canonical, err := reference.WithDigest(reference.TrimNamed(named), imageDigest)
if err != nil {
return nil, err
}
@ -462,11 +462,11 @@ func getImageDigest(ctx context.Context, src types.ImageReference, sc *types.Sys
return "", err
}
defer newImg.Close()
digest := newImg.ConfigInfo().Digest
if err = digest.Validate(); err != nil {
imageDigest := newImg.ConfigInfo().Digest
if err = imageDigest.Validate(); err != nil {
return "", errors.Wrapf(err, "error getting config info")
}
return "@" + digest.Hex(), nil
return "@" + imageDigest.Hex(), nil
}
// normalizedTag returns the canonical version of tag for use in Image.Names()
@ -495,7 +495,9 @@ func normalizedTag(tag string) (reference.Named, error) {
// TagImage adds a tag to the given image
func (i *Image) TagImage(tag string) error {
i.reloadImage()
if err := i.reloadImage(); err != nil {
return err
}
ref, err := normalizedTag(tag)
if err != nil {
return err
@ -508,14 +510,18 @@ func (i *Image) TagImage(tag string) error {
if err := i.imageruntime.store.SetNames(i.ID(), tags); err != nil {
return err
}
i.reloadImage()
if err := i.reloadImage(); err != nil {
return err
}
defer i.newImageEvent(events.Tag)
return nil
}
// UntagImage removes a tag from the given image
func (i *Image) UntagImage(tag string) error {
i.reloadImage()
if err := i.reloadImage(); err != nil {
return err
}
var newTags []string
tags := i.Names()
if !util.StringInSlice(tag, tags) {
@ -529,7 +535,9 @@ func (i *Image) UntagImage(tag string) error {
if err := i.imageruntime.store.SetNames(i.ID(), newTags); err != nil {
return err
}
i.reloadImage()
if err := i.reloadImage(); err != nil {
return err
}
defer i.newImageEvent(events.Untag)
return nil
}
@ -825,7 +833,7 @@ func (i *Image) GetLabel(ctx context.Context, label string) (string, error) {
// Annotations returns the annotations of an image
func (i *Image) Annotations(ctx context.Context) (map[string]string, error) {
manifest, manifestType, err := i.Manifest(ctx)
imageManifest, manifestType, err := i.Manifest(ctx)
if err != nil {
return nil, err
}
@ -833,7 +841,7 @@ func (i *Image) Annotations(ctx context.Context) (map[string]string, error) {
switch manifestType {
case ociv1.MediaTypeImageManifest:
var m ociv1.Manifest
if err := json.Unmarshal(manifest, &m); err == nil {
if err := json.Unmarshal(imageManifest, &m); err == nil {
for k, v := range m.Annotations {
annotations[k] = v
}
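The digest → imageDigest and manifest → imageManifest renames look like shadowing fixes: a local variable named after an imported package hides that package for the rest of the function, which linters flag and which breaks later calls into the package. A small stdlib illustration of the same problem (the names here are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type report struct {
	Name string `json:"name"`
}

func main() {
	// Shadowing bug (shown as a comment): a local named json hides the
	// encoding/json package for the rest of the function.
	//   json := []byte(`{"name":"demo"}`)
	//   out, err := json.Marshal(r) // compile error: json is a []byte

	// Renaming the local, as the hunks above do for digest and manifest,
	// keeps the package identifier usable.
	rawJSON := []byte(`{"name":"demo"}`)

	var r report
	if err := json.Unmarshal(rawJSON, &r); err != nil {
		log.Fatal(err)
	}
	out, err := json.Marshal(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}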

View File

@ -263,7 +263,9 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
copyOptions.SourceCtx.SystemRegistriesConfPath = systemRegistriesConfPath // FIXME: Set this more globally. Probably no reason not to have it in every types.SystemContext, and to compute the value just once in one place.
// Print the following statement only when pulling from a docker or atomic registry
if writer != nil && (imageInfo.srcRef.Transport().Name() == DockerTransport || imageInfo.srcRef.Transport().Name() == AtomicTransport) {
io.WriteString(writer, fmt.Sprintf("Trying to pull %s...", imageInfo.image))
if _, err := io.WriteString(writer, fmt.Sprintf("Trying to pull %s...", imageInfo.image)); err != nil {
return nil, err
}
}
// If the label is not nil, check if the label exists and if not, return err
if label != nil {
@ -277,7 +279,7 @@ func (ir *Runtime) doPullImage(ctx context.Context, sc *types.SystemContext, goa
pullErrors = multierror.Append(pullErrors, err)
logrus.Errorf("Error pulling image ref %s: %v", imageInfo.srcRef.StringWithinTransport(), err)
if writer != nil {
io.WriteString(writer, "Failed\n")
_, _ = io.WriteString(writer, "Failed\n")
}
} else {
if !goal.pullAllPairs {

View File

@ -99,7 +99,9 @@ func SearchImages(term string, options SearchOptions) ([]SearchResult, error) {
ctx := context.Background()
for i := range registries {
sem.Acquire(ctx, 1)
if err := sem.Acquire(ctx, 1); err != nil {
return nil, err
}
go searchImageInRegistryHelper(i, registries[i])
}
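sem.Acquire only fails when its context is cancelled, but ignoring that error would start a search goroutine without actually holding a slot. A minimal sketch of the checked form, assuming the semaphore is golang.org/x/sync/semaphore's Weighted type (which matches the Acquire(ctx, 1) signature used here); the registry names are made up:

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(3) // at most 3 searches in flight
	var wg sync.WaitGroup

	registries := []string{"registry-a", "registry-b", "registry-c", "registry-d"}
	for _, reg := range registries {
		// Acquire only fails if ctx is cancelled; bailing out here avoids
		// starting a goroutine that never held a slot.
		if err := sem.Acquire(ctx, 1); err != nil {
			fmt.Println("acquire failed:", err)
			return
		}
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			defer sem.Release(1)
			fmt.Println("searching", name)
		}(reg)
	}
	wg.Wait()
}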

View File

@ -5,6 +5,7 @@ package libpod
import (
"crypto/rand"
"fmt"
"github.com/containers/libpod/pkg/errorhandling"
"net"
"os"
"os/exec"
@ -168,8 +169,8 @@ func (r *Runtime) setupRootlessNetNS(ctr *Container) (err error) {
if err != nil {
return errors.Wrapf(err, "failed to open pipe")
}
defer syncR.Close()
defer syncW.Close()
defer errorhandling.CloseQuiet(syncR)
defer errorhandling.CloseQuiet(syncW)
havePortMapping := len(ctr.Config().PortMappings) > 0
apiSocket := filepath.Join(ctr.ociRuntime.tmpDir, fmt.Sprintf("%s.net", ctr.config.ID))

View File

@ -17,6 +17,7 @@ import (
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/cgroups"
"github.com/containers/libpod/pkg/errorhandling"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/utils"
@ -117,7 +118,7 @@ func (r *OCIRuntime) createContainer(ctr *Container, cgroupParent string, restor
if err != nil {
return err
}
defer fd.Close()
defer errorhandling.CloseQuiet(fd)
// create a new mountns on the current thread
if err = unix.Unshare(unix.CLONE_NEWNS); err != nil {
@ -207,8 +208,8 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
return errors.Wrapf(err, "error creating socket pair for start pipe")
}
defer parentPipe.Close()
defer parentStartPipe.Close()
defer errorhandling.CloseQuiet(parentPipe)
defer errorhandling.CloseQuiet(parentStartPipe)
ociLog := filepath.Join(ctr.state.RunDir, "oci-log")
logLevel := logrus.GetLevel()
@ -364,20 +365,26 @@ func (r *OCIRuntime) createOCIContainer(ctr *Container, cgroupParent string, res
err = cmd.Start()
// Ignore error returned from SetProcessLabel("") call,
// can't recover.
label.SetProcessLabel("")
if err := label.SetProcessLabel(""); err != nil {
_ = err
}
runtime.UnlockOSThread()
} else {
err = cmd.Start()
}
if err != nil {
childPipe.Close()
errorhandling.CloseQuiet(childPipe)
return err
}
defer cmd.Wait()
// We don't need childPipe on the parent side
childPipe.Close()
childStartPipe.Close()
if err := childPipe.Close(); err != nil {
return err
}
if err := childStartPipe.Close(); err != nil {
return err
}
// Move conmon to specified cgroup
if err := r.moveConmonToCgroup(ctr, cgroupParent, cmd); err != nil {

View File

@ -1162,6 +1162,13 @@ func (r *Runtime) GetConfig() (*RuntimeConfig, error) {
return config, nil
}
// DeferredShutdown shuts down the runtime without exposing any
// errors. It is only meant to be used when the runtime is being
// shut down within a defer statement; otherwise use Shutdown.
func (r *Runtime) DeferredShutdown(force bool) {
_ = r.Shutdown(force)
}
// Shutdown shuts down the runtime and associated containers and storage
// If force is true, containers and mounted storage will be shut down before
// cleaning up; if force is false, an error will be returned if there are
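DeferredShutdown mirrors deferredCloseDBCon: inside a defer statement the error returned by Shutdown has nowhere to go, so the wrapper discards it explicitly rather than leaving an unchecked call. A sketch of the intended call sites, with an illustrative stand-in for the Runtime type:

package main

import (
	"errors"
	"fmt"
	"log"
)

// podRuntime stands in for the libpod Runtime.
type podRuntime struct{ busy bool }

// Shutdown can fail, for example when containers are still running and
// force is false.
func (r *podRuntime) Shutdown(force bool) error {
	if r.busy && !force {
		return errors.New("containers are still running")
	}
	return nil
}

// DeferredShutdown discards the error; meant only for defer statements.
func (r *podRuntime) DeferredShutdown(force bool) {
	_ = r.Shutdown(force)
}

func runCommand(r *podRuntime) error {
	// Typical CLI path: best-effort cleanup on the way out.
	defer r.DeferredShutdown(false)
	fmt.Println("doing command work")
	return nil
}

func main() {
	r := &podRuntime{busy: true}
	if err := runCommand(r); err != nil {
		log.Fatal(err)
	}
	// When the caller cares about the result, call Shutdown directly.
	if err := r.Shutdown(true); err != nil {
		log.Fatal(err)
	}
}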

View File

@ -95,8 +95,8 @@ func (r *LocalRuntime) StopContainers(ctx context.Context, cli *cliconfig.StopVa
}
pool.Add(shared.Job{
c.ID(),
func() error {
ID: c.ID(),
Fn: func() error {
err := c.StopWithTimeout(*timeout)
if err != nil {
if errors.Cause(err) == define.ErrCtrStopped {
@ -134,8 +134,8 @@ func (r *LocalRuntime) KillContainers(ctx context.Context, cli *cliconfig.KillVa
c := c
pool.Add(shared.Job{
c.ID(),
func() error {
ID: c.ID(),
Fn: func() error {
return c.Kill(uint(signal))
},
})
@ -163,8 +163,8 @@ func (r *LocalRuntime) InitContainers(ctx context.Context, cli *cliconfig.InitVa
ctr := c
pool.Add(shared.Job{
ctr.ID(),
func() error {
ID: ctr.ID(),
Fn: func() error {
err := ctr.Init(ctx)
if err != nil {
// If we're initializing all containers, ignore invalid state errors
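Switching the shared.Job literals from positional to keyed fields is the usual answer to vet's unkeyed-composite warning: with field names spelled out, adding or reordering fields in Job cannot silently change which value lands where. A small illustration with a hypothetical Job type of the same shape:

package main

import "fmt"

// Job mirrors the shape used above: an ID plus the work to run.
type Job struct {
	ID string
	Fn func() error
}

func main() {
	jobs := []Job{
		{
			// Keyed fields: robust to new or reordered fields in Job.
			ID: "container-1234",
			Fn: func() error {
				fmt.Println("stopping container-1234")
				return nil
			},
		},
	}
	for _, j := range jobs {
		if err := j.Fn(); err != nil {
			fmt.Println(j.ID, "failed:", err)
		}
	}
}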

View File

@ -97,6 +97,14 @@ func GetRuntime(ctx context.Context, c *cliconfig.PodmanCommand) (*LocalRuntime,
}, nil
}
// DeferredShutdown is a bogus wrapper for compat with the libpod
// runtime and should only be run when a defer is being used
func (r RemoteRuntime) DeferredShutdown(force bool) {
if err := r.Shutdown(force); err != nil {
logrus.Error("unable to shutdown runtime")
}
}
// Shutdown is a bogus wrapper for compat with the libpod runtime
func (r RemoteRuntime) Shutdown(force bool) error {
return nil

View File

@ -27,7 +27,9 @@ func ProxySignals(ctr *libpod.Container) {
if err := ctr.Kill(uint(s.(syscall.Signal))); err != nil {
logrus.Errorf("Error forwarding signal %d to container %s: %v", s, ctr.ID(), err)
signal.StopCatch(sigBuffer)
syscall.Kill(syscall.Getpid(), s.(syscall.Signal))
if err := syscall.Kill(syscall.Getpid(), s.(syscall.Signal)); err != nil {
logrus.Errorf("failed to kill pid %d", syscall.Getpid())
}
}
}
}()
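When forwarding fails, the proxy stops catching the signal and re-raises it at its own pid so the default disposition (termination) applies, and the Kill error is now logged. A compact, self-contained sketch of that loop; forwardSignal is a stand-in for ctr.Kill and is made to fail so the re-raise path runs:

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"
)

// forwardSignal stands in for ctr.Kill; it always fails here.
func forwardSignal(s syscall.Signal) error {
	return syscall.ESRCH
}

func main() {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT)

	go func() {
		for s := range sigs {
			sig := s.(syscall.Signal)
			if err := forwardSignal(sig); err != nil {
				log.Printf("forwarding %v failed: %v", sig, err)
				// Stop catching, then deliver the signal to ourselves so the
				// default handler runs; the Kill error is checked, not dropped.
				signal.Stop(sigs)
				if err := syscall.Kill(syscall.Getpid(), sig); err != nil {
					log.Printf("failed to kill pid %d: %v", syscall.Getpid(), err)
				}
			}
		}
	}()

	// Trigger the path once, then give the handler time to run.
	_ = syscall.Kill(syscall.Getpid(), syscall.SIGTERM)
	time.Sleep(time.Second)
}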

View File

@ -35,7 +35,9 @@ func StartAttachCtr(ctx context.Context, ctr *libpod.Container, stdout, stderr,
}
logrus.SetFormatter(&RawTtyFormatter{})
term.SetRawTerminal(os.Stdin.Fd())
if _, err := term.SetRawTerminal(os.Stdin.Fd()); err != nil {
return err
}
defer restoreTerminal(oldTermState)
}
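SetRawTerminal switches stdin into raw mode so key presses (including detach sequences) reach the container unmodified; its error now aborts the attach instead of continuing with a half-configured terminal. A minimal equivalent using golang.org/x/term (an assumption; the code above uses docker's term package):

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdin.Fd())
	if !term.IsTerminal(fd) {
		log.Fatal("stdin is not a terminal")
	}

	// MakeRaw returns the previous state; failing here should abort,
	// since continuing without raw mode breaks interactive attach.
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		log.Fatalf("failed to set raw terminal: %v", err)
	}
	// Always restore the terminal on the way out.
	defer func() {
		if err := term.Restore(fd, oldState); err != nil {
			log.Printf("failed to restore terminal: %v", err)
		}
	}()

	fmt.Print("terminal is in raw mode; press any key to exit\r\n")
	buf := make([]byte, 1)
	if _, err := os.Stdin.Read(buf); err != nil {
		log.Printf("read error: %v", err)
	}
}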

View File

@ -0,0 +1,23 @@
package errorhandling
import (
"os"
"github.com/sirupsen/logrus"
)
// SyncQuiet syncs a file and logs any error. Should only be used within
// a defer.
func SyncQuiet(f *os.File) {
if err := f.Sync(); err != nil {
logrus.Errorf("unable to sync file %s: %q", f.Name(), err)
}
}
// CloseQuiet closes a file and logs any error. Should only be used within
// a defer.
func CloseQuiet(f *os.File) {
if err := f.Close(); err != nil {
logrus.Errorf("unable to close file %s: %q", f.Name(), err)
}
}
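A usage sketch for the two new helpers: write a file, then sync and close it from defers that log failures rather than return them. The surrounding program is illustrative and assumes the libpod module is importable at the path added above; otherwise the two helpers can be copied locally.

package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/containers/libpod/pkg/errorhandling"
)

func writeNote(path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	// Defers run last-in-first-out: SyncQuiet flushes first, then
	// CloseQuiet closes; both log failures instead of returning them.
	defer errorhandling.CloseQuiet(f)
	defer errorhandling.SyncQuiet(f)

	_, err = f.WriteString("checkpoint complete\n")
	return err
}

func main() {
	path := filepath.Join(os.TempDir(), "errorhandling-demo.txt")
	if err := writeNote(path); err != nil {
		log.Fatal(err)
	}
}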

View File

@ -18,6 +18,7 @@ package firewall
import (
"fmt"
"github.com/sirupsen/logrus"
"strings"
"github.com/godbus/dbus"
@ -113,7 +114,9 @@ func (fb *fwdBackend) Del(conf *FirewallNetConf) error {
// Remove firewalld rules which assigned the given source IP to the given zone
firewalldObj := fb.conn.Object(firewalldName, firewalldPath)
var res string
firewalldObj.Call(firewalldZoneInterface+"."+firewalldRemoveSourceMethod, 0, getFirewalldZone(conf), ipStr).Store(&res)
if err := firewalldObj.Call(firewalldZoneInterface+"."+firewalldRemoveSourceMethod, 0, getFirewalldZone(conf), ipStr).Store(&res); err != nil {
logrus.Errorf("unable to store firewallobj")
}
}
return nil
}

View File

@ -21,6 +21,7 @@ package firewall
import (
"fmt"
"github.com/sirupsen/logrus"
"net"
"github.com/coreos/go-iptables/iptables"
@ -53,7 +54,9 @@ func generateFilterRule(privChainName string) []string {
func cleanupRules(ipt *iptables.IPTables, privChainName string, rules [][]string) {
for _, rule := range rules {
ipt.Delete("filter", privChainName, rule...)
if err := ipt.Delete("filter", privChainName, rule...); err != nil {
logrus.Errorf("failed to delete iptables rule %s", privChainName)
}
}
}
@ -185,7 +188,9 @@ func (ib *iptablesBackend) Add(conf *FirewallNetConf) error {
func (ib *iptablesBackend) Del(conf *FirewallNetConf) error {
for proto, ipt := range ib.protos {
ib.delRules(conf, ipt, proto)
if err := ib.delRules(conf, ipt, proto); err != nil {
logrus.Errorf("failed to delete iptables backend rule %s", conf.IptablesAdminChainName)
}
}
return nil
}

View File

@ -5,6 +5,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/sirupsen/logrus"
"io"
osexec "os/exec"
"time"
@ -54,7 +55,9 @@ func Run(ctx context.Context, hook *rspec.Hook, state []byte, stdout io.Writer,
case err = <-exit:
return err, err
case <-ctx.Done():
cmd.Process.Kill()
if err := cmd.Process.Kill(); err != nil {
logrus.Errorf("failed to kill pid %v", cmd.Process)
}
timer := time.NewTimer(postKillTimeout)
defer timer.Stop()
select {
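The hunk above kills a hook that outlives its context and now logs a failed Kill; below is a compact, self-contained version of the same race between process exit and context cancellation (the timeout and the sleep command are illustrative):

package main

import (
	"context"
	"log"
	"os/exec"
	"time"
)

func runWithTimeout(ctx context.Context, name string, args ...string) error {
	cmd := exec.Command(name, args...)
	if err := cmd.Start(); err != nil {
		return err
	}

	exit := make(chan error, 1)
	go func() { exit <- cmd.Wait() }()

	select {
	case err := <-exit:
		return err
	case <-ctx.Done():
		// The hook ran too long: kill it, and surface a Kill failure
		// instead of ignoring it.
		if err := cmd.Process.Kill(); err != nil {
			log.Printf("failed to kill pid %d: %v", cmd.Process.Pid, err)
		}
		// Reap the child so it does not linger as a zombie.
		<-exit
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := runWithTimeout(ctx, "sleep", "10"); err != nil {
		log.Printf("hook failed: %v", err)
	}
}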

View File

@ -30,6 +30,7 @@ import (
"github.com/containers/libpod/libpod"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/errorhandling"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -153,7 +154,7 @@ func ReadLogs(logPath string, ctr *libpod.Container, opts *LogOptions) error {
if err != nil {
return errors.Wrapf(err, "failed to open log file %q", logPath)
}
defer file.Close()
defer errorhandling.CloseQuiet(file)
msg := &logMessage{}
opts.bytes = -1
@ -161,9 +162,9 @@ func ReadLogs(logPath string, ctr *libpod.Container, opts *LogOptions) error {
reader := bufio.NewReader(file)
if opts.Follow {
followLog(reader, writer, opts, ctr, msg, logPath)
err = followLog(reader, writer, opts, ctr, msg, logPath)
} else {
dumpLog(reader, writer, opts, msg, logPath)
err = dumpLog(reader, writer, opts, msg, logPath)
}
return err
}

View File

@ -83,7 +83,9 @@ func NewNS() (ns.NetNS, error) {
if err != nil {
return nil, err
}
mountPointFd.Close()
if err := mountPointFd.Close(); err != nil {
return nil, err
}
// Ensure the mount point is cleaned up on errors; if the namespace
// was successfully mounted this will have no effect because the file

View File

@ -17,6 +17,7 @@ import (
"syscall"
"unsafe"
"github.com/containers/libpod/pkg/errorhandling"
"github.com/containers/storage/pkg/idtools"
"github.com/docker/docker/pkg/signal"
"github.com/godbus/dbus"
@ -41,8 +42,7 @@ const (
)
func runInUser() error {
os.Setenv("_CONTAINERS_USERNS_CONFIGURED", "done")
return nil
return os.Setenv("_CONTAINERS_USERNS_CONFIGURED", "done")
}
var (
@ -57,9 +57,15 @@ func IsRootless() bool {
rootlessGIDInit := int(C.rootless_gid())
if rootlessUIDInit != 0 {
// This happens if we joined the user+mount namespace as part of
os.Setenv("_CONTAINERS_USERNS_CONFIGURED", "done")
os.Setenv("_CONTAINERS_ROOTLESS_UID", fmt.Sprintf("%d", rootlessUIDInit))
os.Setenv("_CONTAINERS_ROOTLESS_GID", fmt.Sprintf("%d", rootlessGIDInit))
if err := os.Setenv("_CONTAINERS_USERNS_CONFIGURED", "done"); err != nil {
logrus.Errorf("failed to set environment variable %s as %s", "_CONTAINERS_USERNS_CONFIGURED", "done")
}
if err := os.Setenv("_CONTAINERS_ROOTLESS_UID", fmt.Sprintf("%d", rootlessUIDInit)); err != nil {
logrus.Errorf("failed to set environment variable %s as %d", "_CONTAINERS_ROOTLESS_UID", rootlessUIDInit)
}
if err := os.Setenv("_CONTAINERS_ROOTLESS_GID", fmt.Sprintf("%d", rootlessGIDInit)); err != nil {
logrus.Errorf("failed to set environment variable %s as %d", "_CONTAINERS_ROOTLESS_GID", rootlessGIDInit)
}
}
isRootless = os.Geteuid() != 0 || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != ""
})
@ -185,18 +191,24 @@ func getUserNSFirstChild(fd uintptr) (*os.File, error) {
}
if ns == currentNS {
syscall.Close(int(nextFd))
if err := syscall.Close(int(nextFd)); err != nil {
return nil, err
}
// Drop O_CLOEXEC for the fd.
_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, fd, syscall.F_SETFD, 0)
if errno != 0 {
syscall.Close(int(fd))
if err := syscall.Close(int(fd)); err != nil {
logrus.Errorf("failed to close file descriptor %d", fd)
}
return nil, errno
}
return os.NewFile(fd, "userns child"), nil
}
syscall.Close(int(fd))
if err := syscall.Close(int(fd)); err != nil {
return nil, err
}
fd = nextFd
}
}
@ -252,7 +264,9 @@ func EnableLinger() (string, error) {
if lingerEnabled && lingerFile != "" {
f, err := os.Create(lingerFile)
if err == nil {
f.Close()
if err := f.Close(); err != nil {
logrus.Errorf("failed to close %s", f.Name())
}
} else {
logrus.Debugf("could not create linger file: %v", err)
}
@ -348,8 +362,8 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (bool,
}
r, w := os.NewFile(uintptr(fds[0]), "sync host"), os.NewFile(uintptr(fds[1]), "sync child")
defer r.Close()
defer w.Close()
defer errorhandling.CloseQuiet(r)
defer errorhandling.CloseQuiet(w)
defer w.Write([]byte("0"))
pidC := C.reexec_in_user_namespace(C.int(r.Fd()), cPausePid, cFileToRead, fileOutputFD)
@ -361,9 +375,9 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (bool,
var uids, gids []idtools.IDMap
username := os.Getenv("USER")
if username == "" {
user, err := user.LookupId(fmt.Sprintf("%d", os.Getuid()))
userID, err := user.LookupId(fmt.Sprintf("%d", os.Getuid()))
if err == nil {
username = user.Username
username = userID.Username
}
}
mappings, err := idtools.NewIDMappings(username, username)
@ -458,7 +472,9 @@ func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (bool,
continue
}
syscall.Kill(int(pidC), s.(syscall.Signal))
if err := syscall.Kill(int(pidC), s.(syscall.Signal)); err != nil {
logrus.Errorf("failed to kill %d", int(pidC))
}
}
}()
@ -519,17 +535,19 @@ func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []st
r, w := os.NewFile(uintptr(fds[0]), "read file"), os.NewFile(uintptr(fds[1]), "write file")
defer w.Close()
defer r.Close()
defer errorhandling.CloseQuiet(w)
defer errorhandling.CloseQuiet(r)
if _, _, err := becomeRootInUserNS("", path, w); err != nil {
lastErr = err
continue
}
w.Close()
if err := w.Close(); err != nil {
return false, 0, err
}
defer func() {
r.Close()
errorhandling.CloseQuiet(r)
C.reexec_in_user_namespace_wait(-1, 0)
}()

Some files were not shown because too many files have changed in this diff.