libpod: switch to golang native error wrapping
We now use the golang error wrapping format specifier `%w` instead of the
deprecated github.com/pkg/errors package.

[NO NEW TESTS NEEDED]

Signed-off-by: Sascha Grunert <sgrunert@redhat.com>
parent 340eeed0cb
commit 251d91699d
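The change is mechanical, and the reason it is safe is that `%w` keeps the wrapped error in the chain. As a rough sketch of the pattern applied throughout the diff below (the sentinel and helper here are illustrative stand-ins, not libpod's actual definitions), wrapping with `fmt.Errorf(..., "%w", err)` keeps sentinel errors matchable with `errors.Is`, which is why call sites that previously compared `errors.Cause(err)` can switch over:

```go
package main

import (
	"errors"
	"fmt"
)

// errNoSuchCtr is a stand-in sentinel error, playing the role of the
// sentinels libpod keeps in its define package.
var errNoSuchCtr = errors.New("no such container")

// lookupContainer is a hypothetical helper that fails with the sentinel.
func lookupContainer(id string) error {
	// Old style: errors.Wrapf(errNoSuchCtr, "container %s not found in DB", id)
	// New style: fmt.Errorf with %w keeps the sentinel in the error chain.
	return fmt.Errorf("container %s not found in DB: %w", id, errNoSuchCtr)
}

func main() {
	err := lookupContainer("deadbeef")

	// Old style: errors.Cause(err) == errNoSuchCtr
	// New style: errors.Is walks the %w chain, so the wrapped sentinel still matches.
	if errors.Is(err, errNoSuchCtr) {
		fmt.Println("matched wrapped sentinel:", err)
	}
}
```

`errors.As` works the same way for typed errors, so wrapped values stay inspectable without github.com/pkg/errors.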
File diff suppressed because it is too large
@@ -2,6 +2,7 @@ package libpod

 import (
 	"bytes"
+	"fmt"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -10,7 +11,6 @@ import (
 	"github.com/containers/podman/v4/libpod/define"
 	"github.com/containers/podman/v4/pkg/rootless"
 	"github.com/containers/storage"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	bolt "go.etcd.io/bbolt"
 )
@@ -195,7 +195,7 @@ func checkRuntimeConfig(db *bolt.DB, rt *Runtime) error {
 			}

 			if err := configBkt.Put(missing.key, dbValue); err != nil {
-				return errors.Wrapf(err, "error updating %s in DB runtime config", missing.name)
+				return fmt.Errorf("error updating %s in DB runtime config: %w", missing.name, err)
 			}
 		}

@@ -236,8 +236,8 @@ func readOnlyValidateConfig(bucket *bolt.Bucket, toCheck dbConfigValidation) (bo
 			return true, nil
 		}

-		return true, errors.Wrapf(define.ErrDBBadConfig, "database %s %q does not match our %s %q",
-			toCheck.name, dbValue, toCheck.name, toCheck.runtimeValue)
+		return true, fmt.Errorf("database %s %q does not match our %s %q: %w",
+			toCheck.name, dbValue, toCheck.name, toCheck.runtimeValue, define.ErrDBBadConfig)
 	}

 	return true, nil
@@ -254,7 +254,7 @@ func (s *BoltState) getDBCon() (*bolt.DB, error) {

 	db, err := bolt.Open(s.dbPath, 0600, nil)
 	if err != nil {
-		return nil, errors.Wrapf(err, "error opening database %s", s.dbPath)
+		return nil, fmt.Errorf("error opening database %s: %w", s.dbPath, err)
 	}

 	return db, nil
@@ -283,7 +283,7 @@ func (s *BoltState) closeDBCon(db *bolt.DB) error {
 func getIDBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
 	bkt := tx.Bucket(idRegistryBkt)
 	if bkt == nil {
-		return nil, errors.Wrapf(define.ErrDBBadConfig, "id registry bucket not found in DB")
+		return nil, fmt.Errorf("id registry bucket not found in DB: %w", define.ErrDBBadConfig)
 	}
 	return bkt, nil
 }
@ -291,7 +291,7 @@ func getIDBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getNamesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(nameRegistryBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "name registry bucket not found in DB")
|
||||
return nil, fmt.Errorf("name registry bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -299,7 +299,7 @@ func getNamesBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getNSBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(nsRegistryBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "namespace registry bucket not found in DB")
|
||||
return nil, fmt.Errorf("namespace registry bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -307,7 +307,7 @@ func getNSBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getCtrBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(ctrBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "containers bucket not found in DB")
|
||||
return nil, fmt.Errorf("containers bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -315,7 +315,7 @@ func getCtrBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getAllCtrsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(allCtrsBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "all containers bucket not found in DB")
|
||||
return nil, fmt.Errorf("all containers bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -323,7 +323,7 @@ func getAllCtrsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getPodBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(podBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "pods bucket not found in DB")
|
||||
return nil, fmt.Errorf("pods bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -331,7 +331,7 @@ func getPodBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(allPodsBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "all pods bucket not found in DB")
|
||||
return nil, fmt.Errorf("all pods bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -339,7 +339,7 @@ func getAllPodsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(volBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "volumes bucket not found in DB")
|
||||
return nil, fmt.Errorf("volumes bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -347,7 +347,7 @@ func getVolBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(allVolsBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "all volumes bucket not found in DB")
|
||||
return nil, fmt.Errorf("all volumes bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -355,7 +355,7 @@ func getAllVolsBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(execBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "exec bucket not found in DB")
|
||||
return nil, fmt.Errorf("exec bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -363,7 +363,7 @@ func getExecBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(runtimeConfigBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "runtime configuration bucket not found in DB")
|
||||
return nil, fmt.Errorf("runtime configuration bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -371,7 +371,7 @@ func getRuntimeConfigBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getExitCodeBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(exitCodeBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "exit-code container bucket not found in DB")
|
||||
return nil, fmt.Errorf("exit-code container bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@ -379,7 +379,7 @@ func getExitCodeBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
|||
func getExitCodeTimeStampBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
|
||||
bkt := tx.Bucket(exitCodeTimeStampBkt)
|
||||
if bkt == nil {
|
||||
return nil, errors.Wrapf(define.ErrDBBadConfig, "exit-code time stamp bucket not found in DB")
|
||||
return nil, fmt.Errorf("exit-code time stamp bucket not found in DB: %w", define.ErrDBBadConfig)
|
||||
}
|
||||
return bkt, nil
|
||||
}
|
||||
|
|
@@ -387,23 +387,23 @@ func getExitCodeTimeStampBucket(tx *bolt.Tx) (*bolt.Bucket, error) {
 func (s *BoltState) getContainerConfigFromDB(id []byte, config *ContainerConfig, ctrsBkt *bolt.Bucket) error {
 	ctrBkt := ctrsBkt.Bucket(id)
 	if ctrBkt == nil {
-		return errors.Wrapf(define.ErrNoSuchCtr, "container %s not found in DB", string(id))
+		return fmt.Errorf("container %s not found in DB: %w", string(id), define.ErrNoSuchCtr)
 	}

 	if s.namespaceBytes != nil {
 		ctrNamespaceBytes := ctrBkt.Get(namespaceKey)
 		if !bytes.Equal(s.namespaceBytes, ctrNamespaceBytes) {
-			return errors.Wrapf(define.ErrNSMismatch, "cannot retrieve container %s as it is part of namespace %q and we are in namespace %q", string(id), string(ctrNamespaceBytes), s.namespace)
+			return fmt.Errorf("cannot retrieve container %s as it is part of namespace %q and we are in namespace %q: %w", string(id), string(ctrNamespaceBytes), s.namespace, define.ErrNSMismatch)
 		}
 	}

 	configBytes := ctrBkt.Get(configKey)
 	if configBytes == nil {
-		return errors.Wrapf(define.ErrInternal, "container %s missing config key in DB", string(id))
+		return fmt.Errorf("container %s missing config key in DB: %w", string(id), define.ErrInternal)
 	}

 	if err := json.Unmarshal(configBytes, config); err != nil {
-		return errors.Wrapf(err, "error unmarshalling container %s config", string(id))
+		return fmt.Errorf("error unmarshalling container %s config: %w", string(id), err)
 	}

 	// convert ports to the new format if needed
@@ -426,7 +426,7 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
 	// Get the lock
 	lock, err := s.runtime.lockManager.RetrieveLock(ctr.config.LockID)
 	if err != nil {
-		return errors.Wrapf(err, "error retrieving lock for container %s", string(id))
+		return fmt.Errorf("error retrieving lock for container %s: %w", string(id), err)
 	}
 	ctr.lock = lock

@ -473,29 +473,29 @@ func (s *BoltState) getContainerFromDB(id []byte, ctr *Container, ctrsBkt *bolt.
|
|||
func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error {
|
||||
podDB := podBkt.Bucket(id)
|
||||
if podDB == nil {
|
||||
return errors.Wrapf(define.ErrNoSuchPod, "pod with ID %s not found", string(id))
|
||||
return fmt.Errorf("pod with ID %s not found: %w", string(id), define.ErrNoSuchPod)
|
||||
}
|
||||
|
||||
if s.namespaceBytes != nil {
|
||||
podNamespaceBytes := podDB.Get(namespaceKey)
|
||||
if !bytes.Equal(s.namespaceBytes, podNamespaceBytes) {
|
||||
return errors.Wrapf(define.ErrNSMismatch, "cannot retrieve pod %s as it is part of namespace %q and we are in namespace %q", string(id), string(podNamespaceBytes), s.namespace)
|
||||
return fmt.Errorf("cannot retrieve pod %s as it is part of namespace %q and we are in namespace %q: %w", string(id), string(podNamespaceBytes), s.namespace, define.ErrNSMismatch)
|
||||
}
|
||||
}
|
||||
|
||||
podConfigBytes := podDB.Get(configKey)
|
||||
if podConfigBytes == nil {
|
||||
return errors.Wrapf(define.ErrInternal, "pod %s is missing configuration key in DB", string(id))
|
||||
return fmt.Errorf("pod %s is missing configuration key in DB: %w", string(id), define.ErrInternal)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(podConfigBytes, pod.config); err != nil {
|
||||
return errors.Wrapf(err, "error unmarshalling pod %s config from DB", string(id))
|
||||
return fmt.Errorf("error unmarshalling pod %s config from DB: %w", string(id), err)
|
||||
}
|
||||
|
||||
// Get the lock
|
||||
lock, err := s.runtime.lockManager.RetrieveLock(pod.config.LockID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error retrieving lock for pod %s", string(id))
|
||||
return fmt.Errorf("error retrieving lock for pod %s: %w", string(id), err)
|
||||
}
|
||||
pod.lock = lock
|
||||
|
||||
|
|
@ -508,23 +508,23 @@ func (s *BoltState) getPodFromDB(id []byte, pod *Pod, podBkt *bolt.Bucket) error
|
|||
func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bucket) error {
|
||||
volDB := volBkt.Bucket(name)
|
||||
if volDB == nil {
|
||||
return errors.Wrapf(define.ErrNoSuchVolume, "volume with name %s not found", string(name))
|
||||
return fmt.Errorf("volume with name %s not found: %w", string(name), define.ErrNoSuchVolume)
|
||||
}
|
||||
|
||||
volConfigBytes := volDB.Get(configKey)
|
||||
if volConfigBytes == nil {
|
||||
return errors.Wrapf(define.ErrInternal, "volume %s is missing configuration key in DB", string(name))
|
||||
return fmt.Errorf("volume %s is missing configuration key in DB: %w", string(name), define.ErrInternal)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(volConfigBytes, volume.config); err != nil {
|
||||
return errors.Wrapf(err, "error unmarshalling volume %s config from DB", string(name))
|
||||
return fmt.Errorf("error unmarshalling volume %s config from DB: %w", string(name), err)
|
||||
}
|
||||
|
||||
// Volume state is allowed to be nil for legacy compatibility
|
||||
volStateBytes := volDB.Get(stateKey)
|
||||
if volStateBytes != nil {
|
||||
if err := json.Unmarshal(volStateBytes, volume.state); err != nil {
|
||||
return errors.Wrapf(err, "error unmarshalling volume %s state from DB", string(name))
|
||||
return fmt.Errorf("error unmarshalling volume %s state from DB: %w", string(name), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -546,7 +546,7 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
|
|||
// Get the lock
|
||||
lock, err := s.runtime.lockManager.RetrieveLock(volume.config.LockID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error retrieving lock for volume %q", string(name))
|
||||
return fmt.Errorf("error retrieving lock for volume %q: %w", string(name), err)
|
||||
}
|
||||
volume.lock = lock
|
||||
|
||||
|
|
@ -560,8 +560,8 @@ func (s *BoltState) getVolumeFromDB(name []byte, volume *Volume, volBkt *bolt.Bu
|
|||
// If pod is not nil, the container is added to the pod as well
|
||||
func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
|
||||
if s.namespace != "" && s.namespace != ctr.config.Namespace {
|
||||
return errors.Wrapf(define.ErrNSMismatch, "cannot add container %s as it is in namespace %q and we are in namespace %q",
|
||||
ctr.ID(), s.namespace, ctr.config.Namespace)
|
||||
return fmt.Errorf("cannot add container %s as it is in namespace %q and we are in namespace %q: %w",
|
||||
ctr.ID(), s.namespace, ctr.config.Namespace, define.ErrNSMismatch)
|
||||
}
|
||||
|
||||
// Set the original networks to nil. We can save some space by not storing it in the config
|
||||
|
|
@ -572,11 +572,11 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
|
|||
// JSON container structs to insert into DB
|
||||
configJSON, err := json.Marshal(ctr.config)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error marshalling container %s config to JSON", ctr.ID())
|
||||
return fmt.Errorf("error marshalling container %s config to JSON: %w", ctr.ID(), err)
|
||||
}
|
||||
stateJSON, err := json.Marshal(ctr.state)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error marshalling container %s state to JSON", ctr.ID())
|
||||
return fmt.Errorf("error marshalling container %s state to JSON: %w", ctr.ID(), err)
|
||||
}
|
||||
netNSPath := getNetNSPath(ctr)
|
||||
dependsCtrs := ctr.Dependencies()
|
||||
|
|
@ -594,16 +594,16 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
|
|||
for net, opts := range configNetworks {
|
||||
// Check that we don't have any empty network names
|
||||
if net == "" {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "network names cannot be an empty string")
|
||||
return fmt.Errorf("network names cannot be an empty string: %w", define.ErrInvalidArg)
|
||||
}
|
||||
if opts.InterfaceName == "" {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "network interface name cannot be an empty string")
|
||||
return fmt.Errorf("network interface name cannot be an empty string: %w", define.ErrInvalidArg)
|
||||
}
|
||||
// always add the short id as alias for docker compat
|
||||
opts.Aliases = append(opts.Aliases, ctr.config.ID[:12])
|
||||
optBytes, err := json.Marshal(opts)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error marshalling network options JSON for container %s", ctr.ID())
|
||||
return fmt.Errorf("error marshalling network options JSON for container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
networks[net] = optBytes
|
||||
}
|
||||
|
|
@ -659,17 +659,17 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
|
|||
podDB = podBucket.Bucket(podID)
|
||||
if podDB == nil {
|
||||
pod.valid = false
|
||||
return errors.Wrapf(define.ErrNoSuchPod, "pod %s does not exist in database", pod.ID())
|
||||
return fmt.Errorf("pod %s does not exist in database: %w", pod.ID(), define.ErrNoSuchPod)
|
||||
}
|
||||
podCtrs = podDB.Bucket(containersBkt)
|
||||
if podCtrs == nil {
|
||||
return errors.Wrapf(define.ErrInternal, "pod %s does not have a containers bucket", pod.ID())
|
||||
return fmt.Errorf("pod %s does not have a containers bucket: %w", pod.ID(), define.ErrInternal)
|
||||
}
|
||||
|
||||
podNS := podDB.Get(namespaceKey)
|
||||
if !bytes.Equal(podNS, ctrNamespace) {
|
||||
return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %s and pod %s is in namespace %s",
|
||||
ctr.ID(), ctr.config.Namespace, pod.ID(), pod.config.Namespace)
|
||||
return fmt.Errorf("container %s is in namespace %s and pod %s is in namespace %s: %w",
|
||||
ctr.ID(), ctr.config.Namespace, pod.ID(), pod.config.Namespace, define.ErrNSMismatch)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -680,7 +680,7 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
|
|||
if allCtrsBucket.Get(idExist) == nil {
|
||||
err = define.ErrPodExists
|
||||
}
|
||||
return errors.Wrapf(err, "ID \"%s\" is in use", ctr.ID())
|
||||
return fmt.Errorf("ID \"%s\" is in use: %w", ctr.ID(), err)
|
||||
}
|
||||
nameExist := namesBucket.Get(ctrName)
|
||||
if nameExist != nil {
|
||||
|
|
@ -688,66 +688,66 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
|
|||
if allCtrsBucket.Get(nameExist) == nil {
|
||||
err = define.ErrPodExists
|
||||
}
|
||||
return errors.Wrapf(err, "name \"%s\" is in use", ctr.Name())
|
||||
return fmt.Errorf("name \"%s\" is in use: %w", ctr.Name(), err)
|
||||
}
|
||||
|
||||
// No overlapping containers
|
||||
// Add the new container to the DB
|
||||
if err := idsBucket.Put(ctrID, ctrName); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s ID to DB", ctr.ID())
|
||||
return fmt.Errorf("error adding container %s ID to DB: %w", ctr.ID(), err)
|
||||
}
|
||||
if err := namesBucket.Put(ctrName, ctrID); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s name (%s) to DB", ctr.ID(), ctr.Name())
|
||||
return fmt.Errorf("error adding container %s name (%s) to DB: %w", ctr.ID(), ctr.Name(), err)
|
||||
}
|
||||
if ctrNamespace != nil {
|
||||
if err := nsBucket.Put(ctrID, ctrNamespace); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s namespace (%q) to DB", ctr.ID(), ctr.Namespace())
|
||||
return fmt.Errorf("error adding container %s namespace (%q) to DB: %w", ctr.ID(), ctr.Namespace(), err)
|
||||
}
|
||||
}
|
||||
if err := allCtrsBucket.Put(ctrID, ctrName); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s to all containers bucket in DB", ctr.ID())
|
||||
return fmt.Errorf("error adding container %s to all containers bucket in DB: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
newCtrBkt, err := ctrBucket.CreateBucket(ctrID)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s bucket to DB", ctr.ID())
|
||||
return fmt.Errorf("error adding container %s bucket to DB: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
if err := newCtrBkt.Put(configKey, configJSON); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s config to DB", ctr.ID())
|
||||
return fmt.Errorf("error adding container %s config to DB: %w", ctr.ID(), err)
|
||||
}
|
||||
if err := newCtrBkt.Put(stateKey, stateJSON); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s state to DB", ctr.ID())
|
||||
return fmt.Errorf("error adding container %s state to DB: %w", ctr.ID(), err)
|
||||
}
|
||||
if ctrNamespace != nil {
|
||||
if err := newCtrBkt.Put(namespaceKey, ctrNamespace); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s namespace to DB", ctr.ID())
|
||||
return fmt.Errorf("error adding container %s namespace to DB: %w", ctr.ID(), err)
|
||||
}
|
||||
}
|
||||
if pod != nil {
|
||||
if err := newCtrBkt.Put(podIDKey, []byte(pod.ID())); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s pod to DB", ctr.ID())
|
||||
return fmt.Errorf("error adding container %s pod to DB: %w", ctr.ID(), err)
|
||||
}
|
||||
}
|
||||
if netNSPath != "" {
|
||||
if err := newCtrBkt.Put(netNSKey, []byte(netNSPath)); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s netns path to DB", ctr.ID())
|
||||
return fmt.Errorf("error adding container %s netns path to DB: %w", ctr.ID(), err)
|
||||
}
|
||||
}
|
||||
if len(networks) > 0 {
|
||||
ctrNetworksBkt, err := newCtrBkt.CreateBucket(networksBkt)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating networks bucket for container %s", ctr.ID())
|
||||
return fmt.Errorf("error creating networks bucket for container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
for network, opts := range networks {
|
||||
if err := ctrNetworksBkt.Put([]byte(network), opts); err != nil {
|
||||
return errors.Wrapf(err, "error adding network %q to networks bucket for container %s", network, ctr.ID())
|
||||
return fmt.Errorf("error adding network %q to networks bucket for container %s: %w", network, ctr.ID(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := newCtrBkt.CreateBucket(dependenciesBkt); err != nil {
|
||||
return errors.Wrapf(err, "error creating dependencies bucket for container %s", ctr.ID())
|
||||
return fmt.Errorf("error creating dependencies bucket for container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
// Add dependencies for the container
|
||||
|
|
@ -756,42 +756,42 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
|
|||
|
||||
depCtrBkt := ctrBucket.Bucket(depCtrID)
|
||||
if depCtrBkt == nil {
|
||||
return errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s, but it does not exist in the DB", ctr.ID(), dependsCtr)
|
||||
return fmt.Errorf("container %s depends on container %s, but it does not exist in the DB: %w", ctr.ID(), dependsCtr, define.ErrNoSuchCtr)
|
||||
}
|
||||
|
||||
depCtrPod := depCtrBkt.Get(podIDKey)
|
||||
if pod != nil {
|
||||
// If we're part of a pod, make sure the dependency is part of the same pod
|
||||
if depCtrPod == nil {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is not in pod %s", ctr.ID(), dependsCtr, pod.ID())
|
||||
return fmt.Errorf("container %s depends on container %s which is not in pod %s: %w", ctr.ID(), dependsCtr, pod.ID(), define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
if string(depCtrPod) != pod.ID() {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is in a different pod (%s)", ctr.ID(), dependsCtr, string(depCtrPod))
|
||||
return fmt.Errorf("container %s depends on container %s which is in a different pod (%s): %w", ctr.ID(), dependsCtr, string(depCtrPod), define.ErrInvalidArg)
|
||||
}
|
||||
} else if depCtrPod != nil {
|
||||
// If we're not part of a pod, we cannot depend on containers in a pod
|
||||
return errors.Wrapf(define.ErrInvalidArg, "container %s depends on container %s which is in a pod - containers not in pods cannot depend on containers in pods", ctr.ID(), dependsCtr)
|
||||
return fmt.Errorf("container %s depends on container %s which is in a pod - containers not in pods cannot depend on containers in pods: %w", ctr.ID(), dependsCtr, define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
depNamespace := depCtrBkt.Get(namespaceKey)
|
||||
if !bytes.Equal(ctrNamespace, depNamespace) {
|
||||
return errors.Wrapf(define.ErrNSMismatch, "container %s in namespace %q depends on container %s in namespace %q - namespaces must match", ctr.ID(), ctr.config.Namespace, dependsCtr, string(depNamespace))
|
||||
return fmt.Errorf("container %s in namespace %q depends on container %s in namespace %q - namespaces must match: %w", ctr.ID(), ctr.config.Namespace, dependsCtr, string(depNamespace), define.ErrNSMismatch)
|
||||
}
|
||||
|
||||
depCtrDependsBkt := depCtrBkt.Bucket(dependenciesBkt)
|
||||
if depCtrDependsBkt == nil {
|
||||
return errors.Wrapf(define.ErrInternal, "container %s does not have a dependencies bucket", dependsCtr)
|
||||
return fmt.Errorf("container %s does not have a dependencies bucket: %w", dependsCtr, define.ErrInternal)
|
||||
}
|
||||
if err := depCtrDependsBkt.Put(ctrID, ctrName); err != nil {
|
||||
return errors.Wrapf(err, "error adding ctr %s as dependency of container %s", ctr.ID(), dependsCtr)
|
||||
return fmt.Errorf("error adding ctr %s as dependency of container %s: %w", ctr.ID(), dependsCtr, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Add ctr to pod
|
||||
if pod != nil && podCtrs != nil {
|
||||
if err := podCtrs.Put(ctrID, ctrName); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s to pod %s", ctr.ID(), pod.ID())
|
||||
return fmt.Errorf("error adding container %s to pod %s: %w", ctr.ID(), pod.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -799,16 +799,16 @@ func (s *BoltState) addContainer(ctr *Container, pod *Pod) error {
|
|||
for _, vol := range ctr.config.NamedVolumes {
|
||||
volDB := volBkt.Bucket([]byte(vol.Name))
|
||||
if volDB == nil {
|
||||
return errors.Wrapf(define.ErrNoSuchVolume, "no volume with name %s found in database when adding container %s", vol.Name, ctr.ID())
|
||||
return fmt.Errorf("no volume with name %s found in database when adding container %s: %w", vol.Name, ctr.ID(), define.ErrNoSuchVolume)
|
||||
}
|
||||
|
||||
ctrDepsBkt, err := volDB.CreateBucketIfNotExists(volDependenciesBkt)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating volume %s dependencies bucket to add container %s", vol.Name, ctr.ID())
|
||||
return fmt.Errorf("error creating volume %s dependencies bucket to add container %s: %w", vol.Name, ctr.ID(), err)
|
||||
}
|
||||
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
|
||||
if err := ctrDepsBkt.Put(ctrID, ctrID); err != nil {
|
||||
return errors.Wrapf(err, "error adding container %s to volume %s dependencies", ctr.ID(), vol.Name)
|
||||
return fmt.Errorf("error adding container %s to volume %s dependencies: %w", ctr.ID(), vol.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -868,7 +868,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
|
|||
podDB = podBucket.Bucket(podID)
|
||||
if podDB == nil {
|
||||
pod.valid = false
|
||||
return errors.Wrapf(define.ErrNoSuchPod, "no pod with ID %s found in DB", pod.ID())
|
||||
return fmt.Errorf("no pod with ID %s found in DB: %w", pod.ID(), define.ErrNoSuchPod)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -876,17 +876,17 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
|
|||
ctrExists := ctrBucket.Bucket(ctrID)
|
||||
if ctrExists == nil {
|
||||
ctr.valid = false
|
||||
return errors.Wrapf(define.ErrNoSuchCtr, "no container with ID %s found in DB", ctr.ID())
|
||||
return fmt.Errorf("no container with ID %s found in DB: %w", ctr.ID(), define.ErrNoSuchCtr)
|
||||
}
|
||||
|
||||
// Compare namespace
|
||||
// We can't remove containers not in our namespace
|
||||
if s.namespace != "" {
|
||||
if s.namespace != ctr.config.Namespace {
|
||||
return errors.Wrapf(define.ErrNSMismatch, "container %s is in namespace %q, does not match our namespace %q", ctr.ID(), ctr.config.Namespace, s.namespace)
|
||||
return fmt.Errorf("container %s is in namespace %q, does not match our namespace %q: %w", ctr.ID(), ctr.config.Namespace, s.namespace, define.ErrNSMismatch)
|
||||
}
|
||||
if pod != nil && s.namespace != pod.config.Namespace {
|
||||
return errors.Wrapf(define.ErrNSMismatch, "pod %s is in namespace %q, does not match out namespace %q", pod.ID(), pod.config.Namespace, s.namespace)
|
||||
return fmt.Errorf("pod %s is in namespace %q, does not match out namespace %q: %w", pod.ID(), pod.config.Namespace, s.namespace, define.ErrNSMismatch)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -899,10 +899,10 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
|
|||
} else {
|
||||
ctrInPod := podCtrs.Get(ctrID)
|
||||
if ctrInPod == nil {
|
||||
return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not in pod %s", ctr.ID(), pod.ID())
|
||||
return fmt.Errorf("container %s is not in pod %s: %w", ctr.ID(), pod.ID(), define.ErrNoSuchCtr)
|
||||
}
|
||||
if err := podCtrs.Delete(ctrID); err != nil {
|
||||
return errors.Wrapf(err, "error removing container %s from pod %s", ctr.ID(), pod.ID())
|
||||
return fmt.Errorf("error removing container %s from pod %s: %w", ctr.ID(), pod.ID(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -920,14 +920,14 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
|
|||
return err
|
||||
}
|
||||
if len(sessions) > 0 {
|
||||
return errors.Wrapf(define.ErrExecSessionExists, "container %s has active exec sessions: %s", ctr.ID(), strings.Join(sessions, ", "))
|
||||
return fmt.Errorf("container %s has active exec sessions: %s: %w", ctr.ID(), strings.Join(sessions, ", "), define.ErrExecSessionExists)
|
||||
}
|
||||
}
|
||||
|
||||
// Does the container have dependencies?
|
||||
ctrDepsBkt := ctrExists.Bucket(dependenciesBkt)
|
||||
if ctrDepsBkt == nil {
|
||||
return errors.Wrapf(define.ErrInternal, "container %s does not have a dependencies bucket", ctr.ID())
|
||||
return fmt.Errorf("container %s does not have a dependencies bucket: %w", ctr.ID(), define.ErrInternal)
|
||||
}
|
||||
deps := []string{}
|
||||
err = ctrDepsBkt.ForEach(func(id, value []byte) error {
|
||||
|
|
@ -939,25 +939,25 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
|
|||
return err
|
||||
}
|
||||
if len(deps) != 0 {
|
||||
return errors.Wrapf(define.ErrDepExists, "container %s is a dependency of the following containers: %s", ctr.ID(), strings.Join(deps, ", "))
|
||||
return fmt.Errorf("container %s is a dependency of the following containers: %s: %w", ctr.ID(), strings.Join(deps, ", "), define.ErrDepExists)
|
||||
}
|
||||
|
||||
if err := ctrBucket.DeleteBucket(ctrID); err != nil {
|
||||
return errors.Wrapf(define.ErrInternal, "error deleting container %s from DB", ctr.ID())
|
||||
return fmt.Errorf("error deleting container %s from DB: %w", ctr.ID(), define.ErrInternal)
|
||||
}
|
||||
|
||||
if err := idsBucket.Delete(ctrID); err != nil {
|
||||
return errors.Wrapf(err, "error deleting container %s ID in DB", ctr.ID())
|
||||
return fmt.Errorf("error deleting container %s ID in DB: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
if err := namesBucket.Delete(ctrName); err != nil {
|
||||
return errors.Wrapf(err, "error deleting container %s name in DB", ctr.ID())
|
||||
return fmt.Errorf("error deleting container %s name in DB: %w", ctr.ID(), err)
|
||||
}
|
||||
if err := nsBucket.Delete(ctrID); err != nil {
|
||||
return errors.Wrapf(err, "error deleting container %s namespace in DB", ctr.ID())
|
||||
return fmt.Errorf("error deleting container %s namespace in DB: %w", ctr.ID(), err)
|
||||
}
|
||||
if err := allCtrsBucket.Delete(ctrID); err != nil {
|
||||
return errors.Wrapf(err, "error deleting container %s from all containers bucket in DB", ctr.ID())
|
||||
return fmt.Errorf("error deleting container %s from all containers bucket in DB: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
depCtrs := ctr.Dependencies()
|
||||
|
|
@ -986,7 +986,7 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
|
|||
}
|
||||
|
||||
if err := depCtrDependsBkt.Delete(ctrID); err != nil {
|
||||
return errors.Wrapf(err, "error removing container %s as a dependency of container %s", ctr.ID(), depCtr)
|
||||
return fmt.Errorf("error removing container %s as a dependency of container %s: %w", ctr.ID(), depCtr, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1001,11 +1001,11 @@ func (s *BoltState) removeContainer(ctr *Container, pod *Pod, tx *bolt.Tx) error
|
|||
|
||||
ctrDepsBkt := volDB.Bucket(volDependenciesBkt)
|
||||
if ctrDepsBkt == nil {
|
||||
return errors.Wrapf(define.ErrInternal, "volume %s is missing container dependencies bucket, cannot remove container %s from dependencies", vol.Name, ctr.ID())
|
||||
return fmt.Errorf("volume %s is missing container dependencies bucket, cannot remove container %s from dependencies: %w", vol.Name, ctr.ID(), define.ErrInternal)
|
||||
}
|
||||
if depExists := ctrDepsBkt.Get(ctrID); depExists == nil {
|
||||
if err := ctrDepsBkt.Delete(ctrID); err != nil {
|
||||
return errors.Wrapf(err, "error deleting container %s dependency on volume %s", ctr.ID(), vol.Name)
|
||||
return fmt.Errorf("error deleting container %s dependency on volume %s: %w", ctr.ID(), vol.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1062,7 +1062,7 @@ func (s *BoltState) lookupContainerID(idOrName string, ctrBucket, namesBucket, n
|
|||
}
|
||||
if strings.HasPrefix(string(checkID), idOrName) {
|
||||
if exists {
|
||||
return errors.Wrapf(define.ErrCtrExists, "more than one result for container ID %s", idOrName)
|
||||
return fmt.Errorf("more than one result for container ID %s: %w", idOrName, define.ErrCtrExists)
|
||||
}
|
||||
id = checkID
|
||||
exists = true
|
||||
|
|
@ -1075,9 +1075,9 @@ func (s *BoltState) lookupContainerID(idOrName string, ctrBucket, namesBucket, n
|
|||
return nil, err
|
||||
} else if !exists {
|
||||
if isPod {
|
||||
return nil, errors.Wrapf(define.ErrNoSuchCtr, "%q is a pod, not a container", idOrName)
|
||||
return nil, fmt.Errorf("%q is a pod, not a container: %w", idOrName, define.ErrNoSuchCtr)
|
||||
}
|
||||
return nil, errors.Wrapf(define.ErrNoSuchCtr, "no container with name or ID %q found", idOrName)
|
||||
return nil, fmt.Errorf("no container with name or ID %q found: %w", idOrName, define.ErrNoSuchCtr)
|
||||
}
|
||||
return id, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,8 +4,9 @@
|
|||
package libpod
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -29,7 +30,7 @@ func replaceNetNS(netNSPath string, ctr *Container, newState *ContainerState) er
|
|||
newState.NetNS = ns
|
||||
} else {
|
||||
if ctr.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
|
||||
return errors.Wrapf(err, "error joining network namespace of container %s", ctr.ID())
|
||||
return fmt.Errorf("error joining network namespace of container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
logrus.Errorf("Joining network namespace for container %s: %v", ctr.ID(), err)
|
||||
|
|
|
|||
|
|
@ -19,7 +19,6 @@ import (
|
|||
"github.com/containers/podman/v4/libpod/lock"
|
||||
"github.com/containers/storage"
|
||||
spec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@@ -355,14 +354,14 @@ func (c *Container) specFromState() (*spec.Spec, error) {
 		returnSpec = new(spec.Spec)
 		content, err := ioutil.ReadAll(f)
 		if err != nil {
-			return nil, errors.Wrapf(err, "error reading container config")
+			return nil, fmt.Errorf("error reading container config: %w", err)
 		}
 		if err := json.Unmarshal(content, &returnSpec); err != nil {
-			return nil, errors.Wrapf(err, "error unmarshalling container config")
+			return nil, fmt.Errorf("error unmarshalling container config: %w", err)
 		}
 	} else if !os.IsNotExist(err) {
 		// ignore when the file does not exist
-		return nil, errors.Wrapf(err, "error opening container config")
+		return nil, fmt.Errorf("error opening container config: %w", err)
 	}

 	return returnSpec, nil
@ -518,7 +517,7 @@ func (c *Container) PortMappings() ([]types.PortMapping, error) {
|
|||
if len(c.config.NetNsCtr) > 0 {
|
||||
netNsCtr, err := c.runtime.GetContainer(c.config.NetNsCtr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to look up network namespace for container %s", c.ID())
|
||||
return nil, fmt.Errorf("unable to look up network namespace for container %s: %w", c.ID(), err)
|
||||
}
|
||||
return netNsCtr.PortMappings()
|
||||
}
|
||||
|
|
@ -705,7 +704,7 @@ func (c *Container) Mounted() (bool, string, error) {
|
|||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.syncContainer(); err != nil {
|
||||
return false, "", errors.Wrapf(err, "error updating container %s state", c.ID())
|
||||
return false, "", fmt.Errorf("error updating container %s state: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
// We cannot directly return c.state.Mountpoint as it is not guaranteed
|
||||
|
|
@ -735,7 +734,7 @@ func (c *Container) StartedTime() (time.Time, error) {
|
|||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.syncContainer(); err != nil {
|
||||
return time.Time{}, errors.Wrapf(err, "error updating container %s state", c.ID())
|
||||
return time.Time{}, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
return c.state.StartedTime, nil
|
||||
|
|
@ -747,7 +746,7 @@ func (c *Container) FinishedTime() (time.Time, error) {
|
|||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.syncContainer(); err != nil {
|
||||
return time.Time{}, errors.Wrapf(err, "error updating container %s state", c.ID())
|
||||
return time.Time{}, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
return c.state.FinishedTime, nil
|
||||
|
|
@ -762,7 +761,7 @@ func (c *Container) ExitCode() (int32, bool, error) {
|
|||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.syncContainer(); err != nil {
|
||||
return 0, false, errors.Wrapf(err, "error updating container %s state", c.ID())
|
||||
return 0, false, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
return c.state.ExitCode, c.state.Exited, nil
|
||||
|
|
@ -774,7 +773,7 @@ func (c *Container) OOMKilled() (bool, error) {
|
|||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.syncContainer(); err != nil {
|
||||
return false, errors.Wrapf(err, "error updating container %s state", c.ID())
|
||||
return false, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
return c.state.OOMKilled, nil
|
||||
|
|
@ -845,7 +844,7 @@ func (c *Container) execSessionNoCopy(id string) (*ExecSession, error) {
|
|||
|
||||
session, ok := c.state.ExecSessions[id]
|
||||
if !ok {
|
||||
return nil, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", id, c.ID())
|
||||
return nil, fmt.Errorf("no exec session with ID %s found in container %s: %w", id, c.ID(), define.ErrNoSuchExecSession)
|
||||
}
|
||||
|
||||
return session, nil
|
||||
|
|
@ -861,7 +860,7 @@ func (c *Container) ExecSession(id string) (*ExecSession, error) {
|
|||
|
||||
returnSession := new(ExecSession)
|
||||
if err := JSONDeepCopy(session, returnSession); err != nil {
|
||||
return nil, errors.Wrapf(err, "error copying contents of container %s exec session %s", c.ID(), session.ID())
|
||||
return nil, fmt.Errorf("error copying contents of container %s exec session %s: %w", c.ID(), session.ID(), err)
|
||||
}
|
||||
|
||||
return returnSession, nil
|
||||
|
|
@ -921,7 +920,7 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in
|
|||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.syncContainer(); err != nil {
|
||||
return "", errors.Wrapf(err, "error updating container %s state", c.ID())
|
||||
return "", fmt.Errorf("error updating container %s state: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -932,11 +931,11 @@ func (c *Container) NamespacePath(linuxNS LinuxNS) (string, error) { //nolint:in
|
|||
// If the container is not running, an error will be returned
|
||||
func (c *Container) namespacePath(linuxNS LinuxNS) (string, error) { //nolint:interfacer
|
||||
if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
|
||||
return "", errors.Wrapf(define.ErrCtrStopped, "cannot get namespace path unless container %s is running", c.ID())
|
||||
return "", fmt.Errorf("cannot get namespace path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
|
||||
}
|
||||
|
||||
if linuxNS == InvalidNS {
|
||||
return "", errors.Wrapf(define.ErrInvalidArg, "invalid namespace requested from container %s", c.ID())
|
||||
return "", fmt.Errorf("invalid namespace requested from container %s: %w", c.ID(), define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("/proc/%d/ns/%s", c.state.PID, linuxNS.String()), nil
|
||||
|
|
@ -959,7 +958,7 @@ func (c *Container) CgroupPath() (string, error) {
|
|||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.syncContainer(); err != nil {
|
||||
return "", errors.Wrapf(err, "error updating container %s state", c.ID())
|
||||
return "", fmt.Errorf("error updating container %s state: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
return c.cGroupPath()
|
||||
|
|
@ -971,10 +970,10 @@ func (c *Container) CgroupPath() (string, error) {
|
|||
// NOTE: only call this when owning the container's lock.
|
||||
func (c *Container) cGroupPath() (string, error) {
|
||||
if c.config.NoCgroups || c.config.CgroupsMode == "disabled" {
|
||||
return "", errors.Wrapf(define.ErrNoCgroups, "this container is not creating cgroups")
|
||||
return "", fmt.Errorf("this container is not creating cgroups: %w", define.ErrNoCgroups)
|
||||
}
|
||||
if c.state.State != define.ContainerStateRunning && c.state.State != define.ContainerStatePaused {
|
||||
return "", errors.Wrapf(define.ErrCtrStopped, "cannot get cgroup path unless container %s is running", c.ID())
|
||||
return "", fmt.Errorf("cannot get cgroup path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
|
||||
}
|
||||
|
||||
// Read /proc/{PID}/cgroup and find the *longest* cgroup entry. That's
|
||||
|
|
@ -995,7 +994,7 @@ func (c *Container) cGroupPath() (string, error) {
|
|||
// If the file doesn't exist, it means the container could have been terminated
|
||||
// so report it.
|
||||
if os.IsNotExist(err) {
|
||||
return "", errors.Wrapf(define.ErrCtrStopped, "cannot get cgroup path unless container %s is running", c.ID())
|
||||
return "", fmt.Errorf("cannot get cgroup path unless container %s is running: %w", c.ID(), define.ErrCtrStopped)
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
|
@@ -1024,7 +1023,7 @@ func (c *Container) cGroupPath() (string, error) {
 	}

 	if len(cgroupPath) == 0 {
-		return "", errors.Errorf("could not find any cgroup in %q", procPath)
+		return "", fmt.Errorf("could not find any cgroup in %q", procPath)
 	}

 	cgroupManager := c.CgroupManager()
@ -1059,7 +1058,7 @@ func (c *Container) RootFsSize() (int64, error) {
|
|||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.syncContainer(); err != nil {
|
||||
return -1, errors.Wrapf(err, "error updating container %s state", c.ID())
|
||||
return -1, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
return c.rootFsSize()
|
||||
|
|
@ -1071,7 +1070,7 @@ func (c *Container) RWSize() (int64, error) {
|
|||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if err := c.syncContainer(); err != nil {
|
||||
return -1, errors.Wrapf(err, "error updating container %s state", c.ID())
|
||||
return -1, fmt.Errorf("error updating container %s state: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
return c.rwSize()
|
||||
|
|
@ -1173,7 +1172,7 @@ func (c *Container) ContainerState() (*ContainerState, error) {
|
|||
}
|
||||
returnConfig := new(ContainerState)
|
||||
if err := JSONDeepCopy(c.state, returnConfig); err != nil {
|
||||
return nil, errors.Wrapf(err, "error copying container %s state", c.ID())
|
||||
return nil, fmt.Errorf("error copying container %s state: %w", c.ID(), err)
|
||||
}
|
||||
return c.state, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2,6 +2,7 @@ package libpod
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
|
@ -14,7 +15,6 @@ import (
|
|||
"github.com/containers/podman/v4/libpod/events"
|
||||
"github.com/containers/podman/v4/pkg/signal"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -39,7 +39,7 @@ func (c *Container) Init(ctx context.Context, recursive bool) error {
|
|||
}
|
||||
|
||||
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateStopped, define.ContainerStateExited) {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has already been created in runtime", c.ID())
|
||||
return fmt.Errorf("container %s has already been created in runtime: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
if !recursive {
|
||||
|
|
@ -197,7 +197,7 @@ func (c *Container) StopWithTimeout(timeout uint) error {
|
|||
}
|
||||
|
||||
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopping) {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "can only stop created or running containers. %s is in state %s", c.ID(), c.state.State.String())
|
||||
return fmt.Errorf("can only stop created or running containers. %s is in state %s: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
return c.stop(timeout)
|
||||
|
|
@ -221,7 +221,7 @@ func (c *Container) Kill(signal uint) error {
|
|||
// stop the container and if that is taking too long, a user
|
||||
// may have decided to kill the container after all.
|
||||
default:
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "can only kill running containers. %s is in state %s", c.ID(), c.state.State.String())
|
||||
return fmt.Errorf("can only kill running containers. %s is in state %s: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
// Hardcode all = false, we only use all when removing.
|
||||
|
|
@ -241,7 +241,7 @@ func (c *Container) Kill(signal uint) error {
|
|||
// the duration of its runtime, only using it at the beginning to verify state.
|
||||
func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-chan define.TerminalSize) error {
|
||||
if c.LogDriver() == define.PassthroughLogging {
|
||||
return errors.Wrapf(define.ErrNoLogs, "this container is using the 'passthrough' log driver, cannot attach")
|
||||
return fmt.Errorf("this container is using the 'passthrough' log driver, cannot attach: %w", define.ErrNoLogs)
|
||||
}
|
||||
if !c.batched {
|
||||
c.lock.Lock()
|
||||
|
|
@ -254,7 +254,7 @@ func (c *Container) Attach(streams *define.AttachStreams, keys string, resize <-
|
|||
}
|
||||
|
||||
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
|
||||
return fmt.Errorf("can only attach to created or running containers: %w", define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
// HACK: This is really gross, but there isn't a better way without
|
||||
|
|
@@ -320,11 +320,11 @@ func (c *Container) HTTPAttach(r *http.Request, w http.ResponseWriter, streams *
 	}

 	if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
-		return errors.Wrapf(define.ErrCtrStateInvalid, "can only attach to created or running containers")
+		return fmt.Errorf("can only attach to created or running containers: %w", define.ErrCtrStateInvalid)
 	}

 	if !streamAttach && !streamLogs {
-		return errors.Wrapf(define.ErrInvalidArg, "must specify at least one of stream or logs")
+		return fmt.Errorf("must specify at least one of stream or logs: %w", define.ErrInvalidArg)
 	}

 	logrus.Infof("Performing HTTP Hijack attach to container %s", c.ID())
@ -346,7 +346,7 @@ func (c *Container) AttachResize(newSize define.TerminalSize) error {
|
|||
}
|
||||
|
||||
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "can only resize created or running containers")
|
||||
return fmt.Errorf("can only resize created or running containers: %w", define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
logrus.Infof("Resizing TTY of container %s", c.ID())
|
||||
|
|
@ -383,20 +383,20 @@ func (c *Container) Unmount(force bool) error {
|
|||
if c.state.Mounted {
|
||||
mounted, err := c.runtime.storageService.MountedContainerImage(c.ID())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "can't determine how many times %s is mounted, refusing to unmount", c.ID())
|
||||
return fmt.Errorf("can't determine how many times %s is mounted, refusing to unmount: %w", c.ID(), err)
|
||||
}
|
||||
if mounted == 1 {
|
||||
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot unmount storage for container %s as it is running or paused", c.ID())
|
||||
return fmt.Errorf("cannot unmount storage for container %s as it is running or paused: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
execSessions, err := c.getActiveExecSessions()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(execSessions) != 0 {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to unmount", c.ID())
|
||||
return fmt.Errorf("container %s has active exec sessions, refusing to unmount: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
return errors.Wrapf(define.ErrInternal, "can't unmount %s last mount, it is still in use", c.ID())
|
||||
return fmt.Errorf("can't unmount %s last mount, it is still in use: %w", c.ID(), define.ErrInternal)
|
||||
}
|
||||
}
|
||||
defer c.newContainerEvent(events.Unmount)
|
||||
|
|
@ -415,10 +415,10 @@ func (c *Container) Pause() error {
|
|||
}
|
||||
|
||||
if c.state.State == define.ContainerStatePaused {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is already paused", c.ID())
|
||||
return fmt.Errorf("%q is already paused: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
if c.state.State != define.ContainerStateRunning {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, can't pause", c.state.State)
|
||||
return fmt.Errorf("%q is not running, can't pause: %w", c.state.State, define.ErrCtrStateInvalid)
|
||||
}
|
||||
defer c.newContainerEvent(events.Pause)
|
||||
return c.pause()
|
||||
|
|
@ -436,7 +436,7 @@ func (c *Container) Unpause() error {
|
|||
}
|
||||
|
||||
if c.state.State != define.ContainerStatePaused {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "%q is not paused, can't unpause", c.ID())
|
||||
return fmt.Errorf("%q is not paused, can't unpause: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
defer c.newContainerEvent(events.Unpause)
|
||||
return c.unpause()
|
||||
|
|
@ -455,7 +455,7 @@ func (c *Container) Export(path string) error {
|
|||
}
|
||||
|
||||
if c.state.State == define.ContainerStateRemoving {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
|
||||
return fmt.Errorf("cannot mount container %s as it is being removed: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
defer c.newContainerEvent(events.Mount)
|
||||
|
|
@ -666,22 +666,21 @@ func (c *Container) Cleanup(ctx context.Context) error {
|
|||
defer c.lock.Unlock()
|
||||
|
||||
if err := c.syncContainer(); err != nil {
|
||||
switch errors.Cause(err) {
|
||||
// When the container has already been removed, the OCI runtime directory remain.
|
||||
case define.ErrNoSuchCtr, define.ErrCtrRemoved:
|
||||
if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
|
||||
if err := c.cleanupRuntime(ctx); err != nil {
|
||||
return errors.Wrapf(err, "error cleaning up container %s from OCI runtime", c.ID())
|
||||
return fmt.Errorf("error cleaning up container %s from OCI runtime: %w", c.ID(), err)
|
||||
}
|
||||
default:
|
||||
logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
|
||||
return nil
|
||||
}
|
||||
logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Check if state is good
|
||||
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateStopping, define.ContainerStateExited) {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, refusing to clean up", c.ID())
|
||||
return fmt.Errorf("container %s is running or paused, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
// Handle restart policy.
|
||||
|
|
@ -703,7 +702,7 @@ func (c *Container) Cleanup(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
if len(sessions) > 0 {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s has active exec sessions, refusing to clean up", c.ID())
|
||||
return fmt.Errorf("container %s has active exec sessions, refusing to clean up: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
defer c.newContainerEvent(events.Cleanup)
|
||||
|
|
@ -789,7 +788,7 @@ func (c *Container) ReloadNetwork() error {
|
|||
}
|
||||
|
||||
if !c.ensureState(define.ContainerStateCreated, define.ContainerStateRunning) {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot reload network unless container network has been configured")
|
||||
return fmt.Errorf("cannot reload network unless container network has been configured: %w", define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
return c.reloadNetwork()
|
||||
|
|
|
|||
|
|
@@ -2,6 +2,7 @@ package libpod

 import (
 	"context"
+	"errors"
 	"fmt"
 	"strings"

@@ -12,7 +13,6 @@ import (
 	"github.com/containers/podman/v4/libpod/define"
 	"github.com/containers/podman/v4/libpod/events"
 	libpodutil "github.com/containers/podman/v4/pkg/util"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -34,7 +34,7 @@ type ContainerCommitOptions struct {
 // image
 func (c *Container) Commit(ctx context.Context, destImage string, options ContainerCommitOptions) (*libimage.Image, error) {
 	if c.config.Rootfs != "" {
-		return nil, errors.Errorf("cannot commit a container that uses an exploded rootfs")
+		return nil, errors.New("cannot commit a container that uses an exploded rootfs")
 	}

 	if !c.batched {
@ -48,7 +48,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
|
|||
|
||||
if c.state.State == define.ContainerStateRunning && options.Pause {
|
||||
if err := c.pause(); err != nil {
|
||||
return nil, errors.Wrapf(err, "error pausing container %q to commit", c.ID())
|
||||
return nil, fmt.Errorf("error pausing container %q to commit: %w", c.ID(), err)
|
||||
}
|
||||
defer func() {
|
||||
if err := c.unpause(); err != nil {
|
||||
|
|
@ -136,7 +136,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
|
|||
if include {
|
||||
vol, err := c.runtime.GetVolume(v.Name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "volume %s used in container %s has been removed", v.Name, c.ID())
|
||||
return nil, fmt.Errorf("volume %s used in container %s has been removed: %w", v.Name, c.ID(), err)
|
||||
}
|
||||
if vol.Anonymous() {
|
||||
importBuilder.AddVolume(v.Dest)
|
||||
|
|
@ -202,7 +202,7 @@ func (c *Container) Commit(ctx context.Context, destImage string, options Contai
|
|||
|
||||
imageRef, err := is.Transport.ParseStoreReference(c.runtime.store, resolvedImageName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error parsing target image name %q", destImage)
|
||||
return nil, fmt.Errorf("error parsing target image name %q: %w", destImage, err)
|
||||
}
|
||||
commitRef = imageRef
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -4,6 +4,8 @@
 package libpod

 import (
+	"errors"
+	"fmt"
 	"io"
 	"os"
 	"path/filepath"
@ -18,7 +20,6 @@ import (
|
|||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
|
@ -196,7 +197,7 @@ func getContainerUser(container *Container, mountPoint string) (specs.User, erro
|
|||
if !strings.Contains(userspec, ":") {
|
||||
groups, err2 := chrootuser.GetAdditionalGroupsForUser(mountPoint, uint64(u.UID))
|
||||
if err2 != nil {
|
||||
if errors.Cause(err2) != chrootuser.ErrNoSuchUser && err == nil {
|
||||
if !errors.Is(err2, chrootuser.ErrNoSuchUser) && err == nil {
|
||||
err = err2
|
||||
}
|
||||
} else {
|
||||
|
|
@ -253,7 +254,7 @@ func (c *Container) joinMountAndExec(f func() error) error {
|
|||
|
||||
inHostPidNS, err := c.inHostPidNS()
|
||||
if err != nil {
|
||||
errChan <- errors.Wrap(err, "checking inHostPidNS")
|
||||
errChan <- fmt.Errorf("checking inHostPidNS: %w", err)
|
||||
return
|
||||
}
|
||||
var pidFD *os.File
@ -2,6 +2,8 @@ package libpod
import (
"context"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
@ -12,7 +14,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -112,7 +113,7 @@ func (e *ExecSession) ContainerID() string {
// configuration and current state.
func (e *ExecSession) Inspect() (*define.InspectExecSession, error) {
if e.Config == nil {
return nil, errors.Wrapf(define.ErrInternal, "given exec session does not have a configuration block")
return nil, fmt.Errorf("given exec session does not have a configuration block: %w", define.ErrInternal)
}
output := new(define.InspectExecSession)
@ -165,18 +166,18 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
// Verify our config
if config == nil {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a configuration to ExecCreate")
return "", fmt.Errorf("must provide a configuration to ExecCreate: %w", define.ErrInvalidArg)
}
if len(config.Command) == 0 {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty command to start an exec session")
return "", fmt.Errorf("must provide a non-empty command to start an exec session: %w", define.ErrInvalidArg)
}
if config.ExitCommandDelay > 0 && len(config.ExitCommand) == 0 {
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty exit command if giving an exit command delay")
return "", fmt.Errorf("must provide a non-empty exit command if giving an exit command delay: %w", define.ErrInvalidArg)
}
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
return "", errors.Wrapf(define.ErrCtrStateInvalid, "can only create exec sessions on running containers")
return "", fmt.Errorf("can only create exec sessions on running containers: %w", define.ErrCtrStateInvalid)
}
// Generate an ID for our new exec session
@ -203,7 +204,7 @@ func (c *Container) ExecCreate(config *ExecConfig) (string, error) {
session.State = define.ExecStateCreated
session.Config = new(ExecConfig)
if err := JSONDeepCopy(config, session.Config); err != nil {
return "", errors.Wrapf(err, "error copying exec configuration into exec session")
return "", fmt.Errorf("error copying exec configuration into exec session: %w", err)
}
if len(session.Config.ExitCommand) > 0 {
@ -243,16 +244,16 @@ func (c *Container) ExecStart(sessionID string) error {
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
return fmt.Errorf("can only start exec sessions when their container is running: %w", define.ErrCtrStateInvalid)
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State != define.ExecStateCreated {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
return fmt.Errorf("can only start created exec sessions, while container %s session %s state is %q: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
@ -295,16 +296,16 @@ func (c *Container) execStartAndAttach(sessionID string, streams *define.AttachS
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
return fmt.Errorf("can only start exec sessions when their container is running: %w", define.ErrCtrStateInvalid)
}
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State != define.ExecStateCreated {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
return fmt.Errorf("can only start created exec sessions, while container %s session %s state is %q: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
@ -370,7 +371,7 @@ func (c *Container) execStartAndAttach(sessionID string, streams *define.AttachS
if lastErr != nil {
logrus.Errorf("Container %s exec session %s error: %v", c.ID(), session.ID(), lastErr)
}
return errors.Wrapf(err, "error syncing container %s state to update exec session %s", c.ID(), sessionID)
return fmt.Errorf("error syncing container %s state to update exec session %s: %w", c.ID(), sessionID, err)
}
// Now handle the error from readExecExitCode above.
@ -441,16 +442,16 @@ func (c *Container) ExecHTTPStartAndAttach(sessionID string, r *http.Request, w
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
// Verify that we are in a good state to continue
if !c.ensureState(define.ContainerStateRunning) {
return errors.Wrapf(define.ErrCtrStateInvalid, "can only start exec sessions when their container is running")
return fmt.Errorf("can only start exec sessions when their container is running: %w", define.ErrCtrStateInvalid)
}
if session.State != define.ExecStateCreated {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "can only start created exec sessions, while container %s session %s state is %q", c.ID(), session.ID(), session.State.String())
return fmt.Errorf("can only start created exec sessions, while container %s session %s state is %q: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Going to start container %s exec session %s and attach to it", c.ID(), session.ID())
@ -567,11 +568,11 @@ func (c *Container) ExecStop(sessionID string, timeout *uint) error {
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State != define.ExecStateRunning {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is %q, can only stop running sessions", c.ID(), session.ID(), session.State.String())
return fmt.Errorf("container %s exec session %s is %q, can only stop running sessions: %w", c.ID(), session.ID(), session.State.String(), define.ErrExecSessionStateInvalid)
}
logrus.Infof("Stopping container %s exec session %s", c.ID(), session.ID())
@ -617,7 +618,7 @@ func (c *Container) ExecCleanup(sessionID string) error {
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
if session.State == define.ExecStateRunning {
@ -628,7 +629,7 @@ func (c *Container) ExecCleanup(sessionID string) error {
}
if alive {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot clean up container %s exec session %s as it is running", c.ID(), session.ID())
return fmt.Errorf("cannot clean up container %s exec session %s as it is running: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
if err := retrieveAndWriteExecExitCode(c, session.ID()); err != nil {
@ -655,7 +656,7 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
logrus.Infof("Removing container %s exec session %s", c.ID(), session.ID())
@ -676,7 +677,7 @@ func (c *Container) ExecRemove(sessionID string, force bool) error {
if session.State == define.ExecStateRunning {
if !force {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "container %s exec session %s is still running, cannot remove", c.ID(), session.ID())
return fmt.Errorf("container %s exec session %s is still running, cannot remove: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
// Stop the session
@ -722,13 +723,13 @@ func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) er
session, ok := c.state.ExecSessions[sessionID]
if !ok {
return errors.Wrapf(define.ErrNoSuchExecSession, "container %s has no exec session with ID %s", c.ID(), sessionID)
return fmt.Errorf("container %s has no exec session with ID %s: %w", c.ID(), sessionID, define.ErrNoSuchExecSession)
}
logrus.Infof("Resizing container %s exec session %s to %+v", c.ID(), session.ID(), newSize)
if session.State != define.ExecStateRunning {
return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it is not running", c.ID(), session.ID())
return fmt.Errorf("cannot resize container %s exec session %s as it is not running: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
// The exec session may have exited since we last updated.
@ -744,7 +745,7 @@ func (c *Container) ExecResize(sessionID string, newSize define.TerminalSize) er
logrus.Errorf("Saving state of container %s: %v", c.ID(), err)
}
return errors.Wrapf(define.ErrExecSessionStateInvalid, "cannot resize container %s exec session %s as it has stopped", c.ID(), session.ID())
return fmt.Errorf("cannot resize container %s exec session %s as it has stopped: %w", c.ID(), session.ID(), define.ErrExecSessionStateInvalid)
}
// Make sure the exec session is still running.
@ -780,7 +781,7 @@ func (c *Container) exec(config *ExecConfig, streams *define.AttachStreams, resi
logrus.Debugf("Sending resize events to exec session %s", sessionID)
for resizeRequest := range resize {
if err := c.ExecResize(sessionID, resizeRequest); err != nil {
if errors.Cause(err) == define.ErrExecSessionStateInvalid {
if errors.Is(err, define.ErrExecSessionStateInvalid) {
// The exec session stopped
// before we could resize.
logrus.Infof("Missed resize on exec session %s, already stopped", sessionID)
@ -799,7 +800,7 @@ func (c *Container) exec(config *ExecConfig, streams *define.AttachStreams, resi
session, err := c.execSessionNoCopy(sessionID)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchExecSession {
if errors.Is(err, define.ErrNoSuchExecSession) {
// TODO: If a proper Context is ever plumbed in here, we
// should use it.
// As things stand, though, it's not worth it - this
@ -807,7 +808,7 @@ func (c *Container) exec(config *ExecConfig, streams *define.AttachStreams, resi
// streaming.
diedEvent, err := c.runtime.GetExecDiedEvent(context.Background(), c.ID(), sessionID)
if err != nil {
return -1, errors.Wrapf(err, "error retrieving exec session %s exit code", sessionID)
return -1, fmt.Errorf("error retrieving exec session %s exit code: %w", sessionID, err)
}
return diedEvent.ContainerExitCode, nil
}
@ -815,7 +816,7 @@ func (c *Container) exec(config *ExecConfig, streams *define.AttachStreams, resi
}
exitCode := session.ExitCode
if err := c.ExecRemove(sessionID, false); err != nil {
if errors.Cause(err) == define.ErrNoSuchExecSession {
if errors.Is(err, define.ErrNoSuchExecSession) {
return exitCode, nil
}
return -1, err
@ -837,7 +838,7 @@ func (c *Container) cleanupExecBundle(sessionID string) (err error) {
}
if pathErr, ok := err.(*os.PathError); ok {
err = pathErr.Err
if errors.Cause(err) == unix.ENOTEMPTY || errors.Cause(err) == unix.EBUSY {
if errors.Is(err, unix.ENOTEMPTY) || errors.Is(err, unix.EBUSY) {
// give other processes a chance to use the container
if !c.batched {
if err := c.save(); err != nil {
@ -909,7 +910,7 @@ func (c *Container) createExecBundle(sessionID string) (retErr error) {
if err := os.MkdirAll(c.execExitFileDir(sessionID), execDirPermission); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating OCI runtime exit file path %s", c.execExitFileDir(sessionID))
return fmt.Errorf("error creating OCI runtime exit file path %s: %w", c.execExitFileDir(sessionID), err)
}
}
return nil
@ -948,7 +949,7 @@ func (c *Container) getExecSessionPID(sessionID string) (int, error) {
return oldSession.PID, nil
}
return -1, errors.Wrapf(define.ErrNoSuchExecSession, "no exec session with ID %s found in container %s", sessionID, c.ID())
return -1, fmt.Errorf("no exec session with ID %s found in container %s: %w", sessionID, c.ID(), define.ErrNoSuchExecSession)
}
// getKnownExecSessions gets a list of all exec sessions we think are running,
@ -1062,7 +1063,7 @@ func (c *Container) removeAllExecSessions() error {
}
// Delete all exec sessions
if err := c.runtime.state.RemoveContainerExecSessions(c); err != nil {
if errors.Cause(err) != define.ErrCtrRemoved {
if !errors.Is(err, define.ErrCtrRemoved) {
if lastErr != nil {
logrus.Errorf("Stopping container %s exec sessions: %v", c.ID(), lastErr)
}
@ -1072,7 +1073,7 @@ func (c *Container) removeAllExecSessions() error {
c.state.ExecSessions = nil
c.state.LegacyExecSessions = nil
if err := c.save(); err != nil {
if errors.Cause(err) != define.ErrCtrRemoved {
if !errors.Is(err, define.ErrCtrRemoved) {
if lastErr != nil {
logrus.Errorf("Stopping container %s exec sessions: %v", c.ID(), lastErr)
}
@ -1113,13 +1114,13 @@ func writeExecExitCode(c *Container, sessionID string, exitCode int) error {
// If we can't do this, no point in continuing, any attempt to save
// would write garbage to the DB.
if err := c.syncContainer(); err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == define.ErrCtrRemoved {
if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
// Container's entirely removed. We can't save status,
// but the container's entirely removed, so we don't
// need to. Exit without error.
return nil
}
return errors.Wrapf(err, "error syncing container %s state to remove exec session %s", c.ID(), sessionID)
return fmt.Errorf("error syncing container %s state to remove exec session %s: %w", c.ID(), sessionID, err)
}
return justWriteExecExitCode(c, sessionID, exitCode)
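The exec hunks above also replace every errors.Cause(err) == sentinel comparison with errors.Is(err, sentinel). The distinction matters: errors.Cause only unwraps wrappers created by github.com/pkg/errors, while errors.Is walks any chain built with %w (and recent pkg/errors releases implement Unwrap as well, so mixed chains keep matching). A minimal sketch with stand-in names, not code from the diff:

package main

import (
	"errors"
	"fmt"
)

// errNoSuchExecSession stands in for libpod's define.ErrNoSuchExecSession.
var errNoSuchExecSession = errors.New("no exec session found")

func lookup(id string) error {
	return fmt.Errorf("container abc has no exec session with ID %s: %w", id, errNoSuchExecSession)
}

func main() {
	err := lookup("deadbeef")
	// errors.Is matches sentinels anywhere in a %w chain; a direct == comparison,
	// or pkg/errors' Cause, would not see through this fmt.Errorf wrapper.
	if errors.Is(err, errNoSuchExecSession) {
		fmt.Println("treat as not found")
	}
}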
@ -2,10 +2,10 @@ package libpod
import (
"context"
"fmt"
"strings"
"github.com/containers/podman/v4/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -60,7 +60,7 @@ func BuildContainerGraph(ctrs []*Container) (*ContainerGraph, error) {
// Get the dep's node
depNode, ok := graph.nodes[dep]
if !ok {
return nil, errors.Wrapf(define.ErrNoSuchCtr, "container %s depends on container %s not found in input list", node.id, dep)
return nil, fmt.Errorf("container %s depends on container %s not found in input list: %w", node.id, dep, define.ErrNoSuchCtr)
}
// Add the dependent node to the node's dependencies
@ -85,7 +85,7 @@ func BuildContainerGraph(ctrs []*Container) (*ContainerGraph, error) {
if err != nil {
return nil, err
} else if cycle {
return nil, errors.Wrapf(define.ErrInternal, "cycle found in container dependency graph")
return nil, fmt.Errorf("cycle found in container dependency graph: %w", define.ErrInternal)
}
return graph, nil
@ -150,7 +150,7 @@ func detectCycles(graph *ContainerGraph) (bool, error) {
if info.lowLink == info.index {
l := len(stack)
if l == 0 {
return false, errors.Wrapf(define.ErrInternal, "empty stack in detectCycles")
return false, fmt.Errorf("empty stack in detectCycles: %w", define.ErrInternal)
}
// Pop off the stack
@ -160,7 +160,7 @@ func detectCycles(graph *ContainerGraph) (bool, error) {
// Popped item is no longer on the stack, mark as such
topInfo, ok := nodes[topOfStack.id]
if !ok {
return false, errors.Wrapf(define.ErrInternal, "error finding node info for %s", topOfStack.id)
return false, fmt.Errorf("error finding node info for %s: %w", topOfStack.id, define.ErrInternal)
}
topInfo.onStack = false
@ -203,7 +203,7 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
if setError {
// Mark us as visited, and set an error
ctrsVisited[node.id] = true
ctrErrors[node.id] = errors.Wrapf(define.ErrCtrStateInvalid, "a dependency of container %s failed to start", node.id)
ctrErrors[node.id] = fmt.Errorf("a dependency of container %s failed to start: %w", node.id, define.ErrCtrStateInvalid)
// Hit anyone who depends on us, and set errors on them too
for _, successor := range node.dependedOn {
@ -243,7 +243,7 @@ func startNode(ctx context.Context, node *containerNode, setError bool, ctrError
} else if len(depsStopped) > 0 {
// Our dependencies are not running
depsList := strings.Join(depsStopped, ",")
ctrErrors[node.id] = errors.Wrapf(define.ErrCtrStateInvalid, "the following dependencies of container %s are not running: %s", node.id, depsList)
ctrErrors[node.id] = fmt.Errorf("the following dependencies of container %s are not running: %s: %w", node.id, depsList, define.ErrCtrStateInvalid)
ctrErrored = true
}
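A mechanical point that helps when reading these hunks: errors.Wrapf takes the wrapped error as its first argument, while the native form appends it as the last argument behind a trailing ": %w". Reviewing the conversion is mostly a matter of checking that the error value moved from the front of the call to the end of the argument list. An illustrative pair with a stand-in sentinel:

package main

import (
	"errors"
	"fmt"
)

// errInternal stands in for libpod's define.ErrInternal.
var errInternal = errors.New("internal libpod error")

func main() {
	// pkg/errors style (removed): errors.Wrapf(errInternal, "cycle found in container dependency graph")
	// native style (added): the sentinel rides at the end of the argument list via %w.
	err := fmt.Errorf("cycle found in container dependency graph: %w", errInternal)
	fmt.Println(err, errors.Is(err, errInternal))
}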
@ -1,6 +1,7 @@
package libpod
import (
"errors"
"fmt"
"sort"
"strings"
@ -14,7 +15,6 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/runtime-tools/validate"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/syndtr/gocapability/capability"
)
@ -24,15 +24,15 @@ import (
func (c *Container) inspectLocked(size bool) (*define.InspectContainerData, error) {
storeCtr, err := c.runtime.store.Container(c.ID())
if err != nil {
return nil, errors.Wrapf(err, "error getting container from store %q", c.ID())
return nil, fmt.Errorf("error getting container from store %q: %w", c.ID(), err)
}
layer, err := c.runtime.store.Layer(storeCtr.LayerID)
if err != nil {
return nil, errors.Wrapf(err, "error reading information about layer %q", storeCtr.LayerID)
return nil, fmt.Errorf("error reading information about layer %q: %w", storeCtr.LayerID, err)
}
driverData, err := driver.GetDriverData(c.runtime.store, layer.ID)
if err != nil {
return nil, errors.Wrapf(err, "error getting graph driver info %q", c.ID())
return nil, fmt.Errorf("error getting graph driver info %q: %w", c.ID(), err)
}
return c.getContainerInspectData(size, driverData)
}
@ -241,7 +241,7 @@ func (c *Container) GetMounts(namedVolumes []*ContainerNamedVolume, imageVolumes
// volume.
volFromDB, err := c.runtime.state.Volume(volume.Name)
if err != nil {
return nil, errors.Wrapf(err, "error looking up volume %s in container %s config", volume.Name, c.ID())
return nil, fmt.Errorf("error looking up volume %s in container %s config: %w", volume.Name, c.ID(), err)
}
mountStruct.Driver = volFromDB.Driver()
@ -3,6 +3,7 @@ package libpod
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
@ -41,7 +42,6 @@ import (
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -85,7 +85,7 @@ func (c *Container) rootFsSize() (int64, error) {
for layer.Parent != "" {
layerSize, err := c.runtime.store.DiffSize(layer.Parent, layer.ID)
if err != nil {
return size, errors.Wrapf(err, "getting diffsize of layer %q and its parent %q", layer.ID, layer.Parent)
return size, fmt.Errorf("getting diffsize of layer %q and its parent %q: %w", layer.ID, layer.Parent, err)
}
size += layerSize
layer, err = c.runtime.store.Layer(layer.Parent)
@ -201,12 +201,12 @@ func (c *Container) handleExitFile(exitFile string, fi os.FileInfo) error {
c.state.FinishedTime = ctime.Created(fi)
statusCodeStr, err := ioutil.ReadFile(exitFile)
if err != nil {
return errors.Wrapf(err, "failed to read exit file for container %s", c.ID())
return fmt.Errorf("failed to read exit file for container %s: %w", c.ID(), err)
}
statusCode, err := strconv.Atoi(string(statusCodeStr))
if err != nil {
return errors.Wrapf(err, "error converting exit status code (%q) for container %s to int",
c.ID(), statusCodeStr)
return fmt.Errorf("error converting exit status code (%q) for container %s to int: %w",
c.ID(), statusCodeStr, err)
}
c.state.ExitCode = int32(statusCode)
@ -268,7 +268,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
return false, nil
} else if c.state.State == define.ContainerStateUnknown {
return false, errors.Wrapf(define.ErrInternal, "invalid container state encountered in restart attempt")
return false, fmt.Errorf("invalid container state encountered in restart attempt: %w", define.ErrInternal)
}
c.newContainerEvent(events.Restart)
@ -371,7 +371,7 @@ func (c *Container) syncContainer() error {
}
if !c.valid {
return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
return fmt.Errorf("container %s is not valid: %w", c.ID(), define.ErrCtrRemoved)
}
return nil
@ -430,16 +430,16 @@ func (c *Container) setupStorageMapping(dest, from *storage.IDMappingOptions) {
// Create container root filesystem for use
func (c *Container) setupStorage(ctx context.Context) error {
if !c.valid {
return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid", c.ID())
return fmt.Errorf("container %s is not valid: %w", c.ID(), define.ErrCtrRemoved)
}
if c.state.State != define.ContainerStateConfigured {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Configured state to have storage set up", c.ID())
return fmt.Errorf("container %s must be in Configured state to have storage set up: %w", c.ID(), define.ErrCtrStateInvalid)
}
// Need both an image ID and image name, plus a bool telling us whether to use the image configuration
if c.config.Rootfs == "" && (c.config.RootfsImageID == "" || c.config.RootfsImageName == "") {
return errors.Wrapf(define.ErrInvalidArg, "must provide image ID and image name to use an image")
return fmt.Errorf("must provide image ID and image name to use an image: %w", define.ErrInvalidArg)
}
options := storage.ContainerOptions{
IDMappingOptions: storage.IDMappingOptions{
@ -475,7 +475,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
defOptions, err := storage.GetMountOptions(c.runtime.store.GraphDriverName(), c.runtime.store.GraphOptions())
if err != nil {
return errors.Wrapf(err, "error getting default mount options")
return fmt.Errorf("error getting default mount options: %w", err)
}
var newOptions []string
for _, opt := range defOptions {
@ -505,12 +505,12 @@ func (c *Container) setupStorage(ctx context.Context) error {
}
containerInfo, containerInfoErr = c.runtime.storageService.CreateContainerStorage(ctx, c.runtime.imageContext, c.config.RootfsImageName, c.config.RootfsImageID, c.config.Name, c.config.ID, options)
if !generateName || errors.Cause(containerInfoErr) != storage.ErrDuplicateName {
if !generateName || !errors.Is(containerInfoErr, storage.ErrDuplicateName) {
break
}
}
if containerInfoErr != nil {
return errors.Wrapf(containerInfoErr, "error creating container storage")
return fmt.Errorf("error creating container storage: %w", containerInfoErr)
}
// only reconfig IDMappings if layer was mounted from storage
@ -552,7 +552,7 @@ func (c *Container) setupStorage(ctx context.Context) error {
artifacts := filepath.Join(c.config.StaticDir, artifactsDir)
if err := os.MkdirAll(artifacts, 0755); err != nil {
return errors.Wrap(err, "error creating artifacts directory")
return fmt.Errorf("error creating artifacts directory: %w", err)
}
return nil
@ -581,16 +581,16 @@ func (c *Container) processLabel(processLabel string) (string, error) {
// Tear down a container's storage prior to removal
func (c *Container) teardownStorage() error {
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove storage for container %s as it is running or paused", c.ID())
return fmt.Errorf("cannot remove storage for container %s as it is running or paused: %w", c.ID(), define.ErrCtrStateInvalid)
}
artifacts := filepath.Join(c.config.StaticDir, artifactsDir)
if err := os.RemoveAll(artifacts); err != nil {
return errors.Wrapf(err, "error removing container %s artifacts %q", c.ID(), artifacts)
return fmt.Errorf("error removing container %s artifacts %q: %w", c.ID(), artifacts, err)
}
if err := c.cleanupStorage(); err != nil {
return errors.Wrapf(err, "failed to clean up container %s storage", c.ID())
return fmt.Errorf("failed to clean up container %s storage: %w", c.ID(), err)
}
if err := c.runtime.storageService.DeleteContainer(c.ID()); err != nil {
@ -598,12 +598,12 @@ func (c *Container) teardownStorage() error {
// error - we wanted it gone, it is already gone.
// Potentially another tool using containers/storage already
// removed it?
if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) {
logrus.Infof("Storage for container %s already removed", c.ID())
return nil
}
return errors.Wrapf(err, "error removing container %s root filesystem", c.ID())
return fmt.Errorf("error removing container %s root filesystem: %w", c.ID(), err)
}
return nil
@ -647,14 +647,14 @@ func (c *Container) refresh() error {
}
if !c.valid {
return errors.Wrapf(define.ErrCtrRemoved, "container %s is not valid - may have been removed", c.ID())
return fmt.Errorf("container %s is not valid - may have been removed: %w", c.ID(), define.ErrCtrRemoved)
}
// We need to get the container's temporary directory from c/storage
// It was lost in the reboot and must be recreated
dir, err := c.runtime.storageService.GetRunDir(c.ID())
if err != nil {
return errors.Wrapf(err, "error retrieving temporary directory for container %s", c.ID())
return fmt.Errorf("error retrieving temporary directory for container %s: %w", c.ID(), err)
}
c.state.RunDir = dir
@ -668,7 +668,7 @@ func (c *Container) refresh() error {
}
root := filepath.Join(c.runtime.config.Engine.TmpDir, "containers-root", c.ID())
if err := os.MkdirAll(root, 0755); err != nil {
return errors.Wrapf(err, "error creating userNS tmpdir for container %s", c.ID())
return fmt.Errorf("error creating userNS tmpdir for container %s: %w", c.ID(), err)
}
if err := os.Chown(root, c.RootUID(), c.RootGID()); err != nil {
return err
@ -678,7 +678,7 @@ func (c *Container) refresh() error {
// We need to pick up a new lock
lock, err := c.runtime.lockManager.AllocateAndRetrieveLock(c.config.LockID)
if err != nil {
return errors.Wrapf(err, "error acquiring lock %d for container %s", c.config.LockID, c.ID())
return fmt.Errorf("error acquiring lock %d for container %s: %w", c.config.LockID, c.ID(), err)
}
c.lock = lock
@ -693,13 +693,13 @@ func (c *Container) refresh() error {
if c.config.rewrite {
// SafeRewriteContainerConfig must be used with care. Make sure to not change config fields by accident.
if err := c.runtime.state.SafeRewriteContainerConfig(c, "", "", c.config); err != nil {
return errors.Wrapf(err, "failed to rewrite the config for container %s", c.config.ID)
return fmt.Errorf("failed to rewrite the config for container %s: %w", c.config.ID, err)
}
c.config.rewrite = false
}
if err := c.save(); err != nil {
return errors.Wrapf(err, "error refreshing state for container %s", c.ID())
return fmt.Errorf("error refreshing state for container %s: %w", c.ID(), err)
}
// Remove ctl and attach files, which may persist across reboot
@ -716,26 +716,26 @@ func (c *Container) removeConmonFiles() error {
// Files are allowed to not exist, so ignore ENOENT
attachFile, err := c.AttachSocketPath()
if err != nil {
return errors.Wrapf(err, "failed to get attach socket path for container %s", c.ID())
return fmt.Errorf("failed to get attach socket path for container %s: %w", c.ID(), err)
}
if err := os.Remove(attachFile); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s attach file", c.ID())
return fmt.Errorf("error removing container %s attach file: %w", c.ID(), err)
}
ctlFile := filepath.Join(c.bundlePath(), "ctl")
if err := os.Remove(ctlFile); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s ctl file", c.ID())
return fmt.Errorf("error removing container %s ctl file: %w", c.ID(), err)
}
winszFile := filepath.Join(c.bundlePath(), "winsz")
if err := os.Remove(winszFile); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s winsz file", c.ID())
return fmt.Errorf("error removing container %s winsz file: %w", c.ID(), err)
}
oomFile := filepath.Join(c.bundlePath(), "oom")
if err := os.Remove(oomFile); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s OOM file", c.ID())
return fmt.Errorf("error removing container %s OOM file: %w", c.ID(), err)
}
// Remove the exit file so we don't leak memory in tmpfs
@ -744,7 +744,7 @@ func (c *Container) removeConmonFiles() error {
return err
}
if err := os.Remove(exitFile); err != nil && !os.IsNotExist(err) {
return errors.Wrapf(err, "error removing container %s exit file", c.ID())
return fmt.Errorf("error removing container %s exit file: %w", c.ID(), err)
}
return nil
@ -755,7 +755,7 @@ func (c *Container) export(path string) error {
if !c.state.Mounted {
containerMount, err := c.runtime.store.Mount(c.ID(), c.config.MountLabel)
if err != nil {
return errors.Wrapf(err, "mounting container %q", c.ID())
return fmt.Errorf("mounting container %q: %w", c.ID(), err)
}
mountPoint = containerMount
defer func() {
@ -767,12 +767,12 @@ func (c *Container) export(path string) error {
input, err := archive.Tar(mountPoint, archive.Uncompressed)
if err != nil {
return errors.Wrapf(err, "error reading container directory %q", c.ID())
return fmt.Errorf("error reading container directory %q: %w", c.ID(), err)
}
outFile, err := os.Create(path)
if err != nil {
return errors.Wrapf(err, "error creating file %q", path)
return fmt.Errorf("error creating file %q: %w", path, err)
}
defer outFile.Close()
@ -788,7 +788,7 @@ func (c *Container) getArtifactPath(name string) string {
// save container state to the database
func (c *Container) save() error {
if err := c.runtime.state.SaveContainer(c); err != nil {
return errors.Wrapf(err, "error saving container %s state", c.ID())
return fmt.Errorf("error saving container %s state: %w", c.ID(), err)
}
return nil
}
@ -799,7 +799,7 @@ func (c *Container) save() error {
func (c *Container) prepareToStart(ctx context.Context, recursive bool) (retErr error) {
// Container must be created or stopped to be started
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateStopped, define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s must be in Created or Stopped state to be started", c.ID())
return fmt.Errorf("container %s must be in Created or Stopped state to be started: %w", c.ID(), define.ErrCtrStateInvalid)
}
if !recursive {
@ -842,11 +842,11 @@ func (c *Container) prepareToStart(ctx context.Context, recursive bool) (retErr
func (c *Container) checkDependenciesAndHandleError() error {
notRunning, err := c.checkDependenciesRunning()
if err != nil {
return errors.Wrapf(err, "error checking dependencies for container %s", c.ID())
return fmt.Errorf("error checking dependencies for container %s: %w", c.ID(), err)
}
if len(notRunning) > 0 {
depString := strings.Join(notRunning, ",")
return errors.Wrapf(define.ErrCtrStateInvalid, "some dependencies of container %s are not started: %s", c.ID(), depString)
return fmt.Errorf("some dependencies of container %s are not started: %s: %w", c.ID(), depString, define.ErrCtrStateInvalid)
}
return nil
@ -861,7 +861,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
depVisitedCtrs := make(map[string]*Container)
if err := c.getAllDependencies(depVisitedCtrs); err != nil {
return errors.Wrapf(err, "error starting dependency for container %s", c.ID())
return fmt.Errorf("error starting dependency for container %s: %w", c.ID(), err)
}
// Because of how Go handles passing slices through functions, a slice cannot grow between function calls
@ -874,7 +874,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
// Build a dependency graph of containers
graph, err := BuildContainerGraph(depCtrs)
if err != nil {
return errors.Wrapf(err, "error generating dependency graph for container %s", c.ID())
return fmt.Errorf("error generating dependency graph for container %s: %w", c.ID(), err)
}
// If there are no containers without dependencies, we can't start
@ -884,7 +884,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
if len(graph.nodes) == 0 {
return nil
}
return errors.Wrapf(define.ErrNoSuchCtr, "All dependencies have dependencies of %s", c.ID())
return fmt.Errorf("all dependencies have dependencies of %s: %w", c.ID(), define.ErrNoSuchCtr)
}
ctrErrors := make(map[string]error)
@ -900,7 +900,7 @@ func (c *Container) startDependencies(ctx context.Context) error {
for _, e := range ctrErrors {
logrus.Errorf("%q", e)
}
return errors.Wrapf(define.ErrInternal, "error starting some containers")
return fmt.Errorf("error starting some containers: %w", define.ErrInternal)
}
return nil
}
@ -956,13 +956,13 @@ func (c *Container) checkDependenciesRunning() ([]string, error) {
// Get the dependency container
depCtr, err := c.runtime.state.Container(dep)
if err != nil {
return nil, errors.Wrapf(err, "error retrieving dependency %s of container %s from state", dep, c.ID())
return nil, fmt.Errorf("error retrieving dependency %s of container %s from state: %w", dep, c.ID(), err)
}
// Check the status
state, err := depCtr.State()
if err != nil {
return nil, errors.Wrapf(err, "error retrieving state of dependency %s of container %s", dep, c.ID())
return nil, fmt.Errorf("error retrieving state of dependency %s of container %s: %w", dep, c.ID(), err)
}
if state != define.ContainerStateRunning && !depCtr.config.IsInfra {
notRunning = append(notRunning, dep)
@ -1155,9 +1155,9 @@ func (c *Container) reinit(ctx context.Context, retainRetries bool) error {
func (c *Container) initAndStart(ctx context.Context) (retErr error) {
// If we are ContainerStateUnknown, throw an error
if c.state.State == define.ContainerStateUnknown {
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in an unknown state", c.ID())
return fmt.Errorf("container %s is in an unknown state: %w", c.ID(), define.ErrCtrStateInvalid)
} else if c.state.State == define.ContainerStateRemoving {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start container %s as it is being removed", c.ID())
return fmt.Errorf("cannot start container %s as it is being removed: %w", c.ID(), define.ErrCtrStateInvalid)
}
// If we are running, do nothing
@ -1166,7 +1166,7 @@ func (c *Container) initAndStart(ctx context.Context) (retErr error) {
}
// If we are paused, throw an error
if c.state.State == define.ContainerStatePaused {
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot start paused container %s", c.ID())
return fmt.Errorf("cannot start paused container %s: %w", c.ID(), define.ErrCtrStateInvalid)
}
defer func() {
@ -1276,7 +1276,7 @@ func (c *Container) stop(timeout uint) error {
// is held when busy-waiting for the container to be stopped.
c.state.State = define.ContainerStateStopping
if err := c.save(); err != nil {
return errors.Wrapf(err, "error saving container %s state before stopping", c.ID())
return fmt.Errorf("error saving container %s state before stopping: %w", c.ID(), err)
}
if !c.batched {
c.lock.Unlock()
@ -1287,19 +1287,18 @@ func (c *Container) stop(timeout uint) error {
if !c.batched {
c.lock.Lock()
if err := c.syncContainer(); err != nil {
switch errors.Cause(err) {
// If the container has already been removed (e.g., via
// the cleanup process), set the container state to "stopped".
case define.ErrNoSuchCtr, define.ErrCtrRemoved:
if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) {
// If the container has already been removed (e.g., via
// the cleanup process), set the container state to "stopped".
c.state.State = define.ContainerStateStopped
return stopErr
default:
if stopErr != nil {
logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
return stopErr
}
return err
}
if stopErr != nil {
logrus.Errorf("Syncing container %s status: %v", c.ID(), err)
return stopErr
}
return err
}
}
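The stop() hunk above is the one place where the conversion is more than a one-line swap: a switch over errors.Cause(err) with a multi-value case becomes an explicit if whose condition chains errors.Is checks with ||, and the old default branch becomes the code that follows the if. A minimal sketch of the same reshaping, using stand-in sentinels:

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for libpod's define.ErrNoSuchCtr and define.ErrCtrRemoved.
var (
	errNoSuchCtr  = errors.New("no such container")
	errCtrRemoved = errors.New("container has been removed")
)

func classify(err error) string {
	// Old shape: switch errors.Cause(err) { case errNoSuchCtr, errCtrRemoved: ... default: ... }
	// New shape: one errors.Is check per sentinel, joined with ||.
	if errors.Is(err, errNoSuchCtr) || errors.Is(err, errCtrRemoved) {
		return "already gone"
	}
	return "real failure"
}

func main() {
	err := fmt.Errorf("syncing container abc: %w", errCtrRemoved)
	fmt.Println(classify(err)) // already gone
}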
@ -1337,7 +1336,7 @@ func (c *Container) stop(timeout uint) error {
}
if err := c.save(); err != nil {
return errors.Wrapf(err, "error saving container %s state after stopping", c.ID())
return fmt.Errorf("error saving container %s state after stopping: %w", c.ID(), err)
}
// Wait until we have an exit file, and sync once we do
@ -1351,16 +1350,16 @@ func (c *Container) stop(timeout uint) error {
// Internal, non-locking function to pause a container
func (c *Container) pause() error {
if c.config.NoCgroups {
return errors.Wrapf(define.ErrNoCgroups, "cannot pause without using Cgroups")
return fmt.Errorf("cannot pause without using Cgroups: %w", define.ErrNoCgroups)
}
if rootless.IsRootless() {
cgroupv2, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
return errors.Wrap(err, "failed to determine cgroupversion")
return fmt.Errorf("failed to determine cgroupversion: %w", err)
}
if !cgroupv2 {
return errors.Wrap(define.ErrNoCgroups, "can not pause containers on rootless containers with cgroup V1")
return fmt.Errorf("can not pause containers on rootless containers with cgroup V1: %w", define.ErrNoCgroups)
}
}
@ -1379,7 +1378,7 @@ func (c *Container) pause() error {
// Internal, non-locking function to unpause a container
func (c *Container) unpause() error {
if c.config.NoCgroups {
return errors.Wrapf(define.ErrNoCgroups, "cannot unpause without using Cgroups")
return fmt.Errorf("cannot unpause without using Cgroups: %w", define.ErrNoCgroups)
}
if err := c.ociRuntime.UnpauseContainer(c); err != nil {
@ -1397,7 +1396,7 @@ func (c *Container) unpause() error {
// Internal, non-locking function to restart a container
func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (retErr error) {
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateCreated, define.ContainerStateRunning, define.ContainerStateStopped, define.ContainerStateExited) {
return errors.Wrapf(define.ErrCtrStateInvalid, "unable to restart a container in a paused or unknown state")
return fmt.Errorf("unable to restart a container in a paused or unknown state: %w", define.ErrCtrStateInvalid)
}
c.newContainerEvent(events.Restart)
@ -1472,7 +1471,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
if !c.config.NoShm {
mounted, err := mount.Mounted(c.config.ShmDir)
if err != nil {
return "", errors.Wrapf(err, "unable to determine if %q is mounted", c.config.ShmDir)
return "", fmt.Errorf("unable to determine if %q is mounted: %w", c.config.ShmDir, err)
}
if !mounted && !MountExists(c.config.Spec.Mounts, "/dev/shm") {
@ -1481,7 +1480,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
return "", err
}
if err := os.Chown(c.config.ShmDir, c.RootUID(), c.RootGID()); err != nil {
return "", errors.Wrapf(err, "failed to chown %s", c.config.ShmDir)
return "", fmt.Errorf("failed to chown %s: %w", c.config.ShmDir, err)
}
defer func() {
if deferredErr != nil {
@ -1501,11 +1500,11 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
overlayDest := c.runtime.RunRoot()
contentDir, err := overlay.GenerateStructure(overlayDest, c.ID(), "rootfs", c.RootUID(), c.RootGID())
if err != nil {
return "", errors.Wrapf(err, "rootfs-overlay: failed to create TempDir in the %s directory", overlayDest)
return "", fmt.Errorf("rootfs-overlay: failed to create TempDir in the %s directory: %w", overlayDest, err)
}
overlayMount, err := overlay.Mount(contentDir, c.config.Rootfs, overlayDest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
if err != nil {
return "", errors.Wrapf(err, "rootfs-overlay: creating overlay failed %q", c.config.Rootfs)
return "", fmt.Errorf("rootfs-overlay: creating overlay failed %q: %w", c.config.Rootfs, err)
}
// Seems fuse-overlayfs is not present
@ -1515,7 +1514,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
mountOpts := label.FormatMountLabel(strings.Join(overlayMount.Options, ","), c.MountLabel())
err = mount.Mount("overlay", overlayMount.Source, overlayMount.Type, mountOpts)
if err != nil {
return "", errors.Wrapf(err, "rootfs-overlay: creating overlay failed %q from native overlay", c.config.Rootfs)
return "", fmt.Errorf("rootfs-overlay: creating overlay failed %q from native overlay: %w", c.config.Rootfs, err)
}
}
@ -1526,7 +1525,7 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
}
hostUID, hostGID, err := butil.GetHostIDs(util.IDtoolsToRuntimeSpec(c.config.IDMappings.UIDMap), util.IDtoolsToRuntimeSpec(c.config.IDMappings.GIDMap), uint32(execUser.Uid), uint32(execUser.Gid))
if err != nil {
return "", errors.Wrap(err, "unable to get host UID and host GID")
return "", fmt.Errorf("unable to get host UID and host GID: %w", err)
}
//note: this should not be recursive, if using external rootfs users should be responsible on configuring ownership.
@ -1553,30 +1552,30 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
dirfd, err := unix.Open(mountPoint, unix.O_RDONLY|unix.O_PATH, 0)
if err != nil {
return "", errors.Wrap(err, "open mount point")
return "", fmt.Errorf("open mount point: %w", err)
}
defer unix.Close(dirfd)
err = unix.Mkdirat(dirfd, "etc", 0755)
if err != nil && !os.IsExist(err) {
return "", errors.Wrap(err, "create /etc")
return "", fmt.Errorf("create /etc: %w", err)
}
// If the etc directory was created, chown it to root in the container
if err == nil && (rootUID != 0 || rootGID != 0) {
err = unix.Fchownat(dirfd, "etc", rootUID, rootGID, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return "", errors.Wrap(err, "chown /etc")
return "", fmt.Errorf("chown /etc: %w", err)
}
}
etcInTheContainerPath, err := securejoin.SecureJoin(mountPoint, "etc")
if err != nil {
return "", errors.Wrap(err, "resolve /etc in the container")
return "", fmt.Errorf("resolve /etc in the container: %w", err)
}
etcInTheContainerFd, err := unix.Open(etcInTheContainerPath, unix.O_RDONLY|unix.O_PATH, 0)
if err != nil {
return "", errors.Wrap(err, "open /etc in the container")
return "", fmt.Errorf("open /etc in the container: %w", err)
}
defer unix.Close(etcInTheContainerFd)
@ -1584,13 +1583,13 @@ func (c *Container) mountStorage() (_ string, deferredErr error) {
// create it, so that mount command within the container will work.
err = unix.Symlinkat("/proc/mounts", etcInTheContainerFd, "mtab")
if err != nil && !os.IsExist(err) {
return "", errors.Wrap(err, "creating /etc/mtab symlink")
return "", fmt.Errorf("creating /etc/mtab symlink: %w", err)
}
// If the symlink was created, then also chown it to root in the container
if err == nil && (rootUID != 0 || rootGID != 0) {
err = unix.Fchownat(etcInTheContainerFd, "mtab", rootUID, rootGID, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return "", errors.Wrap(err, "chown /etc/mtab")
return "", fmt.Errorf("chown /etc/mtab: %w", err)
}
}
@ -1624,17 +1623,17 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
|
|||
logrus.Debugf("Going to mount named volume %s", v.Name)
|
||||
vol, err := c.runtime.state.Volume(v.Name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
|
||||
return nil, fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err)
|
||||
}
|
||||
|
||||
if vol.config.LockID == c.config.LockID {
|
||||
return nil, errors.Wrapf(define.ErrWillDeadlock, "container %s and volume %s share lock ID %d", c.ID(), vol.Name(), c.config.LockID)
|
||||
return nil, fmt.Errorf("container %s and volume %s share lock ID %d: %w", c.ID(), vol.Name(), c.config.LockID, define.ErrWillDeadlock)
|
||||
}
|
||||
vol.lock.Lock()
|
||||
defer vol.lock.Unlock()
|
||||
if vol.needsMount() {
|
||||
if err := vol.mount(); err != nil {
|
||||
return nil, errors.Wrapf(err, "error mounting volume %s for container %s", vol.Name(), c.ID())
|
||||
return nil, fmt.Errorf("error mounting volume %s for container %s: %w", vol.Name(), c.ID(), err)
|
||||
}
|
||||
}
|
||||
// The volume may need a copy-up. Check the state.
|
||||
|
|
@ -1647,7 +1646,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
|
|||
|
||||
srcDir, err := securejoin.SecureJoin(mountpoint, v.Dest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error calculating destination path to copy up container %s volume %s", c.ID(), vol.Name())
|
||||
return nil, fmt.Errorf("error calculating destination path to copy up container %s volume %s: %w", c.ID(), vol.Name(), err)
|
||||
}
|
||||
// Do a manual stat on the source directory to verify existence.
|
||||
// Skip the rest if it exists.
|
||||
|
|
@ -1658,7 +1657,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
|
|||
// up.
|
||||
return vol, nil
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error identifying source directory for copy up into volume %s", vol.Name())
|
||||
return nil, fmt.Errorf("error identifying source directory for copy up into volume %s: %w", vol.Name(), err)
|
||||
}
|
||||
// If it's not a directory we're mounting over it.
|
||||
if !srcStat.IsDir() {
|
||||
|
|
@ -1670,7 +1669,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
|
|||
// RHBZ#1928643
|
||||
srcContents, err := ioutil.ReadDir(srcDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading contents of source directory for copy up into volume %s", vol.Name())
|
||||
return nil, fmt.Errorf("error reading contents of source directory for copy up into volume %s: %w", vol.Name(), err)
|
||||
}
|
||||
if len(srcContents) == 0 {
|
||||
return vol, nil
|
||||
|
|
@ -1680,7 +1679,7 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
|
|||
volMount := vol.mountPoint()
|
||||
contents, err := ioutil.ReadDir(volMount)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error listing contents of volume %s mountpoint when copying up from container %s", vol.Name(), c.ID())
|
||||
return nil, fmt.Errorf("error listing contents of volume %s mountpoint when copying up from container %s: %w", vol.Name(), c.ID(), err)
|
||||
}
|
||||
if len(contents) > 0 {
|
||||
// The volume is not empty. It was likely modified
|
||||
|
|
@ -1722,11 +1721,11 @@ func (c *Container) mountNamedVolume(v *ContainerNamedVolume, mountpoint string)
|
|||
if err2 != nil {
|
||||
logrus.Errorf("Streaming contents of container %s directory for volume copy-up: %v", c.ID(), err2)
|
||||
}
|
||||
return nil, errors.Wrapf(err, "error copying up to volume %s", vol.Name())
|
||||
return nil, fmt.Errorf("error copying up to volume %s: %w", vol.Name(), err)
|
||||
}
|
||||
|
||||
if err := <-errChan; err != nil {
|
||||
return nil, errors.Wrapf(err, "error streaming container content for copy up into volume %s", vol.Name())
|
||||
return nil, fmt.Errorf("error streaming container content for copy up into volume %s: %w", vol.Name(), err)
|
||||
}
|
||||
}
|
||||
return vol, nil
|
||||
|
|
@@ -1792,7 +1791,7 @@ func (c *Container) cleanupStorage() error {
 		// error
 		// We still want to be able to kick the container out of the
 		// state
-		if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown || errors.Cause(err) == storage.ErrLayerNotMounted {
+		if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) || errors.Is(err, storage.ErrLayerNotMounted) {
 			logrus.Errorf("Storage for container %s has been removed", c.ID())
 		} else {
 			if cleanupErr != nil {
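Note: sentinel comparisons change shape as well. errors.Cause(err) == someErr only understood pkg/errors wrappers, while errors.Is walks any chain built with %w. A small sketch (the sentinel mimics storage.ErrLayerNotMounted but is defined locally for illustration):

package main

import (
    "errors"
    "fmt"
)

// Stand-in for storage.ErrLayerNotMounted (name borrowed for illustration only).
var errLayerNotMounted = errors.New("layer is not mounted")

func main() {
    wrapped := fmt.Errorf("cleaning up container abc123: %w", errLayerNotMounted)
    // errors.Is unwraps %w chains, so the sentinel is still detected.
    fmt.Println(errors.Is(wrapped, errLayerNotMounted)) // true
    // A plain == comparison on the wrapped error would miss it.
    fmt.Println(wrapped == errLayerNotMounted) // false
}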
@ -1809,7 +1808,7 @@ func (c *Container) cleanupStorage() error {
|
|||
if cleanupErr != nil {
|
||||
logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr)
|
||||
}
|
||||
cleanupErr = errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
|
||||
cleanupErr = fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err)
|
||||
|
||||
// We need to try and unmount every volume, so continue
|
||||
// if they fail.
|
||||
|
|
@ -1822,7 +1821,7 @@ func (c *Container) cleanupStorage() error {
|
|||
if cleanupErr != nil {
|
||||
logrus.Errorf("Unmounting container %s: %v", c.ID(), cleanupErr)
|
||||
}
|
||||
cleanupErr = errors.Wrapf(err, "error unmounting volume %s for container %s", vol.Name(), c.ID())
|
||||
cleanupErr = fmt.Errorf("error unmounting volume %s for container %s: %w", vol.Name(), c.ID(), err)
|
||||
}
|
||||
vol.lock.Unlock()
|
||||
}
|
||||
|
|
@ -1847,7 +1846,7 @@ func (c *Container) cleanup(ctx context.Context) error {
|
|||
|
||||
// Clean up network namespace, if present
|
||||
if err := c.cleanupNetwork(); err != nil {
|
||||
lastError = errors.Wrapf(err, "error removing container %s network", c.ID())
|
||||
lastError = fmt.Errorf("error removing container %s network: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
// cleanup host entry if it is shared
|
||||
|
|
@ -1885,7 +1884,7 @@ func (c *Container) cleanup(ctx context.Context) error {
|
|||
if lastError != nil {
|
||||
logrus.Errorf("Unmounting container %s storage: %v", c.ID(), err)
|
||||
} else {
|
||||
lastError = errors.Wrapf(err, "error unmounting container %s storage", c.ID())
|
||||
lastError = fmt.Errorf("error unmounting container %s storage: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1969,11 +1968,11 @@ func (c *Container) stopPodIfNeeded(ctx context.Context) error {
|
|||
// hooks.
|
||||
func (c *Container) delete(ctx context.Context) error {
|
||||
if err := c.ociRuntime.DeleteContainer(c); err != nil {
|
||||
return errors.Wrapf(err, "error removing container %s from runtime", c.ID())
|
||||
return fmt.Errorf("error removing container %s from runtime: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
if err := c.postDeleteHooks(ctx); err != nil {
|
||||
return errors.Wrapf(err, "container %s poststop hooks", c.ID())
|
||||
return fmt.Errorf("container %s poststop hooks: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -2032,7 +2031,7 @@ func (c *Container) writeStringToRundir(destFile, contents string) (string, erro
|
|||
destFileName := filepath.Join(c.state.RunDir, destFile)
|
||||
|
||||
if err := os.Remove(destFileName); err != nil && !os.IsNotExist(err) {
|
||||
return "", errors.Wrapf(err, "error removing %s for container %s", destFile, c.ID())
|
||||
return "", fmt.Errorf("error removing %s for container %s: %w", destFile, c.ID(), err)
|
||||
}
|
||||
|
||||
if err := writeStringToPath(destFileName, contents, c.config.MountLabel, c.RootUID(), c.RootGID()); err != nil {
|
||||
|
|
@ -2066,22 +2065,22 @@ func (c *Container) saveSpec(spec *spec.Spec) error {
|
|||
jsonPath := filepath.Join(c.bundlePath(), "config.json")
|
||||
if _, err := os.Stat(jsonPath); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "error doing stat on container %s spec", c.ID())
|
||||
return fmt.Errorf("error doing stat on container %s spec: %w", c.ID(), err)
|
||||
}
|
||||
// The spec does not exist, we're fine
|
||||
} else {
|
||||
// The spec exists, need to remove it
|
||||
if err := os.Remove(jsonPath); err != nil {
|
||||
return errors.Wrapf(err, "error replacing runtime spec for container %s", c.ID())
|
||||
return fmt.Errorf("error replacing runtime spec for container %s: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
fileJSON, err := json.Marshal(spec)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error exporting runtime spec for container %s to JSON", c.ID())
|
||||
return fmt.Errorf("error exporting runtime spec for container %s to JSON: %w", c.ID(), err)
|
||||
}
|
||||
if err := ioutil.WriteFile(jsonPath, fileJSON, 0644); err != nil {
|
||||
return errors.Wrapf(err, "error writing runtime spec JSON for container %s to disk", c.ID())
|
||||
return fmt.Errorf("error writing runtime spec JSON for container %s to disk: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
logrus.Debugf("Created OCI spec for container %s at %s", c.ID(), jsonPath)
|
||||
|
|
@@ -2144,19 +2143,19 @@ func (c *Container) setupOCIHooks(ctx context.Context, config *spec.Spec) (map[s
 // mount mounts the container's root filesystem
 func (c *Container) mount() (string, error) {
 	if c.state.State == define.ContainerStateRemoving {
-		return "", errors.Wrapf(define.ErrCtrStateInvalid, "cannot mount container %s as it is being removed", c.ID())
+		return "", fmt.Errorf("cannot mount container %s as it is being removed: %w", c.ID(), define.ErrCtrStateInvalid)
 	}
 
 	mountPoint, err := c.runtime.storageService.MountContainerImage(c.ID())
 	if err != nil {
-		return "", errors.Wrapf(err, "error mounting storage for container %s", c.ID())
+		return "", fmt.Errorf("error mounting storage for container %s: %w", c.ID(), err)
 	}
 	mountPoint, err = filepath.EvalSymlinks(mountPoint)
 	if err != nil {
-		return "", errors.Wrapf(err, "error resolving storage path for container %s", c.ID())
+		return "", fmt.Errorf("error resolving storage path for container %s: %w", c.ID(), err)
 	}
 	if err := os.Chown(mountPoint, c.RootUID(), c.RootGID()); err != nil {
-		return "", errors.Wrapf(err, "cannot chown %s to %d:%d", mountPoint, c.RootUID(), c.RootGID())
+		return "", fmt.Errorf("cannot chown %s to %d:%d: %w", mountPoint, c.RootUID(), c.RootGID(), err)
 	}
 	return mountPoint, nil
 }
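Note: when the old code wrapped a define.* sentinel rather than a returned error, the sentinel moves from the first errors.Wrapf argument to the final %w operand, so callers can still match it. A hedged sketch with a locally defined stand-in for define.ErrCtrStateInvalid:

package main

import (
    "errors"
    "fmt"
)

// Stand-in for define.ErrCtrStateInvalid (illustration only).
var errCtrStateInvalid = errors.New("container state improper")

func checkRemoving(id string, removing bool) error {
    if removing {
        // The sentinel becomes the %w operand at the end of the format string.
        return fmt.Errorf("cannot mount container %s as it is being removed: %w", id, errCtrStateInvalid)
    }
    return nil
}

func main() {
    err := checkRemoving("abc123", true)
    fmt.Println(errors.Is(err, errCtrStateInvalid)) // true
}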
@ -2165,7 +2164,7 @@ func (c *Container) mount() (string, error) {
|
|||
func (c *Container) unmount(force bool) error {
|
||||
// Also unmount storage
|
||||
if _, err := c.runtime.storageService.UnmountContainerImage(c.ID(), force); err != nil {
|
||||
return errors.Wrapf(err, "error unmounting container %s root filesystem", c.ID())
|
||||
return fmt.Errorf("error unmounting container %s root filesystem: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -2178,11 +2177,11 @@ func (c *Container) unmount(force bool) error {
|
|||
// Returns nil if safe to remove, or an error describing why it's unsafe if not.
|
||||
func (c *Container) checkReadyForRemoval() error {
|
||||
if c.state.State == define.ContainerStateUnknown {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "container %s is in invalid state", c.ID())
|
||||
return fmt.Errorf("container %s is in invalid state: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
if c.ensureState(define.ContainerStateRunning, define.ContainerStatePaused) && !c.IsInfra() {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it is %s - running or paused containers cannot be removed without force", c.ID(), c.state.State.String())
|
||||
return fmt.Errorf("cannot remove container %s as it is %s - running or paused containers cannot be removed without force: %w", c.ID(), c.state.State.String(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
// Check exec sessions
|
||||
|
|
@ -2191,7 +2190,7 @@ func (c *Container) checkReadyForRemoval() error {
|
|||
return err
|
||||
}
|
||||
if len(sessions) != 0 {
|
||||
return errors.Wrapf(define.ErrCtrStateInvalid, "cannot remove container %s as it has active exec sessions", c.ID())
|
||||
return fmt.Errorf("cannot remove container %s as it has active exec sessions: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -2294,7 +2293,7 @@ func (c *Container) checkExitFile() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
return errors.Wrapf(err, "error running stat on container %s exit file", c.ID())
|
||||
return fmt.Errorf("error running stat on container %s exit file: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
// Alright, it exists. Transition to Stopped state.
|
||||
|
|
@ -2332,11 +2331,11 @@ func (c *Container) extractSecretToCtrStorage(secr *ContainerSecret) error {
|
|||
|
||||
hostUID, hostGID, err := butil.GetHostIDs(util.IDtoolsToRuntimeSpec(c.config.IDMappings.UIDMap), util.IDtoolsToRuntimeSpec(c.config.IDMappings.GIDMap), secr.UID, secr.GID)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to extract secret")
|
||||
return fmt.Errorf("unable to extract secret: %w", err)
|
||||
}
|
||||
err = ioutil.WriteFile(secretFile, data, 0644)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to create %s", secretFile)
|
||||
return fmt.Errorf("unable to create %s: %w", secretFile, err)
|
||||
}
|
||||
if err := os.Lchown(secretFile, int(hostUID), int(hostGID)); err != nil {
|
||||
return err

@@ -5,6 +5,7 @@ package libpod
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -57,7 +58,6 @@ import (
 	"github.com/opencontainers/runtime-tools/generate"
 	"github.com/opencontainers/selinux/go-selinux"
 	"github.com/opencontainers/selinux/go-selinux/label"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
@@ -65,7 +65,7 @@ import (
 func (c *Container) mountSHM(shmOptions string) error {
 	if err := unix.Mount("shm", c.config.ShmDir, "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV,
 		label.FormatMountLabel(shmOptions, c.config.MountLabel)); err != nil {
-		return errors.Wrapf(err, "failed to mount shm tmpfs %q", c.config.ShmDir)
+		return fmt.Errorf("failed to mount shm tmpfs %q: %w", c.config.ShmDir, err)
 	}
 	return nil
 }
@ -73,7 +73,7 @@ func (c *Container) mountSHM(shmOptions string) error {
|
|||
func (c *Container) unmountSHM(mount string) error {
|
||||
if err := unix.Unmount(mount, 0); err != nil {
|
||||
if err != syscall.EINVAL && err != syscall.ENOENT {
|
||||
return errors.Wrapf(err, "error unmounting container %s SHM mount %s", c.ID(), mount)
|
||||
return fmt.Errorf("error unmounting container %s SHM mount %s: %w", c.ID(), mount, err)
|
||||
}
|
||||
// If it's just an EINVAL or ENOENT, debug logs only
|
||||
logrus.Debugf("Container %s failed to unmount %s : %v", c.ID(), mount, err)
|
||||
|
|
@ -152,7 +152,7 @@ func (c *Container) prepare() error {
|
|||
// createErr is guaranteed non-nil, so print
|
||||
// unconditionally
|
||||
logrus.Errorf("Preparing container %s: %v", c.ID(), createErr)
|
||||
createErr = errors.Wrapf(err, "error unmounting storage for container %s after network create failure", c.ID())
|
||||
createErr = fmt.Errorf("error unmounting storage for container %s after network create failure: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -161,7 +161,7 @@ func (c *Container) prepare() error {
|
|||
if createErr != nil {
|
||||
if err := c.cleanupNetwork(); err != nil {
|
||||
logrus.Errorf("Preparing container %s: %v", c.ID(), createErr)
|
||||
createErr = errors.Wrapf(err, "error cleaning up container %s network after setup failure", c.ID())
|
||||
createErr = fmt.Errorf("error cleaning up container %s network after setup failure: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -251,7 +251,7 @@ func (c *Container) resolveWorkDir() error {
 	st, err := os.Stat(resolvedWorkdir)
 	if err == nil {
 		if !st.IsDir() {
-			return errors.Errorf("workdir %q exists on container %s, but is not a directory", workdir, c.ID())
+			return fmt.Errorf("workdir %q exists on container %s, but is not a directory", workdir, c.ID())
 		}
 		return nil
 	}
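Note: not every converted call gains a %w. Where the old errors.Errorf built a brand-new error with no underlying cause, the replacement is a plain fmt.Errorf without %w. A rough sketch of the rule of thumb (function and arguments invented for illustration):

package main

import (
    "fmt"
    "os"
)

func checkWorkdir(workdir, ctrID string) error {
    st, err := os.Stat(workdir)
    if err != nil {
        // A real cause exists, so wrap it with %w.
        return fmt.Errorf("error detecting workdir %q on container %s: %w", workdir, ctrID, err)
    }
    if !st.IsDir() {
        // Leaf error with formatting but no cause: fmt.Errorf without %w.
        return fmt.Errorf("workdir %q exists on container %s, but is not a directory", workdir, ctrID)
    }
    return nil
}

func main() {
    fmt.Println(checkWorkdir("/etc/hosts", "abc123"))
}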
@ -265,11 +265,11 @@ func (c *Container) resolveWorkDir() error {
|
|||
if c.isWorkDirSymlink(resolvedWorkdir) {
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("workdir %q does not exist on container %s", workdir, c.ID())
|
||||
return fmt.Errorf("workdir %q does not exist on container %s", workdir, c.ID())
|
||||
}
|
||||
// This might be a serious error (e.g., permission), so
|
||||
// we need to return the full error.
|
||||
return errors.Wrapf(err, "error detecting workdir %q on container %s", workdir, c.ID())
|
||||
return fmt.Errorf("error detecting workdir %q on container %s: %w", workdir, c.ID(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -277,16 +277,16 @@ func (c *Container) resolveWorkDir() error {
|
|||
if os.IsExist(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "error creating container %s workdir", c.ID())
|
||||
return fmt.Errorf("error creating container %s workdir: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
// Ensure container entrypoint is created (if required).
|
||||
uid, gid, _, err := chrootuser.GetUser(c.state.Mountpoint, c.User())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error looking up %s inside of the container %s", c.User(), c.ID())
|
||||
return fmt.Errorf("error looking up %s inside of the container %s: %w", c.User(), c.ID(), err)
|
||||
}
|
||||
if err := os.Chown(resolvedWorkdir, int(uid), int(gid)); err != nil {
|
||||
return errors.Wrapf(err, "error chowning container %s workdir to container root", c.ID())
|
||||
return fmt.Errorf("error chowning container %s workdir to container root: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -485,7 +485,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
for _, namedVol := range c.config.NamedVolumes {
|
||||
volume, err := c.runtime.GetVolume(namedVol.Name)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error retrieving volume %s to add to container %s", namedVol.Name, c.ID())
|
||||
return nil, fmt.Errorf("error retrieving volume %s to add to container %s: %w", namedVol.Name, c.ID(), err)
|
||||
}
|
||||
mountPoint, err := volume.MountPoint()
|
||||
if err != nil {
|
||||
|
|
@ -522,7 +522,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
|
||||
overlayMount, err = overlay.MountWithOptions(contentDir, mountPoint, namedVol.Dest, overlayOpts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "mounting overlay failed %q", mountPoint)
|
||||
return nil, fmt.Errorf("mounting overlay failed %q: %w", mountPoint, err)
|
||||
}
|
||||
|
||||
for _, o := range namedVol.Options {
|
||||
|
|
@ -622,7 +622,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
|
||||
overlayMount, err := overlay.MountWithOptions(contentDir, overlayVol.Source, overlayVol.Dest, overlayOpts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "mounting overlay failed %q", overlayVol.Source)
|
||||
return nil, fmt.Errorf("mounting overlay failed %q: %w", overlayVol.Source, err)
|
||||
}
|
||||
|
||||
// Check overlay volume options
|
||||
|
|
@ -646,16 +646,16 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
// Mount the specified image.
|
||||
img, _, err := c.runtime.LibimageRuntime().LookupImage(volume.Source, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating image volume %q:%q", volume.Source, volume.Dest)
|
||||
return nil, fmt.Errorf("error creating image volume %q:%q: %w", volume.Source, volume.Dest, err)
|
||||
}
|
||||
mountPoint, err := img.Mount(ctx, nil, "")
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error mounting image volume %q:%q", volume.Source, volume.Dest)
|
||||
return nil, fmt.Errorf("error mounting image volume %q:%q: %w", volume.Source, volume.Dest, err)
|
||||
}
|
||||
|
||||
contentDir, err := overlay.TempDir(c.config.StaticDir, c.RootUID(), c.RootGID())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create TempDir in the %s directory", c.config.StaticDir)
|
||||
return nil, fmt.Errorf("failed to create TempDir in the %s directory: %w", c.config.StaticDir, err)
|
||||
}
|
||||
|
||||
var overlayMount spec.Mount
|
||||
|
|
@ -665,7 +665,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
overlayMount, err = overlay.MountReadOnly(contentDir, mountPoint, volume.Dest, c.RootUID(), c.RootGID(), c.runtime.store.GraphOptions())
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "creating overlay mount for image %q failed", volume.Source)
|
||||
return nil, fmt.Errorf("creating overlay mount for image %q failed: %w", volume.Source, err)
|
||||
}
|
||||
g.AddMount(overlayMount)
|
||||
}
|
||||
|
|
@ -690,7 +690,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
if c.config.Umask != "" {
|
||||
decVal, err := strconv.ParseUint(c.config.Umask, 8, 32)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Invalid Umask Value")
|
||||
return nil, fmt.Errorf("invalid Umask Value: %w", err)
|
||||
}
|
||||
umask := uint32(decVal)
|
||||
g.Config.Process.User.Umask = &umask
|
||||
|
|
@ -700,7 +700,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
if len(c.config.Groups) > 0 {
|
||||
gids, err := lookup.GetContainerGroups(c.config.Groups, c.state.Mountpoint, overrides)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s", c.ID())
|
||||
return nil, fmt.Errorf("error looking up supplemental groups for container %s: %w", c.ID(), err)
|
||||
}
|
||||
for _, gid := range gids {
|
||||
g.AddProcessAdditionalGid(gid)
|
||||
|
|
@ -709,7 +709,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
|
||||
if c.Systemd() {
|
||||
if err := c.setupSystemd(g.Mounts(), g); err != nil {
|
||||
return nil, errors.Wrapf(err, "error adding systemd-specific mounts")
|
||||
return nil, fmt.Errorf("error adding systemd-specific mounts: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -725,7 +725,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
// Check whether the current user namespace has enough gids available.
|
||||
availableGids, err := rootless.GetAvailableGids()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot read number of available GIDs")
|
||||
return nil, fmt.Errorf("cannot read number of available GIDs: %w", err)
|
||||
}
|
||||
gidMappings = []idtools.IDMap{{
|
||||
ContainerID: 0,
|
||||
|
|
@ -881,7 +881,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
}
|
||||
_, err := registry.InjectDevices(g.Config, c.config.CDIDevices...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error setting up CDI devices")
|
||||
return nil, fmt.Errorf("error setting up CDI devices: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -905,7 +905,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
if m.Type == "tmpfs" {
|
||||
finalPath, err := securejoin.SecureJoin(c.state.Mountpoint, m.Destination)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error resolving symlinks for mount destination %s", m.Destination)
|
||||
return nil, fmt.Errorf("error resolving symlinks for mount destination %s: %w", m.Destination, err)
|
||||
}
|
||||
trimmedPath := strings.TrimPrefix(finalPath, strings.TrimSuffix(c.state.Mountpoint, "/"))
|
||||
m.Destination = trimmedPath
|
||||
|
|
@ -934,7 +934,7 @@ func (c *Container) generateSpec(ctx context.Context) (*spec.Spec, error) {
|
|||
|
||||
// Warning: precreate hooks may alter g.Config in place.
|
||||
if c.state.ExtensionStageHooks, err = c.setupOCIHooks(ctx, g.Config); err != nil {
|
||||
return nil, errors.Wrapf(err, "error setting up OCI Hooks")
|
||||
return nil, fmt.Errorf("error setting up OCI Hooks: %w", err)
|
||||
}
|
||||
if len(c.config.EnvSecrets) > 0 {
|
||||
manager, err := c.runtime.SecretsManager()
|
||||
|
|
@ -986,11 +986,11 @@ func (c *Container) mountNotifySocket(g generate.Generator) error {
|
|||
logrus.Debugf("Checking notify %q dir", notifyDir)
|
||||
if err := os.MkdirAll(notifyDir, 0755); err != nil {
|
||||
if !os.IsExist(err) {
|
||||
return errors.Wrapf(err, "unable to create notify %q dir", notifyDir)
|
||||
return fmt.Errorf("unable to create notify %q dir: %w", notifyDir, err)
|
||||
}
|
||||
}
|
||||
if err := label.Relabel(notifyDir, c.MountLabel(), true); err != nil {
|
||||
return errors.Wrapf(err, "relabel failed %q", notifyDir)
|
||||
return fmt.Errorf("relabel failed %q: %w", notifyDir, err)
|
||||
}
|
||||
logrus.Debugf("Add bindmount notify %q dir", notifyDir)
|
||||
if _, ok := c.state.BindMounts["/run/notify"]; !ok {
|
||||
|
|
@ -1113,7 +1113,7 @@ func (c *Container) setupSystemd(mounts []spec.Mount, g generate.Generator) erro
|
|||
func (c *Container) addNamespaceContainer(g *generate.Generator, ns LinuxNS, ctr string, specNS spec.LinuxNamespaceType) error {
|
||||
nsCtr, err := c.runtime.state.Container(ctr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error retrieving dependency %s of container %s from state", ctr, c.ID())
|
||||
return fmt.Errorf("error retrieving dependency %s of container %s from state: %w", ctr, c.ID(), err)
|
||||
}
|
||||
|
||||
if specNS == spec.UTSNamespace {
|
||||
|
|
@ -1200,7 +1200,7 @@ func (c *Container) createCheckpointImage(ctx context.Context, options Container
|
|||
// Create storage reference
|
||||
imageRef, err := is.Transport.ParseStoreReference(c.runtime.store, options.CreateImage)
|
||||
if err != nil {
|
||||
return errors.Errorf("Failed to parse image name")
|
||||
return errors.New("failed to parse image name")
|
||||
}
|
||||
|
||||
// Build an image scratch
|
||||
|
|
@ -1264,23 +1264,23 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
|
|||
// Check if the dependency is an infra container. If it is we can checkpoint
|
||||
// the container out of the Pod.
|
||||
if c.config.Pod == "" {
|
||||
return errors.Errorf("cannot export checkpoints of containers with dependencies")
|
||||
return errors.New("cannot export checkpoints of containers with dependencies")
|
||||
}
|
||||
|
||||
pod, err := c.runtime.state.Pod(c.config.Pod)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "container %s is in pod %s, but pod cannot be retrieved", c.ID(), c.config.Pod)
|
||||
return fmt.Errorf("container %s is in pod %s, but pod cannot be retrieved: %w", c.ID(), c.config.Pod, err)
|
||||
}
|
||||
infraID, err := pod.InfraContainerID()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot retrieve infra container ID for pod %s", c.config.Pod)
|
||||
return fmt.Errorf("cannot retrieve infra container ID for pod %s: %w", c.config.Pod, err)
|
||||
}
|
||||
if c.Dependencies()[0] != infraID {
|
||||
return errors.Errorf("cannot export checkpoints of containers with dependencies")
|
||||
return errors.New("cannot export checkpoints of containers with dependencies")
|
||||
}
|
||||
}
|
||||
if len(c.Dependencies()) > 1 {
|
||||
return errors.Errorf("cannot export checkpoints of containers with dependencies")
|
||||
return errors.New("cannot export checkpoints of containers with dependencies")
|
||||
}
|
||||
logrus.Debugf("Exporting checkpoint image of container %q to %q", c.ID(), options.TargetFile)
|
||||
|
||||
|
|
@ -1308,7 +1308,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
|
|||
// To correctly track deleted files, let's go through the output of 'podman diff'
|
||||
rootFsChanges, err := c.runtime.GetDiff("", c.ID(), define.DiffContainer)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error exporting root file-system diff for %q", c.ID())
|
||||
return fmt.Errorf("error exporting root file-system diff for %q: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
addToTarFiles, err := crutils.CRCreateRootFsDiffTar(&rootFsChanges, c.state.Mountpoint, c.bundlePath())
|
||||
|
|
@ -1325,7 +1325,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
|
|||
// Create an archive for each volume associated with the container
|
||||
if !options.IgnoreVolumes {
|
||||
if err := os.MkdirAll(expVolDir, 0700); err != nil {
|
||||
return errors.Wrapf(err, "error creating volumes export directory %q", expVolDir)
|
||||
return fmt.Errorf("error creating volumes export directory %q: %w", expVolDir, err)
|
||||
}
|
||||
|
||||
for _, v := range c.config.NamedVolumes {
|
||||
|
|
@ -1334,7 +1334,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
|
|||
|
||||
volumeTarFile, err := os.Create(volumeTarFileFullPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating %q", volumeTarFileFullPath)
|
||||
return fmt.Errorf("error creating %q: %w", volumeTarFileFullPath, err)
|
||||
}
|
||||
|
||||
volume, err := c.runtime.GetVolume(v.Name)
|
||||
|
|
@ -1347,7 +1347,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
|
|||
return err
|
||||
}
|
||||
if mp == "" {
|
||||
return errors.Wrapf(define.ErrInternal, "volume %s is not mounted, cannot export", volume.Name())
|
||||
return fmt.Errorf("volume %s is not mounted, cannot export: %w", volume.Name(), define.ErrInternal)
|
||||
}
|
||||
|
||||
input, err := archive.TarWithOptions(mp, &archive.TarOptions{
|
||||
|
|
@ -1355,7 +1355,7 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
|
|||
IncludeSourceDir: true,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading volume directory %q", v.Dest)
|
||||
return fmt.Errorf("error reading volume directory %q: %w", v.Dest, err)
|
||||
}
|
||||
|
||||
_, err = io.Copy(volumeTarFile, input)
|
||||
|
|
@ -1375,12 +1375,12 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
|
|||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading checkpoint directory %q", c.ID())
|
||||
return fmt.Errorf("error reading checkpoint directory %q: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
outFile, err := os.Create(options.TargetFile)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating checkpoint export file %q", options.TargetFile)
|
||||
return fmt.Errorf("error creating checkpoint export file %q: %w", options.TargetFile, err)
|
||||
}
|
||||
defer outFile.Close()
|
||||
|
||||
|
|
@@ -1406,10 +1406,10 @@ func (c *Container) exportCheckpoint(options ContainerCheckpointOptions) error {
 
 func (c *Container) checkpointRestoreSupported(version int) error {
 	if !criu.CheckForCriu(version) {
-		return errors.Errorf("checkpoint/restore requires at least CRIU %d", version)
+		return fmt.Errorf("checkpoint/restore requires at least CRIU %d", version)
 	}
 	if !c.ociRuntime.SupportsCheckpoint() {
-		return errors.Errorf("configured runtime does not support checkpoint/restore")
+		return errors.New("configured runtime does not support checkpoint/restore")
 	}
 	return nil
 }
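Note: where the old errors.Errorf carried a fixed message with no format arguments, the commit prefers errors.New, and messages are lowercased to follow the Go convention for error strings. An illustrative sketch (the CRIU version constant and flags are made up):

package main

import (
    "errors"
    "fmt"
)

const minCRIU = 31 // illustrative value, not from the commit

func checkpointSupported(haveCRIU, runtimeOK bool) error {
    if !haveCRIU {
        // Formatting needed but no cause to wrap: fmt.Errorf without %w,
        // message starting lowercase.
        return fmt.Errorf("checkpoint/restore requires at least CRIU %d", minCRIU)
    }
    if !runtimeOK {
        // Fixed message, no formatting: errors.New is enough.
        return errors.New("configured runtime does not support checkpoint/restore")
    }
    return nil
}

func main() {
    fmt.Println(checkpointSupported(false, true))
    fmt.Println(checkpointSupported(true, false))
}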
@ -1420,11 +1420,11 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
|
|||
}
|
||||
|
||||
if c.state.State != define.ContainerStateRunning {
|
||||
return nil, 0, errors.Wrapf(define.ErrCtrStateInvalid, "%q is not running, cannot checkpoint", c.state.State)
|
||||
return nil, 0, fmt.Errorf("%q is not running, cannot checkpoint: %w", c.state.State, define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
if c.AutoRemove() && options.TargetFile == "" {
|
||||
return nil, 0, errors.Errorf("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
|
||||
return nil, 0, errors.New("cannot checkpoint containers that have been started with '--rm' unless '--export' is used")
|
||||
}
|
||||
|
||||
if err := c.resolveCheckpointImageName(&options); err != nil {
|
||||
|
|
@ -1517,12 +1517,12 @@ func (c *Container) checkpoint(ctx context.Context, options ContainerCheckpointO
|
|||
}
|
||||
statsDirectory, err := os.Open(c.bundlePath())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Not able to open %q", c.bundlePath())
|
||||
return nil, fmt.Errorf("not able to open %q: %w", c.bundlePath(), err)
|
||||
}
|
||||
|
||||
dumpStatistics, err := stats.CriuGetDumpStats(statsDirectory)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Displaying checkpointing statistics not possible")
|
||||
return nil, fmt.Errorf("displaying checkpointing statistics not possible: %w", err)
|
||||
}
|
||||
|
||||
return &define.CRIUCheckpointRestoreStatistics{
|
||||
|
|
@ -1568,7 +1568,7 @@ func (c *Container) generateContainerSpec() error {
|
|||
g := generate.NewFromSpec(c.config.Spec)
|
||||
|
||||
if err := c.saveSpec(g.Config); err != nil {
|
||||
return errors.Wrap(err, "saving imported container specification for restore failed")
|
||||
return fmt.Errorf("saving imported container specification for restore failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -1626,14 +1626,14 @@ func (c *Container) importCheckpointTar(input string) error {
|
|||
func (c *Container) importPreCheckpoint(input string) error {
|
||||
archiveFile, err := os.Open(input)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to open pre-checkpoint archive for import")
|
||||
return fmt.Errorf("failed to open pre-checkpoint archive for import: %w", err)
|
||||
}
|
||||
|
||||
defer archiveFile.Close()
|
||||
|
||||
err = archive.Untar(archiveFile, c.bundlePath(), nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Unpacking of pre-checkpoint archive %s failed", input)
|
||||
return fmt.Errorf("unpacking of pre-checkpoint archive %s failed: %w", input, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1650,11 +1650,11 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
}
|
||||
|
||||
if options.Pod != "" && !crutils.CRRuntimeSupportsPodCheckpointRestore(c.ociRuntime.Path()) {
|
||||
return nil, 0, errors.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path())
|
||||
return nil, 0, fmt.Errorf("runtime %s does not support pod restore", c.ociRuntime.Path())
|
||||
}
|
||||
|
||||
if !c.ensureState(define.ContainerStateConfigured, define.ContainerStateExited) {
|
||||
return nil, 0, errors.Wrapf(define.ErrCtrStateInvalid, "container %s is running or paused, cannot restore", c.ID())
|
||||
return nil, 0, fmt.Errorf("container %s is running or paused, cannot restore: %w", c.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
|
||||
if options.ImportPrevious != "" {
|
||||
|
|
@ -1676,7 +1676,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
// Let's try to stat() CRIU's inventory file. If it does not exist, it makes
|
||||
// no sense to try a restore. This is a minimal check if a checkpoint exist.
|
||||
if _, err := os.Stat(filepath.Join(c.CheckpointPath(), "inventory.img")); os.IsNotExist(err) {
|
||||
return nil, 0, errors.Wrapf(err, "a complete checkpoint for this container cannot be found, cannot restore")
|
||||
return nil, 0, fmt.Errorf("a complete checkpoint for this container cannot be found, cannot restore: %w", err)
|
||||
}
|
||||
|
||||
if err := crutils.CRCreateFileWithLabel(c.bundlePath(), "restore.log", c.MountLabel()); err != nil {
|
||||
|
|
@ -1774,23 +1774,23 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
// the ones from the infrastructure container.
|
||||
pod, err := c.runtime.LookupPod(options.Pod)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "pod %q cannot be retrieved", options.Pod)
|
||||
return nil, 0, fmt.Errorf("pod %q cannot be retrieved: %w", options.Pod, err)
|
||||
}
|
||||
|
||||
infraContainer, err := pod.InfraContainer()
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "cannot retrieved infra container from pod %q", options.Pod)
|
||||
return nil, 0, fmt.Errorf("cannot retrieved infra container from pod %q: %w", options.Pod, err)
|
||||
}
|
||||
|
||||
infraContainer.lock.Lock()
|
||||
if err := infraContainer.syncContainer(); err != nil {
|
||||
infraContainer.lock.Unlock()
|
||||
return nil, 0, errors.Wrapf(err, "Error syncing infrastructure container %s status", infraContainer.ID())
|
||||
return nil, 0, fmt.Errorf("error syncing infrastructure container %s status: %w", infraContainer.ID(), err)
|
||||
}
|
||||
if infraContainer.state.State != define.ContainerStateRunning {
|
||||
if err := infraContainer.initAndStart(ctx); err != nil {
|
||||
infraContainer.lock.Unlock()
|
||||
return nil, 0, errors.Wrapf(err, "Error starting infrastructure container %s status", infraContainer.ID())
|
||||
return nil, 0, fmt.Errorf("error starting infrastructure container %s status: %w", infraContainer.ID(), err)
|
||||
}
|
||||
}
|
||||
infraContainer.lock.Unlock()
|
||||
|
|
@ -1798,7 +1798,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
if c.config.IPCNsCtr != "" {
|
||||
nsPath, err := infraContainer.namespacePath(IPCNS)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "cannot retrieve IPC namespace path for Pod %q", options.Pod)
|
||||
return nil, 0, fmt.Errorf("cannot retrieve IPC namespace path for Pod %q: %w", options.Pod, err)
|
||||
}
|
||||
if err := g.AddOrReplaceLinuxNamespace(string(spec.IPCNamespace), nsPath); err != nil {
|
||||
return nil, 0, err
|
||||
|
|
@ -1808,7 +1808,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
if c.config.NetNsCtr != "" {
|
||||
nsPath, err := infraContainer.namespacePath(NetNS)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "cannot retrieve network namespace path for Pod %q", options.Pod)
|
||||
return nil, 0, fmt.Errorf("cannot retrieve network namespace path for Pod %q: %w", options.Pod, err)
|
||||
}
|
||||
if err := g.AddOrReplaceLinuxNamespace(string(spec.NetworkNamespace), nsPath); err != nil {
|
||||
return nil, 0, err
|
||||
|
|
@ -1818,7 +1818,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
if c.config.PIDNsCtr != "" {
|
||||
nsPath, err := infraContainer.namespacePath(PIDNS)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "cannot retrieve PID namespace path for Pod %q", options.Pod)
|
||||
return nil, 0, fmt.Errorf("cannot retrieve PID namespace path for Pod %q: %w", options.Pod, err)
|
||||
}
|
||||
if err := g.AddOrReplaceLinuxNamespace(string(spec.PIDNamespace), nsPath); err != nil {
|
||||
return nil, 0, err
|
||||
|
|
@ -1828,7 +1828,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
if c.config.UTSNsCtr != "" {
|
||||
nsPath, err := infraContainer.namespacePath(UTSNS)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "cannot retrieve UTS namespace path for Pod %q", options.Pod)
|
||||
return nil, 0, fmt.Errorf("cannot retrieve UTS namespace path for Pod %q: %w", options.Pod, err)
|
||||
}
|
||||
if err := g.AddOrReplaceLinuxNamespace(string(spec.UTSNamespace), nsPath); err != nil {
|
||||
return nil, 0, err
|
||||
|
|
@ -1838,7 +1838,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
if c.config.CgroupNsCtr != "" {
|
||||
nsPath, err := infraContainer.namespacePath(CgroupNS)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "cannot retrieve Cgroup namespace path for Pod %q", options.Pod)
|
||||
return nil, 0, fmt.Errorf("cannot retrieve Cgroup namespace path for Pod %q: %w", options.Pod, err)
|
||||
}
|
||||
if err := g.AddOrReplaceLinuxNamespace(string(spec.CgroupNamespace), nsPath); err != nil {
|
||||
return nil, 0, err
|
||||
|
|
@ -1906,13 +1906,13 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
|
||||
volumeFile, err := os.Open(volumeFilePath)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed to open volume file %s", volumeFilePath)
|
||||
return nil, 0, fmt.Errorf("failed to open volume file %s: %w", volumeFilePath, err)
|
||||
}
|
||||
defer volumeFile.Close()
|
||||
|
||||
volume, err := c.runtime.GetVolume(v.Name)
|
||||
if err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "failed to retrieve volume %s", v.Name)
|
||||
return nil, 0, fmt.Errorf("failed to retrieve volume %s: %w", v.Name, err)
|
||||
}
|
||||
|
||||
mountPoint, err := volume.MountPoint()
|
||||
|
|
@ -1920,10 +1920,10 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
return nil, 0, err
|
||||
}
|
||||
if mountPoint == "" {
|
||||
return nil, 0, errors.Wrapf(err, "unable to import volume %s as it is not mounted", volume.Name())
|
||||
return nil, 0, fmt.Errorf("unable to import volume %s as it is not mounted: %w", volume.Name(), err)
|
||||
}
|
||||
if err := archive.UntarUncompressed(volumeFile, mountPoint, nil); err != nil {
|
||||
return nil, 0, errors.Wrapf(err, "Failed to extract volume %s to %s", volumeFilePath, mountPoint)
|
||||
return nil, 0, fmt.Errorf("failed to extract volume %s to %s: %w", volumeFilePath, mountPoint, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1950,12 +1950,12 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti
|
|||
}
|
||||
statsDirectory, err := os.Open(c.bundlePath())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Not able to open %q", c.bundlePath())
|
||||
return nil, fmt.Errorf("not able to open %q: %w", c.bundlePath(), err)
|
||||
}
|
||||
|
||||
restoreStatistics, err := stats.CriuGetRestoreStats(statsDirectory)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Displaying restore statistics not possible")
|
||||
return nil, fmt.Errorf("displaying restore statistics not possible: %w", err)
|
||||
}
|
||||
|
||||
return &define.CRIUCheckpointRestoreStatistics{
|
||||
|
|
@ -2033,7 +2033,7 @@ func (c *Container) getRootNetNsDepCtr() (depCtr *Container, err error) {
|
|||
|
||||
depCtr, err = c.runtime.state.Container(nextCtr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error fetching dependency %s of container %s", c.config.NetNsCtr, c.ID())
|
||||
return nil, fmt.Errorf("error fetching dependency %s of container %s: %w", c.config.NetNsCtr, c.ID(), err)
|
||||
}
|
||||
// This should never happen without an error
|
||||
if depCtr == nil {
|
||||
|
|
@ -2062,7 +2062,7 @@ func (c *Container) mountIntoRootDirs(mountName string, mountPath string) error
|
|||
// Make standard bind mounts to include in the container
|
||||
func (c *Container) makeBindMounts() error {
|
||||
if err := os.Chown(c.state.RunDir, c.RootUID(), c.RootGID()); err != nil {
|
||||
return errors.Wrap(err, "cannot chown run directory")
|
||||
return fmt.Errorf("cannot chown run directory: %w", err)
|
||||
}
|
||||
|
||||
if c.state.BindMounts == nil {
|
||||
|
|
@ -2080,13 +2080,13 @@ func (c *Container) makeBindMounts() error {
|
|||
if c.config.NetNsCtr == "" {
|
||||
if resolvePath, ok := c.state.BindMounts["/etc/resolv.conf"]; ok {
|
||||
if err := os.Remove(resolvePath); err != nil && !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "container %s", c.ID())
|
||||
return fmt.Errorf("container %s: %w", c.ID(), err)
|
||||
}
|
||||
delete(c.state.BindMounts, "/etc/resolv.conf")
|
||||
}
|
||||
if hostsPath, ok := c.state.BindMounts["/etc/hosts"]; ok {
|
||||
if err := os.Remove(hostsPath); err != nil && !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "container %s", c.ID())
|
||||
return fmt.Errorf("container %s: %w", c.ID(), err)
|
||||
}
|
||||
delete(c.state.BindMounts, "/etc/hosts")
|
||||
}
|
||||
|
|
@ -2099,13 +2099,13 @@ func (c *Container) makeBindMounts() error {
|
|||
// them.
|
||||
depCtr, err := c.getRootNetNsDepCtr()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error fetching network namespace dependency container for container %s", c.ID())
|
||||
return fmt.Errorf("error fetching network namespace dependency container for container %s: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
// We need that container's bind mounts
|
||||
bindMounts, err := depCtr.BindMounts()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error fetching bind mounts from dependency %s of container %s", depCtr.ID(), c.ID())
|
||||
return fmt.Errorf("error fetching bind mounts from dependency %s of container %s: %w", depCtr.ID(), c.ID(), err)
|
||||
}
|
||||
|
||||
// The other container may not have a resolv.conf or /etc/hosts
|
||||
|
|
@ -2115,7 +2115,7 @@ func (c *Container) makeBindMounts() error {
|
|||
err := c.mountIntoRootDirs("/etc/resolv.conf", resolvPath)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error assigning mounts to container %s", c.ID())
|
||||
return fmt.Errorf("error assigning mounts to container %s: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2135,13 +2135,13 @@ func (c *Container) makeBindMounts() error {
|
|||
err = etchosts.Add(hostsPath, getLocalhostHostEntry(c))
|
||||
lock.Unlock()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating hosts file for container %s which depends on container %s", c.ID(), depCtr.ID())
|
||||
return fmt.Errorf("error creating hosts file for container %s which depends on container %s: %w", c.ID(), depCtr.ID(), err)
|
||||
}
|
||||
|
||||
// finally, save it in the new container
|
||||
err = c.mountIntoRootDirs(config.DefaultHostsFile, hostsPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error assigning mounts to container %s", c.ID())
|
||||
return fmt.Errorf("error assigning mounts to container %s: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2156,13 +2156,13 @@ func (c *Container) makeBindMounts() error {
|
|||
} else {
|
||||
if !c.config.UseImageResolvConf {
|
||||
if err := c.generateResolvConf(); err != nil {
|
||||
return errors.Wrapf(err, "error creating resolv.conf for container %s", c.ID())
|
||||
return fmt.Errorf("error creating resolv.conf for container %s: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
if !c.config.UseImageHosts {
|
||||
if err := c.createHosts(); err != nil {
|
||||
return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
|
||||
return fmt.Errorf("error creating hosts file for container %s: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -2180,7 +2180,7 @@ func (c *Container) makeBindMounts() error {
|
|||
}
|
||||
} else if !c.config.UseImageHosts && c.state.BindMounts["/etc/hosts"] == "" {
|
||||
if err := c.createHosts(); err != nil {
|
||||
return errors.Wrapf(err, "error creating hosts file for container %s", c.ID())
|
||||
return fmt.Errorf("error creating hosts file for container %s: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2192,7 +2192,7 @@ func (c *Container) makeBindMounts() error {
|
|||
if c.config.Passwd == nil || *c.config.Passwd {
|
||||
newPasswd, newGroup, err := c.generatePasswdAndGroup()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating temporary passwd file for container %s", c.ID())
|
||||
return fmt.Errorf("error creating temporary passwd file for container %s: %w", c.ID(), err)
|
||||
}
|
||||
if newPasswd != "" {
|
||||
// Make /etc/passwd
|
||||
|
|
@ -2213,7 +2213,7 @@ func (c *Container) makeBindMounts() error {
|
|||
if _, ok := c.state.BindMounts["/etc/hostname"]; !ok {
|
||||
hostnamePath, err := c.writeStringToRundir("hostname", c.Hostname())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating hostname file for container %s", c.ID())
|
||||
return fmt.Errorf("error creating hostname file for container %s: %w", c.ID(), err)
|
||||
}
|
||||
c.state.BindMounts["/etc/hostname"] = hostnamePath
|
||||
}
|
||||
|
|
@ -2225,7 +2225,7 @@ func (c *Container) makeBindMounts() error {
|
|||
if ctrTimezone != "local" {
|
||||
_, err = time.LoadLocation(ctrTimezone)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error finding timezone for container %s", c.ID())
|
||||
return fmt.Errorf("error finding timezone for container %s: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
if _, ok := c.state.BindMounts["/etc/localtime"]; !ok {
|
||||
|
|
@ -2233,18 +2233,18 @@ func (c *Container) makeBindMounts() error {
|
|||
if ctrTimezone == "local" {
|
||||
zonePath, err = filepath.EvalSymlinks("/etc/localtime")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error finding local timezone for container %s", c.ID())
|
||||
return fmt.Errorf("error finding local timezone for container %s: %w", c.ID(), err)
|
||||
}
|
||||
} else {
|
||||
zone := filepath.Join("/usr/share/zoneinfo", ctrTimezone)
|
||||
zonePath, err = filepath.EvalSymlinks(zone)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error setting timezone for container %s", c.ID())
|
||||
return fmt.Errorf("error setting timezone for container %s: %w", c.ID(), err)
|
||||
}
|
||||
}
|
||||
localtimePath, err := c.copyTimezoneFile(zonePath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error setting timezone for container %s", c.ID())
|
||||
return fmt.Errorf("error setting timezone for container %s: %w", c.ID(), err)
|
||||
}
|
||||
c.state.BindMounts["/etc/localtime"] = localtimePath
|
||||
}
|
||||
|
|
@ -2282,7 +2282,7 @@ rootless=%d
|
|||
}
|
||||
containerenvPath, err := c.writeStringToRundir(".containerenv", containerenv)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating containerenv file for container %s", c.ID())
|
||||
return fmt.Errorf("error creating containerenv file for container %s: %w", c.ID(), err)
|
||||
}
|
||||
c.state.BindMounts["/run/.containerenv"] = containerenvPath
|
||||
}
|
||||
|
|
@ -2303,7 +2303,7 @@ rootless=%d
|
|||
if len(c.Secrets()) > 0 {
|
||||
// create /run/secrets if subscriptions did not create
|
||||
if err := c.createSecretMountDir(); err != nil {
|
||||
return errors.Wrapf(err, "error creating secrets mount")
|
||||
return fmt.Errorf("error creating secrets mount: %w", err)
|
||||
}
|
||||
for _, secret := range c.Secrets() {
|
||||
secretFileName := secret.Name
|
||||
|
|
@ -2397,7 +2397,7 @@ func (c *Container) generateResolvConf() error {
|
|||
Path: destPath,
|
||||
Searches: search,
|
||||
}); err != nil {
|
||||
return errors.Wrapf(err, "error building resolv.conf for container %s", c.ID())
|
||||
return fmt.Errorf("error building resolv.conf for container %s: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
return c.bindMountRootFile(destPath, resolvconf.DefaultResolvConf)
|
||||
|
|
@ -2598,7 +2598,7 @@ func (c *Container) generateCurrentUserGroupEntry() (string, int, error) {
|
|||
|
||||
g, err := user.LookupGroupId(strconv.Itoa(gid))
|
||||
if err != nil {
|
||||
return "", 0, errors.Wrapf(err, "failed to get current group")
|
||||
return "", 0, fmt.Errorf("failed to get current group: %w", err)
|
||||
}
|
||||
|
||||
// Look up group name to see if it exists in the image.
|
||||
|
|
@ -2620,7 +2620,7 @@ func (c *Container) generateCurrentUserGroupEntry() (string, int, error) {
|
|||
if uid != 0 {
|
||||
u, err := user.LookupId(strconv.Itoa(uid))
|
||||
if err != nil {
|
||||
return "", 0, errors.Wrapf(err, "failed to get current user to make group entry")
|
||||
return "", 0, fmt.Errorf("failed to get current user to make group entry: %w", err)
|
||||
}
|
||||
username = u.Username
|
||||
}
|
||||
|
|
@ -2718,7 +2718,7 @@ func (c *Container) generateCurrentUserPasswdEntry() (string, int, int, error) {
|
|||
|
||||
u, err := user.LookupId(strconv.Itoa(uid))
|
||||
if err != nil {
|
||||
return "", 0, 0, errors.Wrapf(err, "failed to get current user")
|
||||
return "", 0, 0, fmt.Errorf("failed to get current user: %w", err)
|
||||
}
|
||||
pwd, err := c.userPasswdEntry(u)
|
||||
if err != nil {
|
||||
|
|
@ -2820,7 +2820,7 @@ func (c *Container) generateUserPasswdEntry(addedUID int) (string, error) {
|
|||
} else {
|
||||
group, err := lookup.GetGroup(c.state.Mountpoint, groupspec)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "unable to get gid %s from group file", groupspec)
|
||||
return "", fmt.Errorf("unable to get gid %s from group file: %w", groupspec, err)
|
||||
}
|
||||
gid = group.Gid
|
||||
}
|
||||
|
|
@ -2929,7 +2929,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
|
|||
logrus.Debugf("Making /etc/passwd for container %s", c.ID())
|
||||
originPasswdFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd")
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "error creating path to container %s /etc/passwd", c.ID())
|
||||
return "", "", fmt.Errorf("error creating path to container %s /etc/passwd: %w", c.ID(), err)
|
||||
}
|
||||
orig, err := ioutil.ReadFile(originPasswdFile)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
|
|
@ -2937,7 +2937,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
|
|||
}
|
||||
passwdFile, err := c.writeStringToStaticDir("passwd", string(orig)+passwdEntry)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "failed to create temporary passwd file")
|
||||
return "", "", fmt.Errorf("failed to create temporary passwd file: %w", err)
|
||||
}
|
||||
if err := os.Chmod(passwdFile, 0644); err != nil {
|
||||
return "", "", err
|
||||
|
|
@ -2947,17 +2947,17 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
|
|||
logrus.Debugf("Modifying container %s /etc/passwd", c.ID())
|
||||
containerPasswd, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/passwd")
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "error looking up location of container %s /etc/passwd", c.ID())
|
||||
return "", "", fmt.Errorf("error looking up location of container %s /etc/passwd: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(containerPasswd, os.O_APPEND|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "container %s", c.ID())
|
||||
return "", "", fmt.Errorf("container %s: %w", c.ID(), err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if _, err := f.WriteString(passwdEntry); err != nil {
|
||||
return "", "", errors.Wrapf(err, "unable to append to container %s /etc/passwd", c.ID())
|
||||
return "", "", fmt.Errorf("unable to append to container %s /etc/passwd: %w", c.ID(), err)
|
||||
}
|
||||
default:
|
||||
logrus.Debugf("Not modifying container %s /etc/passwd", c.ID())
|
||||
|
|
@ -2975,7 +2975,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
|
|||
logrus.Debugf("Making /etc/group for container %s", c.ID())
|
||||
originGroupFile, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group")
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "error creating path to container %s /etc/group", c.ID())
|
||||
return "", "", fmt.Errorf("error creating path to container %s /etc/group: %w", c.ID(), err)
|
||||
}
|
||||
orig, err := ioutil.ReadFile(originGroupFile)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
|
|
@ -2983,7 +2983,7 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
|
|||
}
|
||||
groupFile, err := c.writeStringToStaticDir("group", string(orig)+groupEntry)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "failed to create temporary group file")
|
||||
return "", "", fmt.Errorf("failed to create temporary group file: %w", err)
|
||||
}
|
||||
if err := os.Chmod(groupFile, 0644); err != nil {
|
||||
return "", "", err
|
||||
|
|
@ -2993,17 +2993,17 @@ func (c *Container) generatePasswdAndGroup() (string, string, error) {
|
|||
logrus.Debugf("Modifying container %s /etc/group", c.ID())
|
||||
containerGroup, err := securejoin.SecureJoin(c.state.Mountpoint, "/etc/group")
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "error looking up location of container %s /etc/group", c.ID())
|
||||
return "", "", fmt.Errorf("error looking up location of container %s /etc/group: %w", c.ID(), err)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(containerGroup, os.O_APPEND|os.O_WRONLY, 0600)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrapf(err, "container %s", c.ID())
|
||||
return "", "", fmt.Errorf("container %s: %w", c.ID(), err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if _, err := f.WriteString(groupEntry); err != nil {
|
||||
return "", "", errors.Wrapf(err, "unable to append to container %s /etc/group", c.ID())
|
||||
return "", "", fmt.Errorf("unable to append to container %s /etc/group: %w", c.ID(), err)
|
||||
}
|
||||
default:
|
||||
logrus.Debugf("Not modifying container %s /etc/group", c.ID())
|
||||
|
|
@ -3038,7 +3038,7 @@ func (c *Container) expectPodCgroup() (bool, error) {
|
|||
case cgroupManager == config.CgroupfsCgroupsManager:
|
||||
return !rootless.IsRootless(), nil
|
||||
default:
|
||||
return false, errors.Wrapf(define.ErrInvalidArg, "invalid cgroup mode %s requested for pods", cgroupManager)
|
||||
return false, fmt.Errorf("invalid cgroup mode %s requested for pods: %w", cgroupManager, define.ErrInvalidArg)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3075,7 +3075,7 @@ func (c *Container) getOCICgroupPath() (string, error) {
|
|||
logrus.Debugf("Setting Cgroup path for container %s to %s", c.ID(), cgroupPath)
|
||||
return cgroupPath, nil
|
||||
default:
|
||||
return "", errors.Wrapf(define.ErrInvalidArg, "invalid cgroup manager %s requested", cgroupManager)
|
||||
return "", fmt.Errorf("invalid cgroup manager %s requested: %w", cgroupManager, define.ErrInvalidArg)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -3086,7 +3086,7 @@ func (c *Container) copyTimezoneFile(zonePath string) (string, error) {
|
|||
return "", err
|
||||
}
|
||||
if file.IsDir() {
|
||||
return "", errors.New("Invalid timezone: is a directory")
|
||||
return "", errors.New("invalid timezone: is a directory")
|
||||
}
|
||||
src, err := os.Open(zonePath)
|
||||
if err != nil {
|
||||
|
|
@ -3120,14 +3120,14 @@ func (c *Container) cleanupOverlayMounts() error {
|
|||
func (c *Container) checkFileExistsInRootfs(file string) (bool, error) {
|
||||
checkPath, err := securejoin.SecureJoin(c.state.Mountpoint, file)
|
||||
if err != nil {
|
||||
return false, errors.Wrapf(err, "cannot create path to container %s file %q", c.ID(), file)
|
||||
return false, fmt.Errorf("cannot create path to container %s file %q: %w", c.ID(), file, err)
|
||||
}
|
||||
stat, err := os.Stat(checkPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrapf(err, "container %s", c.ID())
|
||||
return false, fmt.Errorf("container %s: %w", c.ID(), err)
|
||||
}
|
||||
if stat.IsDir() {
|
||||
return false, nil
|
||||
|
|
@ -3163,7 +3163,7 @@ func (c *Container) createSecretMountDir() error {
|
|||
func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
|
||||
vol, err := c.runtime.state.Volume(v.Name)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error retrieving named volume %s for container %s", v.Name, c.ID())
|
||||
return fmt.Errorf("error retrieving named volume %s for container %s: %w", v.Name, c.ID(), err)
|
||||
}
|
||||
|
||||
vol.lock.Lock()
|
||||
|
|
@ -3190,7 +3190,7 @@ func (c *Container) fixVolumePermissions(v *ContainerNamedVolume) error {
|
|||
mappings := idtools.NewIDMappingsFromMaps(c.config.IDMappings.UIDMap, c.config.IDMappings.GIDMap)
|
||||
newPair, err := mappings.ToHost(p)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error mapping user %d:%d", uid, gid)
|
||||
return fmt.Errorf("error mapping user %d:%d: %w", uid, gid, err)
|
||||
}
|
||||
uid = newPair.UID
|
||||
gid = newPair.GID
|
||||
|
|
@ -2,6 +2,7 @@ package libpod

import (
"context"
"errors"
"fmt"
"os"
"time"
@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/libpod/events"
"github.com/containers/podman/v4/libpod/logs"
"github.com/nxadm/tail/watch"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -35,9 +35,9 @@ func (r *Runtime) Log(ctx context.Context, containers []*Container, options *log
func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine, colorID int64) error {
switch c.LogDriver() {
case define.PassthroughLogging:
return errors.Wrapf(define.ErrNoLogs, "this container is using the 'passthrough' log driver, cannot read logs")
return fmt.Errorf("this container is using the 'passthrough' log driver, cannot read logs: %w", define.ErrNoLogs)
case define.NoLogging:
return errors.Wrapf(define.ErrNoLogs, "this container is using the 'none' log driver, cannot read logs")
return fmt.Errorf("this container is using the 'none' log driver, cannot read logs: %w", define.ErrNoLogs)
case define.JournaldLogging:
return c.readFromJournal(ctx, options, logChannel, colorID)
case define.JSONLogging:
@ -47,7 +47,7 @@ func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logCh
case define.KubernetesLogging, "":
return c.readFromLogFile(ctx, options, logChannel, colorID)
default:
return errors.Wrapf(define.ErrInternal, "unrecognized log driver %q, cannot read logs", c.LogDriver())
return fmt.Errorf("unrecognized log driver %q, cannot read logs: %w", c.LogDriver(), define.ErrInternal)
}
}

@ -55,10 +55,10 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
t, tailLog, err := logs.GetLogFile(c.LogPath(), options)
if err != nil {
// If the log file does not exist, this is not fatal.
if os.IsNotExist(errors.Cause(err)) {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return errors.Wrapf(err, "unable to read log file %s for %s ", c.ID(), c.LogPath())
return fmt.Errorf("unable to read log file %s for %s : %w", c.ID(), c.LogPath(), err)
}
options.WaitGroup.Add(1)
if len(tailLog) > 0 {
@ -103,7 +103,7 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption
// until EOF.
state, err := c.State()
if err != nil || state != define.ContainerStateRunning {
if err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
if err != nil && !errors.Is(err, define.ErrNoSuchCtr) {
logrus.Errorf("Getting container state: %v", err)
}
go func() {

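One behavioral detail in the readFromLogFile hunk above: os.IsNotExist(errors.Cause(err)) inspects a single unwrapped error, while errors.Is(err, os.ErrNotExist) walks the whole wrap chain, including layers added with fmt.Errorf and %w. A small sketch (the path is illustrative):

    package main

    import (
        "errors"
        "fmt"
        "os"
    )

    func openLog(path string) error {
        f, err := os.Open(path)
        if err != nil {
            // Wrapping with %w keeps the underlying *fs.PathError reachable.
            return fmt.Errorf("unable to read log file %s: %w", path, err)
        }
        defer f.Close()
        return nil
    }

    func main() {
        err := openLog("/nonexistent/ctr.log")
        fmt.Println(errors.Is(err, os.ErrNotExist)) // true: unwraps down to fs.ErrNotExist
        fmt.Println(os.IsNotExist(err))             // false: os.IsNotExist does not follow %w wrapping
    }
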
@ -5,6 +5,7 @@ package libpod

import (
"context"
"errors"
"fmt"
"strings"
"time"
@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/libpod/logs"
"github.com/coreos/go-systemd/v22/journal"
"github.com/coreos/go-systemd/v22/sdjournal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -49,7 +49,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
// We need the container's events in the same journal to guarantee
// consistency, see #10323.
if options.Follow && c.runtime.config.Engine.EventsLogger != "journald" {
return errors.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", c.runtime.config.Engine.EventsLogger)
return fmt.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", c.runtime.config.Engine.EventsLogger)
}

journal, err := sdjournal.NewJournal()
@ -63,21 +63,21 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
// Add the filters for events.
match := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
if err := journal.AddMatch(match.String()); err != nil {
return errors.Wrapf(err, "adding filter to journald logger: %v", match)
return fmt.Errorf("adding filter to journald logger: %v: %w", match, err)
}
match = sdjournal.Match{Field: "PODMAN_ID", Value: c.ID()}
if err := journal.AddMatch(match.String()); err != nil {
return errors.Wrapf(err, "adding filter to journald logger: %v", match)
return fmt.Errorf("adding filter to journald logger: %v: %w", match, err)
}

// Add the filter for logs. Note the disjunction so that we match
// either the events or the logs.
if err := journal.AddDisjunction(); err != nil {
return errors.Wrap(err, "adding filter disjunction to journald logger")
return fmt.Errorf("adding filter disjunction to journald logger: %w", err)
}
match = sdjournal.Match{Field: "CONTAINER_ID_FULL", Value: c.ID()}
if err := journal.AddMatch(match.String()); err != nil {
return errors.Wrapf(err, "adding filter to journald logger: %v", match)
return fmt.Errorf("adding filter to journald logger: %v: %w", match, err)
}

if err := journal.SeekHead(); err != nil {
@ -85,12 +85,12 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
}
// API requires Next() immediately after SeekHead().
if _, err := journal.Next(); err != nil {
return errors.Wrap(err, "next journal")
return fmt.Errorf("next journal: %w", err)
}

// API requires a next|prev before getting a cursor.
if _, err := journal.Previous(); err != nil {
return errors.Wrap(err, "previous journal")
return fmt.Errorf("previous journal: %w", err)
}

// Note that the initial cursor may not yet be ready, so we'll do an
@ -111,7 +111,7 @@ func (c *Container) readFromJournal(ctx context.Context, options *logs.LogOption
break
}
if cursorError != nil {
return errors.Wrap(cursorError, "initial journal cursor")
return fmt.Errorf("initial journal cursor: %w", cursorError)
}

options.WaitGroup.Add(1)
@ -255,7 +255,7 @@ func journalFormatterWithID(entry *sdjournal.JournalEntry) (string, error) {

id, ok := entry.Fields["CONTAINER_ID_FULL"]
if !ok {
return "", fmt.Errorf("no CONTAINER_ID_FULL field present in journal entry")
return "", errors.New("no CONTAINER_ID_FULL field present in journal entry")
}
if len(id) > 12 {
id = id[:12]
@ -290,7 +290,7 @@ func formatterPrefix(entry *sdjournal.JournalEntry) (string, error) {
output := fmt.Sprintf("%s ", tsString)
priority, ok := entry.Fields["PRIORITY"]
if !ok {
return "", errors.Errorf("no PRIORITY field present in journal entry")
return "", errors.New("no PRIORITY field present in journal entry")
}
switch priority {
case journaldLogOut:
@ -298,7 +298,7 @@ func formatterPrefix(entry *sdjournal.JournalEntry) (string, error) {
case journaldLogErr:
output += "stderr "
default:
return "", errors.Errorf("unexpected PRIORITY field in journal entry")
return "", errors.New("unexpected PRIORITY field in journal entry")
}

// if CONTAINER_PARTIAL_MESSAGE is defined, the log type is "P"
@ -315,7 +315,7 @@ func formatterMessage(entry *sdjournal.JournalEntry) (string, error) {
// Finally, append the message
msg, ok := entry.Fields["MESSAGE"]
if !ok {
return "", fmt.Errorf("no MESSAGE field present in journal entry")
return "", errors.New("no MESSAGE field present in journal entry")
}
msg = strings.TrimSuffix(msg, "\n")
return msg, nil

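Also visible in the journald hunks above: not every call site wraps. Where the old code used errors.Errorf and there is no underlying error for a caller to match, the replacement is a plain fmt.Errorf without %w. A hedged sketch of that distinction (the function name is made up for illustration):

    package main

    import (
        "errors"
        "fmt"
    )

    // followNotSupported builds a leaf error: there is no inner error to expose,
    // so a plain fmt.Errorf without %w is enough.
    func followNotSupported(backend string) error {
        return fmt.Errorf("using --follow with the journald --log-driver but without the journald --events-backend (%s) is not supported", backend)
    }

    func main() {
        err := followNotSupported("file")
        fmt.Println(err)
        fmt.Println(errors.Unwrap(err) == nil) // true: nothing was wrapped
    }
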
@ -5,16 +5,16 @@ package libpod

import (
"context"
"fmt"

"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/logs"
"github.com/pkg/errors"
)

func (c *Container) readFromJournal(_ context.Context, _ *logs.LogOptions, _ chan *logs.LogLine, colorID int64) error {
return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux")
return fmt.Errorf("journald logging only enabled with systemd on linux: %w", define.ErrOSNotSupported)
}

func (c *Container) initializeJournal(ctx context.Context) error {
return errors.Wrapf(define.ErrOSNotSupported, "Journald logging only enabled with systemd on linux")
return fmt.Errorf("journald logging only enabled with systemd on linux: %w", define.ErrOSNotSupported)
}

@ -1,12 +1,12 @@
package libpod

import (
"fmt"
"path/filepath"
"strings"

securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -65,7 +65,7 @@ func (c *Container) resolvePath(mountPoint string, containerPath string) (string
return "", "", err
}
if mountPoint == "" {
return "", "", errors.Errorf("volume %s is not mounted, cannot copy into it", volume.Name())
return "", "", fmt.Errorf("volume %s is not mounted, cannot copy into it", volume.Name())
}

// We found a matching volume for searchPath. We now

@ -4,6 +4,8 @@
package libpod

import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
@ -11,7 +13,6 @@ import (
"github.com/containers/buildah/copier"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/copy"
"github.com/pkg/errors"
)

// statInsideMount stats the specified path *inside* the container's mount and PID
@ -150,10 +151,10 @@ func secureStat(root string, path string) (*copier.StatForItem, error) {
}

if len(globStats) != 1 {
return nil, errors.Errorf("internal error: secureStat: expected 1 item but got %d", len(globStats))
return nil, fmt.Errorf("internal error: secureStat: expected 1 item but got %d", len(globStats))
}
if len(globStats) != 1 {
return nil, errors.Errorf("internal error: secureStat: expected 1 result but got %d", len(globStats[0].Results))
return nil, fmt.Errorf("internal error: secureStat: expected 1 result but got %d", len(globStats[0].Results))
}

// NOTE: the key in the map differ from `glob` when hitting symlink.
@ -167,7 +168,7 @@ func secureStat(root string, path string) (*copier.StatForItem, error) {
if stat.IsSymlink {
target, err := copier.Eval(root, path, copier.EvalOptions{})
if err != nil {
return nil, errors.Wrap(err, "error evaluating symlink in container")
return nil, fmt.Errorf("error evaluating symlink in container: %w", err)
}
// Need to make sure the symlink is relative to the root!
target = strings.TrimPrefix(target, root)

@ -5,6 +5,7 @@ package libpod

import (
"bufio"
"errors"
"fmt"
"os"
"strconv"
@ -14,7 +15,6 @@ import (
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/psgo"
"github.com/google/shlex"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -22,15 +22,15 @@ import (
// []string for output
func (c *Container) Top(descriptors []string) ([]string, error) {
if c.config.NoCgroups {
return nil, errors.Wrapf(define.ErrNoCgroups, "cannot run top on container %s as it did not create a cgroup", c.ID())
return nil, fmt.Errorf("cannot run top on container %s as it did not create a cgroup: %w", c.ID(), define.ErrNoCgroups)
}

conStat, err := c.State()
if err != nil {
return nil, errors.Wrapf(err, "unable to look up state for %s", c.ID())
return nil, fmt.Errorf("unable to look up state for %s: %w", c.ID(), err)
}
if conStat != define.ContainerStateRunning {
return nil, errors.Errorf("top can only be used on running containers")
return nil, errors.New("top can only be used on running containers")
}

// Also support comma-separated input.
@ -59,7 +59,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {
for _, d := range descriptors {
shSplit, err := shlex.Split(d)
if err != nil {
return nil, fmt.Errorf("parsing ps args: %v", err)
return nil, fmt.Errorf("parsing ps args: %w", err)
}
for _, s := range shSplit {
if s != "" {
@ -70,7 +70,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) {

output, err = c.execPS(psDescriptors)
if err != nil {
return nil, errors.Wrapf(err, "error executing ps(1) in the container")
return nil, fmt.Errorf("error executing ps(1) in the container: %w", err)
}

// Trick: filter the ps command from the output instead of
@ -157,7 +157,7 @@ func (c *Container) execPS(args []string) ([]string, error) {
if err != nil {
return nil, err
} else if ec != 0 {
return nil, errors.Errorf("Runtime failed with exit status: %d and output: %s", ec, strings.Join(stderr, " "))
return nil, fmt.Errorf("runtime failed with exit status: %d and output: %s", ec, strings.Join(stderr, " "))
}

if logrus.GetLevel() >= logrus.DebugLevel {

@ -5,7 +5,6 @@ import (

"github.com/containers/podman/v4/libpod/define"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)

// Validate that the configuration of a container is valid.
@ -16,17 +15,17 @@ func (c *Container) validate() error {

// If one of RootfsImageIDor RootfsImageName are set, both must be set.
if (imageIDSet || imageNameSet) && !(imageIDSet && imageNameSet) {
return errors.Wrapf(define.ErrInvalidArg, "both RootfsImageName and RootfsImageID must be set if either is set")
return fmt.Errorf("both RootfsImageName and RootfsImageID must be set if either is set: %w", define.ErrInvalidArg)
}

// Cannot set RootfsImageID and Rootfs at the same time
if imageIDSet && rootfsSet {
return errors.Wrapf(define.ErrInvalidArg, "cannot set both an image ID and rootfs for a container")
return fmt.Errorf("cannot set both an image ID and rootfs for a container: %w", define.ErrInvalidArg)
}

// Must set at least one of RootfsImageID or Rootfs
if !(imageIDSet || rootfsSet) {
return errors.Wrapf(define.ErrInvalidArg, "must set root filesystem source to either image or rootfs")
return fmt.Errorf("must set root filesystem source to either image or rootfs: %w", define.ErrInvalidArg)
}

// A container cannot be marked as an infra and service container at
@ -38,62 +37,62 @@ func (c *Container) validate() error {
// Cannot make a network namespace if we are joining another container's
// network namespace
if c.config.CreateNetNS && c.config.NetNsCtr != "" {
return errors.Wrapf(define.ErrInvalidArg, "cannot both create a network namespace and join another container's network namespace")
return fmt.Errorf("cannot both create a network namespace and join another container's network namespace: %w", define.ErrInvalidArg)
}

if c.config.CgroupsMode == cgroupSplit && c.config.CgroupParent != "" {
return errors.Wrapf(define.ErrInvalidArg, "cannot specify --cgroup-mode=split with a cgroup-parent")
return fmt.Errorf("cannot specify --cgroup-mode=split with a cgroup-parent: %w", define.ErrInvalidArg)
}

// Not creating cgroups has a number of requirements, mostly related to
// the PID namespace.
if c.config.NoCgroups || c.config.CgroupsMode == "disabled" {
if c.config.PIDNsCtr != "" {
return errors.Wrapf(define.ErrInvalidArg, "cannot join another container's PID namespace if not creating cgroups")
return fmt.Errorf("cannot join another container's PID namespace if not creating cgroups: %w", define.ErrInvalidArg)
}

if c.config.CgroupParent != "" {
return errors.Wrapf(define.ErrInvalidArg, "cannot set cgroup parent if not creating cgroups")
return fmt.Errorf("cannot set cgroup parent if not creating cgroups: %w", define.ErrInvalidArg)
}

// Ensure we have a PID namespace
if c.config.Spec.Linux == nil {
return errors.Wrapf(define.ErrInvalidArg, "must provide Linux namespace configuration in OCI spec when using NoCgroups")
return fmt.Errorf("must provide Linux namespace configuration in OCI spec when using NoCgroups: %w", define.ErrInvalidArg)
}
foundPid := false
for _, ns := range c.config.Spec.Linux.Namespaces {
if ns.Type == spec.PIDNamespace {
foundPid = true
if ns.Path != "" {
return errors.Wrapf(define.ErrInvalidArg, "containers not creating Cgroups must create a private PID namespace - cannot use another")
return fmt.Errorf("containers not creating Cgroups must create a private PID namespace - cannot use another: %w", define.ErrInvalidArg)
}
break
}
}
if !foundPid {
return errors.Wrapf(define.ErrInvalidArg, "containers not creating Cgroups must create a private PID namespace")
return fmt.Errorf("containers not creating Cgroups must create a private PID namespace: %w", define.ErrInvalidArg)
}
}

// Can only set static IP or MAC is creating a network namespace.
if !c.config.CreateNetNS && (c.config.StaticIP != nil || c.config.StaticMAC != nil) {
return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if not creating a network namespace")
return fmt.Errorf("cannot set static IP or MAC address if not creating a network namespace: %w", define.ErrInvalidArg)
}

// Cannot set static IP or MAC if joining >1 CNI network.
if len(c.config.Networks) > 1 && (c.config.StaticIP != nil || c.config.StaticMAC != nil) {
return errors.Wrapf(define.ErrInvalidArg, "cannot set static IP or MAC address if joining more than one network")
return fmt.Errorf("cannot set static IP or MAC address if joining more than one network: %w", define.ErrInvalidArg)
}

// Using image resolv.conf conflicts with various DNS settings.
if c.config.UseImageResolvConf &&
(len(c.config.DNSSearch) > 0 || len(c.config.DNSServer) > 0 ||
len(c.config.DNSOption) > 0) {
return errors.Wrapf(define.ErrInvalidArg, "cannot configure DNS options if using image's resolv.conf")
return fmt.Errorf("cannot configure DNS options if using image's resolv.conf: %w", define.ErrInvalidArg)
}

if c.config.UseImageHosts && len(c.config.HostAdd) > 0 {
return errors.Wrapf(define.ErrInvalidArg, "cannot add to /etc/hosts if using image's /etc/hosts")
return fmt.Errorf("cannot add to /etc/hosts if using image's /etc/hosts: %w", define.ErrInvalidArg)
}

// Check named volume, overlay volume and image volume destination conflist
@ -102,7 +101,7 @@ func (c *Container) validate() error {
// Don't check if they already exist.
// If they don't we will automatically create them.
if _, ok := destinations[vol.Dest]; ok {
return errors.Wrapf(define.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
return fmt.Errorf("two volumes found with destination %s: %w", vol.Dest, define.ErrInvalidArg)
}
destinations[vol.Dest] = true
}
@ -110,7 +109,7 @@ func (c *Container) validate() error {
// Don't check if they already exist.
// If they don't we will automatically create them.
if _, ok := destinations[vol.Dest]; ok {
return errors.Wrapf(define.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
return fmt.Errorf("two volumes found with destination %s: %w", vol.Dest, define.ErrInvalidArg)
}
destinations[vol.Dest] = true
}
@ -118,7 +117,7 @@ func (c *Container) validate() error {
// Don't check if they already exist.
// If they don't we will automatically create them.
if _, ok := destinations[vol.Dest]; ok {
return errors.Wrapf(define.ErrInvalidArg, "two volumes found with destination %s", vol.Dest)
return fmt.Errorf("two volumes found with destination %s: %w", vol.Dest, define.ErrInvalidArg)
}
destinations[vol.Dest] = true
}
@ -126,13 +125,13 @@ func (c *Container) validate() error {
// If User in the OCI spec is set, require that c.config.User is set for
// security reasons (a lot of our code relies on c.config.User).
if c.config.User == "" && (c.config.Spec.Process.User.UID != 0 || c.config.Spec.Process.User.GID != 0) {
return errors.Wrapf(define.ErrInvalidArg, "please set User explicitly via WithUser() instead of in OCI spec directly")
return fmt.Errorf("please set User explicitly via WithUser() instead of in OCI spec directly: %w", define.ErrInvalidArg)
}

// Init-ctrs must be used inside a Pod. Check if a init container type is
// passed and if no pod is passed
if len(c.config.InitContainerType) > 0 && len(c.config.Pod) < 1 {
return errors.Wrap(define.ErrInvalidArg, "init containers must be created in a pod")
return fmt.Errorf("init containers must be created in a pod: %w", define.ErrInvalidArg)
}
return nil
}

@ -1,9 +1,8 @@
package define

import (
"fmt"
"time"

"github.com/pkg/errors"
)

// ContainerStatus represents the current state of a container
@ -91,7 +90,7 @@ func StringToContainerStatus(status string) (ContainerStatus, error) {
case ContainerStateRemoving.String():
return ContainerStateRemoving, nil
default:
return ContainerStateUnknown, errors.Wrapf(ErrInvalidArg, "unknown container state: %s", status)
return ContainerStateUnknown, fmt.Errorf("unknown container state: %s: %w", status, ErrInvalidArg)
}
}

@ -1,9 +1,9 @@
package define

import (
"errors"
"strings"

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -23,10 +23,10 @@ const (
// has a predefined exit code associated. If so, it returns that, otherwise it returns
// the exit code originally stated in libpod.Exec()
func TranslateExecErrorToExitCode(originalEC int, err error) int {
if errors.Cause(err) == ErrOCIRuntimePermissionDenied {
if errors.Is(err, ErrOCIRuntimePermissionDenied) {
return ExecErrorCodeCannotInvoke
}
if errors.Cause(err) == ErrOCIRuntimeNotFound {
if errors.Is(err, ErrOCIRuntimeNotFound) {
return ExecErrorCodeNotFound
}
return originalEC

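The exec-code hunk above swaps errors.Cause(err) == sentinel for errors.Is(err, sentinel). errors.Cause only follows wrappers from github.com/pkg/errors, whereas errors.Is follows anything implementing Unwrap() error, which includes fmt.Errorf("...: %w", err) chains (and, to the best of my knowledge, recent releases of pkg/errors as well). A minimal sketch with stand-in names and an illustrative exit code:

    package main

    import (
        "errors"
        "fmt"
    )

    // errRuntimeNotFound stands in for define.ErrOCIRuntimeNotFound.
    var errRuntimeNotFound = errors.New("OCI runtime not found")

    const execErrorCodeNotFound = 127 // illustrative value only

    func translateExitCode(original int, err error) int {
        // Old: if errors.Cause(err) == errRuntimeNotFound { ... }
        // New: errors.Is also matches when err was built with fmt.Errorf("...: %w", ...).
        if errors.Is(err, errRuntimeNotFound) {
            return execErrorCodeNotFound
        }
        return original
    }

    func main() {
        wrapped := fmt.Errorf("starting exec session: %w", errRuntimeNotFound)
        fmt.Println(translateExitCode(1, wrapped))                 // 127
        fmt.Println(translateExitCode(1, errors.New("unrelated"))) // 1
    }
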
@ -1,10 +1,11 @@
package libpod

import (
"fmt"

"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/layers"
"github.com/containers/storage/pkg/archive"
"github.com/pkg/errors"
)

var initInodes = map[string]bool{
@ -76,5 +77,5 @@ func (r *Runtime) getLayerID(id string, diffType define.DiffType) (string, error
}
lastErr = err
}
return "", errors.Wrapf(lastErr, "%s not found", id)
return "", fmt.Errorf("%s not found: %w", id, lastErr)
}

@ -6,7 +6,6 @@ import (
"sync"

"github.com/containers/podman/v4/libpod/events"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -178,7 +177,7 @@ func (r *Runtime) GetLastContainerEvent(ctx context.Context, nameOrID string, co
return nil, err
}
if len(containerEvents) < 1 {
return nil, errors.Wrapf(events.ErrEventNotFound, "%s not found", containerEvent.String())
return nil, fmt.Errorf("%s not found: %w", containerEvent.String(), events.ErrEventNotFound)
}
// return the last element in the slice
return containerEvents[len(containerEvents)-1], nil
@ -201,7 +200,7 @@ func (r *Runtime) GetExecDiedEvent(ctx context.Context, nameOrID, execSessionID
// There *should* only be one event maximum.
// But... just in case... let's not blow up if there's more than one.
if len(containerEvents) < 1 {
return nil, errors.Wrapf(events.ErrEventNotFound, "exec died event for session %s (container %s) not found", execSessionID, nameOrID)
return nil, fmt.Errorf("exec died event for session %s (container %s) not found: %w", execSessionID, nameOrID, events.ErrEventNotFound)
}
return containerEvents[len(containerEvents)-1], nil
}

@ -2,9 +2,8 @@ package events

import (
"context"
"errors"
"time"

"github.com/pkg/errors"
)

// EventerType ...

@ -2,16 +2,16 @@ package events

import (
"encoding/json"
"errors"
"fmt"
"time"

"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
)

// ErrNoJournaldLogging indicates that there is no journald logging
// supported (requires libsystemd)
var ErrNoJournaldLogging = errors.New("No support for journald logging")
var ErrNoJournaldLogging = errors.New("no support for journald logging")

// String returns a string representation of EventerType
func (et EventerType) String() string {
@ -140,7 +140,7 @@ func StringToType(name string) (Type, error) {
case "":
return "", ErrEventTypeBlank
}
return "", errors.Errorf("unknown event type %q", name)
return "", fmt.Errorf("unknown event type %q", name)
}

// StringToStatus converts a string to an Event Status
@ -225,5 +225,5 @@ func StringToStatus(name string) (Status, error) {
case Untag.String():
return Untag, nil
}
return "", errors.Errorf("unknown event status %q", name)
return "", fmt.Errorf("unknown event status %q", name)
}

@ -1,9 +1,9 @@
package events

import (
"fmt"
"strings"

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -14,7 +14,7 @@ func NewEventer(options EventerOptions) (Eventer, error) {
case strings.ToUpper(Journald.String()):
eventer, err := newEventJournalD(options)
if err != nil {
return nil, errors.Wrapf(err, "eventer creation")
return nil, fmt.Errorf("eventer creation: %w", err)
}
return eventer, nil
case strings.ToUpper(LogFile.String()):
@ -24,6 +24,6 @@ func NewEventer(options EventerOptions) (Eventer, error) {
case strings.ToUpper(Memory.String()):
return NewMemoryEventer(), nil
default:
return nil, errors.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType))
return nil, fmt.Errorf("unknown event logger type: %s", strings.ToUpper(options.EventerType))
}
}

@ -3,7 +3,7 @@

package events

import "github.com/pkg/errors"
import "errors"

// NewEventer creates an eventer based on the eventer type
func NewEventer(options EventerOptions) (Eventer, error) {

@ -1,11 +1,11 @@
package events

import (
"fmt"
"strings"
"time"

"github.com/containers/podman/v4/pkg/util"
"github.com/pkg/errors"
)

func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error) {
@ -74,7 +74,7 @@ func generateEventFilter(filter, filterValue string) (func(e *Event) bool, error
return found
}, nil
}
return nil, errors.Errorf("%s is an invalid filter", filter)
return nil, fmt.Errorf("%s is an invalid filter", filter)
}

func generateEventSinceOption(timeSince time.Time) func(e *Event) bool {
@ -92,7 +92,7 @@ func generateEventUntilOption(timeUntil time.Time) func(e *Event) bool {
func parseFilter(filter string) (string, string, error) {
filterSplit := strings.SplitN(filter, "=", 2)
if len(filterSplit) != 2 {
return "", "", errors.Errorf("%s is an invalid filter", filter)
return "", "", fmt.Errorf("%s is an invalid filter", filter)
}
return filterSplit[0], filterSplit[1], nil
}
@ -137,7 +137,7 @@ func generateEventFilters(filters []string, since, until string) (map[string][]E
if len(since) > 0 {
timeSince, err := util.ParseInputTime(since, true)
if err != nil {
return nil, errors.Wrapf(err, "unable to convert since time of %s", since)
return nil, fmt.Errorf("unable to convert since time of %s: %w", since, err)
}
filterFunc := generateEventSinceOption(timeSince)
filterMap["since"] = []EventFilter{filterFunc}
@ -146,7 +146,7 @@ func generateEventFilters(filters []string, since, until string) (map[string][]E
if len(until) > 0 {
timeUntil, err := util.ParseInputTime(until, false)
if err != nil {
return nil, errors.Wrapf(err, "unable to convert until time of %s", until)
return nil, fmt.Errorf("unable to convert until time of %s: %w", until, err)
}
filterFunc := generateEventUntilOption(timeUntil)
filterMap["until"] = []EventFilter{filterFunc}

@ -6,13 +6,14 @@ package events
import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"time"

"github.com/containers/podman/v4/pkg/util"
"github.com/coreos/go-systemd/v22/journal"
"github.com/coreos/go-systemd/v22/sdjournal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -73,7 +74,7 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
defer close(options.EventChannel)
filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until)
if err != nil {
return errors.Wrapf(err, "failed to parse event filters")
return fmt.Errorf("failed to parse event filters: %w", err)
}

var untilTime time.Time
@ -96,29 +97,29 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
// match only podman journal entries
podmanJournal := sdjournal.Match{Field: "SYSLOG_IDENTIFIER", Value: "podman"}
if err := j.AddMatch(podmanJournal.String()); err != nil {
return errors.Wrap(err, "failed to add journal filter for event log")
return fmt.Errorf("failed to add journal filter for event log: %w", err)
}

if len(options.Since) == 0 && len(options.Until) == 0 && options.Stream {
if err := j.SeekTail(); err != nil {
return errors.Wrap(err, "failed to seek end of journal")
return fmt.Errorf("failed to seek end of journal: %w", err)
}
// After SeekTail calling Next moves to a random entry.
// To prevent this we have to call Previous first.
// see: https://bugs.freedesktop.org/show_bug.cgi?id=64614
if _, err := j.Previous(); err != nil {
return errors.Wrap(err, "failed to move journal cursor to previous entry")
return fmt.Errorf("failed to move journal cursor to previous entry: %w", err)
}
}

// the api requires a next|prev before getting a cursor
if _, err := j.Next(); err != nil {
return errors.Wrap(err, "failed to move journal cursor to next entry")
return fmt.Errorf("failed to move journal cursor to next entry: %w", err)
}

prevCursor, err := j.GetCursor()
if err != nil {
return errors.Wrap(err, "failed to get journal cursor")
return fmt.Errorf("failed to get journal cursor: %w", err)
}
for {
select {
@ -130,11 +131,11 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {
}

if _, err := j.Next(); err != nil {
return errors.Wrap(err, "failed to move journal cursor to next entry")
return fmt.Errorf("failed to move journal cursor to next entry: %w", err)
}
newCursor, err := j.GetCursor()
if err != nil {
return errors.Wrap(err, "failed to get journal cursor")
return fmt.Errorf("failed to get journal cursor: %w", err)
}
if prevCursor == newCursor {
if !options.Stream || (len(options.Until) > 0 && time.Now().After(untilTime)) {
@ -151,14 +152,14 @@ func (e EventJournalD) Read(ctx context.Context, options ReadOptions) error {

entry, err := j.GetEntry()
if err != nil {
return errors.Wrap(err, "failed to read journal entry")
return fmt.Errorf("failed to read journal entry: %w", err)
}
newEvent, err := newEventFromJournalEntry(entry)
if err != nil {
// We can't decode this event.
// Don't fail hard - that would make events unusable.
// Instead, log and continue.
if errors.Cause(err) != ErrEventTypeBlank {
if !errors.Is(err, ErrEventTypeBlank) {
logrus.Errorf("Unable to decode event: %v", err)
}
continue

@ -6,6 +6,7 @@ package events
import (
"bufio"
"context"
"errors"
"fmt"
"io"
"io/ioutil"
@ -16,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage/pkg/lockfile"
"github.com/nxadm/tail"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -90,7 +90,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
defer close(options.EventChannel)
filterMap, err := generateEventFilters(options.Filters, options.Since, options.Until)
if err != nil {
return errors.Wrapf(err, "failed to parse event filters")
return fmt.Errorf("failed to parse event filters: %w", err)
}
t, err := e.getTail(options)
if err != nil {
@ -136,7 +136,7 @@ func (e EventLogFile) Read(ctx context.Context, options ReadOptions) error {
case Image, Volume, Pod, System, Container, Network:
// no-op
default:
return errors.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
return fmt.Errorf("event type %s is not valid in %s", event.Type.String(), e.options.LogFilePath)
}
if copy && applyFilters(event, filterMap) {
options.EventChannel <- event

@ -2,6 +2,8 @@ package libpod

import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
@ -9,7 +11,6 @@ import (
"time"

"github.com/containers/podman/v4/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -26,7 +27,7 @@ const (
func (r *Runtime) HealthCheck(name string) (define.HealthCheckStatus, error) {
container, err := r.LookupContainer(name)
if err != nil {
return define.HealthCheckContainerNotFound, errors.Wrapf(err, "unable to look up %s to perform a health check", name)
return define.HealthCheckContainerNotFound, fmt.Errorf("unable to look up %s to perform a health check: %w", name, err)
}
hcStatus, err := checkHealthCheckCanBeRun(container)
if err == nil {
@ -44,11 +45,11 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
)
hcCommand := c.HealthCheckConfig().Test
if len(hcCommand) < 1 {
return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
}
switch hcCommand[0] {
case "", define.HealthConfigTestNone:
return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
case define.HealthConfigTestCmd:
newCommand = hcCommand[1:]
case define.HealthConfigTestCmdShell:
@ -59,11 +60,11 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
newCommand = hcCommand
}
if len(newCommand) < 1 || newCommand[0] == "" {
return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
}
rPipe, wPipe, err := os.Pipe()
if err != nil {
return define.HealthCheckInternalError, errors.Wrapf(err, "unable to create pipe for healthcheck session")
return define.HealthCheckInternalError, fmt.Errorf("unable to create pipe for healthcheck session: %w", err)
}
defer wPipe.Close()
defer rPipe.Close()
@ -92,11 +93,10 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
config.Command = newCommand
exitCode, hcErr := c.exec(config, streams, nil, true)
if hcErr != nil {
errCause := errors.Cause(hcErr)
hcResult = define.HealthCheckFailure
if errCause == define.ErrOCIRuntimeNotFound ||
errCause == define.ErrOCIRuntimePermissionDenied ||
errCause == define.ErrOCIRuntime {
if errors.Is(hcErr, define.ErrOCIRuntimeNotFound) ||
errors.Is(hcErr, define.ErrOCIRuntimePermissionDenied) ||
errors.Is(hcErr, define.ErrOCIRuntime) {
returnCode = 1
hcErr = nil
} else {
@ -125,11 +125,11 @@ func (c *Container) runHealthCheck() (define.HealthCheckStatus, error) {
if timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout {
returnCode = -1
hcResult = define.HealthCheckFailure
hcErr = errors.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
hcErr = fmt.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
}
hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog)
if err := c.updateHealthCheckLog(hcl, inStartPeriod); err != nil {
return hcResult, errors.Wrapf(err, "unable to update health check log %s for %s", c.healthCheckLogPath(), c.ID())
return hcResult, fmt.Errorf("unable to update health check log %s for %s: %w", c.healthCheckLogPath(), c.ID(), err)
}
return hcResult, hcErr
}
@ -140,10 +140,10 @@ func checkHealthCheckCanBeRun(c *Container) (define.HealthCheckStatus, error) {
return define.HealthCheckInternalError, err
}
if cstate != define.ContainerStateRunning {
return define.HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
return define.HealthCheckContainerStopped, fmt.Errorf("container %s is not running", c.ID())
}
if !c.HasHealthCheck() {
return define.HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
return define.HealthCheckNotDefined, fmt.Errorf("container %s has no defined healthcheck", c.ID())
}
return define.HealthCheckDefined, nil
}
@ -167,7 +167,7 @@ func (c *Container) updateHealthStatus(status string) error {
healthCheck.Status = status
newResults, err := json.Marshal(healthCheck)
if err != nil {
return errors.Wrapf(err, "unable to marshall healthchecks for writing status")
return fmt.Errorf("unable to marshall healthchecks for writing status: %w", err)
}
return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
}
@ -201,7 +201,7 @@ func (c *Container) updateHealthCheckLog(hcl define.HealthCheckLog, inStartPerio
}
newResults, err := json.Marshal(healthCheck)
if err != nil {
return errors.Wrapf(err, "unable to marshall healthchecks for writing")
return fmt.Errorf("unable to marshall healthchecks for writing: %w", err)
}
return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
}
@ -222,10 +222,10 @@ func (c *Container) getHealthCheckLog() (define.HealthCheckResults, error) {
}
b, err := ioutil.ReadFile(c.healthCheckLogPath())
if err != nil {
return healthCheck, errors.Wrap(err, "failed to read health check log file")
return healthCheck, fmt.Errorf("failed to read health check log file: %w", err)
}
if err := json.Unmarshal(b, &healthCheck); err != nil {
return healthCheck, errors.Wrapf(err, "failed to unmarshal existing healthcheck results in %s", c.healthCheckLogPath())
return healthCheck, fmt.Errorf("failed to unmarshal existing healthcheck results in %s: %w", c.healthCheckLogPath(), err)
}
return healthCheck, nil
}
@ -241,7 +241,7 @@ func (c *Container) HealthCheckStatus() (string, error) {
// This function does not lock the container.
func (c *Container) healthCheckStatus() (string, error) {
if !c.HasHealthCheck() {
return "", errors.Errorf("container %s has no defined healthcheck", c.ID())
return "", fmt.Errorf("container %s has no defined healthcheck", c.ID())
}

if err := c.syncContainer(); err != nil {
@ -250,7 +250,7 @@ func (c *Container) healthCheckStatus() (string, error) {

results, err := c.getHealthCheckLog()
if err != nil {
return "", errors.Wrapf(err, "unable to get healthcheck log for %s", c.ID())
return "", fmt.Errorf("unable to get healthcheck log for %s: %w", c.ID(), err)
}

return results.Status, nil

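The runHealthCheck hunk above replaces a three-way comparison against errors.Cause(hcErr) with three errors.Is calls. A compact sketch of that shape, using stand-ins for the define package's sentinels:

    package main

    import (
        "errors"
        "fmt"
    )

    var (
        errOCIRuntime           = errors.New("OCI runtime error")
        errOCIRuntimeNotFound   = errors.New("OCI runtime command not found error")
        errOCIRuntimePermission = errors.New("OCI runtime permission denied error")
    )

    // isRuntimeError reports whether err matches any of the runtime sentinels,
    // wherever they sit in the wrap chain.
    func isRuntimeError(err error) bool {
        return errors.Is(err, errOCIRuntimeNotFound) ||
            errors.Is(err, errOCIRuntimePermission) ||
            errors.Is(err, errOCIRuntime)
    }

    func main() {
        hcErr := fmt.Errorf("exec failed: %w", errOCIRuntimeNotFound)
        fmt.Println(isRuntimeError(hcErr))                   // true
        fmt.Println(isRuntimeError(errors.New("timed out"))) // false
    }
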
@ -10,7 +10,6 @@ import (
"github.com/containers/podman/v4/pkg/errorhandling"
"github.com/containers/podman/v4/pkg/rootless"
"github.com/containers/podman/v4/pkg/systemd"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -21,7 +20,7 @@ func (c *Container) createTimer() error {
}
podman, err := os.Executable()
if err != nil {
return errors.Wrapf(err, "failed to get path for podman for a health check timer")
return fmt.Errorf("failed to get path for podman for a health check timer: %w", err)
}

var cmd = []string{}
@ -36,13 +35,13 @@ func (c *Container) createTimer() error {

conn, err := systemd.ConnectToDBUS()
if err != nil {
return errors.Wrapf(err, "unable to get systemd connection to add healthchecks")
return fmt.Errorf("unable to get systemd connection to add healthchecks: %w", err)
}
conn.Close()
logrus.Debugf("creating systemd-transient files: %s %s", "systemd-run", cmd)
systemdRun := exec.Command("systemd-run", cmd...)
if output, err := systemdRun.CombinedOutput(); err != nil {
return errors.Errorf("%s", output)
return fmt.Errorf("%s", output)
}
return nil
}
@ -65,7 +64,7 @@ func (c *Container) startTimer() error {
}
conn, err := systemd.ConnectToDBUS()
if err != nil {
return errors.Wrapf(err, "unable to get systemd connection to start healthchecks")
return fmt.Errorf("unable to get systemd connection to start healthchecks: %w", err)
}
defer conn.Close()

@ -89,7 +88,7 @@ func (c *Container) removeTransientFiles(ctx context.Context) error {
}
conn, err := systemd.ConnectToDBUS()
if err != nil {
return errors.Wrapf(err, "unable to get systemd connection to remove healthchecks")
return fmt.Errorf("unable to get systemd connection to remove healthchecks: %w", err)
}
defer conn.Close()

@ -3,6 +3,7 @@ package libpod
import (
"bufio"
"bytes"
"errors"
"fmt"
"io/ioutil"
"math"
@ -25,7 +26,6 @@ import (
"github.com/containers/storage"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/selinux/go-selinux"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -34,20 +34,20 @@ func (r *Runtime) info() (*define.Info, error) {
info := define.Info{}
versionInfo, err := define.GetVersion()
if err != nil {
return nil, errors.Wrapf(err, "error getting version info")
return nil, fmt.Errorf("error getting version info: %w", err)
}
info.Version = versionInfo
// get host information
hostInfo, err := r.hostInfo()
if err != nil {
return nil, errors.Wrapf(err, "error getting host info")
return nil, fmt.Errorf("error getting host info: %w", err)
}
info.Host = hostInfo

// get store information
storeInfo, err := r.storeInfo()
if err != nil {
return nil, errors.Wrapf(err, "error getting store info")
return nil, fmt.Errorf("error getting store info: %w", err)
}
info.Store = storeInfo
registries := make(map[string]interface{})
@ -55,14 +55,14 @@ func (r *Runtime) info() (*define.Info, error) {
sys := r.SystemContext()
data, err := sysregistriesv2.GetRegistries(sys)
if err != nil {
return nil, errors.Wrapf(err, "error getting registries")
return nil, fmt.Errorf("error getting registries: %w", err)
}
for _, reg := range data {
registries[reg.Prefix] = reg
}
regs, err := sysregistriesv2.UnqualifiedSearchRegistries(sys)
if err != nil {
return nil, errors.Wrapf(err, "error getting registries")
return nil, fmt.Errorf("error getting registries: %w", err)
}
if len(regs) > 0 {
registries["search"] = regs
@ -86,36 +86,36 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
// lets say OS, arch, number of cpus, amount of memory, maybe os distribution/version, hostname, kernel version, uptime
mi, err := system.ReadMemInfo()
if err != nil {
return nil, errors.Wrapf(err, "error reading memory info")
return nil, fmt.Errorf("error reading memory info: %w", err)
}

hostDistributionInfo := r.GetHostDistributionInfo()

kv, err := readKernelVersion()
if err != nil {
return nil, errors.Wrapf(err, "error reading kernel version")
return nil, fmt.Errorf("error reading kernel version: %w", err)
}

host, err := os.Hostname()
if err != nil {
return nil, errors.Wrapf(err, "error getting hostname")
return nil, fmt.Errorf("error getting hostname: %w", err)
}

seccompProfilePath, err := DefaultSeccompPath()
if err != nil {
return nil, errors.Wrapf(err, "error getting Seccomp profile path")
return nil, fmt.Errorf("error getting Seccomp profile path: %w", err)
}

// Cgroups version
unified, err := cgroups.IsCgroup2UnifiedMode()
if err != nil {
return nil, errors.Wrapf(err, "error reading cgroups mode")
return nil, fmt.Errorf("error reading cgroups mode: %w", err)
}

// Get Map of all available controllers
availableControllers, err := cgroups.GetAvailableControllers(nil, unified)
if err != nil {
return nil, errors.Wrapf(err, "error getting available cgroup controllers")
return nil, fmt.Errorf("error getting available cgroup controllers: %w", err)
}
cpuUtil, err := getCPUUtilization()
if err != nil {
@ -178,11 +178,11 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {
if rootless.IsRootless() {
uidmappings, err := rootless.ReadMappingsProc("/proc/self/uid_map")
if err != nil {
return nil, errors.Wrapf(err, "error reading uid mappings")
return nil, fmt.Errorf("error reading uid mappings: %w", err)
}
gidmappings, err := rootless.ReadMappingsProc("/proc/self/gid_map")
if err != nil {
return nil, errors.Wrapf(err, "error reading gid mappings")
return nil, fmt.Errorf("error reading gid mappings: %w", err)
}
idmappings := define.IDMappings{
GIDMap: gidmappings,
@ -201,7 +201,7 @@ func (r *Runtime) hostInfo() (*define.HostInfo, error) {

duration, err := procUptime()
if err != nil {
return nil, errors.Wrapf(err, "error reading up time")
return nil, fmt.Errorf("error reading up time: %w", err)
}

uptime := struct {
@ -240,7 +240,7 @@ func (r *Runtime) getContainerStoreInfo() (define.ContainerStore, error) {
for _, con := range cons {
state, err := con.State()
if err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr {
if errors.Is(err, define.ErrNoSuchCtr) {
// container was probably removed
cs.Number--
continue
@ -271,7 +271,7 @@ func (r *Runtime) storeInfo() (*define.StoreInfo, error) {
}
images, err := r.store.Images()
if err != nil {
return nil, errors.Wrapf(err, "error getting number of images")
return nil, fmt.Errorf("error getting number of images: %w", err)
}
conInfo, err := r.getContainerStoreInfo()
if err != nil {
@ -281,7 +281,7 @@ func (r *Runtime) storeInfo() (*define.StoreInfo, error) {

var grStats syscall.Statfs_t
if err := syscall.Statfs(r.store.GraphRoot(), &grStats); err != nil {
return nil, errors.Wrapf(err, "unable to collect graph root usasge for %q", r.store.GraphRoot())
return nil, fmt.Errorf("unable to collect graph root usasge for %q: %w", r.store.GraphRoot(), err)
}
allocated := uint64(grStats.Bsize) * grStats.Blocks
info := define.StoreInfo{
@ -407,15 +407,15 @@ func getCPUUtilization() (*define.CPUUsage, error) {
func statToPercent(stats []string) (*define.CPUUsage, error) {
userTotal, err := strconv.ParseFloat(stats[1], 64)
if err != nil {
return nil, errors.Wrapf(err, "unable to parse user value %q", stats[1])
return nil, fmt.Errorf("unable to parse user value %q: %w", stats[1], err)
}
systemTotal, err := strconv.ParseFloat(stats[3], 64)
if err != nil {
return nil, errors.Wrapf(err, "unable to parse system value %q", stats[3])
return nil, fmt.Errorf("unable to parse system value %q: %w", stats[3], err)
}
idleTotal, err := strconv.ParseFloat(stats[4], 64)
if err != nil {
return nil, errors.Wrapf(err, "unable to parse idle value %q", stats[4])
return nil, fmt.Errorf("unable to parse idle value %q: %w", stats[4], err)
}
total := userTotal + systemTotal + idleTotal
s := define.CPUUsage{

@ -2,6 +2,7 @@ package libpod

import (
"context"
"errors"
"fmt"
"math/rand"
"os"
@ -27,7 +28,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -53,11 +53,11 @@ func (p *Pod) GenerateForKube(ctx context.Context) (*v1.Pod, []v1.ServicePort, e
}
// If the pod has no containers, no sense to generate YAML
if len(allContainers) == 0 {
return nil, servicePorts, errors.Errorf("pod %s has no containers", p.ID())
return nil, servicePorts, fmt.Errorf("pod %s has no containers", p.ID())
}
// If only an infra container is present, makes no sense to generate YAML
if len(allContainers) == 1 && p.HasInfraContainer() {
return nil, servicePorts, errors.Errorf("pod %s only has an infra container", p.ID())
return nil, servicePorts, fmt.Errorf("pod %s only has an infra container", p.ID())
}

extraHost := make([]v1.HostAlias, 0)
@ -573,7 +573,7 @@ func containerToV1Container(ctx context.Context, c *Container) (v1.Container, []
if !c.Privileged() && len(c.config.Spec.Linux.Devices) > 0 {
// TODO Enable when we can support devices and their names
kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.config.Spec.Linux.Devices)
return kubeContainer, kubeVolumes, nil, annotations, errors.Wrapf(define.ErrNotImplemented, "linux devices")
return kubeContainer, kubeVolumes, nil, annotations, fmt.Errorf("linux devices: %w", define.ErrNotImplemented)
}

if len(c.config.UserVolumes) > 0 {
@ -743,7 +743,7 @@ func portMappingToContainerPort(portMappings []types.PortMapping) ([]v1.Containe
case "SCTP":
protocol = v1.ProtocolSCTP
default:
return containerPorts, errors.Errorf("unknown network protocol %s", p.Protocol)
return containerPorts, fmt.Errorf("unknown network protocol %s", p.Protocol)
}
for i := uint16(0); i < p.Range; i++ {
cp := v1.ContainerPort{
@ -772,7 +772,7 @@ func libpodEnvVarsToKubeEnvVars(envs []string, imageEnvs []string) ([]v1.EnvVar,
for _, e := range envs {
split := strings.SplitN(e, "=", 2)
if len(split) != 2 {
return envVars, errors.Errorf("environment variable %s is malformed; should be key=value", e)
return envVars, fmt.Errorf("environment variable %s is malformed; should be key=value", e)
}
if defaultEnv[split[0]] == split[1] {
continue
@ -892,11 +892,11 @@ func isHostPathDirectory(hostPathSource string) (bool, error) {

func convertVolumePathToName(hostSourcePath string) (string, error) {
if len(hostSourcePath) == 0 {
return "", errors.Errorf("hostSourcePath must be specified to generate volume name")
return "", errors.New("hostSourcePath must be specified to generate volume name")
}
if len(hostSourcePath) == 1 {
if hostSourcePath != "/" {
return "", errors.Errorf("hostSourcePath malformatted: %s", hostSourcePath)
return "", fmt.Errorf("hostSourcePath malformatted: %s", hostSourcePath)
}
// add special case name
return "root", nil
@ -1025,7 +1025,7 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
defer c.lock.Unlock()
}
if err := c.syncContainer(); err != nil {
return nil, errors.Wrapf(err, "unable to sync container during YAML generation")
return nil, fmt.Errorf("unable to sync container during YAML generation: %w", err)
}

mountpoint := c.state.Mountpoint
@ -1033,7 +1033,7 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, error) {
var err error
mountpoint, err = c.mount()
if err != nil {
return nil, errors.Wrapf(err, "failed to mount %s mountpoint", c.ID())
return nil, fmt.Errorf("failed to mount %s mountpoint: %w", c.ID(), err)
}
defer func() {
if err := c.unmount(false); err != nil {

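The file-lock hunks that follow wrap raw errnos such as syscall.EEXIST and syscall.EINVAL with %w. Since syscall.Errno values are themselves errors, callers can still test for them with errors.Is. A small sketch (the exists flag is purely illustrative):

    package main

    import (
        "errors"
        "fmt"
        "syscall"
    )

    // createLockDir mimics the shape of the patched CreateFileLock: if the path
    // already exists, return an error that wraps syscall.EEXIST.
    func createLockDir(path string, exists bool) error {
        if exists {
            return fmt.Errorf("directory %s exists: %w", path, syscall.EEXIST)
        }
        return nil
    }

    func main() {
        err := createLockDir("/tmp/locks", true)
        fmt.Println(errors.Is(err, syscall.EEXIST)) // true: the errno survives %w wrapping
        fmt.Println(err)
    }
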
@ -1,6 +1,7 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
|
@ -8,7 +9,6 @@ import (
|
|||
"syscall"
|
||||
|
||||
"github.com/containers/storage"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -23,7 +23,7 @@ type FileLocks struct { //nolint:revive // struct name stutters
|
|||
func CreateFileLock(path string) (*FileLocks, error) {
|
||||
_, err := os.Stat(path)
|
||||
if err == nil {
|
||||
return nil, errors.Wrapf(syscall.EEXIST, "directory %s exists", path)
|
||||
return nil, fmt.Errorf("directory %s exists: %w", path, syscall.EEXIST)
|
||||
}
|
||||
if err := os.MkdirAll(path, 0711); err != nil {
|
||||
return nil, err
|
||||
|
|
@@ -57,11 +57,11 @@ func OpenFileLock(path string) (*FileLocks, error) {
 // Close() is only intended to be used while testing the locks.
 func (locks *FileLocks) Close() error {
 	if !locks.valid {
-		return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
+		return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
 	}
 	err := os.RemoveAll(locks.lockPath)
 	if err != nil {
-		return errors.Wrapf(err, "deleting directory %s", locks.lockPath)
+		return fmt.Errorf("deleting directory %s: %w", locks.lockPath, err)
 	}
 	return nil
 }
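Several of the file-lock errors wrap a bare errno such as syscall.EINVAL or syscall.EEXIST. Wrapping with %w keeps those sentinels matchable, which is what callers of the old errors.Wrapf form relied on. A small sketch under that assumption (the helper function is invented for illustration):

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// errClosed mimics the message shape used above; the function itself is hypothetical.
func errClosed() error {
	return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
}

func main() {
	err := errClosed()
	fmt.Println(err)                            // locks have already been closed: invalid argument
	fmt.Println(errors.Is(err, syscall.EINVAL)) // true, so callers can still branch on the errno
}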
@ -73,7 +73,7 @@ func (locks *FileLocks) getLockPath(lck uint32) string {
|
|||
// AllocateLock allocates a lock and returns the index of the lock that was allocated.
|
||||
func (locks *FileLocks) AllocateLock() (uint32, error) {
|
||||
if !locks.valid {
|
||||
return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return 0, fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
id := uint32(0)
|
||||
|
|
@ -84,7 +84,7 @@ func (locks *FileLocks) AllocateLock() (uint32, error) {
|
|||
if os.IsExist(err) {
|
||||
continue
|
||||
}
|
||||
return 0, errors.Wrap(err, "creating lock file")
|
||||
return 0, fmt.Errorf("creating lock file: %w", err)
|
||||
}
|
||||
f.Close()
|
||||
break
|
||||
|
|
@ -98,12 +98,12 @@ func (locks *FileLocks) AllocateLock() (uint32, error) {
|
|||
// returned.
|
||||
func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(locks.getLockPath(lck), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating lock %d", lck)
|
||||
return fmt.Errorf("error creating lock %d: %w", lck, err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
|
|
@ -115,10 +115,10 @@ func (locks *FileLocks) AllocateGivenLock(lck uint32) error {
|
|||
// The given lock must be already allocated, or an error will be returned.
|
||||
func (locks *FileLocks) DeallocateLock(lck uint32) error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
if err := os.Remove(locks.getLockPath(lck)); err != nil {
|
||||
return errors.Wrapf(err, "deallocating lock %d", lck)
|
||||
return fmt.Errorf("deallocating lock %d: %w", lck, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -127,11 +127,11 @@ func (locks *FileLocks) DeallocateLock(lck uint32) error {
|
|||
// other containers and pods.
|
||||
func (locks *FileLocks) DeallocateAllLocks() error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
files, err := ioutil.ReadDir(locks.lockPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading directory %s", locks.lockPath)
|
||||
return fmt.Errorf("error reading directory %s: %w", locks.lockPath, err)
|
||||
}
|
||||
var lastErr error
|
||||
for _, f := range files {
|
||||
|
|
@ -148,12 +148,12 @@ func (locks *FileLocks) DeallocateAllLocks() error {
|
|||
// LockFileLock locks the given lock.
|
||||
func (locks *FileLocks) LockFileLock(lck uint32) error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
l, err := storage.GetLockfile(locks.getLockPath(lck))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error acquiring lock")
|
||||
return fmt.Errorf("error acquiring lock: %w", err)
|
||||
}
|
||||
|
||||
l.Lock()
|
||||
|
|
@ -163,11 +163,11 @@ func (locks *FileLocks) LockFileLock(lck uint32) error {
|
|||
// UnlockFileLock unlocks the given lock.
|
||||
func (locks *FileLocks) UnlockFileLock(lck uint32) error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
l, err := storage.GetLockfile(locks.getLockPath(lck))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error acquiring lock")
|
||||
return fmt.Errorf("error acquiring lock: %w", err)
|
||||
}
|
||||
|
||||
l.Unlock()
|
||||
|
|
|
|||
|
|
@@ -1,9 +1,9 @@
 package lock

 import (
+	"errors"
+	"fmt"
 	"sync"
-
-	"github.com/pkg/errors"
 )

 // Mutex holds a single mutex and whether it has been allocated.
@@ -49,7 +49,7 @@ type InMemoryManager struct {
 // of locks.
 func NewInMemoryManager(numLocks uint32) (Manager, error) {
 	if numLocks == 0 {
-		return nil, errors.Errorf("must provide a non-zero number of locks")
+		return nil, errors.New("must provide a non-zero number of locks")
 	}

 	manager := new(InMemoryManager)
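Note the distinction the commit draws for messages without format verbs: errors.Errorf("static text") becomes errors.New from the standard library, while fmt.Errorf is kept for messages that interpolate values, and %w only where an underlying error is actually wrapped. A tiny sketch (the values are made up):

package main

import (
	"errors"
	"fmt"
)

func main() {
	// Static message: nothing to format, nothing to wrap.
	errStatic := errors.New("must provide a non-zero number of locks")

	// Formatted message: values are interpolated, but no error is wrapped.
	id, max := uint32(12), uint32(8)
	errRange := fmt.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, max-1)

	fmt.Println(errStatic)
	fmt.Println(errRange)
}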
@ -78,13 +78,13 @@ func (m *InMemoryManager) AllocateLock() (Locker, error) {
|
|||
}
|
||||
}
|
||||
|
||||
return nil, errors.Errorf("all locks have been allocated")
|
||||
return nil, errors.New("all locks have been allocated")
|
||||
}
|
||||
|
||||
// RetrieveLock retrieves a lock from the manager.
|
||||
func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
|
||||
if id >= m.numLocks {
|
||||
return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1)
|
||||
return nil, fmt.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks-1)
|
||||
}
|
||||
|
||||
return m.locks[id], nil
|
||||
|
|
@ -94,11 +94,11 @@ func (m *InMemoryManager) RetrieveLock(id uint32) (Locker, error) {
|
|||
// use) and returns it.
|
||||
func (m *InMemoryManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
|
||||
if id >= m.numLocks {
|
||||
return nil, errors.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks)
|
||||
return nil, fmt.Errorf("given lock ID %d is too large - this manager only supports lock indexes up to %d", id, m.numLocks)
|
||||
}
|
||||
|
||||
if m.locks[id].allocated {
|
||||
return nil, errors.Errorf("given lock ID %d is already in use, cannot reallocate", id)
|
||||
return nil, fmt.Errorf("given lock ID %d is already in use, cannot reallocate", id)
|
||||
}
|
||||
|
||||
m.locks[id].allocated = true
|
||||
|
|
|
|||
|
|
@ -11,11 +11,12 @@ package shm
|
|||
import "C"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -40,7 +41,7 @@ type SHMLocks struct {
|
|||
// size used by the underlying implementation.
|
||||
func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
|
||||
if numLocks == 0 {
|
||||
return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
|
||||
return nil, fmt.Errorf("number of locks must be greater than 0: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
locks := new(SHMLocks)
|
||||
|
|
@ -52,7 +53,7 @@ func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
|
|||
lockStruct := C.setup_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
|
||||
if lockStruct == nil {
|
||||
// We got a null pointer, so something errored
|
||||
return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to create %d locks in %s", numLocks, path)
|
||||
return nil, fmt.Errorf("failed to create %d locks in %s: %w", numLocks, path, syscall.Errno(-1*errCode))
|
||||
}
|
||||
|
||||
locks.lockStruct = lockStruct
|
||||
|
|
@ -69,7 +70,7 @@ func CreateSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
|
|||
// segment was created with.
|
||||
func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
|
||||
if numLocks == 0 {
|
||||
return nil, errors.Wrapf(syscall.EINVAL, "number of locks must be greater than 0")
|
||||
return nil, fmt.Errorf("number of locks must be greater than 0: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
locks := new(SHMLocks)
|
||||
|
|
@ -81,7 +82,7 @@ func OpenSHMLock(path string, numLocks uint32) (*SHMLocks, error) {
|
|||
lockStruct := C.open_lock_shm(cPath, C.uint32_t(numLocks), &errCode)
|
||||
if lockStruct == nil {
|
||||
// We got a null pointer, so something errored
|
||||
return nil, errors.Wrapf(syscall.Errno(-1*errCode), "failed to open %d locks in %s", numLocks, path)
|
||||
return nil, fmt.Errorf("failed to open %d locks in %s: %w", numLocks, path, syscall.Errno(-1*errCode))
|
||||
}
|
||||
|
||||
locks.lockStruct = lockStruct
|
||||
|
|
@ -103,7 +104,7 @@ func (locks *SHMLocks) GetMaxLocks() uint32 {
|
|||
// Close() is only intended to be used while testing the locks.
|
||||
func (locks *SHMLocks) Close() error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
locks.valid = false
|
||||
|
|
@ -124,7 +125,7 @@ func (locks *SHMLocks) Close() error {
|
|||
// created will result in an error, and no semaphore will be allocated.
|
||||
func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
|
||||
if !locks.valid {
|
||||
return 0, errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return 0, fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
// This returns a U64, so we have the full u32 range available for
|
||||
|
|
@ -138,7 +139,7 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
|
|||
// that there's no room in the SHM inn for this lock, this tends to send normal people
|
||||
// down the path of checking disk-space which is not actually their problem.
|
||||
// Give a clue that it's actually due to num_locks filling up.
|
||||
var errFull = errors.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks)
|
||||
var errFull = fmt.Errorf("allocation failed; exceeded num_locks (%d)", locks.maxLocks)
|
||||
return uint32(retCode), errFull
|
||||
}
|
||||
return uint32(retCode), syscall.Errno(-1 * retCode)
|
||||
|
|
@ -153,7 +154,7 @@ func (locks *SHMLocks) AllocateSemaphore() (uint32, error) {
|
|||
// returned.
|
||||
func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
retCode := C.allocate_given_semaphore(locks.lockStruct, C.uint32_t(sem))
|
||||
|
|
@ -169,11 +170,11 @@ func (locks *SHMLocks) AllocateGivenSemaphore(sem uint32) error {
|
|||
// The given semaphore must be already allocated, or an error will be returned.
|
||||
func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
if sem > locks.maxLocks {
|
||||
return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
|
||||
return fmt.Errorf("given semaphore %d is higher than maximum locks count %d: %w", sem, locks.maxLocks, syscall.EINVAL)
|
||||
}
|
||||
|
||||
retCode := C.deallocate_semaphore(locks.lockStruct, C.uint32_t(sem))
|
||||
|
|
@ -189,7 +190,7 @@ func (locks *SHMLocks) DeallocateSemaphore(sem uint32) error {
|
|||
// other containers and pods.
|
||||
func (locks *SHMLocks) DeallocateAllSemaphores() error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
retCode := C.deallocate_all_semaphores(locks.lockStruct)
|
||||
|
|
@ -210,11 +211,11 @@ func (locks *SHMLocks) DeallocateAllSemaphores() error {
|
|||
// succeed.
|
||||
func (locks *SHMLocks) LockSemaphore(sem uint32) error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
if sem > locks.maxLocks {
|
||||
return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
|
||||
return fmt.Errorf("given semaphore %d is higher than maximum locks count %d: %w", sem, locks.maxLocks, syscall.EINVAL)
|
||||
}
|
||||
|
||||
// For pthread mutexes, we have to guarantee lock and unlock happen in
|
||||
|
|
@ -238,11 +239,11 @@ func (locks *SHMLocks) LockSemaphore(sem uint32) error {
|
|||
// succeed.
|
||||
func (locks *SHMLocks) UnlockSemaphore(sem uint32) error {
|
||||
if !locks.valid {
|
||||
return errors.Wrapf(syscall.EINVAL, "locks have already been closed")
|
||||
return fmt.Errorf("locks have already been closed: %w", syscall.EINVAL)
|
||||
}
|
||||
|
||||
if sem > locks.maxLocks {
|
||||
return errors.Wrapf(syscall.EINVAL, "given semaphore %d is higher than maximum locks count %d", sem, locks.maxLocks)
|
||||
return fmt.Errorf("given semaphore %d is higher than maximum locks count %d: %w", sem, locks.maxLocks, syscall.EINVAL)
|
||||
}
|
||||
|
||||
retCode := C.unlock_semaphore(locks.lockStruct, C.uint32_t(sem))
|
||||
|
|
|
|||
|
|
@ -4,10 +4,10 @@
|
|||
package lock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"syscall"
|
||||
|
||||
"github.com/containers/podman/v4/libpod/lock/shm"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// SHMLockManager manages shared memory locks.
|
||||
|
|
@ -66,8 +66,8 @@ func (m *SHMLockManager) AllocateAndRetrieveLock(id uint32) (Locker, error) {
|
|||
lock.manager = m
|
||||
|
||||
if id >= m.locks.GetMaxLocks() {
|
||||
return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - max lock size is %d",
|
||||
id, m.locks.GetMaxLocks()-1)
|
||||
return nil, fmt.Errorf("lock ID %d is too large - max lock size is %d: %w",
|
||||
id, m.locks.GetMaxLocks()-1, syscall.EINVAL)
|
||||
}
|
||||
|
||||
if err := m.locks.AllocateGivenSemaphore(id); err != nil {
|
||||
|
|
@ -84,8 +84,8 @@ func (m *SHMLockManager) RetrieveLock(id uint32) (Locker, error) {
|
|||
lock.manager = m
|
||||
|
||||
if id >= m.locks.GetMaxLocks() {
|
||||
return nil, errors.Wrapf(syscall.EINVAL, "lock ID %d is too large - max lock size is %d",
|
||||
id, m.locks.GetMaxLocks()-1)
|
||||
return nil, fmt.Errorf("lock ID %d is too large - max lock size is %d: %w",
|
||||
id, m.locks.GetMaxLocks()-1, syscall.EINVAL)
|
||||
}
|
||||
|
||||
return lock, nil
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
package logs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
|
@ -10,7 +11,6 @@ import (
|
|||
|
||||
"github.com/containers/podman/v4/libpod/logs/reversereader"
|
||||
"github.com/nxadm/tail"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@@ -105,7 +105,7 @@ func getTailLog(path string, tail int) ([]*LogLine, error) {
 	for {
 		s, err := rr.Read()
 		if err != nil {
-			if errors.Cause(err) == io.EOF {
+			if errors.Is(err, io.EOF) {
 				inputs <- []string{leftover}
 			} else {
 				logrus.Error(err)
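errors.Cause from github.com/pkg/errors only unwinds wrappers created by that package, whereas errors.Is walks any chain built with %w (and newer pkg/errors releases also implement Unwrap, so mixed chains still match). A sketch of the EOF check above, using a hypothetical reader rather than the log code:

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// readChunk wraps any read error with %w so callers can still detect io.EOF.
func readChunk(r io.Reader) error {
	buf := make([]byte, 8)
	if _, err := r.Read(buf); err != nil {
		return fmt.Errorf("reading chunk: %w", err)
	}
	return nil
}

func main() {
	err := readChunk(strings.NewReader("")) // an empty reader fails with io.EOF immediately
	if errors.Is(err, io.EOF) {
		fmt.Println("reached end of input")
	}
}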
@ -228,11 +228,11 @@ func (l *LogLine) Until(until time.Time) bool {
|
|||
func NewLogLine(line string) (*LogLine, error) {
|
||||
splitLine := strings.Split(line, " ")
|
||||
if len(splitLine) < 4 {
|
||||
return nil, errors.Errorf("'%s' is not a valid container log line", line)
|
||||
return nil, fmt.Errorf("'%s' is not a valid container log line", line)
|
||||
}
|
||||
logTime, err := time.Parse(LogTimeFormat, splitLine[0])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to convert time %s from container log", splitLine[0])
|
||||
return nil, fmt.Errorf("unable to convert time %s from container log: %w", splitLine[0], err)
|
||||
}
|
||||
l := LogLine{
|
||||
Time: logTime,
|
||||
|
|
@ -249,11 +249,11 @@ func NewLogLine(line string) (*LogLine, error) {
|
|||
func NewJournaldLogLine(line string, withID bool) (*LogLine, error) {
|
||||
splitLine := strings.Split(line, " ")
|
||||
if len(splitLine) < 4 {
|
||||
return nil, errors.Errorf("'%s' is not a valid container log line", line)
|
||||
return nil, fmt.Errorf("'%s' is not a valid container log line", line)
|
||||
}
|
||||
logTime, err := time.Parse(LogTimeFormat, splitLine[0])
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to convert time %s from container log", splitLine[0])
|
||||
return nil, fmt.Errorf("unable to convert time %s from container log: %w", splitLine[0], err)
|
||||
}
|
||||
var msg, id string
|
||||
if withID {
|
||||
|
|
|
|||
|
|
@@ -1,10 +1,10 @@
 package reversereader

 import (
+	"errors"
+	"fmt"
 	"io"
 	"os"
-
-	"github.com/pkg/errors"
 )

 // ReverseReader structure for reading a file backwards
@@ -49,12 +49,12 @@ func NewReverseReader(reader *os.File) (*ReverseReader, error) {
 // then sets the new offset one pagesize less than the previous read.
 func (r *ReverseReader) Read() (string, error) {
 	if r.offset < 0 {
-		return "", errors.Wrap(io.EOF, "at beginning of file")
+		return "", fmt.Errorf("at beginning of file: %w", io.EOF)
 	}
 	// Read from given offset
 	b := make([]byte, r.readSize)
 	n, err := r.reader.ReadAt(b, r.offset)
-	if err != nil && errors.Cause(err) != io.EOF {
+	if err != nil && !errors.Is(err, io.EOF) {
 		return "", err
 	}
 	if int64(n) < r.readSize {
|
|
|
|||
|
|
@ -6,6 +6,7 @@ package libpod
|
|||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
|
|
@ -36,7 +37,6 @@ import (
|
|||
"github.com/containers/storage/pkg/lockfile"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/vishvananda/netlink"
|
||||
"golang.org/x/sys/unix"
|
||||
|
|
@ -127,19 +127,19 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
|
|||
// this must happen inside the netns thread.
|
||||
err := unix.Unshare(unix.CLONE_NEWNS)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot create a new mount namespace")
|
||||
return fmt.Errorf("cannot create a new mount namespace: %w", err)
|
||||
}
|
||||
|
||||
xdgRuntimeDir, err := util.GetRuntimeDir()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get runtime directory")
|
||||
return fmt.Errorf("could not get runtime directory: %w", err)
|
||||
}
|
||||
newXDGRuntimeDir := r.getPath(xdgRuntimeDir)
|
||||
// 1. Mount the netns into the new run to keep them accessible.
|
||||
// Otherwise cni setup will fail because it cannot access the netns files.
|
||||
err = unix.Mount(xdgRuntimeDir, newXDGRuntimeDir, "none", unix.MS_BIND|unix.MS_SHARED|unix.MS_REC, "")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to mount runtime directory for rootless netns")
|
||||
return fmt.Errorf("failed to mount runtime directory for rootless netns: %w", err)
|
||||
}
|
||||
|
||||
// 2. Also keep /run/systemd if it exists.
|
||||
|
|
@ -150,7 +150,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
|
|||
newRunSystemd := r.getPath(runSystemd)
|
||||
err = unix.Mount(runSystemd, newRunSystemd, "none", unix.MS_BIND|unix.MS_REC, "")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to mount /run/systemd directory for rootless netns")
|
||||
return fmt.Errorf("failed to mount /run/systemd directory for rootless netns: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -185,7 +185,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
|
|||
|
||||
fi, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to stat resolv.conf path")
|
||||
return fmt.Errorf("failed to stat resolv.conf path: %w", err)
|
||||
}
|
||||
|
||||
// no link, just continue
|
||||
|
|
@ -195,7 +195,7 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
|
|||
|
||||
link, err := os.Readlink(path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read resolv.conf symlink")
|
||||
return fmt.Errorf("failed to read resolv.conf symlink: %w", err)
|
||||
}
|
||||
linkCount++
|
||||
if filepath.IsAbs(link) {
|
||||
|
|
@ -231,25 +231,25 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
|
|||
rsr := r.getPath("/run/systemd/resolve")
|
||||
err = unix.Mount("", rsr, "tmpfs", unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV, "")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to mount tmpfs on %q for rootless netns", rsr)
|
||||
return fmt.Errorf("failed to mount tmpfs on %q for rootless netns: %w", rsr, err)
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(resolvePath, "/run/") {
|
||||
resolvePath = r.getPath(resolvePath)
|
||||
err = os.MkdirAll(filepath.Dir(resolvePath), 0700)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create rootless-netns resolv.conf directory")
|
||||
return fmt.Errorf("failed to create rootless-netns resolv.conf directory: %w", err)
|
||||
}
|
||||
// we want to bind mount on this file so we have to create the file first
|
||||
_, err = os.OpenFile(resolvePath, os.O_CREATE|os.O_RDONLY, 0700)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create rootless-netns resolv.conf file")
|
||||
return fmt.Errorf("failed to create rootless-netns resolv.conf file: %w", err)
|
||||
}
|
||||
}
|
||||
// mount resolv.conf to make use of the host dns
|
||||
err = unix.Mount(r.getPath("resolv.conf"), resolvePath, "none", unix.MS_BIND, "")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to mount resolv.conf for rootless netns")
|
||||
return fmt.Errorf("failed to mount resolv.conf for rootless netns: %w", err)
|
||||
}
|
||||
|
||||
// 4. CNI plugins need access to /var/lib/cni and /run
|
||||
|
|
@ -274,14 +274,14 @@ func (r *RootlessNetNS) Do(toRun func() error) error {
|
|||
// make sure to mount var first
|
||||
err = unix.Mount(varDir, varTarget, "none", unix.MS_BIND, "")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to mount %s for rootless netns", varTarget)
|
||||
return fmt.Errorf("failed to mount %s for rootless netns: %w", varTarget, err)
|
||||
}
|
||||
|
||||
// 5. Mount the new prepared run dir to /run, it has to be recursive to keep the other bind mounts.
|
||||
runDir := r.getPath("run")
|
||||
err = unix.Mount(runDir, "/run", "none", unix.MS_BIND|unix.MS_REC, "")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to mount /run for rootless netns")
|
||||
return fmt.Errorf("failed to mount /run for rootless netns: %w", err)
|
||||
}
|
||||
|
||||
// run the given function in the correct namespace
|
||||
|
|
@ -377,7 +377,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
|
|||
lfile := filepath.Join(runDir, "rootless-netns.lock")
|
||||
lock, err := lockfile.GetLockfile(lfile)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get rootless-netns lockfile")
|
||||
return nil, fmt.Errorf("failed to get rootless-netns lockfile: %w", err)
|
||||
}
|
||||
lock.Lock()
|
||||
defer func() {
|
||||
|
|
@ -391,7 +391,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
|
|||
rootlessNetNsDir := filepath.Join(runDir, rootlessNetNsName)
|
||||
err = os.MkdirAll(rootlessNetNsDir, 0700)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create rootless-netns directory")
|
||||
return nil, fmt.Errorf("could not create rootless-netns directory: %w", err)
|
||||
}
|
||||
|
||||
nsDir, err := netns.GetNSRunDir()
|
||||
|
|
@ -411,13 +411,13 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
|
|||
if err != nil {
|
||||
if !new {
|
||||
// return an error if we could not get the namespace and should not create one
|
||||
return nil, errors.Wrap(err, "error getting rootless network namespace")
|
||||
return nil, fmt.Errorf("error getting rootless network namespace: %w", err)
|
||||
}
|
||||
// create a new namespace
|
||||
logrus.Debugf("creating rootless network namespace with name %q", netnsName)
|
||||
ns, err = netns.NewNSWithName(netnsName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error creating rootless network namespace")
|
||||
return nil, fmt.Errorf("error creating rootless network namespace: %w", err)
|
||||
}
|
||||
// set up slirp4netns here
|
||||
path := r.config.Engine.NetworkCmdPath
|
||||
|
|
@ -431,7 +431,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
|
|||
|
||||
syncR, syncW, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to open pipe")
|
||||
return nil, fmt.Errorf("failed to open pipe: %w", err)
|
||||
}
|
||||
defer errorhandling.CloseQuiet(syncR)
|
||||
defer errorhandling.CloseQuiet(syncW)
|
||||
|
|
@ -442,7 +442,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
|
|||
}
|
||||
slirpFeatures, err := checkSlirpFlags(path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error checking slirp4netns binary %s: %q", path, err)
|
||||
return nil, fmt.Errorf("error checking slirp4netns binary %s: %q: %w", path, err, err)
|
||||
}
|
||||
cmdArgs, err := createBasicSlirp4netnsCmdArgs(netOptions, slirpFeatures)
|
||||
if err != nil {
|
||||
|
|
@ -470,25 +470,25 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
|
|||
logPath := filepath.Join(r.config.Engine.TmpDir, "slirp4netns-rootless-netns.log")
|
||||
logFile, err := os.Create(logPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to open slirp4netns log file %s", logPath)
|
||||
return nil, fmt.Errorf("failed to open slirp4netns log file %s: %w", logPath, err)
|
||||
}
|
||||
defer logFile.Close()
|
||||
// Unlink the file immediately so we won't need to worry about cleaning it up later.
|
||||
// It is still accessible through the open fd logFile.
|
||||
if err := os.Remove(logPath); err != nil {
|
||||
return nil, errors.Wrapf(err, "delete file %s", logPath)
|
||||
return nil, fmt.Errorf("delete file %s: %w", logPath, err)
|
||||
}
|
||||
cmd.Stdout = logFile
|
||||
cmd.Stderr = logFile
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to start slirp4netns process")
|
||||
return nil, fmt.Errorf("failed to start slirp4netns process: %w", err)
|
||||
}
|
||||
// create pid file for the slirp4netns process
|
||||
// this is needed to kill the process in the cleanup
|
||||
pid := strconv.Itoa(cmd.Process.Pid)
|
||||
err = ioutil.WriteFile(filepath.Join(rootlessNetNsDir, rootlessNetNsSilrp4netnsPidFile), []byte(pid), 0700)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "unable to write rootless-netns slirp4netns pid file")
|
||||
return nil, fmt.Errorf("unable to write rootless-netns slirp4netns pid file: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
|
|
@ -513,17 +513,17 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
|
|||
// build a new resolv.conf file which uses the slirp4netns dns server address
|
||||
resolveIP, err := GetSlirp4netnsDNS(nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to determine default slirp4netns DNS address")
|
||||
return nil, fmt.Errorf("failed to determine default slirp4netns DNS address: %w", err)
|
||||
}
|
||||
|
||||
if netOptions.cidr != "" {
|
||||
_, cidr, err := net.ParseCIDR(netOptions.cidr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to parse slirp4netns cidr")
|
||||
return nil, fmt.Errorf("failed to parse slirp4netns cidr: %w", err)
|
||||
}
|
||||
resolveIP, err = GetSlirp4netnsDNS(cidr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to determine slirp4netns DNS address from cidr: %s", cidr.String())
|
||||
return nil, fmt.Errorf("failed to determine slirp4netns DNS address from cidr: %s: %w", cidr.String(), err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -537,35 +537,35 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) {
|
|||
KeepHostServers: true,
|
||||
Nameservers: []string{resolveIP.String()},
|
||||
}); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create rootless netns resolv.conf")
|
||||
return nil, fmt.Errorf("failed to create rootless netns resolv.conf: %w", err)
|
||||
}
|
||||
// create cni directories to store files
|
||||
// they will be bind mounted to the correct location in a extra mount ns
|
||||
err = os.MkdirAll(filepath.Join(rootlessNetNsDir, persistentCNIDir), 0700)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create rootless-netns var directory")
|
||||
return nil, fmt.Errorf("could not create rootless-netns var directory: %w", err)
|
||||
}
|
||||
runDir := filepath.Join(rootlessNetNsDir, "run")
|
||||
err = os.MkdirAll(runDir, 0700)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create rootless-netns run directory")
|
||||
return nil, fmt.Errorf("could not create rootless-netns run directory: %w", err)
|
||||
}
|
||||
// relabel the new run directory to the iptables /run label
|
||||
// this is important, otherwise the iptables command will fail
|
||||
err = label.Relabel(runDir, "system_u:object_r:iptables_var_run_t:s0", false)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create relabel rootless-netns run directory")
|
||||
return nil, fmt.Errorf("could not create relabel rootless-netns run directory: %w", err)
|
||||
}
|
||||
// create systemd run directory
|
||||
err = os.MkdirAll(filepath.Join(runDir, "systemd"), 0700)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create rootless-netns systemd directory")
|
||||
return nil, fmt.Errorf("could not create rootless-netns systemd directory: %w", err)
|
||||
}
|
||||
// create the directory for the netns files at the same location
|
||||
// relative to the rootless-netns location
|
||||
err = os.MkdirAll(filepath.Join(rootlessNetNsDir, nsDir), 0700)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create rootless-netns netns directory")
|
||||
return nil, fmt.Errorf("could not create rootless-netns netns directory: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -675,7 +675,7 @@ func (r *Runtime) configureNetNS(ctr *Container, ctrNS ns.NetNS) (status map[str
|
|||
func (r *Runtime) createNetNS(ctr *Container) (n ns.NetNS, q map[string]types.StatusBlock, retErr error) {
|
||||
ctrNS, err := netns.NewNS()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error creating network namespace for container %s", ctr.ID())
|
||||
return nil, nil, fmt.Errorf("error creating network namespace for container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
defer func() {
|
||||
if retErr != nil {
|
||||
|
|
@ -702,7 +702,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error {
|
|||
b := make([]byte, 16)
|
||||
|
||||
if _, err := rand.Reader.Read(b); err != nil {
|
||||
return errors.Wrapf(err, "failed to generate random netns name")
|
||||
return fmt.Errorf("failed to generate random netns name: %w", err)
|
||||
}
|
||||
nsPath, err := netns.GetNSRunDir()
|
||||
if err != nil {
|
||||
|
|
@ -723,7 +723,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error {
|
|||
}
|
||||
|
||||
if err := unix.Mount(nsProcess, nsPath, "none", unix.MS_BIND, ""); err != nil {
|
||||
return errors.Wrapf(err, "cannot mount %s", nsPath)
|
||||
return fmt.Errorf("cannot mount %s: %w", nsPath, err)
|
||||
}
|
||||
|
||||
netNS, err := ns.GetNS(nsPath)
|
||||
|
|
@ -742,7 +742,7 @@ func (r *Runtime) setupNetNS(ctr *Container) error {
|
|||
func joinNetNS(path string) (ns.NetNS, error) {
|
||||
netNS, err := ns.GetNS(path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error retrieving network namespace at %s", path)
|
||||
return nil, fmt.Errorf("error retrieving network namespace at %s: %w", path, err)
|
||||
}
|
||||
|
||||
return netNS, nil
|
||||
|
|
@ -758,7 +758,7 @@ func (r *Runtime) closeNetNS(ctr *Container) error {
|
|||
}
|
||||
|
||||
if err := ctr.state.NetNS.Close(); err != nil {
|
||||
return errors.Wrapf(err, "error closing network namespace for container %s", ctr.ID())
|
||||
return fmt.Errorf("error closing network namespace for container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
ctr.state.NetNS = nil
|
||||
|
|
@@ -774,8 +774,10 @@ func (r *Runtime) teardownNetwork(ns string, opts types.NetworkOptions) error {
 		return err
 	}
 	tearDownPod := func() error {
-		err := r.network.Teardown(ns, types.TeardownOptions{NetworkOptions: opts})
-		return errors.Wrapf(err, "error tearing down network namespace configuration for container %s", opts.ContainerID)
+		if err := r.network.Teardown(ns, types.TeardownOptions{NetworkOptions: opts}); err != nil {
+			return fmt.Errorf("error tearing down network namespace configuration for container %s: %w", opts.ContainerID, err)
+		}
+		return nil
 	}

 	// rootlessNetNS is nil if we are root
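The tearDownPod rewrite above is the one place in this section where the conversion is not purely mechanical: errors.Wrapf(nil, ...) returns nil, so the old one-liner produced no error on success, while fmt.Errorf always returns a non-nil error. The explicit err != nil guard preserves the original behaviour. A sketch of the difference (the helper names are invented):

package main

import "fmt"

// wrapAlways is a naive one-line translation to fmt.Errorf.
func wrapAlways(err error) error {
	return fmt.Errorf("tearing down network: %w", err) // non-nil even when err is nil
}

// wrapIfFailed mirrors the guarded form used in the hunk above.
func wrapIfFailed(err error) error {
	if err != nil {
		return fmt.Errorf("tearing down network: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(wrapAlways(nil) == nil)   // false: success would be reported as a failure
	fmt.Println(wrapIfFailed(nil) == nil) // true: success stays nil
}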
@ -826,12 +828,12 @@ func (r *Runtime) teardownNetNS(ctr *Container) error {
|
|||
|
||||
// First unmount the namespace
|
||||
if err := netns.UnmountNS(ctr.state.NetNS); err != nil {
|
||||
return errors.Wrapf(err, "error unmounting network namespace for container %s", ctr.ID())
|
||||
return fmt.Errorf("error unmounting network namespace for container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
// Now close the open file descriptor
|
||||
if err := ctr.state.NetNS.Close(); err != nil {
|
||||
return errors.Wrapf(err, "error closing network namespace for container %s", ctr.ID())
|
||||
return fmt.Errorf("error closing network namespace for container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
ctr.state.NetNS = nil
|
||||
|
|
@ -864,7 +866,7 @@ func getContainerNetNS(ctr *Container) (string, *Container, error) {
|
|||
// It returns nil when it is set to bridge and an error otherwise.
|
||||
func isBridgeNetMode(n namespaces.NetworkMode) error {
|
||||
if !n.IsBridge() {
|
||||
return errors.Wrapf(define.ErrNetworkModeInvalid, "%q is not supported", n)
|
||||
return fmt.Errorf("%q is not supported: %w", n, define.ErrNetworkModeInvalid)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -880,7 +882,7 @@ func isBridgeNetMode(n namespaces.NetworkMode) error {
|
|||
// extend this to stop + restart slirp4netns
|
||||
func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.StatusBlock, error) {
|
||||
if ctr.state.NetNS == nil {
|
||||
return nil, errors.Wrapf(define.ErrCtrStateInvalid, "container %s network is not configured, refusing to reload", ctr.ID())
|
||||
return nil, fmt.Errorf("container %s network is not configured, refusing to reload: %w", ctr.ID(), define.ErrCtrStateInvalid)
|
||||
}
|
||||
if err := isBridgeNetMode(ctr.config.NetMode); err != nil {
|
||||
return nil, err
|
||||
|
|
@ -1047,7 +1049,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
|
|||
// If we have networks - handle that here
|
||||
if len(networks) > 0 {
|
||||
if len(networks) != len(netStatus) {
|
||||
return nil, errors.Wrapf(define.ErrInternal, "network inspection mismatch: asked to join %d network(s) %v, but have information on %d network(s)", len(networks), networks, len(netStatus))
|
||||
return nil, fmt.Errorf("network inspection mismatch: asked to join %d network(s) %v, but have information on %d network(s): %w", len(networks), networks, len(netStatus), define.ErrInternal)
|
||||
}
|
||||
|
||||
settings.Networks = make(map[string]*define.InspectAdditionalNetwork)
|
||||
|
|
@ -1072,7 +1074,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e
|
|||
|
||||
// If not joining networks, we should have at most 1 result
|
||||
if len(netStatus) > 1 {
|
||||
return nil, errors.Wrapf(define.ErrInternal, "should have at most 1 network status result if not joining networks, instead got %d", len(netStatus))
|
||||
return nil, fmt.Errorf("should have at most 1 network status result if not joining networks, instead got %d: %w", len(netStatus), define.ErrInternal)
|
||||
}
|
||||
|
||||
if len(netStatus) == 1 {
|
||||
|
|
@ -1225,7 +1227,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
|
|||
|
||||
_, nameExists := networks[netName]
|
||||
if !nameExists && len(networks) > 0 {
|
||||
return errors.Errorf("container %s is not connected to network %s", nameOrID, netName)
|
||||
return fmt.Errorf("container %s is not connected to network %s", nameOrID, netName)
|
||||
}
|
||||
|
||||
if err := c.syncContainer(); err != nil {
|
||||
|
|
@ -1244,7 +1246,7 @@ func (c *Container) NetworkDisconnect(nameOrID, netName string, force bool) erro
|
|||
}
|
||||
|
||||
if c.state.NetNS == nil {
|
||||
return errors.Wrapf(define.ErrNoNetwork, "unable to disconnect %s from %s", nameOrID, netName)
|
||||
return fmt.Errorf("unable to disconnect %s from %s: %w", nameOrID, netName, define.ErrNoNetwork)
|
||||
}
|
||||
|
||||
opts := types.NetworkOptions{
|
||||
|
|
@ -1362,7 +1364,7 @@ func (c *Container) NetworkConnect(nameOrID, netName string, netOpts types.PerNe
|
|||
return nil
|
||||
}
|
||||
if c.state.NetNS == nil {
|
||||
return errors.Wrapf(define.ErrNoNetwork, "unable to connect %s to %s", nameOrID, netName)
|
||||
return fmt.Errorf("unable to connect %s to %s: %w", nameOrID, netName, define.ErrNoNetwork)
|
||||
}
|
||||
|
||||
opts := types.NetworkOptions{
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ package libpod
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
|
@ -24,7 +25,6 @@ import (
|
|||
"github.com/containers/podman/v4/pkg/rootless"
|
||||
"github.com/containers/podman/v4/pkg/rootlessport"
|
||||
"github.com/containers/podman/v4/pkg/servicereaper"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -68,7 +68,7 @@ func checkSlirpFlags(path string) (*slirpFeatures, error) {
|
|||
cmd := exec.Command(path, "--help")
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "slirp4netns %q", out)
|
||||
return nil, fmt.Errorf("slirp4netns %q: %w", out, err)
|
||||
}
|
||||
return &slirpFeatures{
|
||||
HasDisableHostLoopback: strings.Contains(string(out), "--disable-host-loopback"),
|
||||
|
|
@ -95,14 +95,14 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
|
|||
for _, o := range slirpOptions {
|
||||
parts := strings.SplitN(o, "=", 2)
|
||||
if len(parts) < 2 {
|
||||
return nil, errors.Errorf("unknown option for slirp4netns: %q", o)
|
||||
return nil, fmt.Errorf("unknown option for slirp4netns: %q", o)
|
||||
}
|
||||
option, value := parts[0], parts[1]
|
||||
switch option {
|
||||
case "cidr":
|
||||
ipv4, _, err := net.ParseCIDR(value)
|
||||
if err != nil || ipv4.To4() == nil {
|
||||
return nil, errors.Errorf("invalid cidr %q", value)
|
||||
return nil, fmt.Errorf("invalid cidr %q", value)
|
||||
}
|
||||
slirp4netnsOpts.cidr = value
|
||||
case "port_handler":
|
||||
|
|
@ -112,7 +112,7 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
|
|||
case "rootlesskit":
|
||||
slirp4netnsOpts.isSlirpHostForward = false
|
||||
default:
|
||||
return nil, errors.Errorf("unknown port_handler for slirp4netns: %q", value)
|
||||
return nil, fmt.Errorf("unknown port_handler for slirp4netns: %q", value)
|
||||
}
|
||||
case "allow_host_loopback":
|
||||
switch value {
|
||||
|
|
@ -121,7 +121,7 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
|
|||
case "false":
|
||||
slirp4netnsOpts.disableHostLoopback = true
|
||||
default:
|
||||
return nil, errors.Errorf("invalid value of allow_host_loopback for slirp4netns: %q", value)
|
||||
return nil, fmt.Errorf("invalid value of allow_host_loopback for slirp4netns: %q", value)
|
||||
}
|
||||
case "enable_ipv6":
|
||||
switch value {
|
||||
|
|
@ -130,14 +130,14 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
|
|||
case "false":
|
||||
slirp4netnsOpts.enableIPv6 = false
|
||||
default:
|
||||
return nil, errors.Errorf("invalid value of enable_ipv6 for slirp4netns: %q", value)
|
||||
return nil, fmt.Errorf("invalid value of enable_ipv6 for slirp4netns: %q", value)
|
||||
}
|
||||
case "outbound_addr":
|
||||
ipv4 := net.ParseIP(value)
|
||||
if ipv4 == nil || ipv4.To4() == nil {
|
||||
_, err := net.InterfaceByName(value)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("invalid outbound_addr %q", value)
|
||||
return nil, fmt.Errorf("invalid outbound_addr %q", value)
|
||||
}
|
||||
}
|
||||
slirp4netnsOpts.outboundAddr = value
|
||||
|
|
@ -146,7 +146,7 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
|
|||
if ipv6 == nil || ipv6.To4() != nil {
|
||||
_, err := net.InterfaceByName(value)
|
||||
if err != nil {
|
||||
return nil, errors.Errorf("invalid outbound_addr6: %q", value)
|
||||
return nil, fmt.Errorf("invalid outbound_addr6: %q", value)
|
||||
}
|
||||
}
|
||||
slirp4netnsOpts.outboundAddr6 = value
|
||||
|
|
@ -154,10 +154,10 @@ func parseSlirp4netnsNetworkOptions(r *Runtime, extraOptions []string) (*slirp4n
|
|||
var err error
|
||||
slirp4netnsOpts.mtu, err = strconv.Atoi(value)
|
||||
if slirp4netnsOpts.mtu < 68 || err != nil {
|
||||
return nil, errors.Errorf("invalid mtu %q", value)
|
||||
return nil, fmt.Errorf("invalid mtu %q", value)
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("unknown option for slirp4netns: %q", o)
|
||||
return nil, fmt.Errorf("unknown option for slirp4netns: %q", o)
|
||||
}
|
||||
}
|
||||
return slirp4netnsOpts, nil
|
||||
|
|
@ -180,31 +180,31 @@ func createBasicSlirp4netnsCmdArgs(options *slirp4netnsNetworkOptions, features
|
|||
|
||||
if options.cidr != "" {
|
||||
if !features.HasCIDR {
|
||||
return nil, errors.Errorf("cidr not supported")
|
||||
return nil, fmt.Errorf("cidr not supported")
|
||||
}
|
||||
cmdArgs = append(cmdArgs, fmt.Sprintf("--cidr=%s", options.cidr))
|
||||
}
|
||||
|
||||
if options.enableIPv6 {
|
||||
if !features.HasIPv6 {
|
||||
return nil, errors.Errorf("enable_ipv6 not supported")
|
||||
return nil, fmt.Errorf("enable_ipv6 not supported")
|
||||
}
|
||||
cmdArgs = append(cmdArgs, "--enable-ipv6")
|
||||
}
|
||||
|
||||
if options.outboundAddr != "" {
|
||||
if !features.HasOutboundAddr {
|
||||
return nil, errors.Errorf("outbound_addr not supported")
|
||||
return nil, fmt.Errorf("outbound_addr not supported")
|
||||
}
|
||||
cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr=%s", options.outboundAddr))
|
||||
}
|
||||
|
||||
if options.outboundAddr6 != "" {
|
||||
if !features.HasOutboundAddr || !features.HasIPv6 {
|
||||
return nil, errors.Errorf("outbound_addr6 not supported")
|
||||
return nil, fmt.Errorf("outbound_addr6 not supported")
|
||||
}
|
||||
if !options.enableIPv6 {
|
||||
return nil, errors.Errorf("enable_ipv6=true is required for outbound_addr6")
|
||||
return nil, fmt.Errorf("enable_ipv6=true is required for outbound_addr6")
|
||||
}
|
||||
cmdArgs = append(cmdArgs, fmt.Sprintf("--outbound-addr6=%s", options.outboundAddr6))
|
||||
}
|
||||
|
|
@ -225,7 +225,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
|
|||
|
||||
syncR, syncW, err := os.Pipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open pipe")
|
||||
return fmt.Errorf("failed to open pipe: %w", err)
|
||||
}
|
||||
defer errorhandling.CloseQuiet(syncR)
|
||||
defer errorhandling.CloseQuiet(syncW)
|
||||
|
|
@ -243,7 +243,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
|
|||
}
|
||||
slirpFeatures, err := checkSlirpFlags(path)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error checking slirp4netns binary %s: %q", path, err)
|
||||
return fmt.Errorf("error checking slirp4netns binary %s: %q: %w", path, err, err)
|
||||
}
|
||||
cmdArgs, err := createBasicSlirp4netnsCmdArgs(netOptions, slirpFeatures)
|
||||
if err != nil {
|
||||
|
|
@ -266,7 +266,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
|
|||
if !ctr.config.PostConfigureNetNS {
|
||||
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create rootless network sync pipe")
|
||||
return fmt.Errorf("failed to create rootless network sync pipe: %w", err)
|
||||
}
|
||||
netnsPath = netns.Path()
|
||||
cmdArgs = append(cmdArgs, "--netns-type=path", netnsPath, "tap0")
|
||||
|
|
@ -295,13 +295,13 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
|
|||
|
||||
logFile, err := os.Create(logPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open slirp4netns log file %s", logPath)
|
||||
return fmt.Errorf("failed to open slirp4netns log file %s: %w", logPath, err)
|
||||
}
|
||||
defer logFile.Close()
|
||||
// Unlink the file immediately so we won't need to worry about cleaning it up later.
|
||||
// It is still accessible through the open fd logFile.
|
||||
if err := os.Remove(logPath); err != nil {
|
||||
return errors.Wrapf(err, "delete file %s", logPath)
|
||||
return fmt.Errorf("delete file %s: %w", logPath, err)
|
||||
}
|
||||
cmd.Stdout = logFile
|
||||
cmd.Stderr = logFile
|
||||
|
|
@ -357,7 +357,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
|
|||
if netOptions.enableIPv6 {
|
||||
slirpReadyWg.Done()
|
||||
}
|
||||
return errors.Wrapf(err, "failed to start slirp4netns process")
|
||||
return fmt.Errorf("failed to start slirp4netns process: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
servicereaper.AddPID(cmd.Process.Pid)
|
||||
|
|
@ -381,7 +381,7 @@ func (r *Runtime) setupSlirp4netns(ctr *Container, netns ns.NetNS) error {
|
|||
if netOptions.cidr != "" {
|
||||
ipv4, ipv4network, err := net.ParseCIDR(netOptions.cidr)
|
||||
if err != nil || ipv4.To4() == nil {
|
||||
return errors.Errorf("invalid cidr %q", netOptions.cidr)
|
||||
return fmt.Errorf("invalid cidr %q", netOptions.cidr)
|
||||
}
|
||||
ctr.slirp4netnsSubnet = ipv4network
|
||||
}
|
||||
|
|
@ -405,7 +405,7 @@ func GetSlirp4netnsIP(subnet *net.IPNet) (*net.IP, error) {
|
|||
}
|
||||
expectedIP, err := addToIP(slirpSubnet, uint32(100))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error calculating expected ip for slirp4netns")
|
||||
return nil, fmt.Errorf("error calculating expected ip for slirp4netns: %w", err)
|
||||
}
|
||||
return expectedIP, nil
|
||||
}
|
||||
|
|
@ -419,7 +419,7 @@ func GetSlirp4netnsGateway(subnet *net.IPNet) (*net.IP, error) {
|
|||
}
|
||||
expectedGatewayIP, err := addToIP(slirpSubnet, uint32(2))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error calculating expected gateway ip for slirp4netns")
|
||||
return nil, fmt.Errorf("error calculating expected gateway ip for slirp4netns: %w", err)
|
||||
}
|
||||
return expectedGatewayIP, nil
|
||||
}
|
||||
|
|
@ -433,7 +433,7 @@ func GetSlirp4netnsDNS(subnet *net.IPNet) (*net.IP, error) {
|
|||
}
|
||||
expectedDNSIP, err := addToIP(slirpSubnet, uint32(3))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error calculating expected dns ip for slirp4netns")
|
||||
return nil, fmt.Errorf("error calculating expected dns ip for slirp4netns: %w", err)
|
||||
}
|
||||
return expectedDNSIP, nil
|
||||
}
|
||||
|
|
@ -448,11 +448,11 @@ func addToIP(subnet *net.IPNet, offset uint32) (*net.IP, error) {
|
|||
ipNewRaw := ipInteger + offset
|
||||
// Avoid overflows
|
||||
if ipNewRaw < ipInteger {
|
||||
return nil, errors.Errorf("integer overflow while calculating ip address offset, %s + %d", ipFixed, offset)
|
||||
return nil, fmt.Errorf("integer overflow while calculating ip address offset, %s + %d", ipFixed, offset)
|
||||
}
|
||||
ipNew := net.IPv4(byte(ipNewRaw>>24), byte(ipNewRaw>>16&0xFF), byte(ipNewRaw>>8)&0xFF, byte(ipNewRaw&0xFF))
|
||||
if !subnet.Contains(ipNew) {
|
||||
return nil, errors.Errorf("calculated ip address %s is not within given subnet %s", ipNew.String(), subnet.String())
|
||||
return nil, fmt.Errorf("calculated ip address %s is not within given subnet %s", ipNew.String(), subnet.String())
|
||||
}
|
||||
return &ipNew, nil
|
||||
}
|
||||
|
|
@ -465,7 +465,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
|
|||
b := make([]byte, 16)
|
||||
for {
|
||||
if err := syncR.SetDeadline(time.Now().Add(timeout)); err != nil {
|
||||
return errors.Wrapf(err, "error setting %s pipe timeout", prog)
|
||||
return fmt.Errorf("error setting %s pipe timeout: %w", prog, err)
|
||||
}
|
||||
// FIXME: return err as soon as proc exits, without waiting for timeout
|
||||
if _, err := syncR.Read(b); err == nil {
|
||||
|
|
@ -476,7 +476,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
|
|||
var status syscall.WaitStatus
|
||||
pid, err := syscall.Wait4(cmd.Process.Pid, &status, syscall.WNOHANG, nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to read %s process status", prog)
|
||||
return fmt.Errorf("failed to read %s process status: %w", prog, err)
|
||||
}
|
||||
if pid != cmd.Process.Pid {
|
||||
continue
|
||||
|
|
@ -488,16 +488,16 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
|
|||
}
|
||||
logContent, err := ioutil.ReadAll(logFile)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "%s failed", prog)
|
||||
return fmt.Errorf("%s failed: %w", prog, err)
|
||||
}
|
||||
return errors.Errorf("%s failed: %q", prog, logContent)
|
||||
return fmt.Errorf("%s failed: %q", prog, logContent)
|
||||
}
|
||||
if status.Signaled() {
|
||||
return errors.Errorf("%s killed by signal", prog)
|
||||
return fmt.Errorf("%s killed by signal", prog)
|
||||
}
|
||||
continue
|
||||
}
|
||||
return errors.Wrapf(err, "failed to read from %s sync pipe", prog)
|
||||
return fmt.Errorf("failed to read from %s sync pipe: %w", prog, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
@ -506,7 +506,7 @@ func waitForSync(syncR *os.File, cmd *exec.Cmd, logFile io.ReadSeeker, timeout t
|
|||
func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath string, netStatus map[string]types.StatusBlock) error {
|
||||
syncR, syncW, err := os.Pipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open pipe")
|
||||
return fmt.Errorf("failed to open pipe: %w", err)
|
||||
}
|
||||
defer errorhandling.CloseQuiet(syncR)
|
||||
defer errorhandling.CloseQuiet(syncW)
|
||||
|
|
@ -514,19 +514,19 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
|
|||
logPath := filepath.Join(ctr.runtime.config.Engine.TmpDir, fmt.Sprintf("rootlessport-%s.log", ctr.config.ID))
|
||||
logFile, err := os.Create(logPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open rootlessport log file %s", logPath)
|
||||
return fmt.Errorf("failed to open rootlessport log file %s: %w", logPath, err)
|
||||
}
|
||||
defer logFile.Close()
|
||||
// Unlink the file immediately so we won't need to worry about cleaning it up later.
|
||||
// It is still accessible through the open fd logFile.
|
||||
if err := os.Remove(logPath); err != nil {
|
||||
return errors.Wrapf(err, "delete file %s", logPath)
|
||||
return fmt.Errorf("delete file %s: %w", logPath, err)
|
||||
}
|
||||
|
||||
if !ctr.config.PostConfigureNetNS {
|
||||
ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create rootless port sync pipe")
|
||||
return fmt.Errorf("failed to create rootless port sync pipe: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -566,7 +566,7 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
|
|||
Setpgid: true,
|
||||
}
|
||||
if err := cmd.Start(); err != nil {
|
||||
return errors.Wrapf(err, "failed to start rootlessport process")
|
||||
return fmt.Errorf("failed to start rootlessport process: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
servicereaper.AddPID(cmd.Process.Pid)
|
||||
|
|
@ -579,7 +579,7 @@ func (r *Runtime) setupRootlessPortMappingViaRLK(ctr *Container, netnsPath strin
|
|||
if stdoutStr != "" {
|
||||
// err contains full debug log and too verbose, so return stdoutStr
|
||||
logrus.Debug(err)
|
||||
return errors.Errorf("rootlessport " + strings.TrimSuffix(stdoutStr, "\n"))
|
||||
return fmt.Errorf("rootlessport " + strings.TrimSuffix(stdoutStr, "\n"))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
|
@ -612,7 +612,7 @@ func (r *Runtime) setupRootlessPortMappingViaSlirp(ctr *Container, cmd *exec.Cmd
|
|||
|
||||
// wait for the API socket file to appear before trying to use it.
|
||||
if _, err := WaitForFile(apiSocket, chWait, pidWaitTimeout); err != nil {
|
||||
return errors.Wrapf(err, "waiting for slirp4nets to create the api socket file %s", apiSocket)
|
||||
return fmt.Errorf("waiting for slirp4nets to create the api socket file %s: %w", apiSocket, err)
|
||||
}
|
||||
|
||||
// for each port we want to add we need to open a connection to the slirp4netns control socket
|
||||
|
|
@ -639,7 +639,7 @@ func (r *Runtime) setupRootlessPortMappingViaSlirp(ctr *Container, cmd *exec.Cmd
|
|||
func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport uint16) error {
|
||||
conn, err := net.Dial("unix", apiSocket)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot open connection to %s", apiSocket)
|
||||
return fmt.Errorf("cannot open connection to %s: %w", apiSocket, err)
|
||||
}
|
||||
defer func() {
|
||||
if err := conn.Close(); err != nil {
|
||||
|
|
@ -659,27 +659,27 @@ func openSlirp4netnsPort(apiSocket, proto, hostip string, hostport, guestport ui
|
|||
// to the socket, as requested by slirp4netns.
|
||||
data, err := json.Marshal(&apiCmd)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot marshal JSON for slirp4netns")
|
||||
return fmt.Errorf("cannot marshal JSON for slirp4netns: %w", err)
|
||||
}
|
||||
if _, err := conn.Write([]byte(fmt.Sprintf("%s\n", data))); err != nil {
|
||||
return errors.Wrapf(err, "cannot write to control socket %s", apiSocket)
|
||||
return fmt.Errorf("cannot write to control socket %s: %w", apiSocket, err)
|
||||
}
|
||||
if err := conn.(*net.UnixConn).CloseWrite(); err != nil {
|
||||
return errors.Wrapf(err, "cannot shutdown the socket %s", apiSocket)
|
||||
return fmt.Errorf("cannot shutdown the socket %s: %w", apiSocket, err)
|
||||
}
|
||||
buf := make([]byte, 2048)
|
||||
readLength, err := conn.Read(buf)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "cannot read from control socket %s", apiSocket)
|
||||
return fmt.Errorf("cannot read from control socket %s: %w", apiSocket, err)
|
||||
}
|
||||
// if there is no 'error' key in the received JSON data, then the operation was
|
||||
// successful.
|
||||
var y map[string]interface{}
|
||||
if err := json.Unmarshal(buf[0:readLength], &y); err != nil {
|
||||
return errors.Wrapf(err, "error parsing error status from slirp4netns")
|
||||
return fmt.Errorf("error parsing error status from slirp4netns: %w", err)
|
||||
}
|
||||
if e, found := y["error"]; found {
|
||||
return errors.Errorf("from slirp4netns while setting up port redirection: %v", e)
|
||||
return fmt.Errorf("from slirp4netns while setting up port redirection: %v", e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -722,21 +722,21 @@ func (c *Container) reloadRootlessRLKPortMapping() error {

conn, err := openUnixSocket(filepath.Join(c.runtime.config.Engine.TmpDir, "rp", c.config.ID))
if err != nil {
return errors.Wrap(err, "could not reload rootless port mappings, port forwarding may no longer work correctly")
return fmt.Errorf("could not reload rootless port mappings, port forwarding may no longer work correctly: %w", err)
}
defer conn.Close()
enc := json.NewEncoder(conn)
err = enc.Encode(childIP)
if err != nil {
return errors.Wrap(err, "port reloading failed")
return fmt.Errorf("port reloading failed: %w", err)
}
b, err := ioutil.ReadAll(conn)
if err != nil {
return errors.Wrap(err, "port reloading failed")
return fmt.Errorf("port reloading failed: %w", err)
}
data := string(b)
if data != "OK" {
return errors.Errorf("port reloading failed: %s", data)
return fmt.Errorf("port reloading failed: %s", data)
}
return nil
}
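
The hunks in this file all follow the same mechanical pattern: `errors.Wrap`/`errors.Wrapf(err, "msg", args...)` becomes `fmt.Errorf("msg: %w", args..., err)`, with the wrapped error moved to the last operand. The rendered message is unchanged, and callers can still reach the underlying error. A short sketch under that assumption (the failing helper is made up, not part of this commit):

package main

import (
	"errors"
	"fmt"
	"io"
)

// readReply stands in for any call that can fail, e.g. reading the
// rootlessport daemon's reply from its socket.
func readReply() error {
	return io.ErrUnexpectedEOF
}

func main() {
	if err := readReply(); err != nil {
		wrapped := fmt.Errorf("port reloading failed: %w", err)

		fmt.Println(wrapped)                                 // port reloading failed: unexpected EOF
		fmt.Println(errors.Is(wrapped, io.ErrUnexpectedEOF)) // true
		fmt.Println(errors.Unwrap(wrapped) == err)           // true
	}
}
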
@ -4,6 +4,7 @@
package libpod

import (
"errors"
"fmt"
"io"
"net"
@ -17,7 +18,6 @@ import (
"github.com/containers/podman/v4/pkg/kubeutils"
"github.com/containers/podman/v4/utils"
"github.com/moby/term"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@ -45,14 +45,14 @@ func (r *ConmonOCIRuntime) Attach(c *Container, params *AttachOptions) error {
|
|||
passthrough := c.LogDriver() == define.PassthroughLogging
|
||||
|
||||
if params == nil || params.Streams == nil {
|
||||
return errors.Wrapf(define.ErrInternal, "must provide parameters to Attach")
|
||||
return fmt.Errorf("must provide parameters to Attach: %w", define.ErrInternal)
|
||||
}
|
||||
|
||||
if !params.Streams.AttachOutput && !params.Streams.AttachError && !params.Streams.AttachInput && !passthrough {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
|
||||
return fmt.Errorf("must provide at least one stream to attach to: %w", define.ErrInvalidArg)
|
||||
}
|
||||
if params.Start && params.Started == nil {
|
||||
return errors.Wrapf(define.ErrInternal, "started chan not passed when startContainer set")
|
||||
return fmt.Errorf("started chan not passed when startContainer set: %w", define.ErrInternal)
|
||||
}
|
||||
|
||||
keys := config.DefaultDetachKeys
|
||||
|
|
@ -83,7 +83,7 @@ func (r *ConmonOCIRuntime) Attach(c *Container, params *AttachOptions) error {
|
|||
|
||||
conn, err = openUnixSocket(attachSock)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to connect to container's attach socket: %v", attachSock)
|
||||
return fmt.Errorf("failed to connect to container's attach socket: %v: %w", attachSock, err)
|
||||
}
|
||||
defer func() {
|
||||
if err := conn.Close(); err != nil {
|
||||
|
|
@ -132,10 +132,10 @@ func (r *ConmonOCIRuntime) Attach(c *Container, params *AttachOptions) error {
|
|||
// attachToExec is responsible for closing startFd and attachFd
|
||||
func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, sessionID string, startFd, attachFd *os.File, newSize *define.TerminalSize) error {
|
||||
if !streams.AttachOutput && !streams.AttachError && !streams.AttachInput {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
|
||||
return fmt.Errorf("must provide at least one stream to attach to: %w", define.ErrInvalidArg)
|
||||
}
|
||||
if startFd == nil || attachFd == nil {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "start sync pipe and attach sync pipe must be defined for exec attach")
|
||||
return fmt.Errorf("start sync pipe and attach sync pipe must be defined for exec attach: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
defer errorhandling.CloseQuiet(startFd)
|
||||
|
|
@ -174,7 +174,7 @@ func (c *Container) attachToExec(streams *define.AttachStreams, keys *string, se
|
|||
// 2: then attach
|
||||
conn, err := openUnixSocket(sockPath)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to connect to container's attach socket: %v", sockPath)
|
||||
return fmt.Errorf("failed to connect to container's attach socket: %v: %w", sockPath, err)
|
||||
}
|
||||
defer func() {
|
||||
if err := conn.Close(); err != nil {
|
||||
|
|
@ -200,7 +200,7 @@ func processDetachKeys(keys string) ([]byte, error) {
|
|||
}
|
||||
detachKeys, err := term.ToBytes(keys)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid detach keys")
|
||||
return nil, fmt.Errorf("invalid detach keys: %w", err)
|
||||
}
|
||||
return detachKeys, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
package libpod
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
|
@ -19,7 +20,6 @@ import (
|
|||
"github.com/containers/podman/v4/pkg/util"
|
||||
"github.com/containers/podman/v4/utils"
|
||||
spec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
|
@ -27,14 +27,14 @@ import (
|
|||
// ExecContainer executes a command in a running container
|
||||
func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options *ExecOptions, streams *define.AttachStreams, newSize *define.TerminalSize) (int, chan error, error) {
|
||||
if options == nil {
|
||||
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
|
||||
return -1, nil, fmt.Errorf("must provide an ExecOptions struct to ExecContainer: %w", define.ErrInvalidArg)
|
||||
}
|
||||
if len(options.Cmd) == 0 {
|
||||
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
|
||||
return -1, nil, fmt.Errorf("must provide a command to execute: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
if sessionID == "" {
|
||||
return -1, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
|
||||
return -1, nil, fmt.Errorf("must provide a session ID for exec: %w", define.ErrEmptyID)
|
||||
}
|
||||
|
||||
// TODO: Should we default this to false?
|
||||
|
|
@ -73,7 +73,7 @@ func (r *ConmonOCIRuntime) ExecContainer(c *Container, sessionID string, options
|
|||
}()
|
||||
|
||||
if err := execCmd.Wait(); err != nil {
|
||||
return -1, nil, errors.Wrapf(err, "cannot run conmon")
|
||||
return -1, nil, fmt.Errorf("cannot run conmon: %w", err)
|
||||
}
|
||||
|
||||
pid, err := readConmonPipeData(r.name, pipes.syncPipe, ociLog)
|
||||
|
|
@ -87,12 +87,12 @@ func (r *ConmonOCIRuntime) ExecContainerHTTP(ctr *Container, sessionID string, o
|
|||
streams *HTTPAttachStreams, cancel <-chan bool, hijackDone chan<- bool, holdConnOpen <-chan bool, newSize *define.TerminalSize) (int, chan error, error) {
|
||||
if streams != nil {
|
||||
if !streams.Stdin && !streams.Stdout && !streams.Stderr {
|
||||
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide at least one stream to attach to")
|
||||
return -1, nil, fmt.Errorf("must provide at least one stream to attach to: %w", define.ErrInvalidArg)
|
||||
}
|
||||
}
|
||||
|
||||
if options == nil {
|
||||
return -1, nil, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
|
||||
return -1, nil, fmt.Errorf("must provide exec options to ExecContainerHTTP: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
detachString := config.DefaultDetachKeys
|
||||
|
|
@ -156,7 +156,7 @@ type conmonPipeData struct {
|
|||
// not attach to it.
|
||||
func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID string, options *ExecOptions, stdin bool) (int, error) {
|
||||
if options == nil {
|
||||
return -1, errors.Wrapf(define.ErrInvalidArg, "must provide exec options to ExecContainerHTTP")
|
||||
return -1, fmt.Errorf("must provide exec options to ExecContainerHTTP: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
var ociLog string
|
||||
|
|
@ -187,7 +187,7 @@ func (r *ConmonOCIRuntime) ExecContainerDetached(ctr *Container, sessionID strin
|
|||
|
||||
// Wait for conmon to succeed, when return.
|
||||
if err := execCmd.Wait(); err != nil {
|
||||
return -1, errors.Wrapf(err, "cannot run conmon")
|
||||
return -1, fmt.Errorf("cannot run conmon: %w", err)
|
||||
}
|
||||
|
||||
pid, err := readConmonPipeData(r.name, pipes.syncPipe, ociLog)
|
||||
|
|
@ -204,7 +204,7 @@ func (r *ConmonOCIRuntime) ExecAttachResize(ctr *Container, sessionID string, ne
|
|||
defer controlFile.Close()
|
||||
|
||||
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
|
||||
return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
|
||||
return fmt.Errorf("failed to write to ctl file to resize terminal: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -225,7 +225,7 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
|
|||
if err == unix.ESRCH {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
|
||||
return fmt.Errorf("error pinging container %s exec session %s PID %d with signal 0: %w", ctr.ID(), sessionID, pid, err)
|
||||
}
|
||||
|
||||
if timeout > 0 {
|
||||
|
|
@ -235,7 +235,7 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
|
|||
if err == unix.ESRCH {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGTERM", ctr.ID(), sessionID, pid)
|
||||
return fmt.Errorf("error killing container %s exec session %s PID %d with SIGTERM: %w", ctr.ID(), sessionID, pid, err)
|
||||
}
|
||||
|
||||
// Wait for the PID to stop
|
||||
|
|
@ -253,12 +253,12 @@ func (r *ConmonOCIRuntime) ExecStopContainer(ctr *Container, sessionID string, t
|
|||
if err == unix.ESRCH {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "error killing container %s exec session %s PID %d with SIGKILL", ctr.ID(), sessionID, pid)
|
||||
return fmt.Errorf("error killing container %s exec session %s PID %d with SIGKILL: %w", ctr.ID(), sessionID, pid, err)
|
||||
}
|
||||
|
||||
// Wait for the PID to stop
|
||||
if err := waitPidStop(pid, killContainerTimeout); err != nil {
|
||||
return errors.Wrapf(err, "timed out waiting for container %s exec session %s PID %d to stop after SIGKILL", ctr.ID(), sessionID, pid)
|
||||
return fmt.Errorf("timed out waiting for container %s exec session %s PID %d to stop after SIGKILL: %w", ctr.ID(), sessionID, pid, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -279,7 +279,7 @@ func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (b
|
|||
if err == unix.ESRCH {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrapf(err, "error pinging container %s exec session %s PID %d with signal 0", ctr.ID(), sessionID, pid)
|
||||
return false, fmt.Errorf("error pinging container %s exec session %s PID %d with signal 0: %w", ctr.ID(), sessionID, pid, err)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
|
|
@ -289,7 +289,7 @@ func (r *ConmonOCIRuntime) ExecUpdateStatus(ctr *Container, sessionID string) (b
|
|||
func (r *ConmonOCIRuntime) ExecAttachSocketPath(ctr *Container, sessionID string) (string, error) {
|
||||
// We don't even use container, so don't validity check it
|
||||
if sessionID == "" {
|
||||
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid session ID to get attach socket path")
|
||||
return "", fmt.Errorf("must provide a valid session ID to get attach socket path: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
return filepath.Join(ctr.execBundlePath(sessionID), "attach"), nil
|
||||
|
|
@ -325,20 +325,20 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
|
|||
pipes := new(execPipes)
|
||||
|
||||
if options == nil {
|
||||
return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide an ExecOptions struct to ExecContainer")
|
||||
return nil, nil, fmt.Errorf("must provide an ExecOptions struct to ExecContainer: %w", define.ErrInvalidArg)
|
||||
}
|
||||
if len(options.Cmd) == 0 {
|
||||
return nil, nil, errors.Wrapf(define.ErrInvalidArg, "must provide a command to execute")
|
||||
return nil, nil, fmt.Errorf("must provide a command to execute: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
if sessionID == "" {
|
||||
return nil, nil, errors.Wrapf(define.ErrEmptyID, "must provide a session ID for exec")
|
||||
return nil, nil, fmt.Errorf("must provide a session ID for exec: %w", define.ErrEmptyID)
|
||||
}
|
||||
|
||||
// create sync pipe to receive the pid
|
||||
parentSyncPipe, childSyncPipe, err := newPipe()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error creating socket pair")
|
||||
return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
|
||||
}
|
||||
pipes.syncPipe = parentSyncPipe
|
||||
|
||||
|
|
@ -352,7 +352,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
|
|||
// attachToExec is responsible for closing parentStartPipe
|
||||
childStartPipe, parentStartPipe, err := newPipe()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error creating socket pair")
|
||||
return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
|
||||
}
|
||||
pipes.startPipe = parentStartPipe
|
||||
|
||||
|
|
@ -362,7 +362,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
|
|||
// attachToExec is responsible for closing parentAttachPipe
|
||||
parentAttachPipe, childAttachPipe, err := newPipe()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error creating socket pair")
|
||||
return nil, nil, fmt.Errorf("error creating socket pair: %w", err)
|
||||
}
|
||||
pipes.attachPipe = parentAttachPipe
|
||||
|
||||
|
|
@ -471,7 +471,7 @@ func (r *ConmonOCIRuntime) startExec(c *Container, sessionID string, options *Ex
|
|||
childrenClosed = true
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "cannot start container %s", c.ID())
|
||||
return nil, nil, fmt.Errorf("cannot start container %s: %w", c.ID(), err)
|
||||
}
|
||||
if err := r.moveConmonToCgroupAndSignal(c, execCmd, parentStartPipe); err != nil {
|
||||
return nil, nil, err
|
||||
|
|
@ -494,7 +494,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
|
|||
// conmonPipeDataChan in case of an error.
|
||||
|
||||
if pipes == nil || pipes.startPipe == nil || pipes.attachPipe == nil {
|
||||
err := errors.Wrapf(define.ErrInvalidArg, "must provide a start and attach pipe to finish an exec attach")
|
||||
err := fmt.Errorf("must provide a start and attach pipe to finish an exec attach: %w", define.ErrInvalidArg)
|
||||
conmonPipeDataChan <- conmonPipeData{-1, err}
|
||||
return err
|
||||
}
|
||||
|
|
@ -537,7 +537,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
|
|||
conn, err := openUnixSocket(sockPath)
|
||||
if err != nil {
|
||||
conmonPipeDataChan <- conmonPipeData{-1, err}
|
||||
return errors.Wrapf(err, "failed to connect to container's attach socket: %v", sockPath)
|
||||
return fmt.Errorf("failed to connect to container's attach socket: %v: %w", sockPath, err)
|
||||
}
|
||||
defer func() {
|
||||
if err := conn.Close(); err != nil {
|
||||
|
|
@ -558,13 +558,13 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
|
|||
hijacker, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
conmonPipeDataChan <- conmonPipeData{-1, err}
|
||||
return errors.Errorf("unable to hijack connection")
|
||||
return errors.New("unable to hijack connection")
|
||||
}
|
||||
|
||||
httpCon, httpBuf, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
conmonPipeDataChan <- conmonPipeData{-1, err}
|
||||
return errors.Wrapf(err, "error hijacking connection")
|
||||
return fmt.Errorf("error hijacking connection: %w", err)
|
||||
}
|
||||
|
||||
hijackDone <- true
|
||||
|
|
@ -575,7 +575,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp
|
|||
// Force a flush after the header is written.
|
||||
if err := httpBuf.Flush(); err != nil {
|
||||
conmonPipeDataChan <- conmonPipeData{-1, err}
|
||||
return errors.Wrapf(err, "error flushing HTTP hijack header")
|
||||
return fmt.Errorf("error flushing HTTP hijack header: %w", err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
|
|
@ -723,7 +723,7 @@ func prepareProcessExec(c *Container, options *ExecOptions, env []string, sessio
|
|||
if len(addGroups) > 0 {
|
||||
sgids, err = lookup.GetContainerGroups(addGroups, c.state.Mountpoint, overrides)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error looking up supplemental groups for container %s exec session %s", c.ID(), sessionID)
|
||||
return nil, fmt.Errorf("error looking up supplemental groups for container %s exec session %s: %w", c.ID(), sessionID, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ import (
|
|||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
|
@ -41,7 +42,6 @@ import (
|
|||
pmount "github.com/containers/storage/pkg/mount"
|
||||
spec "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
|
@ -81,7 +81,7 @@ type ConmonOCIRuntime struct {
|
|||
// libpod.
|
||||
func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtimeFlags []string, runtimeCfg *config.Config) (OCIRuntime, error) {
|
||||
if name == "" {
|
||||
return nil, errors.Wrapf(define.ErrInvalidArg, "the OCI runtime must be provided a non-empty name")
|
||||
return nil, fmt.Errorf("the OCI runtime must be provided a non-empty name: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
// Make lookup tables for runtime support
|
||||
|
|
@ -125,7 +125,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
|
|||
if os.IsNotExist(err) {
|
||||
continue
|
||||
}
|
||||
return nil, errors.Wrapf(err, "cannot stat OCI runtime %s path", name)
|
||||
return nil, fmt.Errorf("cannot stat OCI runtime %s path: %w", name, err)
|
||||
}
|
||||
if !stat.Mode().IsRegular() {
|
||||
continue
|
||||
|
|
@ -146,7 +146,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
|
|||
}
|
||||
|
||||
if !foundPath {
|
||||
return nil, errors.Wrapf(define.ErrInvalidArg, "no valid executable found for OCI runtime %s", name)
|
||||
return nil, fmt.Errorf("no valid executable found for OCI runtime %s: %w", name, define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
runtime.exitsDir = filepath.Join(runtime.tmpDir, "exits")
|
||||
|
|
@ -155,7 +155,7 @@ func newConmonOCIRuntime(name string, paths []string, conmonPath string, runtime
|
|||
if err := os.MkdirAll(runtime.exitsDir, 0750); err != nil {
|
||||
// The directory is allowed to exist
|
||||
if !os.IsExist(err) {
|
||||
return nil, errors.Wrapf(err, "error creating OCI runtime exit files directory")
|
||||
return nil, fmt.Errorf("error creating OCI runtime exit files directory: %w", err)
|
||||
}
|
||||
}
|
||||
return runtime, nil
|
||||
|
|
@ -231,7 +231,7 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
|
|||
// changes are propagated to the host.
|
||||
err = unix.Mount("/sys", "/sys", "none", unix.MS_REC|unix.MS_SLAVE, "")
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "cannot make /sys slave")
|
||||
return 0, fmt.Errorf("cannot make /sys slave: %w", err)
|
||||
}
|
||||
|
||||
mounts, err := pmount.GetMounts()
|
||||
|
|
@ -244,7 +244,7 @@ func (r *ConmonOCIRuntime) CreateContainer(ctr *Container, restoreOptions *Conta
|
|||
}
|
||||
err = unix.Unmount(m.Mountpoint, 0)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return 0, errors.Wrapf(err, "cannot unmount %s", m.Mountpoint)
|
||||
return 0, fmt.Errorf("cannot unmount %s: %w", m.Mountpoint, err)
|
||||
}
|
||||
}
|
||||
return r.createOCIContainer(ctr, restoreOptions)
|
||||
|
|
@ -282,17 +282,17 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
|
|||
|
||||
outPipe, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "getting stdout pipe")
|
||||
return fmt.Errorf("getting stdout pipe: %w", err)
|
||||
}
|
||||
errPipe, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "getting stderr pipe")
|
||||
return fmt.Errorf("getting stderr pipe: %w", err)
|
||||
}
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
out, err2 := ioutil.ReadAll(errPipe)
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err, "error getting container %s state", ctr.ID())
|
||||
return fmt.Errorf("error getting container %s state: %w", ctr.ID(), err)
|
||||
}
|
||||
if strings.Contains(string(out), "does not exist") || strings.Contains(string(out), "No such file") {
|
||||
if err := ctr.removeConmonFiles(); err != nil {
|
||||
|
|
@ -303,7 +303,7 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
|
|||
ctr.state.State = define.ContainerStateExited
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "error getting container %s state. stderr/out: %s", ctr.ID(), out)
|
||||
return fmt.Errorf("error getting container %s state. stderr/out: %s: %w", ctr.ID(), out, err)
|
||||
}
|
||||
defer func() {
|
||||
_ = cmd.Wait()
|
||||
|
|
@ -314,10 +314,10 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
|
|||
}
|
||||
out, err := ioutil.ReadAll(outPipe)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading stdout: %s", ctr.ID())
|
||||
return fmt.Errorf("error reading stdout: %s: %w", ctr.ID(), err)
|
||||
}
|
||||
if err := json.NewDecoder(bytes.NewBuffer(out)).Decode(state); err != nil {
|
||||
return errors.Wrapf(err, "error decoding container status for container %s", ctr.ID())
|
||||
return fmt.Errorf("error decoding container status for container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
ctr.state.PID = state.Pid
|
||||
|
||||
|
|
@ -331,8 +331,8 @@ func (r *ConmonOCIRuntime) UpdateContainerStatus(ctr *Container) error {
|
|||
case "stopped":
|
||||
ctr.state.State = define.ContainerStateStopped
|
||||
default:
|
||||
return errors.Wrapf(define.ErrInternal, "unrecognized status returned by runtime for container %s: %s",
|
||||
ctr.ID(), state.Status)
|
||||
return fmt.Errorf("unrecognized status returned by runtime for container %s: %s: %w",
|
||||
ctr.ID(), state.Status, define.ErrInternal)
|
||||
}
|
||||
|
||||
// Only grab exit status if we were not already stopped
|
||||
|
|
@ -400,7 +400,7 @@ func (r *ConmonOCIRuntime) KillContainer(ctr *Container, signal uint, all bool)
|
|||
if ctr.ensureState(define.ContainerStateStopped, define.ContainerStateExited) {
|
||||
return define.ErrCtrStateInvalid
|
||||
}
|
||||
return errors.Wrapf(err, "error sending signal to container %s", ctr.ID())
|
||||
return fmt.Errorf("error sending signal to container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -457,7 +457,7 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool)
|
|||
return nil
|
||||
}
|
||||
|
||||
return errors.Wrapf(err, "error sending SIGKILL to container %s", ctr.ID())
|
||||
return fmt.Errorf("error sending SIGKILL to container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
|
||||
// Give runtime a few seconds to make it happen
|
||||
|
|
@ -514,7 +514,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
|
|||
|
||||
if streams != nil {
|
||||
if !streams.Stdin && !streams.Stdout && !streams.Stderr {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "must specify at least one stream to attach to")
|
||||
return fmt.Errorf("must specify at least one stream to attach to: %w", define.ErrInvalidArg)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -527,7 +527,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
|
|||
if streamAttach {
|
||||
newConn, err := openUnixSocket(attachSock)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to connect to container's attach socket: %v", attachSock)
|
||||
return fmt.Errorf("failed to connect to container's attach socket: %v: %w", attachSock, err)
|
||||
}
|
||||
conn = newConn
|
||||
defer func() {
|
||||
|
|
@ -562,12 +562,12 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
|
|||
// Alright, let's hijack.
|
||||
hijacker, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return errors.Errorf("unable to hijack connection")
|
||||
return fmt.Errorf("unable to hijack connection")
|
||||
}
|
||||
|
||||
httpCon, httpBuf, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error hijacking connection")
|
||||
return fmt.Errorf("error hijacking connection: %w", err)
|
||||
}
|
||||
|
||||
hijackDone <- true
|
||||
|
|
@ -576,7 +576,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
|
|||
|
||||
// Force a flush after the header is written.
|
||||
if err := httpBuf.Flush(); err != nil {
|
||||
return errors.Wrapf(err, "error flushing HTTP hijack header")
|
||||
return fmt.Errorf("error flushing HTTP hijack header: %w", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
|
|
@ -722,7 +722,8 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http.
// isRetryable returns whether the error was caused by a blocked syscall or the
// specified operation on a non blocking file descriptor wasn't ready for completion.
func isRetryable(err error) bool {
if errno, isErrno := errors.Cause(err).(syscall.Errno); isErrno {
var errno syscall.Errno
if errors.As(err, &errno) {
return errno == syscall.EINTR || errno == syscall.EAGAIN
}
return false
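
isRetryable above is the one change in this file that is not just a message rewrite: the `errors.Cause(err).(syscall.Errno)` type assertion is replaced by `errors.As`, which walks the whole chain of `%w`-wrapped errors instead of relying on pkg/errors' Cause interface. A standalone sketch of the same check (the wrapping in main is invented for illustration):

package main

import (
	"errors"
	"fmt"
	"syscall"
)

// isRetryable mirrors the new version above: errors.As unwraps err layer by
// layer until it finds a syscall.Errno (or runs out of wrapped errors).
func isRetryable(err error) bool {
	var errno syscall.Errno
	if errors.As(err, &errno) {
		return errno == syscall.EINTR || errno == syscall.EAGAIN
	}
	return false
}

func main() {
	// A syscall error wrapped twice with %w is still detected.
	err := fmt.Errorf("open ctl file: %w", fmt.Errorf("read: %w", syscall.EAGAIN))
	fmt.Println(isRetryable(err))                           // true
	fmt.Println(isRetryable(errors.New("unrelated error"))) // false
}
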
@ -737,11 +738,11 @@ func openControlFile(ctr *Container, parentDir string) (*os.File, error) {
|
|||
return controlFile, nil
|
||||
}
|
||||
if !isRetryable(err) {
|
||||
return nil, errors.Wrapf(err, "could not open ctl file for terminal resize for container %s", ctr.ID())
|
||||
return nil, fmt.Errorf("could not open ctl file for terminal resize for container %s: %w", ctr.ID(), err)
|
||||
}
|
||||
time.Sleep(time.Second / 10)
|
||||
}
|
||||
return nil, errors.Errorf("timeout waiting for %q", controlPath)
|
||||
return nil, fmt.Errorf("timeout waiting for %q", controlPath)
|
||||
}
|
||||
|
||||
// AttachResize resizes the terminal used by the given container.
|
||||
|
|
@ -754,7 +755,7 @@ func (r *ConmonOCIRuntime) AttachResize(ctr *Container, newSize define.TerminalS
|
|||
|
||||
logrus.Debugf("Received a resize event for container %s: %+v", ctr.ID(), newSize)
|
||||
if _, err = fmt.Fprintf(controlFile, "%d %d %d\n", 1, newSize.Height, newSize.Width); err != nil {
|
||||
return errors.Wrapf(err, "failed to write to ctl file to resize terminal")
|
||||
return fmt.Errorf("failed to write to ctl file to resize terminal: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -862,7 +863,7 @@ func (r *ConmonOCIRuntime) CheckConmonRunning(ctr *Container) (bool, error) {
|
|||
if err == unix.ESRCH {
|
||||
return false, nil
|
||||
}
|
||||
return false, errors.Wrapf(err, "error pinging container %s conmon with signal 0", ctr.ID())
|
||||
return false, fmt.Errorf("error pinging container %s conmon with signal 0: %w", ctr.ID(), err)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
|
@ -894,7 +895,7 @@ func (r *ConmonOCIRuntime) SupportsKVM() bool {
|
|||
// AttachSocketPath is the path to a single container's attach socket.
|
||||
func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) {
|
||||
if ctr == nil {
|
||||
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get attach socket path")
|
||||
return "", fmt.Errorf("must provide a valid container to get attach socket path: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
return filepath.Join(ctr.bundlePath(), "attach"), nil
|
||||
|
|
@ -903,7 +904,7 @@ func (r *ConmonOCIRuntime) AttachSocketPath(ctr *Container) (string, error) {
|
|||
// ExitFilePath is the path to a container's exit file.
|
||||
func (r *ConmonOCIRuntime) ExitFilePath(ctr *Container) (string, error) {
|
||||
if ctr == nil {
|
||||
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get exit file path")
|
||||
return "", fmt.Errorf("must provide a valid container to get exit file path: %w", define.ErrInvalidArg)
|
||||
}
|
||||
return filepath.Join(r.exitsDir, ctr.ID()), nil
|
||||
}
|
||||
|
|
@ -914,11 +915,11 @@ func (r *ConmonOCIRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntime
|
|||
conmonPackage := packageVersion(r.conmonPath)
|
||||
runtimeVersion, err := r.getOCIRuntimeVersion()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error getting version of OCI runtime %s", r.name)
|
||||
return nil, nil, fmt.Errorf("error getting version of OCI runtime %s: %w", r.name, err)
|
||||
}
|
||||
conmonVersion, err := r.getConmonVersion()
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "error getting conmon version")
|
||||
return nil, nil, fmt.Errorf("error getting conmon version: %w", err)
|
||||
}
|
||||
|
||||
conmon := define.ConmonInfo{
|
||||
|
|
@ -988,7 +989,7 @@ func waitPidStop(pid int, timeout time.Duration) error {
return nil
case <-time.After(timeout):
close(chControl)
return errors.Errorf("given PIDs did not die within timeout")
return fmt.Errorf("given PIDs did not die within timeout")
}
}

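
A side note on the hunk above: when there is no underlying error to wrap and no formatting verbs, `fmt.Errorf` and `errors.New` build equivalent errors; this diff uses both spellings in different places. Purely illustrative:

package main

import (
	"errors"
	"fmt"
)

func main() {
	a := fmt.Errorf("given PIDs did not die within timeout")
	b := errors.New("given PIDs did not die within timeout")

	// Same message, nothing wrapped in either case.
	fmt.Println(a.Error() == b.Error())             // true
	fmt.Println(errors.Unwrap(a), errors.Unwrap(b)) // <nil> <nil>
}
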
@ -1004,7 +1005,7 @@ func (r *ConmonOCIRuntime) getLogTag(ctr *Container) (string, error) {
|
|||
}
|
||||
tmpl, err := template.New("container").Parse(logTag)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "template parsing error %s", logTag)
|
||||
return "", fmt.Errorf("template parsing error %s: %w", logTag, err)
|
||||
}
|
||||
var b bytes.Buffer
|
||||
err = tmpl.Execute(&b, data)
|
||||
|
|
@ -1025,13 +1026,13 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
|
|||
|
||||
parentSyncPipe, childSyncPipe, err := newPipe()
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "error creating socket pair")
|
||||
return 0, fmt.Errorf("error creating socket pair: %w", err)
|
||||
}
|
||||
defer errorhandling.CloseQuiet(parentSyncPipe)
|
||||
|
||||
childStartPipe, parentStartPipe, err := newPipe()
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "error creating socket pair for start pipe")
|
||||
return 0, fmt.Errorf("error creating socket pair for start pipe: %w", err)
|
||||
}
|
||||
|
||||
defer errorhandling.CloseQuiet(parentStartPipe)
|
||||
|
|
@ -1202,12 +1203,12 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co
|
|||
if havePortMapping {
|
||||
ctr.rootlessPortSyncR, ctr.rootlessPortSyncW, err = os.Pipe()
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "failed to create rootless port sync pipe")
|
||||
return 0, fmt.Errorf("failed to create rootless port sync pipe: %w", err)
|
||||
}
|
||||
}
|
||||
ctr.rootlessSlirpSyncR, ctr.rootlessSlirpSyncW, err = os.Pipe()
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "failed to create rootless network sync pipe")
|
||||
return 0, fmt.Errorf("failed to create rootless network sync pipe: %w", err)
|
||||
}
|
||||
} else {
|
||||
if ctr.rootlessSlirpSyncR != nil {
|
||||
|
|
@ -1544,7 +1545,7 @@ func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int,
|
|||
}
|
||||
}
|
||||
}
|
||||
return -1, errors.Wrapf(ss.err, "container create failed (no logs from conmon)")
|
||||
return -1, fmt.Errorf("container create failed (no logs from conmon): %w", ss.err)
|
||||
}
|
||||
logrus.Debugf("Received: %d", ss.si.Data)
|
||||
if ss.si.Data < 0 {
|
||||
|
|
@ -1561,11 +1562,11 @@ func readConmonPipeData(runtimeName string, pipe *os.File, ociLog string) (int,
|
|||
if ss.si.Message != "" {
|
||||
return ss.si.Data, getOCIRuntimeError(runtimeName, ss.si.Message)
|
||||
}
|
||||
return ss.si.Data, errors.Wrapf(define.ErrInternal, "container create failed")
|
||||
return ss.si.Data, fmt.Errorf("container create failed: %w", define.ErrInternal)
|
||||
}
|
||||
data = ss.si.Data
|
||||
case <-time.After(define.ContainerCreateTimeout):
|
||||
return -1, errors.Wrapf(define.ErrInternal, "container creation timeout")
|
||||
return -1, fmt.Errorf("container creation timeout: %w", define.ErrInternal)
|
||||
}
|
||||
return data, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@ import (
|
|||
"sync"
|
||||
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -209,7 +208,7 @@ func (r *MissingRuntime) ExecAttachSocketPath(ctr *Container, sessionID string)
|
|||
// the container, but Conmon should still place an exit file for it.
|
||||
func (r *MissingRuntime) ExitFilePath(ctr *Container) (string, error) {
|
||||
if ctr == nil {
|
||||
return "", errors.Wrapf(define.ErrInvalidArg, "must provide a valid container to get exit file path")
|
||||
return "", fmt.Errorf("must provide a valid container to get exit file path: %w", define.ErrInvalidArg)
|
||||
}
|
||||
return filepath.Join(r.exitsDir, ctr.ID()), nil
|
||||
}
|
||||
|
|
@ -227,5 +226,5 @@ func (r *MissingRuntime) RuntimeInfo() (*define.ConmonInfo, *define.OCIRuntimeIn
|
|||
|
||||
// Return an error indicating the runtime is missing
|
||||
func (r *MissingRuntime) printError() error {
|
||||
return errors.Wrapf(define.ErrOCIRuntimeNotFound, "runtime %s is missing", r.name)
|
||||
return fmt.Errorf("runtime %s is missing: %w", r.name, define.ErrOCIRuntimeNotFound)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,7 +10,6 @@ import (
|
|||
|
||||
"github.com/containers/common/libnetwork/types"
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -70,7 +69,7 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
|
|||
addr, err = net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%d", hostIP, port))
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot resolve the UDP address")
|
||||
return nil, fmt.Errorf("cannot resolve the UDP address: %w", err)
|
||||
}
|
||||
|
||||
proto := "udp4"
|
||||
|
|
@ -79,11 +78,11 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
|
|||
}
|
||||
server, err := net.ListenUDP(proto, addr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot listen on the UDP port")
|
||||
return nil, fmt.Errorf("cannot listen on the UDP port: %w", err)
|
||||
}
|
||||
file, err = server.File()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot get file for UDP socket")
|
||||
return nil, fmt.Errorf("cannot get file for UDP socket: %w", err)
|
||||
}
|
||||
// close the listener
|
||||
// note that this does not affect the fd, see the godoc for server.File()
|
||||
|
|
@ -103,7 +102,7 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
|
|||
addr, err = net.ResolveTCPAddr("tcp4", fmt.Sprintf("%s:%d", hostIP, port))
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot resolve the TCP address")
|
||||
return nil, fmt.Errorf("cannot resolve the TCP address: %w", err)
|
||||
}
|
||||
|
||||
proto := "tcp4"
|
||||
|
|
@ -112,11 +111,11 @@ func bindPort(protocol, hostIP string, port uint16, isV6 bool, sctpWarning *bool
|
|||
}
|
||||
server, err := net.ListenTCP(proto, addr)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot listen on the TCP port")
|
||||
return nil, fmt.Errorf("cannot listen on the TCP port: %w", err)
|
||||
}
|
||||
file, err = server.File()
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot get file for TCP socket")
|
||||
return nil, fmt.Errorf("cannot get file for TCP socket: %w", err)
|
||||
}
|
||||
// close the listener
|
||||
// note that this does not affect the fd, see the godoc for server.File()
|
||||
|
|
@ -144,14 +143,14 @@ func getOCIRuntimeError(name, runtimeMsg string) error {
|
|||
if includeFullOutput {
|
||||
errStr = runtimeMsg
|
||||
}
|
||||
return errors.Wrapf(define.ErrOCIRuntimePermissionDenied, "%s: %s", name, strings.Trim(errStr, "\n"))
|
||||
return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrOCIRuntimePermissionDenied)
|
||||
}
|
||||
if match := regexp.MustCompile("(?i).*executable file not found in.*|.*no such file or directory.*").FindString(runtimeMsg); match != "" {
|
||||
errStr := match
|
||||
if includeFullOutput {
|
||||
errStr = runtimeMsg
|
||||
}
|
||||
return errors.Wrapf(define.ErrOCIRuntimeNotFound, "%s: %s", name, strings.Trim(errStr, "\n"))
|
||||
return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrOCIRuntimeNotFound)
|
||||
}
|
||||
if match := regexp.MustCompile("`/proc/[a-z0-9-].+/attr.*`").FindString(runtimeMsg); match != "" {
|
||||
errStr := match
|
||||
|
|
@ -159,11 +158,11 @@ func getOCIRuntimeError(name, runtimeMsg string) error {
errStr = runtimeMsg
}
if strings.HasSuffix(match, "/exec`") {
return errors.Wrapf(define.ErrSetSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n"))
return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrSetSecurityAttribute)
} else if strings.HasSuffix(match, "/current`") {
return errors.Wrapf(define.ErrGetSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n"))
return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrGetSecurityAttribute)
}
return errors.Wrapf(define.ErrSecurityAttribute, "%s: %s", name, strings.Trim(errStr, "\n"))
return fmt.Errorf("%s: %s: %w", name, strings.Trim(errStr, "\n"), define.ErrSecurityAttribute)
}
return errors.Wrapf(define.ErrOCIRuntime, "%s: %s", name, strings.Trim(runtimeMsg, "\n"))
return fmt.Errorf("%s: %s: %w", name, strings.Trim(runtimeMsg, "\n"), define.ErrOCIRuntime)
}
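
In getOCIRuntimeError above the sentinel changes position: `errors.Wrapf(define.ErrOCIRuntime, "%s: %s", ...)` takes the sentinel as its first argument, while `fmt.Errorf("%s: %s: %w", ..., define.ErrOCIRuntime)` takes it as the last operand. Callers that branch on the sentinel keep working, because `%w` leaves it in the chain. A self-contained sketch with a stand-in sentinel (the names are illustrative, not libpod's):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// errRuntimePermissionDenied is a stand-in for a define.Err* sentinel value.
var errRuntimePermissionDenied = errors.New("OCI runtime permission denied error")

// classifyRuntimeError shows the shape of the new calls: context first,
// sentinel last, joined by ": %w".
func classifyRuntimeError(name, runtimeMsg string) error {
	if strings.Contains(runtimeMsg, "permission denied") {
		return fmt.Errorf("%s: %s: %w", name, strings.Trim(runtimeMsg, "\n"), errRuntimePermissionDenied)
	}
	return fmt.Errorf("%s: %s", name, strings.Trim(runtimeMsg, "\n"))
}

func main() {
	err := classifyRuntimeError("crun", "open /dev/null: permission denied\n")
	fmt.Println(err)
	fmt.Println(errors.Is(err, errRuntimePermissionDenied)) // true
}
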
@ -1,6 +1,7 @@
|
|||
package libpod
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
|
|
@ -25,7 +26,6 @@ import (
|
|||
"github.com/containers/storage/pkg/idtools"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/opencontainers/runtime-tools/generate"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -141,7 +141,7 @@ func WithOCIRuntime(runtime string) RuntimeOption {
|
|||
}
|
||||
|
||||
if runtime == "" {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "must provide a valid path")
|
||||
return fmt.Errorf("must provide a valid path: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
rt.config.Engine.OCIRuntime = runtime
|
||||
|
|
@ -159,7 +159,7 @@ func WithConmonPath(path string) RuntimeOption {
|
|||
}
|
||||
|
||||
if path == "" {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "must provide a valid path")
|
||||
return fmt.Errorf("must provide a valid path: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
rt.config.Engine.ConmonPath = []string{path}
|
||||
|
|
@ -219,8 +219,8 @@ func WithCgroupManager(manager string) RuntimeOption {
|
|||
}
|
||||
|
||||
if manager != config.CgroupfsCgroupsManager && manager != config.SystemdCgroupsManager {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "Cgroup manager must be one of %s and %s",
|
||||
config.CgroupfsCgroupsManager, config.SystemdCgroupsManager)
|
||||
return fmt.Errorf("cgroup manager must be one of %s and %s: %w",
|
||||
config.CgroupfsCgroupsManager, config.SystemdCgroupsManager, define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
rt.config.Engine.CgroupManager = manager
|
||||
|
|
@ -250,7 +250,7 @@ func WithRegistriesConf(path string) RuntimeOption {
|
|||
logrus.Debugf("Setting custom registries.conf: %q", path)
|
||||
return func(rt *Runtime) error {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
return errors.Wrap(err, "locating specified registries.conf")
|
||||
return fmt.Errorf("locating specified registries.conf: %w", err)
|
||||
}
|
||||
if rt.imageContext == nil {
|
||||
rt.imageContext = &types.SystemContext{
|
||||
|
|
@ -272,7 +272,7 @@ func WithHooksDir(hooksDirs ...string) RuntimeOption {
|
|||
|
||||
for _, hooksDir := range hooksDirs {
|
||||
if hooksDir == "" {
|
||||
return errors.Wrap(define.ErrInvalidArg, "empty-string hook directories are not supported")
|
||||
return fmt.Errorf("empty-string hook directories are not supported: %w", define.ErrInvalidArg)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -494,7 +494,7 @@ func WithMigrateRuntime(requestedRuntime string) RuntimeOption {
|
|||
}
|
||||
|
||||
if requestedRuntime == "" {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "must provide a non-empty name for new runtime")
|
||||
return fmt.Errorf("must provide a non-empty name for new runtime: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
rt.migrateRuntime = requestedRuntime
|
||||
|
|
@ -513,7 +513,7 @@ func WithEventsLogger(logger string) RuntimeOption {
|
|||
}
|
||||
|
||||
if !events.IsValidEventer(logger) {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "%q is not a valid events backend", logger)
|
||||
return fmt.Errorf("%q is not a valid events backend: %w", logger, define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
rt.config.Engine.EventsLogger = logger
|
||||
|
|
@ -622,7 +622,7 @@ func WithSdNotifyMode(mode string) CtrCreateOption {
|
|||
|
||||
// verify values
|
||||
if len(mode) > 0 && !cutil.StringInSlice(strings.ToLower(mode), SdNotifyModeValues) {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "--sdnotify values must be one of %q", strings.Join(SdNotifyModeValues, ", "))
|
||||
return fmt.Errorf("--sdnotify values must be one of %q: %w", strings.Join(SdNotifyModeValues, ", "), define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
ctr.config.SdNotifyMode = mode
|
||||
|
|
@ -770,9 +770,9 @@ func WithStopSignal(signal syscall.Signal) CtrCreateOption {
|
|||
}
|
||||
|
||||
if signal == 0 {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "stop signal cannot be 0")
|
||||
return fmt.Errorf("stop signal cannot be 0: %w", define.ErrInvalidArg)
|
||||
} else if signal > 64 {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "stop signal cannot be greater than 64 (SIGRTMAX)")
|
||||
return fmt.Errorf("stop signal cannot be greater than 64 (SIGRTMAX): %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
ctr.config.StopSignal = uint(signal)
|
||||
|
|
@ -1080,11 +1080,11 @@ func WithLogDriver(driver string) CtrCreateOption {
|
|||
}
|
||||
switch driver {
|
||||
case "":
|
||||
return errors.Wrapf(define.ErrInvalidArg, "log driver must be set")
|
||||
return fmt.Errorf("log driver must be set: %w", define.ErrInvalidArg)
|
||||
case define.JournaldLogging, define.KubernetesLogging, define.JSONLogging, define.NoLogging, define.PassthroughLogging:
|
||||
break
|
||||
default:
|
||||
return errors.Wrapf(define.ErrInvalidArg, "invalid log driver")
|
||||
return fmt.Errorf("invalid log driver: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
ctr.config.LogDriver = driver
|
||||
|
|
@ -1100,7 +1100,7 @@ func WithLogPath(path string) CtrCreateOption {
|
|||
return define.ErrCtrFinalized
|
||||
}
|
||||
if path == "" {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "log path must be set")
|
||||
return fmt.Errorf("log path must be set: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
ctr.config.LogPath = path
|
||||
|
|
@ -1116,7 +1116,7 @@ func WithLogTag(tag string) CtrCreateOption {
|
|||
return define.ErrCtrFinalized
|
||||
}
|
||||
if tag == "" {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "log tag must be set")
|
||||
return fmt.Errorf("log tag must be set: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
ctr.config.LogTag = tag
|
||||
|
|
@ -1139,7 +1139,7 @@ func WithCgroupsMode(mode string) CtrCreateOption {
|
|||
case "enabled", "no-conmon", cgroupSplit:
|
||||
ctr.config.CgroupsMode = mode
|
||||
default:
|
||||
return errors.Wrapf(define.ErrInvalidArg, "Invalid cgroup mode %q", mode)
|
||||
return fmt.Errorf("invalid cgroup mode %q: %w", mode, define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -1154,7 +1154,7 @@ func WithCgroupParent(parent string) CtrCreateOption {
|
|||
}
|
||||
|
||||
if parent == "" {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "cgroup parent cannot be empty")
|
||||
return fmt.Errorf("cgroup parent cannot be empty: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
ctr.config.CgroupParent = parent
|
||||
|
|
@ -1184,7 +1184,7 @@ func WithDNS(dnsServers []string) CtrCreateOption {
|
|||
for _, i := range dnsServers {
|
||||
result := net.ParseIP(i)
|
||||
if result == nil {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "invalid IP address %s", i)
|
||||
return fmt.Errorf("invalid IP address %s: %w", i, define.ErrInvalidArg)
|
||||
}
|
||||
dns = append(dns, result)
|
||||
}
|
||||
|
|
@ -1201,7 +1201,7 @@ func WithDNSOption(dnsOptions []string) CtrCreateOption {
|
|||
return define.ErrCtrFinalized
|
||||
}
|
||||
if ctr.config.UseImageResolvConf {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "cannot add DNS options if container will not create /etc/resolv.conf")
|
||||
return fmt.Errorf("cannot add DNS options if container will not create /etc/resolv.conf: %w", define.ErrInvalidArg)
|
||||
}
|
||||
ctr.config.DNSOption = append(ctr.config.DNSOption, dnsOptions...)
|
||||
return nil
|
||||
|
|
@ -1375,7 +1375,7 @@ func WithRestartPolicy(policy string) CtrCreateOption {
|
|||
case define.RestartPolicyNone, define.RestartPolicyNo, define.RestartPolicyOnFailure, define.RestartPolicyAlways, define.RestartPolicyUnlessStopped:
|
||||
ctr.config.RestartPolicy = policy
|
||||
default:
|
||||
return errors.Wrapf(define.ErrInvalidArg, "%q is not a valid restart policy", policy)
|
||||
return fmt.Errorf("%q is not a valid restart policy: %w", policy, define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
@ -1407,7 +1407,7 @@ func WithNamedVolumes(volumes []*ContainerNamedVolume) CtrCreateOption {
|
|||
for _, vol := range volumes {
|
||||
mountOpts, err := util.ProcessOptions(vol.Options, false, "")
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "processing options for named volume %q mounted at %q", vol.Name, vol.Dest)
|
||||
return fmt.Errorf("processing options for named volume %q mounted at %q: %w", vol.Name, vol.Dest, err)
|
||||
}
|
||||
|
||||
ctr.config.NamedVolumes = append(ctr.config.NamedVolumes, &ContainerNamedVolume{
|
||||
|
|
@ -1720,7 +1720,7 @@ func WithTimezone(path string) CtrCreateOption {
|
|||
}
|
||||
// We don't want to mount a timezone directory
|
||||
if file.IsDir() {
|
||||
return errors.New("Invalid timezone: is a directory")
|
||||
return errors.New("invalid timezone: is a directory")
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1736,7 +1736,7 @@ func WithUmask(umask string) CtrCreateOption {
|
|||
return define.ErrCtrFinalized
|
||||
}
|
||||
if !define.UmaskRegex.MatchString(umask) {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "Invalid umask string %s", umask)
|
||||
return fmt.Errorf("invalid umask string %s: %w", umask, define.ErrInvalidArg)
|
||||
}
|
||||
ctr.config.Umask = umask
|
||||
return nil
|
||||
|
|
@ -1809,7 +1809,7 @@ func WithInitCtrType(containerType string) CtrCreateOption {
|
|||
ctr.config.InitContainerType = containerType
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("%s is invalid init container type", containerType)
|
||||
return fmt.Errorf("%s is invalid init container type", containerType)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1843,12 +1843,12 @@ func WithInfraConfig(compatibleOptions InfraInherit) CtrCreateOption {
|
|||
}
|
||||
compatMarshal, err := json.Marshal(compatibleOptions)
|
||||
if err != nil {
|
||||
return errors.New("Could not marshal compatible options")
|
||||
return errors.New("could not marshal compatible options")
|
||||
}
|
||||
|
||||
err = json.Unmarshal(compatMarshal, ctr.config)
|
||||
if err != nil {
|
||||
return errors.New("Could not unmarshal compatible options into contrainer config")
|
||||
return errors.New("could not unmarshal compatible options into contrainer config")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ package plugin
|
|||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
|
|
@ -12,11 +13,12 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/containers/podman/v4/libpod/define"
|
||||
"github.com/docker/go-plugins-helpers/sdk"
|
||||
"github.com/docker/go-plugins-helpers/volume"
|
||||
jsoniter "github.com/json-iterator/go"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -75,7 +77,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
|
|||
// Hit the Activate endpoint to find out if it is, and if so what kind
|
||||
req, err := http.NewRequest("POST", "http://plugin"+activatePath, nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error making request to volume plugin %s activation endpoint", newPlugin.Name)
|
||||
return fmt.Errorf("error making request to volume plugin %s activation endpoint: %w", newPlugin.Name, err)
|
||||
}
|
||||
|
||||
req.Header.Set("Host", newPlugin.getURI())
|
||||
|
|
@ -83,25 +85,25 @@ func validatePlugin(newPlugin *VolumePlugin) error {
|
|||
|
||||
resp, err := newPlugin.Client.Do(req)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error sending request to plugin %s activation endpoint", newPlugin.Name)
|
||||
return fmt.Errorf("error sending request to plugin %s activation endpoint: %w", newPlugin.Name, err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Response code MUST be 200. Anything else, we have to assume it's not
|
||||
// a valid plugin.
|
||||
if resp.StatusCode != 200 {
|
||||
return errors.Wrapf(ErrNotPlugin, "got status code %d from activation endpoint for plugin %s", resp.StatusCode, newPlugin.Name)
|
||||
return fmt.Errorf("got status code %d from activation endpoint for plugin %s: %w", resp.StatusCode, newPlugin.Name, ErrNotPlugin)
|
||||
}
|
||||
|
||||
// Read and decode the body so we can tell if this is a volume plugin.
|
||||
respBytes, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading activation response body from plugin %s", newPlugin.Name)
|
||||
return fmt.Errorf("error reading activation response body from plugin %s: %w", newPlugin.Name, err)
|
||||
}
|
||||
|
||||
respStruct := new(activateResponse)
|
||||
if err := json.Unmarshal(respBytes, respStruct); err != nil {
|
||||
return errors.Wrapf(err, "error unmarshalling plugin %s activation response", newPlugin.Name)
|
||||
return fmt.Errorf("error unmarshalling plugin %s activation response: %w", newPlugin.Name, err)
|
||||
}
|
||||
|
||||
foundVolume := false
|
||||
|
|
@ -113,7 +115,7 @@ func validatePlugin(newPlugin *VolumePlugin) error {
|
|||
}
|
||||
|
||||
if !foundVolume {
|
||||
return errors.Wrapf(ErrNotVolumePlugin, "plugin %s does not implement volume plugin, instead provides %s", newPlugin.Name, strings.Join(respStruct.Implements, ", "))
|
||||
return fmt.Errorf("plugin %s does not implement volume plugin, instead provides %s: %w", newPlugin.Name, strings.Join(respStruct.Implements, ", "), ErrNotVolumePlugin)
|
||||
}
|
||||
|
||||
if plugins == nil {
|
||||
|
|
@ -135,7 +137,7 @@ func GetVolumePlugin(name string, path string, timeout int) (*VolumePlugin, erro
|
|||
if exists {
|
||||
// This shouldn't be possible, but just in case...
|
||||
if plugin.SocketPath != filepath.Clean(path) {
|
||||
return nil, errors.Wrapf(define.ErrInvalidArg, "requested path %q for volume plugin %s does not match pre-existing path for plugin, %q", path, name, plugin.SocketPath)
|
||||
return nil, fmt.Errorf("requested path %q for volume plugin %s does not match pre-existing path for plugin, %q: %w", path, name, plugin.SocketPath, define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
return plugin, nil
|
||||
|
|
@ -169,10 +171,10 @@ func GetVolumePlugin(name string, path string, timeout int) (*VolumePlugin, erro
|
|||
|
||||
stat, err := os.Stat(newPlugin.SocketPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "cannot access plugin %s socket %q", name, newPlugin.SocketPath)
|
||||
return nil, fmt.Errorf("cannot access plugin %s socket %q: %w", name, newPlugin.SocketPath, err)
|
||||
}
|
||||
if stat.Mode()&os.ModeSocket == 0 {
|
||||
return nil, errors.Wrapf(ErrNotPlugin, "volume %s path %q is not a unix socket", name, newPlugin.SocketPath)
|
||||
return nil, fmt.Errorf("volume %s path %q is not a unix socket: %w", name, newPlugin.SocketPath, ErrNotPlugin)
|
||||
}
|
||||
|
||||
if err := validatePlugin(newPlugin); err != nil {
|
||||
|
|
@ -194,10 +196,10 @@ func (p *VolumePlugin) verifyReachable() error {
|
|||
pluginsLock.Lock()
|
||||
defer pluginsLock.Unlock()
|
||||
delete(plugins, p.Name)
|
||||
return errors.Wrapf(ErrPluginRemoved, p.Name)
|
||||
return fmt.Errorf("%s: %w", p.Name, ErrPluginRemoved)
|
||||
}
|
||||
|
||||
return errors.Wrapf(err, "error accessing plugin %s", p.Name)
|
||||
return fmt.Errorf("error accessing plugin %s: %w", p.Name, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -213,13 +215,13 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, endpoint string) (*http.R
|
|||
if toJSON != nil {
|
||||
reqJSON, err = json.Marshal(toJSON)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error marshalling request JSON for volume plugin %s endpoint %s", p.Name, endpoint)
|
||||
return nil, fmt.Errorf("error marshalling request JSON for volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", "http://plugin"+endpoint, bytes.NewReader(reqJSON))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error making request to volume plugin %s endpoint %s", p.Name, endpoint)
|
||||
return nil, fmt.Errorf("error making request to volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
|
||||
}
|
||||
|
||||
req.Header.Set("Host", p.getURI())
|
||||
|
|
@ -227,7 +229,7 @@ func (p *VolumePlugin) sendRequest(toJSON interface{}, endpoint string) (*http.R
|
|||
|
||||
resp, err := p.Client.Do(req)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error sending request to volume plugin %s endpoint %s", p.Name, endpoint)
|
||||
return nil, fmt.Errorf("error sending request to volume plugin %s endpoint %s: %w", p.Name, endpoint, err)
|
||||
}
|
||||
// We are *deliberately not closing* response here. It is the
|
||||
// responsibility of the caller to do so after reading the response.
|
||||
|
|
@ -241,9 +243,9 @@ func (p *VolumePlugin) makeErrorResponse(err, endpoint, volName string) error {
err = "empty error from plugin"
}
if volName != "" {
return errors.Wrapf(errors.New(err), "error on %s on volume %s in volume plugin %s", endpoint, volName, p.Name)
return fmt.Errorf("error on %s on volume %s in volume plugin %s: %w", endpoint, volName, p.Name, errors.New(err))
}
return errors.Wrapf(errors.New(err), "error on %s in volume plugin %s", endpoint, p.Name)
return fmt.Errorf("error on %s in volume plugin %s: %w", endpoint, p.Name, errors.New(err))
}

// Handle error responses from plugin
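
In makeErrorResponse above, the plugin's error string is first turned into an error with `errors.New(err)` and then wrapped with endpoint, volume and plugin context via `%w`. Because the inner error is created on the spot, the visible message is the same as with plain formatting; keeping `%w` simply leaves the plugin's own text reachable through `errors.Unwrap`. A small sketch that mirrors the shape of that helper (the plugin name and message are made up):

package main

import (
	"errors"
	"fmt"
)

// makeErrorResponse mirrors the shape of the helper above: msg is the raw
// error string returned by the volume plugin over its HTTP API.
func makeErrorResponse(msg, endpoint, volName, pluginName string) error {
	if msg == "" {
		msg = "empty error from plugin"
	}
	inner := errors.New(msg)
	if volName != "" {
		return fmt.Errorf("error on %s on volume %s in volume plugin %s: %w", endpoint, volName, pluginName, inner)
	}
	return fmt.Errorf("error on %s in volume plugin %s: %w", endpoint, pluginName, inner)
}

func main() {
	err := makeErrorResponse("no space left on device", "/VolumeDriver.Create", "myvol", "local-disk")
	fmt.Println(err)                // error on /VolumeDriver.Create on volume myvol in volume plugin local-disk: no space left on device
	fmt.Println(errors.Unwrap(err)) // no space left on device
}
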
@ -255,12 +257,12 @@ func (p *VolumePlugin) handleErrorResponse(resp *http.Response, endpoint, volNam
|
|||
if resp.StatusCode != 200 {
|
||||
errResp, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
|
||||
return fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
|
||||
}
|
||||
|
||||
errStruct := new(volume.ErrorResponse)
|
||||
if err := json.Unmarshal(errResp, errStruct); err != nil {
|
||||
return errors.Wrapf(err, "error unmarshalling JSON response from volume plugin %s", p.Name)
|
||||
return fmt.Errorf("error unmarshalling JSON response from volume plugin %s: %w", p.Name, err)
|
||||
}
|
||||
|
||||
return p.makeErrorResponse(errStruct.Err, endpoint, volName)
|
||||
|
|
@ -272,7 +274,7 @@ func (p *VolumePlugin) handleErrorResponse(resp *http.Response, endpoint, volNam
|
|||
// CreateVolume creates a volume in the plugin.
|
||||
func (p *VolumePlugin) CreateVolume(req *volume.CreateRequest) error {
|
||||
if req == nil {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to CreateVolume")
|
||||
return fmt.Errorf("must provide non-nil request to CreateVolume: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
if err := p.verifyReachable(); err != nil {
|
||||
|
|
@ -310,12 +312,12 @@ func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {
|
|||
|
||||
volumeRespBytes, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
|
||||
return nil, fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
|
||||
}
|
||||
|
||||
volumeResp := new(volume.ListResponse)
|
||||
if err := json.Unmarshal(volumeRespBytes, volumeResp); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshalling volume plugin %s list response", p.Name)
|
||||
return nil, fmt.Errorf("error unmarshalling volume plugin %s list response: %w", p.Name, err)
|
||||
}
|
||||
|
||||
return volumeResp.Volumes, nil
|
||||
|
|
@ -324,7 +326,7 @@ func (p *VolumePlugin) ListVolumes() ([]*volume.Volume, error) {
|
|||
// GetVolume gets a single volume from the plugin.
|
||||
func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error) {
|
||||
if req == nil {
|
||||
return nil, errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to GetVolume")
|
||||
return nil, fmt.Errorf("must provide non-nil request to GetVolume: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
if err := p.verifyReachable(); err != nil {
|
||||
|
|
@ -345,12 +347,12 @@ func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error)
|
|||
|
||||
getRespBytes, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
|
||||
return nil, fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
|
||||
}
|
||||
|
||||
getResp := new(volume.GetResponse)
|
||||
if err := json.Unmarshal(getRespBytes, getResp); err != nil {
|
||||
return nil, errors.Wrapf(err, "error unmarshalling volume plugin %s get response", p.Name)
|
||||
return nil, fmt.Errorf("error unmarshalling volume plugin %s get response: %w", p.Name, err)
|
||||
}
|
||||
|
||||
return getResp.Volume, nil
|
||||
|
|
@ -359,7 +361,7 @@ func (p *VolumePlugin) GetVolume(req *volume.GetRequest) (*volume.Volume, error)
|
|||
// RemoveVolume removes a single volume from the plugin.
|
||||
func (p *VolumePlugin) RemoveVolume(req *volume.RemoveRequest) error {
|
||||
if req == nil {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to RemoveVolume")
|
||||
return fmt.Errorf("must provide non-nil request to RemoveVolume: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
if err := p.verifyReachable(); err != nil {
|
||||
|
|
@ -380,7 +382,7 @@ func (p *VolumePlugin) RemoveVolume(req *volume.RemoveRequest) error {
|
|||
// GetVolumePath gets the path the given volume is mounted at.
|
||||
func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
|
||||
if req == nil {
|
||||
return "", errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to GetVolumePath")
|
||||
return "", fmt.Errorf("must provide non-nil request to GetVolumePath: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
if err := p.verifyReachable(); err != nil {
|
||||
|
|
@ -401,12 +403,12 @@ func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
|
|||
|
||||
pathRespBytes, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
|
||||
return "", fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
|
||||
}
|
||||
|
||||
pathResp := new(volume.PathResponse)
|
||||
if err := json.Unmarshal(pathRespBytes, pathResp); err != nil {
|
||||
return "", errors.Wrapf(err, "error unmarshalling volume plugin %s path response", p.Name)
|
||||
return "", fmt.Errorf("error unmarshalling volume plugin %s path response: %w", p.Name, err)
|
||||
}
|
||||
|
||||
return pathResp.Mountpoint, nil
|
||||
|
|
@ -417,7 +419,7 @@ func (p *VolumePlugin) GetVolumePath(req *volume.PathRequest) (string, error) {
|
|||
// the path the volume has been mounted at.
|
||||
func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
|
||||
if req == nil {
|
||||
return "", errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to MountVolume")
|
||||
return "", fmt.Errorf("must provide non-nil request to MountVolume: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
if err := p.verifyReachable(); err != nil {
|
||||
|
|
@ -438,12 +440,12 @@ func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
|
|||
|
||||
mountRespBytes, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "error reading response body from volume plugin %s", p.Name)
|
||||
return "", fmt.Errorf("error reading response body from volume plugin %s: %w", p.Name, err)
|
||||
}
|
||||
|
||||
mountResp := new(volume.MountResponse)
|
||||
if err := json.Unmarshal(mountRespBytes, mountResp); err != nil {
|
||||
return "", errors.Wrapf(err, "error unmarshalling volume plugin %s path response", p.Name)
|
||||
return "", fmt.Errorf("error unmarshalling volume plugin %s path response: %w", p.Name, err)
|
||||
}
|
||||
|
||||
return mountResp.Mountpoint, nil
|
||||
|
|
@ -453,7 +455,7 @@ func (p *VolumePlugin) MountVolume(req *volume.MountRequest) (string, error) {
|
|||
// container that is unmounting, used for internal record-keeping by the plugin.
|
||||
func (p *VolumePlugin) UnmountVolume(req *volume.UnmountRequest) error {
|
||||
if req == nil {
|
||||
return errors.Wrapf(define.ErrInvalidArg, "must provide non-nil request to UnmountVolume")
|
||||
return fmt.Errorf("must provide non-nil request to UnmountVolume: %w", define.ErrInvalidArg)
|
||||
}
|
||||
|
||||
if err := p.verifyReachable(); err != nil {
|
||||
|
|
|
|||
|
|
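Every hunk above follows the same mechanical translation: errors.Wrapf(err, "context", args...) becomes fmt.Errorf("context: %w", args..., err), with the wrapped error moved to the end of the argument list and the %w verb keeping it reachable for errors.Is, errors.As, and errors.Unwrap. A minimal, self-contained sketch of that behaviour; the plugin name, endpoint, and sentinel below are invented for illustration and are not taken from the podman code:

package main

import (
    "errors"
    "fmt"
)

// errPluginTimeout is an invented sentinel standing in for the kinds of
// errors (define.ErrInvalidArg, a failed HTTP call, ...) that the real code wraps.
var errPluginTimeout = errors.New("volume plugin timed out")

// makeRequest mirrors the converted pattern: add context with fmt.Errorf and
// keep the underlying error reachable through the %w verb.
func makeRequest(pluginName, endpoint string) error {
    return fmt.Errorf("error making request to volume plugin %s endpoint %s: %w",
        pluginName, endpoint, errPluginTimeout)
}

func main() {
    err := makeRequest("local-fs", "/VolumeDriver.Create")
    fmt.Println(err)                                    // contextual message, as before
    fmt.Println(errors.Is(err, errPluginTimeout))       // true: %w preserved the cause
    fmt.Println(errors.Unwrap(err) == errPluginTimeout) // true: one level of unwrapping
}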
@@ -1,6 +1,7 @@
package libpod

import (
    "errors"
    "fmt"
    "sort"
    "strings"

@@ -10,7 +11,6 @@ import (
    "github.com/containers/podman/v4/libpod/define"
    "github.com/containers/podman/v4/libpod/lock"
    "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/pkg/errors"
)

// Pod represents a group of containers that are managed together.

@@ -312,7 +312,7 @@ func (p *Pod) CgroupPath() (string, error) {
        return "", err
    }
    if p.state.InfraContainerID == "" {
        return "", errors.Wrap(define.ErrNoSuchCtr, "pod has no infra container")
        return "", fmt.Errorf("pod has no infra container: %w", define.ErrNoSuchCtr)
    }
    return p.state.CgroupPath, nil
}

@@ -386,7 +386,7 @@ func (p *Pod) infraContainer() (*Container, error) {
        return nil, err
    }
    if id == "" {
        return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no infra container")
        return nil, fmt.Errorf("pod has no infra container: %w", define.ErrNoSuchCtr)
    }

    return p.runtime.state.Container(id)

@@ -426,7 +426,7 @@ func (p *Pod) GetPodStats(previousContainerStats map[string]*define.ContainerSta
        newStats, err := c.GetContainerStats(previousContainerStats[c.ID()])
        // If the container wasn't running, don't include it
        // but also suppress the error
        if err != nil && errors.Cause(err) != define.ErrCtrStateInvalid {
        if err != nil && !errors.Is(err, define.ErrCtrStateInvalid) {
            return nil, err
        }
        if err == nil {
@@ -2,6 +2,7 @@ package libpod

import (
    "context"
    "errors"
    "fmt"

    "github.com/containers/common/pkg/cgroups"

@@ -10,7 +11,6 @@ import (
    "github.com/containers/podman/v4/pkg/parallel"
    "github.com/containers/podman/v4/pkg/rootless"
    "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

@@ -32,7 +32,7 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
            return err
        }
        if rc != 0 {
            return errors.Errorf("init container %s exited with code %d", initCon.ID(), rc)
            return fmt.Errorf("init container %s exited with code %d", initCon.ID(), rc)
        }
        // If the container is a once init container, we need to remove it
        // after it runs

@@ -42,7 +42,7 @@ func (p *Pod) startInitContainers(ctx context.Context) error {
            var time *uint
            if err := p.runtime.removeContainer(ctx, initCon, false, false, true, time); err != nil {
                icLock.Unlock()
                return errors.Wrapf(err, "failed to remove once init container %s", initCon.ID())
                return fmt.Errorf("failed to remove once init container %s: %w", initCon.ID(), err)
            }
            // Removing a container this way requires an explicit call to clean up the db
            if err := p.runtime.state.RemoveContainerFromPod(p, initCon); err != nil {

@@ -92,12 +92,12 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
    // Build a dependency graph of containers in the pod
    graph, err := BuildContainerGraph(allCtrs)
    if err != nil {
        return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
        return nil, fmt.Errorf("error generating dependency graph for pod %s: %w", p.ID(), err)
    }
    // If there are no containers without dependencies, we can't start
    // Error out
    if len(graph.noDepNodes) == 0 {
        return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
        return nil, fmt.Errorf("no containers in pod %s have no dependencies, cannot start pod: %w", p.ID(), define.ErrNoSuchCtr)
    }

    ctrErrors := make(map[string]error)

@@ -109,7 +109,7 @@ func (p *Pod) Start(ctx context.Context) (map[string]error, error) {
    }

    if len(ctrErrors) > 0 {
        return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error starting some containers")
        return ctrErrors, fmt.Errorf("error starting some containers: %w", define.ErrPodPartialFail)
    }
    defer p.newPodEvent(events.Start)
    return nil, nil

@@ -193,7 +193,7 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
    // Get returned error for every container we worked on
    for id, channel := range ctrErrChan {
        if err := <-channel; err != nil {
            if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
            if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
                continue
            }
            ctrErrors[id] = err

@@ -201,7 +201,7 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m
    }

    if len(ctrErrors) > 0 {
        return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
        return ctrErrors, fmt.Errorf("error stopping some containers: %w", define.ErrPodPartialFail)
    }

    if err := p.maybeStopServiceContainer(); err != nil {

@@ -297,7 +297,7 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
    // Get returned error for every container we worked on
    for id, channel := range ctrErrChan {
        if err := <-channel; err != nil {
            if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
            if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
                continue
            }
            ctrErrors[id] = err

@@ -305,7 +305,7 @@ func (p *Pod) Cleanup(ctx context.Context) (map[string]error, error) {
    }

    if len(ctrErrors) > 0 {
        return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error cleaning up some containers")
        return ctrErrors, fmt.Errorf("error cleaning up some containers: %w", define.ErrPodPartialFail)
    }

    if err := p.maybeStopServiceContainer(); err != nil {

@@ -338,10 +338,10 @@ func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
    if rootless.IsRootless() {
        cgroupv2, err := cgroups.IsCgroup2UnifiedMode()
        if err != nil {
            return nil, errors.Wrap(err, "failed to determine cgroupversion")
            return nil, fmt.Errorf("failed to determine cgroupversion: %w", err)
        }
        if !cgroupv2 {
            return nil, errors.Wrap(define.ErrNoCgroups, "can not pause pods containing rootless containers with cgroup V1")
            return nil, fmt.Errorf("can not pause pods containing rootless containers with cgroup V1: %w", define.ErrNoCgroups)
        }
    }

@@ -368,7 +368,7 @@ func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
    // Get returned error for every container we worked on
    for id, channel := range ctrErrChan {
        if err := <-channel; err != nil {
            if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
            if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
                continue
            }
            ctrErrors[id] = err

@@ -376,7 +376,7 @@ func (p *Pod) Pause(ctx context.Context) (map[string]error, error) {
    }

    if len(ctrErrors) > 0 {
        return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error pausing some containers")
        return ctrErrors, fmt.Errorf("error pausing some containers: %w", define.ErrPodPartialFail)
    }
    return nil, nil
}

@@ -424,7 +424,7 @@ func (p *Pod) Unpause(ctx context.Context) (map[string]error, error) {
    // Get returned error for every container we worked on
    for id, channel := range ctrErrChan {
        if err := <-channel; err != nil {
            if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
            if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
                continue
            }
            ctrErrors[id] = err

@@ -432,7 +432,7 @@ func (p *Pod) Unpause(ctx context.Context) (map[string]error, error) {
    }

    if len(ctrErrors) > 0 {
        return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error unpausing some containers")
        return ctrErrors, fmt.Errorf("error unpausing some containers: %w", define.ErrPodPartialFail)
    }
    return nil, nil
}

@@ -470,7 +470,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
    // Build a dependency graph of containers in the pod
    graph, err := BuildContainerGraph(allCtrs)
    if err != nil {
        return nil, errors.Wrapf(err, "error generating dependency graph for pod %s", p.ID())
        return nil, fmt.Errorf("error generating dependency graph for pod %s: %w", p.ID(), err)
    }

    ctrErrors := make(map[string]error)

@@ -479,7 +479,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
    // If there are no containers without dependencies, we can't start
    // Error out
    if len(graph.noDepNodes) == 0 {
        return nil, errors.Wrapf(define.ErrNoSuchCtr, "no containers in pod %s have no dependencies, cannot start pod", p.ID())
        return nil, fmt.Errorf("no containers in pod %s have no dependencies, cannot start pod: %w", p.ID(), define.ErrNoSuchCtr)
    }

    // Traverse the graph beginning at nodes with no dependencies

@@ -488,7 +488,7 @@ func (p *Pod) Restart(ctx context.Context) (map[string]error, error) {
    }

    if len(ctrErrors) > 0 {
        return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error stopping some containers")
        return ctrErrors, fmt.Errorf("error stopping some containers: %w", define.ErrPodPartialFail)
    }
    p.newPodEvent(events.Stop)
    p.newPodEvent(events.Start)

@@ -539,7 +539,7 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
    // Get returned error for every container we worked on
    for id, channel := range ctrErrChan {
        if err := <-channel; err != nil {
            if errors.Cause(err) == define.ErrCtrStateInvalid || errors.Cause(err) == define.ErrCtrStopped {
            if errors.Is(err, define.ErrCtrStateInvalid) || errors.Is(err, define.ErrCtrStopped) {
                continue
            }
            ctrErrors[id] = err

@@ -547,7 +547,7 @@ func (p *Pod) Kill(ctx context.Context, signal uint) (map[string]error, error) {
    }

    if len(ctrErrors) > 0 {
        return ctrErrors, errors.Wrapf(define.ErrPodPartialFail, "error killing some containers")
        return ctrErrors, fmt.Errorf("error killing some containers: %w", define.ErrPodPartialFail)
    }

    if err := p.maybeStopServiceContainer(); err != nil {
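The comparisons in these pod hunks change from errors.Cause(err) == define.ErrX to errors.Is(err, define.ErrX). The difference matters once errors are wrapped with %w: errors.Is walks the entire unwrap chain, while an equality check only succeeds when nothing rewrapped the error in between. A small illustrative sketch; the sentinel and function names are invented for the example and only approximate the shape of the pod code:

package main

import (
    "errors"
    "fmt"
)

// errCtrStateInvalid stands in for define.ErrCtrStateInvalid.
var errCtrStateInvalid = errors.New("container state improper")

// stopOne wraps the sentinel once, and stopAll wraps that error again --
// roughly the shape of pod operations that aggregate per-container failures.
func stopOne(id string) error {
    return fmt.Errorf("container %s is not running: %w", id, errCtrStateInvalid)
}

func stopAll() error {
    if err := stopOne("abc123"); err != nil {
        return fmt.Errorf("error stopping some containers: %w", err)
    }
    return nil
}

func main() {
    err := stopAll()
    // errors.Is walks the whole Unwrap chain, so the sentinel is found even
    // behind two layers of wrapping; a direct equality comparison against the
    // returned error would miss it.
    fmt.Println(errors.Is(err, errCtrStateInvalid)) // true
    fmt.Println(err == errCtrStateInvalid)          // false
}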
@@ -9,7 +9,6 @@ import (
    "github.com/containers/podman/v4/libpod/define"
    "github.com/containers/podman/v4/pkg/rootless"
    "github.com/containers/storage/pkg/stringid"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

@@ -39,7 +38,7 @@ func (p *Pod) updatePod() error {
// Save pod state to database
func (p *Pod) save() error {
    if err := p.runtime.state.SavePod(p); err != nil {
        return errors.Wrapf(err, "error saving pod %s state", p.ID())
        return fmt.Errorf("error saving pod %s state: %w", p.ID(), err)
    }

    return nil

@@ -61,7 +60,7 @@ func (p *Pod) refresh() error {
    // Retrieve the pod's lock
    lock, err := p.runtime.lockManager.AllocateAndRetrieveLock(p.config.LockID)
    if err != nil {
        return errors.Wrapf(err, "error retrieving lock %d for pod %s", p.config.LockID, p.ID())
        return fmt.Errorf("error retrieving lock %d for pod %s: %w", p.config.LockID, p.ID(), err)
    }
    p.lock = lock

@@ -81,7 +80,7 @@ func (p *Pod) refresh() error {
            logrus.Debugf("setting pod cgroup to %s", p.state.CgroupPath)
        }
    default:
        return errors.Wrapf(define.ErrInvalidArg, "unknown cgroups manager %s specified", p.runtime.config.Engine.CgroupManager)
        return fmt.Errorf("unknown cgroups manager %s specified: %w", p.runtime.config.Engine.CgroupManager, define.ErrInvalidArg)
    }
}
@@ -2,6 +2,7 @@ package libpod

import (
    "context"
    "errors"
    "fmt"
    "os"
    "path/filepath"

@@ -13,7 +14,6 @@ import (
    "github.com/containers/podman/v4/pkg/rootless"
    "github.com/containers/podman/v4/pkg/util"
    "github.com/containers/storage"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

@@ -49,7 +49,7 @@ func (r *Runtime) removeAllDirs() error {
    // Volume path
    if err := os.RemoveAll(r.config.Engine.VolumePath); err != nil {
        lastErr = errors.Wrapf(err, "removing volume path")
        lastErr = fmt.Errorf("removing volume path: %w", err)
    }

    // Tmpdir

@@ -57,7 +57,7 @@ func (r *Runtime) removeAllDirs() error {
        if lastErr != nil {
            logrus.Errorf("Reset: %v", lastErr)
        }
        lastErr = errors.Wrapf(err, "removing tmp dir")
        lastErr = fmt.Errorf("removing tmp dir: %w", err)
    }

    // Runroot

@@ -65,7 +65,7 @@ func (r *Runtime) removeAllDirs() error {
        if lastErr != nil {
            logrus.Errorf("Reset: %v", lastErr)
        }
        lastErr = errors.Wrapf(err, "removing run root")
        lastErr = fmt.Errorf("removing run root: %w", err)
    }

    // Static dir

@@ -73,7 +73,7 @@ func (r *Runtime) removeAllDirs() error {
        if lastErr != nil {
            logrus.Errorf("Reset: %v", lastErr)
        }
        lastErr = errors.Wrapf(err, "removing static dir")
        lastErr = fmt.Errorf("removing static dir: %w", err)
    }

    // Graph root

@@ -81,7 +81,7 @@ func (r *Runtime) removeAllDirs() error {
        if lastErr != nil {
            logrus.Errorf("Reset: %v", lastErr)
        }
        lastErr = errors.Wrapf(err, "removing graph root")
        lastErr = fmt.Errorf("removing graph root: %w", err)
    }

    return lastErr

@@ -96,7 +96,7 @@ func (r *Runtime) reset(ctx context.Context) error {
    }
    for _, p := range pods {
        if err := r.RemovePod(ctx, p, true, true, timeout); err != nil {
            if errors.Cause(err) == define.ErrNoSuchPod {
            if errors.Is(err, define.ErrNoSuchPod) {
                continue
            }
            logrus.Errorf("Removing Pod %s: %v", p.ID(), err)

@@ -111,7 +111,7 @@ func (r *Runtime) reset(ctx context.Context) error {
    for _, c := range ctrs {
        if err := r.RemoveContainer(ctx, c, true, true, timeout); err != nil {
            if err := r.RemoveStorageContainer(c.ID(), true); err != nil {
                if errors.Cause(err) == define.ErrNoSuchCtr {
                if errors.Is(err, define.ErrNoSuchCtr) {
                    continue
                }
                logrus.Errorf("Removing container %s: %v", c.ID(), err)

@@ -134,7 +134,7 @@ func (r *Runtime) reset(ctx context.Context) error {
    }
    for _, v := range volumes {
        if err := r.RemoveVolume(ctx, v, true, timeout); err != nil {
            if errors.Cause(err) == define.ErrNoSuchVolume {
            if errors.Is(err, define.ErrNoSuchVolume) {
                continue
            }
            logrus.Errorf("Removing volume %s: %v", v.config.Name, err)

@@ -164,7 +164,7 @@ func (r *Runtime) reset(ctx context.Context) error {
        if prevError != nil {
            logrus.Error(prevError)
        }
        prevError = errors.Errorf("failed to remove runtime graph root dir %s, since it is the same as XDG_RUNTIME_DIR", graphRoot)
        prevError = fmt.Errorf("failed to remove runtime graph root dir %s, since it is the same as XDG_RUNTIME_DIR", graphRoot)
    } else {
        if err := os.RemoveAll(graphRoot); err != nil {
            if prevError != nil {

@@ -178,7 +178,7 @@ func (r *Runtime) reset(ctx context.Context) error {
        if prevError != nil {
            logrus.Error(prevError)
        }
        prevError = errors.Errorf("failed to remove runtime root dir %s, since it is the same as XDG_RUNTIME_DIR", runRoot)
        prevError = fmt.Errorf("failed to remove runtime root dir %s, since it is the same as XDG_RUNTIME_DIR", runRoot)
    } else {
        if err := os.RemoveAll(runRoot); err != nil {
            if prevError != nil {

@@ -199,7 +199,7 @@ func (r *Runtime) reset(ctx context.Context) error {
        if prevError != nil {
            logrus.Error(prevError)
        }
        prevError = errors.Errorf("failed to remove runtime tmpdir %s, since it is the same as XDG_RUNTIME_DIR", tempDir)
        prevError = fmt.Errorf("failed to remove runtime tmpdir %s, since it is the same as XDG_RUNTIME_DIR", tempDir)
    } else {
        if err := os.RemoveAll(tempDir); err != nil {
            if prevError != nil {
@@ -2,10 +2,10 @@ package libpod

import (
    "context"
    "errors"
    "fmt"

    "github.com/containers/podman/v4/libpod/define"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

@@ -29,7 +29,7 @@ func (p *Pod) hasServiceContainer() bool {
func (p *Pod) serviceContainer() (*Container, error) {
    id := p.config.ServiceContainerID
    if id == "" {
        return nil, errors.Wrap(define.ErrNoSuchCtr, "pod has no service container")
        return nil, fmt.Errorf("pod has no service container: %w", define.ErrNoSuchCtr)
    }
    return p.runtime.state.Container(id)
}
@@ -1,18 +1,18 @@
package shutdown

import (
    "errors"
    "os"
    "os/signal"
    "sync"
    "syscall"
    "time"

    "github.com/pkg/errors"
    logrusImport "github.com/sirupsen/logrus"
)

var (
    ErrHandlerExists error = errors.New("handler with given name already exists")
    ErrHandlerExists = errors.New("handler with given name already exists")
)

var (
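In the shutdown package the sentinel now comes from the standard library's errors.New, and the explicit error type annotation on the var is dropped as redundant. Callers keep matching the sentinel with errors.Is, even when it is returned wrapped with extra context. A hedged sketch of that usage; the register helper and its map are hypothetical and not part of the real handler code:

package main

import (
    "errors"
    "fmt"
)

// ErrHandlerExists mirrors the sentinel from the shutdown package; with the
// standard library the explicit "error" type on the declaration is redundant.
var ErrHandlerExists = errors.New("handler with given name already exists")

// register is a hypothetical helper showing how such a sentinel is usually
// returned with extra context while staying matchable via errors.Is.
func register(name string, handlers map[string]func()) error {
    if _, ok := handlers[name]; ok {
        return fmt.Errorf("registering shutdown handler %q: %w", name, ErrHandlerExists)
    }
    handlers[name] = func() {}
    return nil
}

func main() {
    handlers := map[string]func(){}
    _ = register("sigterm", handlers)
    if err := register("sigterm", handlers); errors.Is(err, ErrHandlerExists) {
        fmt.Println("duplicate handler:", err)
    }
}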
@@ -4,6 +4,7 @@
package libpod

import (
    "fmt"
    "math"
    "strings"
    "syscall"

@@ -13,7 +14,6 @@ import (

    "github.com/containers/common/pkg/cgroups"
    "github.com/containers/podman/v4/libpod/define"
    "github.com/pkg/errors"
)

// GetContainerStats gets the running stats for a given container.

@@ -25,7 +25,7 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
    stats.Name = c.Name()

    if c.config.NoCgroups {
        return nil, errors.Wrapf(define.ErrNoCgroups, "cannot run top on container %s as it did not create a cgroup", c.ID())
        return nil, fmt.Errorf("cannot run top on container %s as it did not create a cgroup: %w", c.ID(), define.ErrNoCgroups)
    }

    if !c.batched {

@@ -55,13 +55,13 @@ func (c *Container) GetContainerStats(previousStats *define.ContainerStats) (*de
    }
    cgroup, err := cgroups.Load(cgroupPath)
    if err != nil {
        return stats, errors.Wrapf(err, "unable to load cgroup at %s", cgroupPath)
        return stats, fmt.Errorf("unable to load cgroup at %s: %w", cgroupPath, err)
    }

    // Ubuntu does not have swap memory in cgroups because swap is often not enabled.
    cgroupStats, err := cgroup.Stat()
    if err != nil {
        return stats, errors.Wrapf(err, "unable to obtain cgroup stats")
        return stats, fmt.Errorf("unable to obtain cgroup stats: %w", err)
    }
    conState := c.state.State
    netStats, err := getContainerNetIO(c)
@@ -2,6 +2,7 @@ package libpod

import (
    "context"
    "errors"
    "time"

    istorage "github.com/containers/image/v5/storage"

@@ -10,7 +11,6 @@ import (
    "github.com/containers/storage"
    "github.com/containers/storage/pkg/idtools"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

@@ -184,7 +184,7 @@ func (r *storageService) DeleteContainer(idOrName string) error {
    }
    err = r.store.DeleteContainer(container.ID)
    if err != nil {
        if errors.Cause(err) == storage.ErrNotAContainer || errors.Cause(err) == storage.ErrContainerUnknown {
        if errors.Is(err, storage.ErrNotAContainer) || errors.Is(err, storage.ErrContainerUnknown) {
            logrus.Infof("Storage for container %s already removed", container.ID)
        } else {
            logrus.Debugf("Failed to delete container %q: %v", container.ID, err)

@@ -218,7 +218,7 @@ func (r *storageService) GetContainerMetadata(idOrName string) (RuntimeContainer
func (r *storageService) MountContainerImage(idOrName string) (string, error) {
    container, err := r.store.Container(idOrName)
    if err != nil {
        if errors.Cause(err) == storage.ErrContainerUnknown {
        if errors.Is(err, storage.ErrContainerUnknown) {
            return "", define.ErrNoSuchCtr
        }
        return "", err

@@ -281,7 +281,7 @@ func (r *storageService) MountedContainerImage(idOrName string) (int, error) {
func (r *storageService) GetMountpoint(id string) (string, error) {
    container, err := r.store.Container(id)
    if err != nil {
        if errors.Cause(err) == storage.ErrContainerUnknown {
        if errors.Is(err, storage.ErrContainerUnknown) {
            return "", define.ErrNoSuchCtr
        }
        return "", err

@@ -297,7 +297,7 @@ func (r *storageService) GetMountpoint(id string) (string, error) {
func (r *storageService) GetWorkDir(id string) (string, error) {
    container, err := r.store.Container(id)
    if err != nil {
        if errors.Cause(err) == storage.ErrContainerUnknown {
        if errors.Is(err, storage.ErrContainerUnknown) {
            return "", define.ErrNoSuchCtr
        }
        return "", err

@@ -308,7 +308,7 @@ func (r *storageService) GetWorkDir(id string) (string, error) {
func (r *storageService) GetRunDir(id string) (string, error) {
    container, err := r.store.Container(id)
    if err != nil {
        if errors.Cause(err) == storage.ErrContainerUnknown {
        if errors.Is(err, storage.ErrContainerUnknown) {
            return "", define.ErrNoSuchCtr
        }
        return "", err
@@ -20,7 +20,6 @@ import (
    "github.com/fsnotify/fsnotify"
    spec "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/opencontainers/selinux/go-selinux/label"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

@@ -93,7 +92,7 @@ func WaitForFile(path string, chWait chan error, timeout time.Duration) (bool, e
                return false, err
            }
        case <-timeoutChan:
            return false, errors.Wrapf(define.ErrInternal, "timed out waiting for file %s", path)
            return false, fmt.Errorf("timed out waiting for file %s: %w", path, define.ErrInternal)
        }
    }
}

@@ -123,15 +122,15 @@ func sortMounts(m []spec.Mount) []spec.Mount {

func validPodNSOption(p *Pod, ctrPod string) error {
    if p == nil {
        return errors.Wrapf(define.ErrInvalidArg, "pod passed in was nil. Container may not be associated with a pod")
        return fmt.Errorf("pod passed in was nil. Container may not be associated with a pod: %w", define.ErrInvalidArg)
    }

    if ctrPod == "" {
        return errors.Wrapf(define.ErrInvalidArg, "container is not a member of any pod")
        return fmt.Errorf("container is not a member of any pod: %w", define.ErrInvalidArg)
    }

    if ctrPod != p.ID() {
        return errors.Wrapf(define.ErrInvalidArg, "pod passed in is not the pod the container is associated with")
        return fmt.Errorf("pod passed in is not the pod the container is associated with: %w", define.ErrInvalidArg)
    }
    return nil
}

@@ -232,18 +231,18 @@ func DefaultSeccompPath() (string, error) {
func checkDependencyContainer(depCtr, ctr *Container) error {
    state, err := depCtr.State()
    if err != nil {
        return errors.Wrapf(err, "error accessing dependency container %s state", depCtr.ID())
        return fmt.Errorf("error accessing dependency container %s state: %w", depCtr.ID(), err)
    }
    if state == define.ContainerStateRemoving {
        return errors.Wrapf(define.ErrCtrStateInvalid, "cannot use container %s as a dependency as it is being removed", depCtr.ID())
        return fmt.Errorf("cannot use container %s as a dependency as it is being removed: %w", depCtr.ID(), define.ErrCtrStateInvalid)
    }

    if depCtr.ID() == ctr.ID() {
        return errors.Wrapf(define.ErrInvalidArg, "must specify another container")
        return fmt.Errorf("must specify another container: %w", define.ErrInvalidArg)
    }

    if ctr.config.Pod != "" && depCtr.PodID() != ctr.config.Pod {
        return errors.Wrapf(define.ErrInvalidArg, "container has joined pod %s and dependency container %s is not a member of the pod", ctr.config.Pod, depCtr.ID())
        return fmt.Errorf("container has joined pod %s and dependency container %s is not a member of the pod: %w", ctr.config.Pod, depCtr.ID(), define.ErrInvalidArg)
    }

    return nil

@@ -347,7 +346,7 @@ func makeInspectPortBindings(bindings []types.PortMapping, expose map[uint16][]s
func writeStringToPath(path, contents, mountLabel string, uid, gid int) error {
    f, err := os.Create(path)
    if err != nil {
        return errors.Wrapf(err, "unable to create %s", path)
        return fmt.Errorf("unable to create %s: %w", path, err)
    }
    defer f.Close()
    if err := f.Chown(uid, gid); err != nil {

@@ -355,7 +354,7 @@ func writeStringToPath(path, contents, mountLabel string, uid, gid int) error {
    }

    if _, err := f.WriteString(contents); err != nil {
        return errors.Wrapf(err, "unable to write %s", path)
        return fmt.Errorf("unable to write %s: %w", path, err)
    }
    // Relabel runDirResolv for the container
    if err := label.Relabel(path, mountLabel, false); err != nil {
@@ -13,7 +13,6 @@ import (
    "github.com/containers/podman/v4/pkg/rootless"
    spec "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/opencontainers/selinux/go-selinux/label"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "golang.org/x/sys/unix"
)

@@ -30,7 +29,7 @@ func systemdSliceFromPath(parent, name string, resources *spec.LinuxResources) (
    logrus.Debugf("Created cgroup path %s for parent %s and name %s", cgroupPath, parent, name)

    if err := makeSystemdCgroup(cgroupPath, resources); err != nil {
        return "", errors.Wrapf(err, "error creating cgroup %s", cgroupPath)
        return "", fmt.Errorf("error creating cgroup %s: %w", cgroupPath, err)
    }

    logrus.Debugf("Created cgroup %s", cgroupPath)

@@ -95,7 +94,7 @@ func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) {
    const sliceSuffix = ".slice"

    if !strings.HasSuffix(baseSlice, sliceSuffix) {
        return "", errors.Wrapf(define.ErrInvalidArg, "cannot assemble cgroup path with base %q - must end in .slice", baseSlice)
        return "", fmt.Errorf("cannot assemble cgroup path with base %q - must end in .slice: %w", baseSlice, define.ErrInvalidArg)
    }

    noSlice := strings.TrimSuffix(baseSlice, sliceSuffix)

@@ -113,17 +112,17 @@ var lvpReleaseLabel = label.ReleaseLabel
func LabelVolumePath(path string) error {
    _, mountLabel, err := lvpInitLabels([]string{})
    if err != nil {
        return errors.Wrapf(err, "error getting default mountlabels")
        return fmt.Errorf("error getting default mountlabels: %w", err)
    }
    if err := lvpReleaseLabel(mountLabel); err != nil {
        return errors.Wrapf(err, "error releasing label %q", mountLabel)
        return fmt.Errorf("error releasing label %q: %w", mountLabel, err)
    }

    if err := lvpRelabel(path, mountLabel, true); err != nil {
        if err == syscall.ENOTSUP {
            logrus.Debugf("Labeling not supported on %q", path)
        } else {
            return errors.Wrapf(err, "error setting selinux label for %s to %q as shared", path, mountLabel)
            return fmt.Errorf("error setting selinux label for %s to %q as shared: %w", path, mountLabel, err)
        }
    }
    return nil
@@ -1,9 +1,10 @@
package libpod

import (
    "fmt"

    "github.com/containers/podman/v4/libpod/define"
    pluginapi "github.com/docker/go-plugins-helpers/volume"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

@@ -29,7 +30,7 @@ func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
    data.Mountpoint = v.state.MountPoint

    if v.plugin == nil {
        return nil, errors.Wrapf(define.ErrMissingPlugin, "volume %s uses volume plugin %s but it is not available, cannot inspect", v.Name(), v.config.Driver)
        return nil, fmt.Errorf("volume %s uses volume plugin %s but it is not available, cannot inspect: %w", v.Name(), v.config.Driver, define.ErrMissingPlugin)
    }

    // Retrieve status for the volume.

@@ -38,7 +39,7 @@ func (v *Volume) Inspect() (*define.InspectVolumeData, error) {
    req.Name = v.Name()
    resp, err := v.plugin.GetVolume(req)
    if err != nil {
        return nil, errors.Wrapf(err, "error retrieving volume %s information from plugin %s", v.Name(), v.Driver())
        return nil, fmt.Errorf("error retrieving volume %s information from plugin %s: %w", v.Name(), v.Driver(), err)
    }
    if resp != nil {
        data.Status = resp.Status
@@ -1,11 +1,11 @@
package libpod

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/containers/podman/v4/libpod/define"
    "github.com/pkg/errors"
)

// Creates a new volume

@@ -90,7 +90,7 @@ func (v *Volume) save() error {
func (v *Volume) refresh() error {
    lock, err := v.runtime.lockManager.AllocateAndRetrieveLock(v.config.LockID)
    if err != nil {
        return errors.Wrapf(err, "acquiring lock %d for volume %s", v.config.LockID, v.Name())
        return fmt.Errorf("acquiring lock %d for volume %s: %w", v.config.LockID, v.Name(), err)
    }
    v.lock = lock
@@ -4,12 +4,13 @@
package libpod

import (
    "errors"
    "fmt"
    "os/exec"
    "strings"

    "github.com/containers/podman/v4/libpod/define"
    pluginapi "github.com/docker/go-plugins-helpers/volume"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "golang.org/x/sys/unix"
)

@@ -51,7 +52,7 @@ func (v *Volume) mount() error {
    // the same one for everything.
    if v.UsesVolumeDriver() {
        if v.plugin == nil {
            return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name())
            return fmt.Errorf("volume plugin %s (needed by volume %s) missing: %w", v.Driver(), v.Name(), define.ErrMissingPlugin)
        }

        req := new(pluginapi.MountRequest)

@@ -83,7 +84,7 @@ func (v *Volume) mount() error {
    // TODO: might want to cache this path in the runtime?
    mountPath, err := exec.LookPath("mount")
    if err != nil {
        return errors.Wrapf(err, "locating 'mount' binary")
        return fmt.Errorf("locating 'mount' binary: %w", err)
    }
    mountArgs := []string{}
    if volOptions != "" {

@@ -103,7 +104,7 @@ func (v *Volume) mount() error {
    logrus.Debugf("Running mount command: %s %s", mountPath, strings.Join(mountArgs, " "))
    if output, err := mountCmd.CombinedOutput(); err != nil {
        logrus.Debugf("Mount %v failed with %v", mountCmd, err)
        return errors.Errorf(string(output))
        return errors.New(string(output))
    }

    logrus.Debugf("Mounted volume %s", v.Name())

@@ -148,7 +149,7 @@ func (v *Volume) unmount(force bool) error {
    if v.state.MountCount == 0 {
        if v.UsesVolumeDriver() {
            if v.plugin == nil {
                return errors.Wrapf(define.ErrMissingPlugin, "volume plugin %s (needed by volume %s) missing", v.Driver(), v.Name())
                return fmt.Errorf("volume plugin %s (needed by volume %s) missing: %w", v.Driver(), v.Name(), define.ErrMissingPlugin)
            }

            req := new(pluginapi.UnmountRequest)

@@ -168,7 +169,7 @@ func (v *Volume) unmount(force bool) error {
                // Ignore EINVAL - the mount no longer exists.
                return nil
            }
            return errors.Wrapf(err, "unmounting volume %s", v.Name())
            return fmt.Errorf("unmounting volume %s: %w", v.Name(), err)
        }
        logrus.Debugf("Unmounted volume %s", v.Name())
    }
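One change in the volume mount code is not a pure wrap conversion: errors.Errorf(string(output)) becomes errors.New(string(output)). Treating captured command output as a format string would let any stray % verbs in it be interpreted by the formatter (and it trips go vet's printf check), whereas errors.New keeps the text verbatim. A tiny sketch of the difference, with a made-up mount error message:

package main

import (
    "errors"
    "fmt"
)

func main() {
    // Pretend this came from exec.Command("mount", ...).CombinedOutput().
    output := "mount: unknown filesystem type 'ext5%s'"

    // Using the output as a format string: the stray %s is treated as a verb
    // with no operand and the message gets mangled.
    bad := fmt.Errorf(output)

    // errors.New keeps the captured output exactly as produced.
    good := errors.New(output)

    fmt.Println(bad)  // mount: unknown filesystem type 'ext5%!s(MISSING)'
    fmt.Println(good) // mount: unknown filesystem type 'ext5%s'
}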
@@ -2,10 +2,13 @@ package compat

import (
    "encoding/json"
    "fmt"
    "net/http"
    "os"
    "strings"

    "errors"

    "github.com/containers/podman/v4/libpod"
    "github.com/containers/podman/v4/libpod/define"
    "github.com/containers/podman/v4/pkg/api/handlers/utils"

@@ -14,7 +17,6 @@ import (
    "github.com/containers/podman/v4/pkg/domain/entities"
    "github.com/containers/podman/v4/pkg/domain/infra/abi"
    "github.com/gorilla/schema"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

@@ -28,7 +30,7 @@ func Archive(w http.ResponseWriter, r *http.Request) {
    case http.MethodHead, http.MethodGet:
        handleHeadAndGet(w, r, decoder, runtime)
    default:
        utils.Error(w, http.StatusNotImplemented, errors.Errorf("unsupported method: %v", r.Method))
        utils.Error(w, http.StatusNotImplemented, fmt.Errorf("unsupported method: %v", r.Method))
    }
}

@@ -39,7 +41,7 @@ func handleHeadAndGet(w http.ResponseWriter, r *http.Request, decoder *schema.De
    err := decoder.Decode(&query, r.URL.Query())
    if err != nil {
        utils.Error(w, http.StatusBadRequest, errors.Wrap(err, "couldn't decode the query"))
        utils.Error(w, http.StatusBadRequest, fmt.Errorf("couldn't decode the query: %w", err))
        return
    }

@@ -65,7 +67,7 @@ func handleHeadAndGet(w http.ResponseWriter, r *http.Request, decoder *schema.De
        w.Header().Add(copy.XDockerContainerPathStatHeader, statHeader)
    }

    if errors.Cause(err) == define.ErrNoSuchCtr || errors.Cause(err) == copy.ErrENOENT {
    if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, copy.ErrENOENT) {
        // 404 is returned for an absent container and path. The
        // clients must deal with it accordingly.
        utils.Error(w, http.StatusNotFound, err)

@@ -105,14 +107,14 @@ func handlePut(w http.ResponseWriter, r *http.Request, decoder *schema.Decoder,
    err := decoder.Decode(&query, r.URL.Query())
    if err != nil {
        utils.Error(w, http.StatusBadRequest, errors.Wrap(err, "couldn't decode the query"))
        utils.Error(w, http.StatusBadRequest, fmt.Errorf("couldn't decode the query: %w", err))
        return
    }

    var rename map[string]string
    if query.Rename != "" {
        if err := json.Unmarshal([]byte(query.Rename), &rename); err != nil {
            utils.Error(w, http.StatusBadRequest, errors.Wrap(err, "couldn't decode the query field 'rename'"))
            utils.Error(w, http.StatusBadRequest, fmt.Errorf("couldn't decode the query field 'rename': %w", err))
            return
        }
    }

@@ -128,10 +130,10 @@ func handlePut(w http.ResponseWriter, r *http.Request, decoder *schema.Decoder,
    })
    if err != nil {
        switch {
        case errors.Cause(err) == define.ErrNoSuchCtr || os.IsNotExist(err):
        case errors.Is(err, define.ErrNoSuchCtr) || os.IsNotExist(err):
            // 404 is returned for an absent container and path. The
            // clients must deal with it accordingly.
            utils.Error(w, http.StatusNotFound, errors.Wrap(err, "the container doesn't exists"))
            utils.Error(w, http.StatusNotFound, fmt.Errorf("the container doesn't exists: %w", err))
        case strings.Contains(err.Error(), "copier: put: error creating file"):
            // Not the best test but need to break this out for compatibility
            // See vendor/github.com/containers/buildah/copier/copier.go:1585
@@ -2,6 +2,7 @@ package compat

import (
    "encoding/json"
    "errors"
    "fmt"
    "net"
    "net/http"

@@ -26,7 +27,6 @@ import (
    "github.com/containers/storage"
    "github.com/docker/docker/api/types/mount"
    "github.com/gorilla/schema"
    "github.com/pkg/errors"
)

func CreateContainer(w http.ResponseWriter, r *http.Request) {

@@ -38,14 +38,14 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
        // override any golang type defaults
    }
    if err := decoder.Decode(&query, r.URL.Query()); err != nil {
        utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
        utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
        return
    }

    // compatible configuration
    body := handlers.CreateContainerConfig{}
    if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("decode(): %w", err))
        return
    }

@@ -53,37 +53,37 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
    body.Name = query.Name

    if len(body.HostConfig.Links) > 0 {
        utils.Error(w, http.StatusBadRequest, errors.Wrapf(utils.ErrLinkNotSupport, "bad parameter"))
        utils.Error(w, http.StatusBadRequest, fmt.Errorf("bad parameter: %w", utils.ErrLinkNotSupport))
        return
    }
    rtc, err := runtime.GetConfig()
    if err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to get runtime config"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to get runtime config: %w", err))
        return
    }

    imageName, err := utils.NormalizeToDockerHub(r, body.Config.Image)
    if err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
        return
    }
    body.Config.Image = imageName

    newImage, resolvedName, err := runtime.LibimageRuntime().LookupImage(body.Config.Image, nil)
    if err != nil {
        if errors.Cause(err) == storage.ErrImageUnknown {
            utils.Error(w, http.StatusNotFound, errors.Wrap(err, "No such image"))
        if errors.Is(err, storage.ErrImageUnknown) {
            utils.Error(w, http.StatusNotFound, fmt.Errorf("no such image: %w", err))
            return
        }

        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error looking up image"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error looking up image: %w", err))
        return
    }

    // Take body structure and convert to cliopts
    cliOpts, args, err := cliOpts(body, rtc)
    if err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "make cli opts()"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("make cli opts(): %w", err))
        return
    }

@@ -100,7 +100,7 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
    sg := specgen.NewSpecGenerator(imgNameOrID, cliOpts.RootFS)
    if err := specgenutil.FillOutSpecGen(sg, cliOpts, args); err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "fill out specgen"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("fill out specgen: %w", err))
        return
    }
    // moby always create the working directory

@@ -109,7 +109,7 @@ func CreateContainer(w http.ResponseWriter, r *http.Request) {
    ic := abi.ContainerEngine{Libpod: runtime}
    report, err := ic.ContainerCreate(r.Context(), sg)
    if err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "container create"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("container create: %w", err))
        return
    }
    createResponse := entities.ContainerCreateResponse{

@@ -300,7 +300,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
        if len(endpoint.IPAddress) > 0 {
            staticIP := net.ParseIP(endpoint.IPAddress)
            if staticIP == nil {
                return nil, nil, errors.Errorf("failed to parse the ip address %q", endpoint.IPAddress)
                return nil, nil, fmt.Errorf("failed to parse the ip address %q", endpoint.IPAddress)
            }
            netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
        }

@@ -310,7 +310,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
            if len(endpoint.IPAMConfig.IPv4Address) > 0 {
                staticIP := net.ParseIP(endpoint.IPAMConfig.IPv4Address)
                if staticIP == nil {
                    return nil, nil, errors.Errorf("failed to parse the ipv4 address %q", endpoint.IPAMConfig.IPv4Address)
                    return nil, nil, fmt.Errorf("failed to parse the ipv4 address %q", endpoint.IPAMConfig.IPv4Address)
                }
                netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
            }

@@ -318,7 +318,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
            if len(endpoint.IPAMConfig.IPv6Address) > 0 {
                staticIP := net.ParseIP(endpoint.IPAMConfig.IPv6Address)
                if staticIP == nil {
                    return nil, nil, errors.Errorf("failed to parse the ipv6 address %q", endpoint.IPAMConfig.IPv6Address)
                    return nil, nil, fmt.Errorf("failed to parse the ipv6 address %q", endpoint.IPAMConfig.IPv6Address)
                }
                netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
            }

@@ -327,7 +327,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
        if len(endpoint.MacAddress) > 0 {
            staticMac, err := net.ParseMAC(endpoint.MacAddress)
            if err != nil {
                return nil, nil, errors.Errorf("failed to parse the mac address %q", endpoint.MacAddress)
                return nil, nil, fmt.Errorf("failed to parse the mac address %q", endpoint.MacAddress)
            }
            netOpts.StaticMAC = types.HardwareAddr(staticMac)
        }

@@ -433,7 +433,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
    }
    if cc.HostConfig.Resources.NanoCPUs > 0 {
        if cliOpts.CPUPeriod != 0 || cliOpts.CPUQuota != 0 {
            return nil, nil, errors.Errorf("NanoCpus conflicts with CpuPeriod and CpuQuota")
            return nil, nil, fmt.Errorf("NanoCpus conflicts with CpuPeriod and CpuQuota")
        }
        cliOpts.CPUPeriod = 100000
        cliOpts.CPUQuota = cc.HostConfig.Resources.NanoCPUs / 10000

@@ -479,7 +479,7 @@ func cliOpts(cc handlers.CreateContainerConfig, rtc *config.Config) (*entities.C
            }
            if err := os.MkdirAll(vol, 0o755); err != nil {
                if !os.IsExist(err) {
                    return nil, nil, errors.Wrapf(err, "error making volume mountpoint for volume %s", vol)
                    return nil, nil, fmt.Errorf("error making volume mountpoint for volume %s: %w", vol, err)
                }
            }
        }
@@ -1,6 +1,8 @@
package compat

import (
    "errors"
    "fmt"
    "net/http"

    "github.com/containers/podman/v4/libpod"

@@ -10,7 +12,6 @@ import (
    "github.com/containers/podman/v4/pkg/domain/entities"
    "github.com/containers/podman/v4/pkg/domain/infra/abi"
    "github.com/gorilla/schema"
    "github.com/pkg/errors"
)

func RestartContainer(w http.ResponseWriter, r *http.Request) {

@@ -29,7 +30,7 @@ func RestartContainer(w http.ResponseWriter, r *http.Request) {
        // override any golang type defaults
    }
    if err := decoder.Decode(&query, r.URL.Query()); err != nil {
        utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
        utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
        return
    }

@@ -44,7 +45,7 @@ func RestartContainer(w http.ResponseWriter, r *http.Request) {
    }
    report, err := containerEngine.ContainerRestart(r.Context(), []string{name}, options)
    if err != nil {
        if errors.Cause(err) == define.ErrNoSuchCtr {
        if errors.Is(err, define.ErrNoSuchCtr) {
            utils.ContainerNotFound(w, name, err)
            return
        }
@@ -1,6 +1,8 @@
package compat

import (
    "errors"
    "fmt"
    "net/http"

    "github.com/containers/podman/v4/libpod"

@@ -10,7 +12,6 @@ import (
    "github.com/containers/podman/v4/pkg/domain/entities"
    "github.com/containers/podman/v4/pkg/domain/infra/abi"
    "github.com/gorilla/schema"
    "github.com/pkg/errors"
)

func StopContainer(w http.ResponseWriter, r *http.Request) {

@@ -29,7 +30,7 @@ func StopContainer(w http.ResponseWriter, r *http.Request) {
        // override any golang type defaults
    }
    if err := decoder.Decode(&query, r.URL.Query()); err != nil {
        utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
        utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
        return
    }

@@ -63,7 +64,7 @@ func StopContainer(w http.ResponseWriter, r *http.Request) {
    }
    report, err := containerEngine.ContainerStop(r.Context(), []string{name}, options)
    if err != nil {
        if errors.Cause(err) == define.ErrNoSuchCtr {
        if errors.Is(err, define.ErrNoSuchCtr) {
            utils.ContainerNotFound(w, name, err)
            return
        }
@@ -2,6 +2,8 @@ package compat

import (
    "encoding/json"
    "errors"
    "fmt"
    "net/http"
    "strings"

@@ -14,7 +16,6 @@ import (
    "github.com/containers/podman/v4/pkg/domain/entities"
    "github.com/containers/podman/v4/pkg/specgenutil"
    "github.com/gorilla/mux"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

@@ -24,7 +25,7 @@ func ExecCreateHandler(w http.ResponseWriter, r *http.Request) {
    input := new(handlers.ExecCreateConfig)
    if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
        utils.InternalServerError(w, errors.Wrapf(err, "error decoding request body as JSON"))
        utils.InternalServerError(w, fmt.Errorf("error decoding request body as JSON: %w", err))
        return
    }

@@ -48,7 +49,7 @@ func ExecCreateHandler(w http.ResponseWriter, r *http.Request) {
    for _, envStr := range input.Env {
        split := strings.SplitN(envStr, "=", 2)
        if len(split) != 2 {
            utils.Error(w, http.StatusBadRequest, errors.Errorf("environment variable %q badly formed, must be key=value", envStr))
            utils.Error(w, http.StatusBadRequest, fmt.Errorf("environment variable %q badly formed, must be key=value", envStr))
            return
        }
        libpodConfig.Environment[split[0]] = split[1]

@@ -78,14 +79,14 @@ func ExecCreateHandler(w http.ResponseWriter, r *http.Request) {
    sessID, err := ctr.ExecCreate(libpodConfig)
    if err != nil {
        if errors.Cause(err) == define.ErrCtrStateInvalid {
        if errors.Is(err, define.ErrCtrStateInvalid) {
            // Check if the container is paused. If so, return a 409
            state, err := ctr.State()
            if err == nil {
                // Ignore the error != nil case. We're already
                // throwing an InternalServerError below.
                if state == define.ContainerStatePaused {
                    utils.Error(w, http.StatusConflict, errors.Errorf("cannot create exec session as container %s is paused", ctr.ID()))
                    utils.Error(w, http.StatusConflict, fmt.Errorf("cannot create exec session as container %s is paused", ctr.ID()))
                    return
                }
            }

@@ -112,7 +113,7 @@ func ExecInspectHandler(w http.ResponseWriter, r *http.Request) {
    session, err := sessionCtr.ExecSession(sessionID)
    if err != nil {
        utils.InternalServerError(w, errors.Wrapf(err, "error retrieving exec session %s from container %s", sessionID, sessionCtr.ID()))
        utils.InternalServerError(w, fmt.Errorf("error retrieving exec session %s from container %s: %w", sessionID, sessionCtr.ID(), err))
        return
    }

@@ -135,7 +136,7 @@ func ExecStartHandler(w http.ResponseWriter, r *http.Request) {
    bodyParams := new(handlers.ExecStartConfig)

    if err := json.NewDecoder(r.Body).Decode(&bodyParams); err != nil {
        utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to decode parameters for %s", r.URL.String()))
        utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to decode parameters for %s: %w", r.URL.String(), err))
        return
    }
    // TODO: Verify TTY setting against what inspect session was made with

@@ -154,7 +155,7 @@ func ExecStartHandler(w http.ResponseWriter, r *http.Request) {
        return
    }
    if state != define.ContainerStateRunning {
        utils.Error(w, http.StatusConflict, errors.Errorf("cannot exec in a container that is not running; container %s is %s", sessionCtr.ID(), state.String()))
        utils.Error(w, http.StatusConflict, fmt.Errorf("cannot exec in a container that is not running; container %s is %s", sessionCtr.ID(), state.String()))
        return
    }

@@ -172,7 +173,7 @@ func ExecStartHandler(w http.ResponseWriter, r *http.Request) {
    }

    logErr := func(e error) {
        logrus.Error(errors.Wrapf(e, "error attaching to container %s exec session %s", sessionCtr.ID(), sessionID))
        logrus.Error(fmt.Errorf("error attaching to container %s exec session %s: %w", sessionCtr.ID(), sessionID, e))
    }

    var size *define.TerminalSize
@@ -2,6 +2,7 @@ package compat

import (
    "encoding/json"
    "errors"
    "fmt"
    "io/ioutil"
    "net/http"

@@ -24,7 +25,6 @@ import (
    "github.com/containers/storage"
    "github.com/gorilla/schema"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

@@ -50,7 +50,7 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
    tmpfile, err := ioutil.TempFile("", "api.tar")
    if err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
        return
    }
    defer os.Remove(tmpfile.Name())

@@ -58,7 +58,7 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
    name := utils.GetName(r)
    possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, name)
    if err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
        return
    }

@@ -70,22 +70,22 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
    }

    if err := imageEngine.Save(r.Context(), possiblyNormalizedName, nil, saveOptions); err != nil {
        if errors.Cause(err) == storage.ErrImageUnknown {
            utils.ImageNotFound(w, name, errors.Wrapf(err, "failed to find image %s", name))
        if errors.Is(err, storage.ErrImageUnknown) {
            utils.ImageNotFound(w, name, fmt.Errorf("failed to find image %s: %w", name, err))
            return
        }
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
        return
    }

    if err := tmpfile.Close(); err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to close tempfile: %w", err))
        return
    }

    rdr, err := os.Open(tmpfile.Name())
    if err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to read the exported tarfile: %w", err))
        return
    }
    defer rdr.Close()

@@ -111,12 +111,12 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
    }

    if err := decoder.Decode(&query, r.URL.Query()); err != nil {
        utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
        utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
        return
    }
    rtc, err := runtime.GetConfig()
    if err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
        return
    }
    sc := runtime.SystemContext()

@@ -132,7 +132,7 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
    input := handlers.CreateContainerConfig{}
    if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
        utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
        utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -154,7 +154,7 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
|
|||
destImage = fmt.Sprintf("%s:%s", query.Repo, query.Tag)
|
||||
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, destImage)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
|
||||
return
|
||||
}
|
||||
destImage = possiblyNormalizedName
|
||||
|
|
@ -162,7 +162,7 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
commitImage, err := ctr.Commit(r.Context(), destImage, options)
|
||||
if err != nil && !strings.Contains(err.Error(), "is not running") {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "CommitFailure"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("CommitFailure: %w", err))
|
||||
return
|
||||
}
|
||||
utils.WriteResponse(w, http.StatusCreated, entities.IDResponse{ID: commitImage.ID()})
|
||||
|
|
@ -186,7 +186,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
// fromSrc – Source to import. The value may be a URL from which the image can be retrieved or - to read the image from the request body. This parameter may only be used when importing an image.
|
||||
|
|
@ -194,13 +194,13 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
|
|||
if source == "-" {
|
||||
f, err := ioutil.TempFile("", "api_load.tar")
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to create tempfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to create tempfile: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
source = f.Name()
|
||||
if err := SaveFromBody(f, r); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to write temporary file"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to write temporary file: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -208,7 +208,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
|
|||
if query.Repo != "" {
|
||||
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, reference)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
|
||||
return
|
||||
}
|
||||
reference = possiblyNormalizedName
|
||||
|
|
@ -229,7 +229,7 @@ func CreateImageFromSrc(w http.ResponseWriter, r *http.Request) {
|
|||
imageEngine := abi.ImageEngine{Libpod: runtime}
|
||||
report, err := imageEngine.Import(r.Context(), opts)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to import tarball"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to import tarball: %w", err))
|
||||
return
|
||||
}
|
||||
// Success
|
||||
|
|
@ -265,13 +265,13 @@ func CreateImageFromImage(w http.ResponseWriter, r *http.Request) {
|
|||
// This is where you can override the golang default value for one of fields
|
||||
}
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, mergeNameAndTagOrDigest(query.FromImage, query.Tag))
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -388,7 +388,7 @@ func GetImage(w http.ResponseWriter, r *http.Request) {
|
|||
name := utils.GetName(r)
|
||||
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, name)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -397,12 +397,12 @@ func GetImage(w http.ResponseWriter, r *http.Request) {
|
|||
// Here we need to fiddle with the error message because docker-py is looking for "No
|
||||
// such image" to determine on how to raise the correct exception.
|
||||
errMsg := strings.ReplaceAll(err.Error(), "image not known", "No such image")
|
||||
utils.Error(w, http.StatusNotFound, errors.Errorf("failed to find image %s: %s", name, errMsg))
|
||||
utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find image %s: %s", name, errMsg))
|
||||
return
|
||||
}
|
||||
inspect, err := handlers.ImageDataToImageInspect(r.Context(), newImage)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to convert ImageData to ImageInspect '%s'", inspect.ID))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to convert ImageData to ImageInspect '%s': %w", inspect.ID, err))
|
||||
return
|
||||
}
|
||||
utils.WriteResponse(w, http.StatusOK, inspect)
|
||||
|
|
@ -421,7 +421,7 @@ func GetImages(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest,
|
||||
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
if _, found := r.URL.Query()["digests"]; found && query.Digests {
|
||||
|
|
@ -472,7 +472,7 @@ func LoadImages(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -480,7 +480,7 @@ func LoadImages(w http.ResponseWriter, r *http.Request) {
|
|||
// to load.
|
||||
f, err := ioutil.TempFile("", "api_load.tar")
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to create tempfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to create tempfile: %w", err))
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
|
|
@ -490,7 +490,7 @@ func LoadImages(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
}()
|
||||
if err := SaveFromBody(f, r); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to write temporary file"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to write temporary file: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -499,12 +499,12 @@ func LoadImages(w http.ResponseWriter, r *http.Request) {
|
|||
loadOptions := entities.ImageLoadOptions{Input: f.Name()}
|
||||
loadReport, err := imageEngine.Load(r.Context(), loadOptions)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to load image"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to load image: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
if len(loadReport.Names) < 1 {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Errorf("one or more images are required"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("one or more images are required"))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -527,7 +527,7 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
|
|||
// This is where you can override the golang default value for one of fields
|
||||
}
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
if len(query.Names) == 0 {
|
||||
|
|
@ -539,7 +539,7 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
|
|||
for i, img := range query.Names {
|
||||
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, img)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
|
||||
return
|
||||
}
|
||||
images[i] = possiblyNormalizedName
|
||||
|
|
@ -547,12 +547,12 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
tmpfile, err := ioutil.TempFile("", "api.tar")
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
|
||||
return
|
||||
}
|
||||
defer os.Remove(tmpfile.Name())
|
||||
if err := tmpfile.Close(); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to close tempfile: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -566,7 +566,7 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
rdr, err := os.Open(tmpfile.Name())
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to read the exported tarfile: %w", err))
|
||||
return
|
||||
}
|
||||
defer rdr.Close()
|
||||
|
|
|
|||
|
|
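One detail in the GetImage hunk above: the message built from errMsg keeps the %s verb rather than %w, because it formats a rewritten string for docker-py, not the error value itself. Only %w records the original error for later errors.Is/errors.Unwrap checks; %v and %s flatten it to plain text. A small sketch of the difference, using a standard-library sentinel purely for illustration:

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

func main() {
	base := fs.ErrNotExist

	wrapped := fmt.Errorf("failed to find image %s: %w", "alpine", base)
	flattened := fmt.Errorf("failed to find image %s: %v", "alpine", base)

	fmt.Println(errors.Is(wrapped, base))   // true: %w keeps the error in the chain
	fmt.Println(errors.Is(flattened, base)) // false: %v copied only the message text
	fmt.Println(errors.Unwrap(flattened))   // <nil>: nothing to unwrap
}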
@ -1,6 +1,8 @@
package compat

import (
"errors"
"fmt"
"net/http"

"github.com/containers/podman/v4/libpod"

@ -10,7 +12,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/storage"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)

func RemoveImage(w http.ResponseWriter, r *http.Request) {

@ -25,7 +26,7 @@ func RemoveImage(w http.ResponseWriter, r *http.Request) {
}

if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
if _, found := r.URL.Query()["noprune"]; found {

@ -36,7 +37,7 @@ func RemoveImage(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
possiblyNormalizedName, err := utils.NormalizeToDockerHub(r, name)
if err != nil {
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "error normalizing image"))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("error normalizing image: %w", err))
return
}

@ -48,12 +49,12 @@ func RemoveImage(w http.ResponseWriter, r *http.Request) {
report, rmerrors := imageEngine.Remove(r.Context(), []string{possiblyNormalizedName}, options)
if len(rmerrors) > 0 && rmerrors[0] != nil {
err := rmerrors[0]
if errors.Cause(err) == storage.ErrImageUnknown {
utils.ImageNotFound(w, name, errors.Wrapf(err, "failed to find image %s", name))
if errors.Is(err, storage.ErrImageUnknown) {
utils.ImageNotFound(w, name, fmt.Errorf("failed to find image %s: %w", name, err))
return
}
if errors.Cause(err) == storage.ErrImageUsedByContainer {
utils.Error(w, http.StatusConflict, errors.Wrapf(err, "image %s is in use", name))
if errors.Is(err, storage.ErrImageUsedByContainer) {
utils.Error(w, http.StatusConflict, fmt.Errorf("image %s is in use: %w", name, err))
return
}
utils.Error(w, http.StatusInternalServerError, err)
@ -2,6 +2,7 @@ package compat
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
|
|
@ -19,7 +20,6 @@ import (
|
|||
|
||||
dockerNetwork "github.com/docker/docker/api/types/network"
|
||||
"github.com/gorilla/schema"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -36,7 +36,7 @@ func InspectNetwork(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -133,7 +133,7 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) {
|
|||
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
|
||||
filterMap, err := util.PrepareFilters(r)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -167,7 +167,7 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
|
|||
)
|
||||
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
|
||||
if err := json.NewDecoder(r.Body).Decode(&networkCreate); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -227,7 +227,7 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
|
|||
var err error
|
||||
subnet, err := nettypes.ParseCIDR(conf.Subnet)
|
||||
if err != nil {
|
||||
utils.InternalServerError(w, errors.Wrap(err, "failed to parse subnet"))
|
||||
utils.InternalServerError(w, fmt.Errorf("failed to parse subnet: %w", err))
|
||||
return
|
||||
}
|
||||
s.Subnet = subnet
|
||||
|
|
@ -235,7 +235,7 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
|
|||
if len(conf.Gateway) > 0 {
|
||||
gw := net.ParseIP(conf.Gateway)
|
||||
if gw == nil {
|
||||
utils.InternalServerError(w, errors.Errorf("failed to parse gateway ip %s", conf.Gateway))
|
||||
utils.InternalServerError(w, fmt.Errorf("failed to parse gateway ip %s", conf.Gateway))
|
||||
return
|
||||
}
|
||||
s.Gateway = gw
|
||||
|
|
@ -243,17 +243,17 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
|
|||
if len(conf.IPRange) > 0 {
|
||||
_, net, err := net.ParseCIDR(conf.IPRange)
|
||||
if err != nil {
|
||||
utils.InternalServerError(w, errors.Wrap(err, "failed to parse ip range"))
|
||||
utils.InternalServerError(w, fmt.Errorf("failed to parse ip range: %w", err))
|
||||
return
|
||||
}
|
||||
startIP, err := netutil.FirstIPInSubnet(net)
|
||||
if err != nil {
|
||||
utils.InternalServerError(w, errors.Wrap(err, "failed to get first ip in range"))
|
||||
utils.InternalServerError(w, fmt.Errorf("failed to get first ip in range: %w", err))
|
||||
return
|
||||
}
|
||||
lastIP, err := netutil.LastIPInSubnet(net)
|
||||
if err != nil {
|
||||
utils.InternalServerError(w, errors.Wrap(err, "failed to get last ip in range"))
|
||||
utils.InternalServerError(w, fmt.Errorf("failed to get last ip in range: %w", err))
|
||||
return
|
||||
}
|
||||
s.LeaseRange = &nettypes.LeaseRange{
|
||||
|
|
@ -296,7 +296,7 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -312,12 +312,12 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
if len(reports) == 0 {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Errorf("internal error"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("internal error"))
|
||||
return
|
||||
}
|
||||
report := reports[0]
|
||||
if report.Err != nil {
|
||||
if errors.Cause(report.Err) == define.ErrNoSuchNetwork {
|
||||
if errors.Is(report.Err, define.ErrNoSuchNetwork) {
|
||||
utils.Error(w, http.StatusNotFound, define.ErrNoSuchNetwork)
|
||||
return
|
||||
}
|
||||
|
|
@ -334,7 +334,7 @@ func Connect(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
var netConnect types.NetworkConnect
|
||||
if err := json.NewDecoder(r.Body).Decode(&netConnect); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -351,7 +351,7 @@ func Connect(w http.ResponseWriter, r *http.Request) {
|
|||
staticIP := net.ParseIP(netConnect.EndpointConfig.IPAddress)
|
||||
if staticIP == nil {
|
||||
utils.Error(w, http.StatusInternalServerError,
|
||||
errors.Errorf("failed to parse the ip address %q", netConnect.EndpointConfig.IPAddress))
|
||||
fmt.Errorf("failed to parse the ip address %q", netConnect.EndpointConfig.IPAddress))
|
||||
return
|
||||
}
|
||||
netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
|
||||
|
|
@ -363,7 +363,7 @@ func Connect(w http.ResponseWriter, r *http.Request) {
|
|||
staticIP := net.ParseIP(netConnect.EndpointConfig.IPAMConfig.IPv4Address)
|
||||
if staticIP == nil {
|
||||
utils.Error(w, http.StatusInternalServerError,
|
||||
errors.Errorf("failed to parse the ipv4 address %q", netConnect.EndpointConfig.IPAMConfig.IPv4Address))
|
||||
fmt.Errorf("failed to parse the ipv4 address %q", netConnect.EndpointConfig.IPAMConfig.IPv4Address))
|
||||
return
|
||||
}
|
||||
netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
|
||||
|
|
@ -373,7 +373,7 @@ func Connect(w http.ResponseWriter, r *http.Request) {
|
|||
staticIP := net.ParseIP(netConnect.EndpointConfig.IPAMConfig.IPv6Address)
|
||||
if staticIP == nil {
|
||||
utils.Error(w, http.StatusInternalServerError,
|
||||
errors.Errorf("failed to parse the ipv6 address %q", netConnect.EndpointConfig.IPAMConfig.IPv6Address))
|
||||
fmt.Errorf("failed to parse the ipv6 address %q", netConnect.EndpointConfig.IPAMConfig.IPv6Address))
|
||||
return
|
||||
}
|
||||
netOpts.StaticIPs = append(netOpts.StaticIPs, staticIP)
|
||||
|
|
@ -384,7 +384,7 @@ func Connect(w http.ResponseWriter, r *http.Request) {
|
|||
staticMac, err := net.ParseMAC(netConnect.EndpointConfig.MacAddress)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError,
|
||||
errors.Errorf("failed to parse the mac address %q", netConnect.EndpointConfig.IPAMConfig.IPv6Address))
|
||||
fmt.Errorf("failed to parse the mac address %q", netConnect.EndpointConfig.IPAMConfig.IPv6Address))
|
||||
return
|
||||
}
|
||||
netOpts.StaticMAC = nettypes.HardwareAddr(staticMac)
|
||||
|
|
@ -392,11 +392,11 @@ func Connect(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
err := runtime.ConnectContainerToNetwork(netConnect.Container, name, netOpts)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == define.ErrNoSuchCtr {
|
||||
if errors.Is(err, define.ErrNoSuchCtr) {
|
||||
utils.ContainerNotFound(w, netConnect.Container, err)
|
||||
return
|
||||
}
|
||||
if errors.Cause(err) == define.ErrNoSuchNetwork {
|
||||
if errors.Is(err, define.ErrNoSuchNetwork) {
|
||||
utils.Error(w, http.StatusNotFound, err)
|
||||
return
|
||||
}
|
||||
|
|
@ -412,18 +412,18 @@ func Disconnect(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
var netDisconnect types.NetworkDisconnect
|
||||
if err := json.NewDecoder(r.Body).Decode(&netDisconnect); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
name := utils.GetName(r)
|
||||
err := runtime.DisconnectContainerFromNetwork(netDisconnect.Container, name, netDisconnect.Force)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == define.ErrNoSuchCtr {
|
||||
if errors.Is(err, define.ErrNoSuchCtr) {
|
||||
utils.Error(w, http.StatusNotFound, err)
|
||||
return
|
||||
}
|
||||
if errors.Cause(err) == define.ErrNoSuchNetwork {
|
||||
if errors.Is(err, define.ErrNoSuchNetwork) {
|
||||
utils.Error(w, http.StatusNotFound, err)
|
||||
return
|
||||
}
|
||||
|
|
@ -438,7 +438,7 @@ func Prune(w http.ResponseWriter, r *http.Request) {
|
|||
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
|
||||
filterMap, err := util.PrepareFilters(r)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -1,6 +1,7 @@
package compat

import (
"errors"
"fmt"
"net/http"
"strings"

@ -11,7 +12,6 @@ import (
api "github.com/containers/podman/v4/pkg/api/types"
"github.com/gorilla/mux"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)

func ResizeTTY(w http.ResponseWriter, r *http.Request) {

@ -28,7 +28,7 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
}

if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -47,8 +47,8 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
return
}
if err := ctnr.AttachResize(sz); err != nil {
if errors.Cause(err) != define.ErrCtrStateInvalid {
utils.InternalServerError(w, errors.Wrapf(err, "cannot resize container"))
if !errors.Is(err, define.ErrCtrStateInvalid) {
utils.InternalServerError(w, fmt.Errorf("cannot resize container: %w", err))
} else {
utils.Error(w, http.StatusConflict, err)
}

@ -65,15 +65,15 @@ func ResizeTTY(w http.ResponseWriter, r *http.Request) {
return
}
if state, err := ctnr.State(); err != nil {
utils.InternalServerError(w, errors.Wrapf(err, "cannot obtain session container state"))
utils.InternalServerError(w, fmt.Errorf("cannot obtain session container state: %w", err))
return
} else if state != define.ContainerStateRunning && !query.IgnoreNotRunning {
utils.Error(w, http.StatusConflict, fmt.Errorf("container %q in wrong state %q", name, state.String()))
return
}
if err := ctnr.ExecResize(name, sz); err != nil {
if errors.Cause(err) != define.ErrExecSessionStateInvalid || !query.IgnoreNotRunning {
utils.InternalServerError(w, errors.Wrapf(err, "cannot resize session"))
if !errors.Is(err, define.ErrExecSessionStateInvalid) || !query.IgnoreNotRunning {
utils.InternalServerError(w, fmt.Errorf("cannot resize session: %w", err))
return
}
}
@ -4,7 +4,10 @@ import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"

"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/pkg/api/handlers/utils"

@ -12,14 +15,13 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/podman/v4/pkg/util"
"github.com/pkg/errors"
)

func ListSecrets(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
filtersMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
ic := abi.ContainerEngine{Libpod: runtime}

@ -106,11 +108,11 @@ func CreateSecret(w http.ResponseWriter, r *http.Request) {
}{}

if err := json.NewDecoder(r.Body).Decode(&createParams); err != nil {
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}
if len(createParams.Labels) > 0 {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(errors.New("bad parameter"), "labels not supported"))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("labels not supported: %w", errors.New("bad parameter")))
return
}

@ -121,7 +123,7 @@ func CreateSecret(w http.ResponseWriter, r *http.Request) {
ic := abi.ContainerEngine{Libpod: runtime}
report, err := ic.SecretCreate(r.Context(), createParams.Name, reader, opts)
if err != nil {
if errors.Cause(err).Error() == "secret name in use" {
if strings.Contains(err.Error(), "secret name in use") {
utils.Error(w, http.StatusConflict, err)
return
}
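The CreateSecret hunk above also swaps an exact message comparison (errors.Cause(err).Error() == "secret name in use") for strings.Contains, since a wrapped error's Error() string carries the added context as a prefix. A toy sketch of why the equality check stops matching (the message and wrapping below are illustrative, not the actual libpod error):

package main

import (
	"errors"
	"fmt"
	"strings"
)

func main() {
	inner := errors.New("secret name in use")
	err := fmt.Errorf("creating secret: %w", inner)

	fmt.Println(err.Error() == "secret name in use")                 // false: the wrapper adds a prefix
	fmt.Println(strings.Contains(err.Error(), "secret name in use")) // true
	fmt.Println(errors.Is(err, inner))                               // true, when the sentinel value is exported
}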
@ -3,6 +3,8 @@ package compat
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"time"

@ -18,7 +20,6 @@ import (
docker_api_types "github.com/docker/docker/api/types"
docker_api_types_volume "github.com/docker/docker/api/types/volume"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)

func ListVolumes(w http.ResponseWriter, r *http.Request) {

@ -27,7 +28,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
filtersMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusInternalServerError,
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -36,7 +37,7 @@ func ListVolumes(w http.ResponseWriter, r *http.Request) {
for filter := range *filtersMap {
if filter == "opts" {
utils.Error(w, http.StatusInternalServerError,
errors.Errorf("unsupported libpod filters passed to docker endpoint"))
fmt.Errorf("unsupported libpod filters passed to docker endpoint"))
return
}
}

@ -86,13 +87,13 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
query := struct{}{}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
// decode params from body
input := docker_api_types_volume.VolumeCreateBody{}
if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}

@ -103,7 +104,7 @@ func CreateVolume(w http.ResponseWriter, r *http.Request) {
if len(input.Name) != 0 {
// See if the volume exists already
existingVolume, err = runtime.GetVolume(input.Name)
if err != nil && errors.Cause(err) != define.ErrNoSuchVolume {
if err != nil && !errors.Is(err, define.ErrNoSuchVolume) {
utils.InternalServerError(w, err)
return
}

@ -219,7 +220,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {

if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -239,7 +240,7 @@ func RemoveVolume(w http.ResponseWriter, r *http.Request) {
if err == nil {
// As above, we do not pass `force` from the query parameters here
if err := runtime.RemoveVolume(r.Context(), vol, false, query.Timeout); err != nil {
if errors.Cause(err) == define.ErrVolumeBeingUsed {
if errors.Is(err, define.ErrVolumeBeingUsed) {
utils.Error(w, http.StatusConflict, err)
} else {
utils.InternalServerError(w, err)

@ -264,14 +265,14 @@ func PruneVolumes(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
filterMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "Decode()"))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("Decode(): %w", err))
return
}

f := (url.Values)(*filterMap)
filterFuncs, err := filters.GeneratePruneVolumeFilters(f)
if err != nil {
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse filters for %s", f.Encode()))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse filters for %s: %w", f.Encode(), err))
return
}
@ -1,6 +1,7 @@
package libpod

import (
"errors"
"fmt"
"io/ioutil"
"net/http"

@ -16,7 +17,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -35,7 +35,7 @@ func ContainerExists(w http.ResponseWriter, r *http.Request) {
}

if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -45,7 +45,7 @@ func ContainerExists(w http.ResponseWriter, r *http.Request) {

report, err := containerEngine.ContainerExists(r.Context(), name, options)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr {
if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}

@ -75,12 +75,12 @@ func ListContainers(w http.ResponseWriter, r *http.Request) {

filterMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to decode filter parameters for %s", r.URL.String()))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to decode filter parameters for %s: %w", r.URL.String(), err))
return
}

if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -127,7 +127,7 @@ func GetContainer(w http.ResponseWriter, r *http.Request) {
}

if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)

@ -221,7 +221,7 @@ func Checkpoint(w http.ResponseWriter, r *http.Request) {
}

if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -307,7 +307,7 @@ func Restore(w http.ResponseWriter, r *http.Request) {
// override any golang type defaults
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -344,11 +344,11 @@ func Restore(w http.ResponseWriter, r *http.Request) {
ir := abi.ImageEngine{Libpod: runtime}
report, err := ir.Exists(r.Context(), name)
if err != nil {
utils.Error(w, http.StatusNotFound, errors.Wrapf(err, "failed to find container or checkpoint image %s", name))
utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find container or checkpoint image %s: %w", name, err))
return
}
if !report.Value {
utils.Error(w, http.StatusNotFound, errors.Errorf("failed to find container or checkpoint image %s", name))
utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find container or checkpoint image %s", name))
return
}
}

@ -380,7 +380,7 @@ func InitContainer(w http.ResponseWriter, r *http.Request) {
return
}
err = ctr.Init(r.Context(), ctr.PodID() != "")
if errors.Cause(err) == define.ErrCtrStateInvalid {
if errors.Is(err, define.ErrCtrStateInvalid) {
utils.Error(w, http.StatusNotModified, err)
return
}

@ -400,7 +400,7 @@ func ShouldRestart(w http.ResponseWriter, r *http.Request) {
name := utils.GetName(r)
report, err := containerEngine.ShouldRestart(r.Context(), name)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr {
if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, name, err)
return
}
@ -2,6 +2,7 @@ package libpod
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
|
@ -29,7 +30,6 @@ import (
|
|||
utils2 "github.com/containers/podman/v4/utils"
|
||||
"github.com/containers/storage"
|
||||
"github.com/gorilla/schema"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Commit
|
||||
|
|
@ -50,11 +50,11 @@ func ImageExists(w http.ResponseWriter, r *http.Request) {
|
|||
ir := abi.ImageEngine{Libpod: runtime}
|
||||
report, err := ir.Exists(r.Context(), name)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusNotFound, errors.Wrapf(err, "failed to find image %s", name))
|
||||
utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find image %s: %w", name, err))
|
||||
return
|
||||
}
|
||||
if !report.Value {
|
||||
utils.Error(w, http.StatusNotFound, errors.Errorf("failed to find image %s", name))
|
||||
utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find image %s", name))
|
||||
return
|
||||
}
|
||||
utils.WriteResponse(w, http.StatusNoContent, "")
|
||||
|
|
@ -70,18 +70,18 @@ func ImageTree(w http.ResponseWriter, r *http.Request) {
|
|||
WhatRequires: false,
|
||||
}
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
ir := abi.ImageEngine{Libpod: runtime}
|
||||
options := entities.ImageTreeOptions{WhatRequires: query.WhatRequires}
|
||||
report, err := ir.Tree(r.Context(), name, options)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == storage.ErrImageUnknown {
|
||||
utils.Error(w, http.StatusNotFound, errors.Wrapf(err, "failed to find image %s", name))
|
||||
if errors.Is(err, storage.ErrImageUnknown) {
|
||||
utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find image %s: %w", name, err))
|
||||
return
|
||||
}
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed to generate image tree for %s", name))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to generate image tree for %s: %w", name, err))
|
||||
return
|
||||
}
|
||||
utils.WriteResponse(w, http.StatusOK, report)
|
||||
|
|
@ -91,13 +91,13 @@ func GetImage(w http.ResponseWriter, r *http.Request) {
|
|||
name := utils.GetName(r)
|
||||
newImage, err := utils.GetImage(r, name)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusNotFound, errors.Wrapf(err, "failed to find image %s", name))
|
||||
utils.Error(w, http.StatusNotFound, fmt.Errorf("failed to find image %s: %w", name, err))
|
||||
return
|
||||
}
|
||||
options := &libimage.InspectOptions{WithParent: true, WithSize: true}
|
||||
inspect, err := newImage.Inspect(r.Context(), options)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "failed in inspect image %s", inspect.ID))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed in inspect image %s: %w", inspect.ID, err))
|
||||
return
|
||||
}
|
||||
utils.WriteResponse(w, http.StatusOK, inspect)
|
||||
|
|
@ -117,15 +117,13 @@ func PruneImages(w http.ResponseWriter, r *http.Request) {
|
|||
filterMap, err := util.PrepareFilters(r)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError,
|
||||
errors.
|
||||
Wrapf(err, "failed to decode filter parameters for %s", r.URL.String()))
|
||||
fmt.Errorf("failed to decode filter parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError,
|
||||
errors.
|
||||
Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -174,7 +172,7 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest,
|
||||
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
name := utils.GetName(r)
|
||||
|
|
@ -188,23 +186,23 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
|
|||
case define.OCIArchive, define.V2s2Archive:
|
||||
tmpfile, err := ioutil.TempFile("", "api.tar")
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
|
||||
return
|
||||
}
|
||||
output = tmpfile.Name()
|
||||
if err := tmpfile.Close(); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to close tempfile: %w", err))
|
||||
return
|
||||
}
|
||||
case define.OCIManifestDir, define.V2s2ManifestDir:
|
||||
tmpdir, err := ioutil.TempDir("", "save")
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempdir"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempdir: %w", err))
|
||||
return
|
||||
}
|
||||
output = tmpdir
|
||||
default:
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Errorf("unknown format %q", query.Format))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unknown format %q", query.Format))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -233,7 +231,7 @@ func ExportImage(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
rdr, err := os.Open(output)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to read the exported tarfile: %w", err))
|
||||
return
|
||||
}
|
||||
defer rdr.Close()
|
||||
|
|
@ -254,20 +252,20 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
// References are mandatory!
|
||||
if len(query.References) == 0 {
|
||||
utils.Error(w, http.StatusBadRequest, errors.New("No references"))
|
||||
utils.Error(w, http.StatusBadRequest, errors.New("no references"))
|
||||
return
|
||||
}
|
||||
|
||||
// Format is mandatory! Currently, we only support multi-image docker
|
||||
// archives.
|
||||
if len(query.References) > 1 && query.Format != define.V2s2Archive {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Errorf("multi-image archives must use format of %s", define.V2s2Archive))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("multi-image archives must use format of %s", define.V2s2Archive))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -285,23 +283,23 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
|
|||
case define.V2s2Archive, define.OCIArchive:
|
||||
tmpfile, err := ioutil.TempFile("", "api.tar")
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
|
||||
return
|
||||
}
|
||||
output = tmpfile.Name()
|
||||
if err := tmpfile.Close(); err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to close tempfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to close tempfile: %w", err))
|
||||
return
|
||||
}
|
||||
case define.OCIManifestDir, define.V2s2ManifestDir:
|
||||
tmpdir, err := ioutil.TempDir("", "save")
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tmpdir"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tmpdir: %w", err))
|
||||
return
|
||||
}
|
||||
output = tmpdir
|
||||
default:
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Errorf("unsupported format %q", query.Format))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unsupported format %q", query.Format))
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(output)
|
||||
|
|
@ -323,7 +321,7 @@ func ExportImages(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
rdr, err := os.Open(output)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to read the exported tarfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to read the exported tarfile: %w", err))
|
||||
return
|
||||
}
|
||||
defer rdr.Close()
|
||||
|
|
@ -335,7 +333,7 @@ func ImagesLoad(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
tmpfile, err := ioutil.TempFile("", "libpod-images-load.tar")
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
|
||||
return
|
||||
}
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
|
@ -344,7 +342,7 @@ func ImagesLoad(w http.ResponseWriter, r *http.Request) {
|
|||
tmpfile.Close()
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to write archive to temporary file"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to write archive to temporary file: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -353,7 +351,7 @@ func ImagesLoad(w http.ResponseWriter, r *http.Request) {
|
|||
loadOptions := entities.ImageLoadOptions{Input: tmpfile.Name()}
|
||||
loadReport, err := imageEngine.Load(r.Context(), loadOptions)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to load image"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to load image: %w", err))
|
||||
return
|
||||
}
|
||||
utils.WriteResponse(w, http.StatusOK, loadReport)
|
||||
|
|
@ -375,7 +373,7 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -384,14 +382,14 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) {
|
|||
if len(query.URL) == 0 {
|
||||
tmpfile, err := ioutil.TempFile("", "libpod-images-import.tar")
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to create tempfile"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to create tempfile: %w", err))
|
||||
return
|
||||
}
|
||||
defer os.Remove(tmpfile.Name())
|
||||
defer tmpfile.Close()
|
||||
|
||||
if _, err := io.Copy(tmpfile, r.Body); err != nil && err != io.EOF {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to write archive to temporary file"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to write archive to temporary file: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -411,7 +409,7 @@ func ImagesImport(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
report, err := imageEngine.Import(r.Context(), importOptions)
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "unable to import tarball"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("unable to import tarball: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -433,7 +431,7 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
|
|||
// This is where you can override the golang default value for one of fields
|
||||
}
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -479,7 +477,7 @@ func PushImage(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
imageEngine := abi.ImageEngine{Libpod: runtime}
|
||||
if err := imageEngine.Push(context.Background(), source, destination, options); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "error pushing image %q", destination))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("error pushing image %q: %w", destination, err))
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -509,12 +507,12 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
|
||||
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
|
||||
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
|
||||
return
|
||||
}
|
||||
rtc, err := runtime.GetConfig()
|
||||
if err != nil {
|
||||
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to get runtime config"))
|
||||
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to get runtime config: %w", err))
return
}
sc := runtime.SystemContext()

@ -532,7 +530,7 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
case "docker":
mimeType = manifest.DockerV2Schema2MediaType
default:
utils.InternalServerError(w, errors.Errorf("unrecognized image format %q", query.Format))
utils.InternalServerError(w, fmt.Errorf("unrecognized image format %q", query.Format))
return
}
options.CommitOptions = buildah.CommitOptions{

@ -561,7 +559,7 @@ func CommitContainer(w http.ResponseWriter, r *http.Request) {
}
commitImage, err := ctr.Commit(r.Context(), destImage, options)
if err != nil && !strings.Contains(err.Error(), "is not running") {
utils.Error(w, http.StatusInternalServerError, errors.Wrapf(err, "CommitFailure"))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("CommitFailure: %w", err))
return
}
utils.WriteResponse(w, http.StatusOK, entities.IDResponse{ID: commitImage.ID()})

@ -600,8 +598,8 @@ func UntagImage(w http.ResponseWriter, r *http.Request) {

name := utils.GetName(r)
if err := imageEngine.Untag(r.Context(), name, tags, opts); err != nil {
if errors.Cause(err) == storage.ErrImageUnknown {
utils.ImageNotFound(w, name, errors.Wrapf(err, "failed to find image %s", name))
if errors.Is(err, storage.ErrImageUnknown) {
utils.ImageNotFound(w, name, fmt.Errorf("failed to find image %s: %w", name, err))
} else {
utils.Error(w, http.StatusInternalServerError, err)
}

@ -623,7 +621,7 @@ func ImagesBatchRemove(w http.ResponseWriter, r *http.Request) {
}{}

if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -647,7 +645,7 @@ func ImagesRemove(w http.ResponseWriter, r *http.Request) {
}

if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -684,7 +682,7 @@ func ImageScp(w http.ResponseWriter, r *http.Request) {
// This is where you can override the golang default value for one of fields
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -697,7 +695,7 @@ func ImageScp(w http.ResponseWriter, r *http.Request) {
}

if source != nil || dest != nil {
utils.Error(w, http.StatusBadRequest, errors.Wrapf(define.ErrInvalidArg, "cannot use the user transfer function on the remote client"))
utils.Error(w, http.StatusBadRequest, fmt.Errorf("cannot use the user transfer function on the remote client: %w", define.ErrInvalidArg))
return
}

@ -2,8 +2,11 @@ package libpod
import (
"encoding/json"
"fmt"
"net/http"

"errors"

"github.com/containers/common/libnetwork/types"
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"

@ -13,7 +16,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/infra/abi"
"github.com/containers/podman/v4/pkg/util"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)

func CreateNetwork(w http.ResponseWriter, r *http.Request) {

@ -25,7 +27,7 @@ func CreateNetwork(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
network := types.Network{}
if err := json.NewDecoder(r.Body).Decode(&network); err != nil {
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to decode request JSON payload"))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to decode request JSON payload: %w", err))
return
}

@ -52,7 +54,7 @@ func ListNetworks(w http.ResponseWriter, r *http.Request) {
filterMap, err := util.PrepareFilters(r)
if err != nil {
utils.Error(w, http.StatusInternalServerError,
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -83,7 +85,7 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
utils.Error(w, http.StatusInternalServerError,
errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}
name := utils.GetName(r)

@ -99,7 +101,7 @@ func RemoveNetwork(w http.ResponseWriter, r *http.Request) {
}
if reports[0].Err != nil {
// If the network cannot be found, we return a 404.
if errors.Cause(reports[0].Err) == define.ErrNoSuchNetwork {
if errors.Is(reports[0].Err, define.ErrNoSuchNetwork) {
utils.Error(w, http.StatusNotFound, reports[0].Err)
return
}

@ -142,18 +144,18 @@ func Connect(w http.ResponseWriter, r *http.Request) {
runtime := r.Context().Value(api.RuntimeKey).(*libpod.Runtime)
var netConnect entities.NetworkConnectOptions
if err := json.NewDecoder(r.Body).Decode(&netConnect); err != nil {
utils.Error(w, http.StatusInternalServerError, errors.Wrap(err, "failed to decode request JSON payload"))
utils.Error(w, http.StatusInternalServerError, fmt.Errorf("failed to decode request JSON payload: %w", err))
return
}
name := utils.GetName(r)

err := runtime.ConnectContainerToNetwork(netConnect.Container, name, netConnect.PerNetworkOptions)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr {
if errors.Is(err, define.ErrNoSuchCtr) {
utils.ContainerNotFound(w, netConnect.Container, err)
return
}
if errors.Cause(err) == define.ErrNoSuchNetwork {
if errors.Is(err, define.ErrNoSuchNetwork) {
utils.Error(w, http.StatusNotFound, err)
return
}

@ -2,6 +2,7 @@ package utils
import (
"context"
"errors"
"fmt"
"net/http"
"strconv"

@ -19,7 +20,6 @@ import (

"github.com/containers/podman/v4/libpod"
"github.com/gorilla/schema"
"github.com/pkg/errors"
)

type waitQueryDocker struct {

@ -39,7 +39,7 @@ func WaitContainerDocker(w http.ResponseWriter, r *http.Request) {

decoder := ctx.Value(api.DecoderKey).(*schema.Decoder)
if err = decoder.Decode(&query, r.URL.Query()); err != nil {
Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -106,7 +106,7 @@ func WaitContainerLibpod(w http.ResponseWriter, r *http.Request) {
decoder := r.Context().Value(api.DecoderKey).(*schema.Decoder)
query := waitQueryLibpod{}
if err := decoder.Decode(&query, r.URL.Query()); err != nil {
Error(w, http.StatusBadRequest, errors.Wrapf(err, "failed to parse parameters for %s", r.URL.String()))
Error(w, http.StatusBadRequest, fmt.Errorf("failed to parse parameters for %s: %w", r.URL.String(), err))
return
}

@ -130,7 +130,7 @@ func WaitContainerLibpod(w http.ResponseWriter, r *http.Request) {

exitCode, err := waitFn(conditions...)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr {
if errors.Is(err, define.ErrNoSuchCtr) {
ContainerNotFound(w, name, err)
return
}

@ -197,7 +197,7 @@ var notRunningStates = []define.ContainerStatus{

func waitRemoved(ctrWait containerWaitFn) (int32, error) {
code, err := ctrWait(define.ContainerStateUnknown)
if err != nil && errors.Cause(err) == define.ErrNoSuchCtr {
if err != nil && errors.Is(err, define.ErrNoSuchCtr) {
return code, nil
}
return code, err

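The conversions above all follow the same pattern: a sentinel comparison through errors.Cause is replaced by errors.Is, and wrapping switches from errors.Wrapf to fmt.Errorf with %w. A minimal standalone sketch of that behavior, assuming a hypothetical sentinel errNoSuchCtr as a stand-in for sentinels like define.ErrNoSuchCtr:

	package main

	import (
		"errors"
		"fmt"
	)

	// errNoSuchCtr is a hypothetical sentinel used only for this sketch.
	var errNoSuchCtr = errors.New("no such container")

	func lookup(id string) error {
		// %w keeps the sentinel reachable through the wrap chain.
		return fmt.Errorf("looking up container %q: %w", id, errNoSuchCtr)
	}

	func main() {
		err := lookup("abc")
		// errors.Is walks the %w chain, so the wrapped sentinel still matches.
		fmt.Println(errors.Is(err, errNoSuchCtr)) // true
	}
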
@ -1,6 +1,7 @@
package utils

import (
"errors"
"fmt"
"net/http"
"strings"

@ -15,7 +16,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage"
"github.com/docker/distribution/reference"
"github.com/pkg/errors"
)

// NormalizeToDockerHub normalizes the specified nameOrID to Docker Hub if the

@ -32,7 +32,7 @@ func NormalizeToDockerHub(r *http.Request, nameOrID string) (string, error) {
// 'busybox' -> 'registry.com/busybox'.
img, candidate, err := runtime.LibimageRuntime().LookupImage(nameOrID, nil)
if err != nil {
if errors.Cause(err) != storage.ErrImageUnknown {
if !errors.Is(err, storage.ErrImageUnknown) {
return "", fmt.Errorf("normalizing name for compat API: %v", err)
}
// If the image could not be resolved locally, set the

@ -73,7 +73,7 @@ func IsRegistryReference(name string) error {
if imageRef.Transport().Name() == docker.Transport.Name() {
return nil
}
return errors.Errorf("unsupported transport %s in %q: only docker transport is supported", imageRef.Transport().Name(), name)
return fmt.Errorf("unsupported transport %s in %q: only docker transport is supported", imageRef.Transport().Name(), name)
}

// ParseStorageReference parses the specified image name to a

@ -83,12 +83,12 @@ func ParseStorageReference(name string) (types.ImageReference, error) {
storagePrefix := storageTransport.Transport.Name()
imageRef, err := alltransports.ParseImageName(name)
if err == nil && imageRef.Transport().Name() != docker.Transport.Name() {
return nil, errors.Errorf("reference %q must be a storage reference", name)
return nil, fmt.Errorf("reference %q must be a storage reference", name)
} else if err != nil {
origErr := err
imageRef, err = alltransports.ParseImageName(fmt.Sprintf("%s:%s", storagePrefix, name))
if err != nil {
return nil, errors.Wrapf(origErr, "reference %q must be a storage reference", name)
return nil, fmt.Errorf("reference %q must be a storage reference: %w", name, origErr)
}
}
return imageRef, nil

@ -1,6 +1,7 @@
package filters

import (
"errors"
"fmt"
"strconv"
"strings"

@ -10,7 +11,6 @@ import (
"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/util"
"github.com/pkg/errors"
)

// GenerateContainerFilterFuncs return ContainerFilter functions based of filter.

@ -36,7 +36,7 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
for _, exitCode := range filterValues {
ec, err := strconv.ParseInt(exitCode, 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "exited code out of range %q", ec)
return nil, fmt.Errorf("exited code out of range %q: %w", ec, err)
}
exitCodes = append(exitCodes, int32(ec))
}

@ -184,7 +184,7 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
for _, podNameOrID := range filterValues {
p, err := r.LookupPod(podNameOrID)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchPod {
if errors.Is(err, define.ErrNoSuchPod) {
continue
}
return nil, err

@ -291,7 +291,7 @@ func GenerateContainerFilterFuncs(filter string, filterValues []string, r *libpo
return false
}, filterValueError
}
return nil, errors.Errorf("%s is an invalid filter", filter)
return nil, fmt.Errorf("%s is an invalid filter", filter)
}

// GeneratePruneContainerFilterFuncs return ContainerFilter functions based of filter for prune operation

@ -304,7 +304,7 @@ func GeneratePruneContainerFilterFuncs(filter string, filterValues []string, r *
case "until":
return prepareUntilFilterFunc(filterValues)
}
return nil, errors.Errorf("%s is an invalid filter", filter)
return nil, fmt.Errorf("%s is an invalid filter", filter)
}

func prepareUntilFilterFunc(filterValues []string) (func(container *libpod.Container) bool, error) {

@ -2,6 +2,7 @@ package abi
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"

@ -14,7 +15,6 @@ import (
envLib "github.com/containers/podman/v4/pkg/env"
"github.com/containers/podman/v4/utils"
"github.com/google/shlex"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -40,7 +40,7 @@ func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string,
}

if len(pulledImages) != 1 {
return errors.Errorf("internal error: expected an image to be pulled (or an error)")
return errors.New("internal error: expected an image to be pulled (or an error)")
}

// Extract the runlabel from the image.

@ -57,7 +57,7 @@ func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string,
}
}
if runlabel == "" {
return errors.Errorf("cannot find the value of label: %s in image: %s", label, imageRef)
return fmt.Errorf("cannot find the value of label: %s in image: %s", label, imageRef)
}

cmd, env, err := generateRunlabelCommand(runlabel, pulledImages[0], imageRef, args, options)

@ -86,7 +86,7 @@ func (ic *ContainerEngine) ContainerRunlabel(ctx context.Context, label string,
name := cmd[i+1]
ctr, err := ic.Libpod.LookupContainer(name)
if err != nil {
if errors.Cause(err) != define.ErrNoSuchCtr {
if !errors.Is(err, define.ErrNoSuchCtr) {
logrus.Debugf("Error occurred searching for container %s: %v", name, err)
return err
}

@ -2,6 +2,7 @@ package abi
import (
"context"
"errors"
"fmt"
"io/fs"
"io/ioutil"

@ -34,7 +35,6 @@ import (
dockerRef "github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -128,14 +128,14 @@ func (ir *ImageEngine) History(ctx context.Context, nameOrID string, opts entiti

func (ir *ImageEngine) Mount(ctx context.Context, nameOrIDs []string, opts entities.ImageMountOptions) ([]*entities.ImageMountReport, error) {
if opts.All && len(nameOrIDs) > 0 {
return nil, errors.Errorf("cannot mix --all with images")
return nil, errors.New("cannot mix --all with images")
}

if os.Geteuid() != 0 {
if driver := ir.Libpod.StorageConfig().GraphDriverName; driver != "vfs" {
// Do not allow to mount a graphdriver that is not vfs if we are creating the userns as part
// of the mount command.
return nil, errors.Errorf("cannot mount using driver %s in rootless mode", driver)
return nil, fmt.Errorf("cannot mount using driver %s in rootless mode", driver)
}

became, ret, err := rootless.BecomeRootInUserNS("")

@ -194,7 +194,7 @@ func (ir *ImageEngine) Mount(ctx context.Context, nameOrIDs []string, opts entit

func (ir *ImageEngine) Unmount(ctx context.Context, nameOrIDs []string, options entities.ImageUnmountOptions) ([]*entities.ImageUnmountReport, error) {
if options.All && len(nameOrIDs) > 0 {
return nil, errors.Errorf("cannot mix --all with images")
return nil, errors.New("cannot mix --all with images")
}

listImagesOptions := &libimage.ListImagesOptions{}

@ -292,7 +292,7 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri
case "v2s2", "docker":
manifestType = manifest.DockerV2Schema2MediaType
default:
return errors.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", options.Format)
return fmt.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", options.Format)
}

pushOptions := &libimage.PushOptions{}

@ -523,12 +523,12 @@ func removeErrorsToExitCode(rmErrors []error) int {
}

for _, e := range rmErrors {
switch errors.Cause(e) {
case storage.ErrImageUnknown, storage.ErrLayerUnknown:
//nolint:gocritic
if errors.Is(e, storage.ErrImageUnknown) || errors.Is(e, storage.ErrLayerUnknown) {
noSuchImageErrors = true
case storage.ErrImageUsedByContainer:
} else if errors.Is(e, storage.ErrImageUsedByContainer) {
inUseErrors = true
default:
} else {
otherErrors = true
}
}

@ -590,11 +590,11 @@ func (ir *ImageEngine) Shutdown(_ context.Context) {
func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entities.SignOptions) (*entities.SignReport, error) {
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
return nil, errors.Wrap(err, "error initializing GPG")
return nil, fmt.Errorf("error initializing GPG: %w", err)
}
defer mech.Close()
if err := mech.SupportsSigning(); err != nil {
return nil, errors.Wrap(err, "signing is not supported")
return nil, fmt.Errorf("signing is not supported: %w", err)
}
sc := ir.Libpod.SystemContext()
sc.DockerCertPath = options.CertDir

@ -604,11 +604,11 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
err = func() error {
srcRef, err := alltransports.ParseImageName(signimage)
if err != nil {
return errors.Wrapf(err, "error parsing image name")
return fmt.Errorf("error parsing image name: %w", err)
}
rawSource, err := srcRef.NewImageSource(ctx, sc)
if err != nil {
return errors.Wrapf(err, "error getting image source")
return fmt.Errorf("error getting image source: %w", err)
}
defer func() {
if err = rawSource.Close(); err != nil {

@ -617,17 +617,17 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
}()
topManifestBlob, manifestType, err := rawSource.GetManifest(ctx, nil)
if err != nil {
return errors.Wrapf(err, "error getting manifest blob")
return fmt.Errorf("error getting manifest blob: %w", err)
}
dockerReference := rawSource.Reference().DockerReference()
if dockerReference == nil {
return errors.Errorf("cannot determine canonical Docker reference for destination %s", transports.ImageName(rawSource.Reference()))
return fmt.Errorf("cannot determine canonical Docker reference for destination %s", transports.ImageName(rawSource.Reference()))
}
var sigStoreDir string
if options.Directory != "" {
repo := reference.Path(dockerReference)
if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
return errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", dockerReference.String())
return fmt.Errorf("unexpected path elements in Docker reference %s for signature storage", dockerReference.String())
}
sigStoreDir = filepath.Join(options.Directory, repo)
} else {

@ -647,11 +647,11 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie

if options.All {
if !manifest.MIMETypeIsMultiImage(manifestType) {
return errors.Errorf("%s is not a multi-architecture image (manifest type %s)", signimage, manifestType)
return fmt.Errorf("%s is not a multi-architecture image (manifest type %s)", signimage, manifestType)
}
list, err := manifest.ListFromBlob(topManifestBlob, manifestType)
if err != nil {
return errors.Wrapf(err, "Error parsing manifest list %q", string(topManifestBlob))
return fmt.Errorf("error parsing manifest list %q: %w", string(topManifestBlob), err)
}
instanceDigests := list.Instances()
for _, instanceDigest := range instanceDigests {

@ -661,13 +661,13 @@ func (ir *ImageEngine) Sign(ctx context.Context, names []string, options entitie
return err
}
if err = putSignature(man, mech, sigStoreDir, instanceDigest, dockerReference, options); err != nil {
return errors.Wrapf(err, "error storing signature for %s, %v", dockerReference.String(), instanceDigest)
return fmt.Errorf("error storing signature for %s, %v: %w", dockerReference.String(), instanceDigest, err)
}
}
return nil
}
if err = putSignature(topManifestBlob, mech, sigStoreDir, manifestDigest, dockerReference, options); err != nil {
return errors.Wrapf(err, "error storing signature for %s, %v", dockerReference.String(), manifestDigest)
return fmt.Errorf("error storing signature for %s, %v: %w", dockerReference.String(), manifestDigest, err)
}
return nil
}()

@ -694,7 +694,7 @@ func (ir *ImageEngine) Scp(ctx context.Context, src, dst string, parentFlags []s

func Transfer(ctx context.Context, source entities.ImageScpOptions, dest entities.ImageScpOptions, parentFlags []string) error {
if source.User == "" {
return errors.Wrapf(define.ErrInvalidArg, "you must define a user when transferring from root to rootless storage")
return fmt.Errorf("you must define a user when transferring from root to rootless storage: %w", define.ErrInvalidArg)
}
podman, err := os.Executable()
if err != nil {

@ -881,7 +881,7 @@ func getSigFilename(sigStoreDirPath string) (string, error) {

func localPathFromURI(url *url.URL) (string, error) {
if url.Scheme != "file" {
return "", errors.Errorf("writing to %s is not supported. Use a supported scheme", url.String())
return "", fmt.Errorf("writing to %s is not supported. Use a supported scheme", url.String())
}
return url.Path, nil
}

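The removeErrorsToExitCode hunk above swaps a switch on errors.Cause for an errors.Is chain. A minimal sketch of why, using hypothetical stand-ins for the storage sentinels; errors.Is matches even when the sentinel has been wrapped with %w, which a direct comparison against errors.Cause does not guarantee:

	package main

	import (
		"errors"
		"fmt"
	)

	// Hypothetical stand-ins for storage.ErrImageUnknown and storage.ErrImageUsedByContainer.
	var (
		errImageUnknown = errors.New("image not known")
		errImageInUse   = errors.New("image used by container")
	)

	func classify(errs []error) (noSuchImage, inUse, other bool) {
		for _, e := range errs {
			// errors.Is walks the whole wrap chain built with %w.
			if errors.Is(e, errImageUnknown) {
				noSuchImage = true
			} else if errors.Is(e, errImageInUse) {
				inUse = true
			} else {
				other = true
			}
		}
		return
	}

	func main() {
		wrapped := fmt.Errorf("removing image: %w", errImageInUse)
		fmt.Println(classify([]error{wrapped})) // false true false
	}
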
@ -4,9 +4,12 @@ import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"strings"

"errors"

"github.com/containers/common/libimage"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/manifest"

@ -17,7 +20,6 @@ import (
"github.com/containers/storage"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@ -46,7 +48,7 @@ func (ir *ImageEngine) ManifestCreate(ctx context.Context, name string, images [
func (ir *ImageEngine) ManifestExists(ctx context.Context, name string) (*entities.BoolReport, error) {
_, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)
if err != nil {
if errors.Cause(err) == storage.ErrImageUnknown {
if errors.Is(err, storage.ErrImageUnknown) {
return &entities.BoolReport{Value: false}, nil
}
return nil, err

@ -63,15 +65,13 @@ func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string) ([]byte

manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)
if err != nil {
switch errors.Cause(err) {
// Do a remote inspect if there's no local image or if the
// local image is not a manifest list.
case storage.ErrImageUnknown, libimage.ErrNotAManifestList:
if errors.Is(err, storage.ErrImageUnknown) || errors.Is(err, libimage.ErrNotAManifestList) {
// Do a remote inspect if there's no local image or if the
// local image is not a manifest list.
return ir.remoteManifestInspect(ctx, name)

default:
return nil, err
}

return nil, err
}

schema2List, err := manifestList.Inspect()

@ -86,7 +86,7 @@ func (ir *ImageEngine) ManifestInspect(ctx context.Context, name string) ([]byte

var b bytes.Buffer
if err := json.Indent(&b, rawSchema2List, "", " "); err != nil {
return nil, errors.Wrapf(err, "error rendering manifest %s for display", name)
return nil, fmt.Errorf("error rendering manifest %s for display: %w", name, err)
}
return b.Bytes(), nil
}

@ -113,8 +113,7 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string) (
// FIXME should we use multierror package instead?

// we want the new line here so ignore the linter
//nolint:revive
latestErr = errors.Wrapf(latestErr, "tried %v\n", e)
latestErr = fmt.Errorf("tried %v\n: %w", e, latestErr)
}
}

@ -125,14 +124,14 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string) (
}
src, err := ref.NewImageSource(ctx, sys)
if err != nil {
appendErr(errors.Wrapf(err, "reading image %q", transports.ImageName(ref)))
appendErr(fmt.Errorf("reading image %q: %w", transports.ImageName(ref), err))
continue
}
defer src.Close()

manifestBytes, manifestType, err := src.GetManifest(ctx, nil)
if err != nil {
appendErr(errors.Wrapf(err, "loading manifest %q", transports.ImageName(ref)))
appendErr(fmt.Errorf("loading manifest %q: %w", transports.ImageName(ref), err))
continue
}

@ -150,7 +149,7 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string) (
logrus.Warnf("The manifest type %s is not a manifest list but a single image.", manType)
schema2Manifest, err := manifest.Schema2FromManifest(result)
if err != nil {
return nil, errors.Wrapf(err, "error parsing manifest blob %q as a %q", string(result), manType)
return nil, fmt.Errorf("error parsing manifest blob %q as a %q: %w", string(result), manType, err)
}
if result, err = schema2Manifest.Serialize(); err != nil {
return nil, err

@ -158,7 +157,7 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string) (
default:
listBlob, err := manifest.ListFromBlob(result, manType)
if err != nil {
return nil, errors.Wrapf(err, "error parsing manifest blob %q as a %q", string(result), manType)
return nil, fmt.Errorf("error parsing manifest blob %q as a %q: %w", string(result), manType, err)
}
list, err := listBlob.ConvertToMIMEType(manifest.DockerV2ListMediaType)
if err != nil {

@ -170,7 +169,7 @@ func (ir *ImageEngine) remoteManifestInspect(ctx context.Context, name string) (
}

if err = json.Indent(&b, result, "", " "); err != nil {
return nil, errors.Wrapf(err, "error rendering manifest %s for display", name)
return nil, fmt.Errorf("error rendering manifest %s for display: %w", name, err)
}
return b.Bytes(), nil
}

@ -213,7 +212,7 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, name string, images []st
for _, annotationSpec := range opts.Annotation {
spec := strings.SplitN(annotationSpec, "=", 2)
if len(spec) != 2 {
return "", errors.Errorf("no value given for annotation %q", spec[0])
return "", fmt.Errorf("no value given for annotation %q", spec[0])
}
annotations[spec[0]] = spec[1]
}

@ -231,7 +230,7 @@ func (ir *ImageEngine) ManifestAdd(ctx context.Context, name string, images []st
func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string, opts entities.ManifestAnnotateOptions) (string, error) {
instanceDigest, err := digest.Parse(image)
if err != nil {
return "", errors.Errorf(`invalid image digest "%s": %v`, image, err)
return "", fmt.Errorf(`invalid image digest "%s": %v`, image, err)
}

manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)

@ -251,7 +250,7 @@ func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string,
for _, annotationSpec := range opts.Annotation {
spec := strings.SplitN(annotationSpec, "=", 2)
if len(spec) != 2 {
return "", errors.Errorf("no value given for annotation %q", spec[0])
return "", fmt.Errorf("no value given for annotation %q", spec[0])
}
annotations[spec[0]] = spec[1]
}

@ -269,7 +268,7 @@ func (ir *ImageEngine) ManifestAnnotate(ctx context.Context, name, image string,
func (ir *ImageEngine) ManifestRemoveDigest(ctx context.Context, name, image string) (string, error) {
instanceDigest, err := digest.Parse(image)
if err != nil {
return "", errors.Errorf(`invalid image digest "%s": %v`, image, err)
return "", fmt.Errorf(`invalid image digest "%s": %v`, image, err)
}

manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)

@ -293,7 +292,7 @@ func (ir *ImageEngine) ManifestRm(ctx context.Context, names []string) (report *
func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination string, opts entities.ImagePushOptions) (string, error) {
manifestList, err := ir.Libpod.LibimageRuntime().LookupManifestList(name)
if err != nil {
return "", errors.Wrapf(err, "error retrieving local image from image name %s", name)
return "", fmt.Errorf("error retrieving local image from image name %s: %w", name, err)
}

var manifestType string

@ -304,7 +303,7 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin
case "v2s2", "docker":
manifestType = manifest.DockerV2Schema2MediaType
default:
return "", errors.Errorf("unknown format %q. Choose one of the supported formats: 'oci' or 'v2s2'", opts.Format)
return "", fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci' or 'v2s2'", opts.Format)
}
}

@ -333,7 +332,7 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin

if opts.Rm {
if _, rmErrors := ir.Libpod.LibimageRuntime().RemoveImages(ctx, []string{manifestList.ID()}, nil); len(rmErrors) > 0 {
return "", errors.Wrap(rmErrors[0], "error removing manifest after push")
return "", fmt.Errorf("error removing manifest after push: %w", rmErrors[0])
}
}

@ -2,6 +2,8 @@ package abi
import (
"context"
"errors"
"fmt"
"strconv"

"github.com/containers/common/libnetwork/types"

@ -9,7 +11,6 @@ import (
"github.com/containers/common/pkg/util"
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/pkg/errors"
)

func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.NetworkListOptions) ([]types.Network, error) {

@ -20,16 +21,16 @@ func (ic *ContainerEngine) NetworkList(ctx context.Context, options entities.Net
if filterDangling {
switch len(val) {
case 0:
return nil, errors.Errorf("got no values for filter key \"dangling\"")
return nil, fmt.Errorf("got no values for filter key \"dangling\"")
case 1:
var err error
wantDangling, err = strconv.ParseBool(val[0])
if err != nil {
return nil, errors.Errorf("invalid dangling filter value \"%v\"", val[0])
return nil, fmt.Errorf("invalid dangling filter value \"%v\"", val[0])
}
delete(options.Filters, "dangling")
default:
return nil, errors.Errorf("got more than one value for filter key \"dangling\"")
return nil, fmt.Errorf("got more than one value for filter key \"dangling\"")
}
}

@ -56,11 +57,11 @@ func (ic *ContainerEngine) NetworkInspect(ctx context.Context, namesOrIds []stri
for _, name := range namesOrIds {
net, err := ic.Libpod.Network().NetworkInspect(name)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchNetwork {
errs = append(errs, errors.Wrapf(err, "network %s", name))
if errors.Is(err, define.ErrNoSuchNetwork) {
errs = append(errs, fmt.Errorf("network %s: %w", name, err))
continue
} else {
return nil, nil, errors.Wrapf(err, "error inspecting network %s", name)
return nil, nil, fmt.Errorf("error inspecting network %s: %w", name, err)
}
}
networks = append(networks, net)

@ -80,8 +81,8 @@ func (ic *ContainerEngine) NetworkReload(ctx context.Context, names []string, op
report.Id = ctr.ID()
report.Err = ctr.ReloadNetwork()
// ignore errors for invalid ctr state and network mode when --all is used
if options.All && (errors.Cause(report.Err) == define.ErrCtrStateInvalid ||
errors.Cause(report.Err) == define.ErrNetworkModeInvalid) {
if options.All && (errors.Is(report.Err, define.ErrCtrStateInvalid) ||
errors.Is(report.Err, define.ErrNetworkModeInvalid)) {
continue
}
reports = append(reports, report)

@ -113,7 +114,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o
// if user passes force, we nuke containers and pods
if !options.Force {
// Without the force option, we return an error
return reports, errors.Wrapf(define.ErrNetworkInUse, "%q has associated containers with it. Use -f to forcibly delete containers and pods", name)
return reports, fmt.Errorf("%q has associated containers with it. Use -f to forcibly delete containers and pods: %w", name, define.ErrNetworkInUse)
}
if c.IsInfra() {
// if we have a infra container we need to remove the pod

@ -124,7 +125,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o
if err := ic.Libpod.RemovePod(ctx, pod, true, true, options.Timeout); err != nil {
return reports, err
}
} else if err := ic.Libpod.RemoveContainer(ctx, c, true, true, options.Timeout); err != nil && errors.Cause(err) != define.ErrNoSuchCtr {
} else if err := ic.Libpod.RemoveContainer(ctx, c, true, true, options.Timeout); err != nil && !errors.Is(err, define.ErrNoSuchCtr) {
return reports, err
}
}

@ -139,7 +140,7 @@ func (ic *ContainerEngine) NetworkRm(ctx context.Context, namesOrIds []string, o

func (ic *ContainerEngine) NetworkCreate(ctx context.Context, network types.Network) (*types.Network, error) {
if util.StringInSlice(network.Name, []string{"none", "host", "bridge", "private", "slirp4netns", "container", "ns"}) {
return nil, errors.Errorf("cannot create network with name %q because it conflicts with a valid network mode", network.Name)
return nil, fmt.Errorf("cannot create network with name %q because it conflicts with a valid network mode", network.Name)
}
network, err := ic.Libpod.Network().NetworkCreate(network)
if err != nil {

@ -3,6 +3,7 @@ package abi
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"io/ioutil"

@ -29,7 +30,6 @@ import (
"github.com/containers/podman/v4/pkg/util"
"github.com/ghodss/yaml"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
yamlv3 "gopkg.in/yaml.v3"
)

@ -114,7 +114,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
// sort kube kinds
documentList, err = sortKubeKinds(documentList)
if err != nil {
return nil, errors.Wrap(err, "unable to sort kube kinds")
return nil, fmt.Errorf("unable to sort kube kinds: %w", err)
}

ipIndex := 0

@ -126,7 +126,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
for _, document := range documentList {
kind, err := getKubeKind(document)
if err != nil {
return nil, errors.Wrap(err, "unable to read kube YAML")
return nil, fmt.Errorf("unable to read kube YAML: %w", err)
}

// TODO: create constants for the various "kinds" of yaml files.

@ -154,14 +154,14 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
var podTemplateSpec v1.PodTemplateSpec

if err := yaml.Unmarshal(document, &podYAML); err != nil {
return nil, errors.Wrap(err, "unable to read YAML as Kube Pod")
return nil, fmt.Errorf("unable to read YAML as Kube Pod: %w", err)
}

podTemplateSpec.ObjectMeta = podYAML.ObjectMeta
podTemplateSpec.Spec = podYAML.Spec
for name, val := range podYAML.Annotations {
if len(val) > define.MaxKubeAnnotation {
return nil, errors.Errorf("invalid annotation %q=%q value length exceeds Kubernetetes max %d", name, val, define.MaxKubeAnnotation)
return nil, fmt.Errorf("invalid annotation %q=%q value length exceeds Kubernetetes max %d", name, val, define.MaxKubeAnnotation)
}
}
for name, val := range options.Annotations {

@ -182,7 +182,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
var deploymentYAML v1apps.Deployment

if err := yaml.Unmarshal(document, &deploymentYAML); err != nil {
return nil, errors.Wrap(err, "unable to read YAML as Kube Deployment")
return nil, fmt.Errorf("unable to read YAML as Kube Deployment: %w", err)
}

r, err := ic.playKubeDeployment(ctx, &deploymentYAML, options, &ipIndex, configMaps, serviceContainer)

@ -196,7 +196,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
var pvcYAML v1.PersistentVolumeClaim

if err := yaml.Unmarshal(document, &pvcYAML); err != nil {
return nil, errors.Wrap(err, "unable to read YAML as Kube PersistentVolumeClaim")
return nil, fmt.Errorf("unable to read YAML as Kube PersistentVolumeClaim: %w", err)
}

r, err := ic.playKubePVC(ctx, &pvcYAML)

@ -210,7 +210,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options
var configMap v1.ConfigMap

if err := yaml.Unmarshal(document, &configMap); err != nil {
return nil, errors.Wrap(err, "unable to read YAML as Kube ConfigMap")
return nil, fmt.Errorf("unable to read YAML as Kube ConfigMap: %w", err)
}
configMaps = append(configMaps, configMap)
default:

@ -240,7 +240,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM

deploymentName = deploymentYAML.ObjectMeta.Name
if deploymentName == "" {
return nil, errors.Errorf("Deployment does not have a name")
return nil, errors.New("deployment does not have a name")
}
numReplicas = 1
if deploymentYAML.Spec.Replicas != nil {

@ -253,7 +253,7 @@ func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAM
podName := fmt.Sprintf("%s-pod-%d", deploymentName, i)
podReport, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, deploymentYAML.Annotations, configMaps, serviceContainer)
if err != nil {
return nil, errors.Wrapf(err, "error encountered while bringing up pod %s", podName)
return nil, fmt.Errorf("error encountered while bringing up pod %s: %w", podName, err)
}
report.Pods = append(report.Pods, podReport.Pods...)
}

@ -275,7 +275,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY

// Assert the pod has a name
if podName == "" {
return nil, errors.Errorf("pod does not have a name")
return nil, fmt.Errorf("pod does not have a name")
}

podOpt := entities.PodCreateOptions{

@ -295,7 +295,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}

if (ns.IsBridge() && len(networks) == 0) || ns.IsHost() {
return nil, errors.Errorf("invalid value passed to --network: bridge or host networking must be configured in YAML")
return nil, fmt.Errorf("invalid value passed to --network: bridge or host networking must be configured in YAML")
}

podOpt.Net.Network = ns

@ -316,10 +316,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
// FIXME This is very hard to support properly with a good ux
if len(options.StaticIPs) > *ipIndex {
if !podOpt.Net.Network.IsBridge() {
return nil, errors.Wrap(define.ErrInvalidArg, "static ip addresses can only be set when the network mode is bridge")
return nil, fmt.Errorf("static ip addresses can only be set when the network mode is bridge: %w", define.ErrInvalidArg)
}
if len(podOpt.Net.Networks) != 1 {
return nil, errors.Wrap(define.ErrInvalidArg, "cannot set static ip addresses for more than network, use netname:ip=<ip> syntax to specify ips for more than network")
return nil, fmt.Errorf("cannot set static ip addresses for more than network, use netname:ip=<ip> syntax to specify ips for more than network: %w", define.ErrInvalidArg)
}
for name, netOpts := range podOpt.Net.Networks {
netOpts.StaticIPs = append(netOpts.StaticIPs, options.StaticIPs[*ipIndex])

@ -331,10 +331,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
}
if len(options.StaticMACs) > *ipIndex {
if !podOpt.Net.Network.IsBridge() {
return nil, errors.Wrap(define.ErrInvalidArg, "static mac address can only be set when the network mode is bridge")
return nil, fmt.Errorf("static mac address can only be set when the network mode is bridge: %w", define.ErrInvalidArg)
}
if len(podOpt.Net.Networks) != 1 {
return nil, errors.Wrap(define.ErrInvalidArg, "cannot set static mac address for more than network, use netname:mac=<mac> syntax to specify mac for more than network")
return nil, fmt.Errorf("cannot set static mac address for more than network, use netname:mac=<mac> syntax to specify mac for more than network: %w", define.ErrInvalidArg)
}
for name, netOpts := range podOpt.Net.Networks {
netOpts.StaticMAC = nettypes.HardwareAddr(options.StaticMACs[*ipIndex])

@ -370,11 +370,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY

cm, err := readConfigMapFromFile(f)
if err != nil {
return nil, errors.Wrapf(err, "%q", p)
return nil, fmt.Errorf("%q: %w", p, err)
}

if _, present := configMapIndex[cm.Name]; present {
return nil, errors.Errorf("ambiguous configuration: the same config map %s is present in YAML and in --configmaps %s file", cm.Name, p)
return nil, fmt.Errorf("ambiguous configuration: the same config map %s is present in YAML and in --configmaps %s file", cm.Name, p)
}

configMaps = append(configMaps, cm)

@ -396,22 +396,22 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
// error out instead reuse the current volume.
vol, err = ic.Libpod.GetVolume(v.Source)
if err != nil {
return nil, errors.Wrapf(err, "cannot re-use local volume for volume from configmap %q", v.Source)
return nil, fmt.Errorf("cannot re-use local volume for volume from configmap %q: %w", v.Source, err)
}
} else {
return nil, errors.Wrapf(err, "cannot create a local volume for volume from configmap %q", v.Source)
return nil, fmt.Errorf("cannot create a local volume for volume from configmap %q: %w", v.Source, err)
}
}
mountPoint, err := vol.MountPoint()
if err != nil || mountPoint == "" {
return nil, errors.Wrapf(err, "unable to get mountpoint of volume %q", vol.Name())
return nil, fmt.Errorf("unable to get mountpoint of volume %q: %w", vol.Name(), err)
}
// Create files and add data to the volume mountpoint based on the Items in the volume
for k, v := range v.Items {
dataPath := filepath.Join(mountPoint, k)
f, err := os.Create(dataPath)
if err != nil {
return nil, errors.Wrapf(err, "cannot create file %q at volume mountpoint %q", k, mountPoint)
return nil, fmt.Errorf("cannot create file %q at volume mountpoint %q: %w", k, mountPoint, err)
}
defer f.Close()
_, err = f.WriteString(v)

@ -492,12 +492,12 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
for _, initCtr := range podYAML.Spec.InitContainers {
// Error out if same name is used for more than one container
if _, ok := ctrNames[initCtr.Name]; ok {
return nil, errors.Errorf("the pod %q is invalid; duplicate container name %q detected", podName, initCtr.Name)
return nil, fmt.Errorf("the pod %q is invalid; duplicate container name %q detected", podName, initCtr.Name)
}
ctrNames[initCtr.Name] = ""
// Init containers cannot have either of lifecycle, livenessProbe, readinessProbe, or startupProbe set
if initCtr.Lifecycle != nil || initCtr.LivenessProbe != nil || initCtr.ReadinessProbe != nil || initCtr.StartupProbe != nil {
return nil, errors.Errorf("cannot create an init container that has either of lifecycle, livenessProbe, readinessProbe, or startupProbe set")
return nil, fmt.Errorf("cannot create an init container that has either of lifecycle, livenessProbe, readinessProbe, or startupProbe set")
}
pulledImage, labels, err := ic.getImageAndLabelInfo(ctx, cwd, annotations, writer, initCtr, options)
if err != nil {

@ -548,7 +548,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
for _, container := range podYAML.Spec.Containers {
// Error out if the same name is used for more than one container
if _, ok := ctrNames[container.Name]; ok {
return nil, errors.Errorf("the pod %q is invalid; duplicate container name %q detected", podName, container.Name)
return nil, fmt.Errorf("the pod %q is invalid; duplicate container name %q detected", podName, container.Name)
}
ctrNames[container.Name] = ""
pulledImage, labels, err := ic.getImageAndLabelInfo(ctx, cwd, annotations, writer, container, options)

@ -599,11 +599,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY
if options.Start != types.OptionalBoolFalse {
// Start the containers
podStartErrors, err := pod.Start(ctx)
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
return nil, err
}
for id, err := range podStartErrors {
playKubePod.ContainerErrors = append(playKubePod.ContainerErrors, errors.Wrapf(err, "error starting container %s", id).Error())
playKubePod.ContainerErrors = append(playKubePod.ContainerErrors, fmt.Errorf("error starting container %s: %w", id, err).Error())
fmt.Println(playKubePod.ContainerErrors)
}
}

@ -735,14 +735,14 @@ func (ic *ContainerEngine) playKubePVC(ctx context.Context, pvcYAML *v1.Persiste
case util.VolumeUIDAnnotation:
uid, err := strconv.Atoi(v)
if err != nil {
return nil, errors.Wrapf(err, "cannot convert uid %s to integer", v)
return nil, fmt.Errorf("cannot convert uid %s to integer: %w", v, err)
}
volOptions = append(volOptions, libpod.WithVolumeUID(uid))
opts["UID"] = v
case util.VolumeGIDAnnotation:
gid, err := strconv.Atoi(v)
if err != nil {
return nil, errors.Wrapf(err, "cannot convert gid %s to integer", v)
return nil, fmt.Errorf("cannot convert gid %s to integer: %w", v, err)
}
volOptions = append(volOptions, libpod.WithVolumeGID(gid))
opts["GID"] = v

@ -771,15 +771,15 @@ func readConfigMapFromFile(r io.Reader) (v1.ConfigMap, error) {

content, err := ioutil.ReadAll(r)
if err != nil {
return cm, errors.Wrapf(err, "unable to read ConfigMap YAML content")
return cm, fmt.Errorf("unable to read ConfigMap YAML content: %w", err)
}

if err := yaml.Unmarshal(content, &cm); err != nil {
return cm, errors.Wrapf(err, "unable to read YAML as Kube ConfigMap")
return cm, fmt.Errorf("unable to read YAML as Kube ConfigMap: %w", err)
}

if cm.Kind != "ConfigMap" {
return cm, errors.Errorf("invalid YAML kind: %q. [ConfigMap] is the only supported by --configmap", cm.Kind)
return cm, fmt.Errorf("invalid YAML kind: %q. [ConfigMap] is the only supported by --configmap", cm.Kind)
}

return cm, nil

@ -799,14 +799,14 @@ func splitMultiDocYAML(yamlContent []byte) ([][]byte, error) {
break
}
if err != nil {
return nil, errors.Wrapf(err, "multi doc yaml could not be split")
return nil, fmt.Errorf("multi doc yaml could not be split: %w", err)
}

if o != nil {
// back to bytes
document, err := yamlv3.Marshal(o)
if err != nil {
return nil, errors.Wrapf(err, "individual doc yaml could not be marshalled")
return nil, fmt.Errorf("individual doc yaml could not be marshalled: %w", err)
}

documentList = append(documentList, document)

@ -915,27 +915,27 @@ func (ic *ContainerEngine) PlayKubeDown(ctx context.Context, body io.Reader, _ e
// sort kube kinds
documentList, err = sortKubeKinds(documentList)
if err != nil {
return nil, errors.Wrap(err, "unable to sort kube kinds")
return nil, fmt.Errorf("unable to sort kube kinds: %w", err)
}

for _, document := range documentList {
kind, err := getKubeKind(document)
if err != nil {
return nil, errors.Wrap(err, "unable to read as kube YAML")
return nil, fmt.Errorf("unable to read as kube YAML: %w", err)
}

switch kind {
case "Pod":
var podYAML v1.Pod
if err := yaml.Unmarshal(document, &podYAML); err != nil {
return nil, errors.Wrap(err, "unable to read YAML as Kube Pod")
return nil, fmt.Errorf("unable to read YAML as Kube Pod: %w", err)
}
podNames = append(podNames, podYAML.ObjectMeta.Name)
case "Deployment":
var deploymentYAML v1apps.Deployment

if err := yaml.Unmarshal(document, &deploymentYAML); err != nil {
return nil, errors.Wrap(err, "unable to read YAML as Kube Deployment")
return nil, fmt.Errorf("unable to read YAML as Kube Deployment: %w", err)
}
var numReplicas int32 = 1
deploymentName := deploymentYAML.ObjectMeta.Name

@ -2,6 +2,8 @@ package abi
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
|
|
@ -12,7 +14,6 @@ import (
|
|||
"github.com/containers/podman/v4/pkg/signal"
|
||||
"github.com/containers/podman/v4/pkg/specgen"
|
||||
"github.com/containers/podman/v4/pkg/specgen/generate"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
|
|
@ -49,7 +50,7 @@ func getPodsByContext(all, latest bool, pods []string, runtime *libpod.Runtime)
|
|||
|
||||
func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrID string) (*entities.BoolReport, error) {
|
||||
_, err := ic.Libpod.LookupPod(nameOrID)
|
||||
if err != nil && errors.Cause(err) != define.ErrNoSuchPod {
|
||||
if err != nil && !errors.Is(err, define.ErrNoSuchPod) {
|
||||
return nil, err
|
||||
}
|
||||
return &entities.BoolReport{Value: err == nil}, nil
|
||||
|
|
@ -69,14 +70,14 @@ func (ic *ContainerEngine) PodKill(ctx context.Context, namesOrIds []string, opt
|
|||
for _, p := range pods {
|
||||
report := entities.PodKillReport{Id: p.ID()}
|
||||
conErrs, err := p.Kill(ctx, uint(sig))
|
||||
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
|
||||
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
|
||||
report.Errs = []error{err}
|
||||
reports = append(reports, &report)
|
||||
continue
|
||||
}
|
||||
if len(conErrs) > 0 {
|
||||
for id, err := range conErrs {
|
||||
report.Errs = append(report.Errs, errors.Wrapf(err, "error killing container %s", id))
|
||||
report.Errs = append(report.Errs, fmt.Errorf("error killing container %s: %w", id, err))
|
||||
}
|
||||
reports = append(reports, &report)
|
||||
continue
|
||||
|
|
@ -110,7 +111,7 @@ func (ic *ContainerEngine) PodLogs(ctx context.Context, nameOrID string, options
|
|||
}
|
||||
}
|
||||
if !ctrFound {
|
||||
return errors.Wrapf(define.ErrNoSuchCtr, "container %s is not in pod %s", options.ContainerName, nameOrID)
|
||||
return fmt.Errorf("container %s is not in pod %s: %w", options.ContainerName, nameOrID, define.ErrNoSuchCtr)
|
||||
}
|
||||
} else {
|
||||
// No container name specified select all containers
|
||||
|
|
@ -135,13 +136,13 @@ func (ic *ContainerEngine) PodPause(ctx context.Context, namesOrIds []string, op
|
|||
for _, p := range pods {
|
||||
report := entities.PodPauseReport{Id: p.ID()}
|
||||
errs, err := p.Pause(ctx)
|
||||
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
|
||||
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
|
||||
report.Errs = []error{err}
|
||||
continue
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
for id, v := range errs {
|
||||
report.Errs = append(report.Errs, errors.Wrapf(v, "error pausing container %s", id))
|
||||
report.Errs = append(report.Errs, fmt.Errorf("error pausing container %s: %w", id, v))
|
||||
}
|
||||
reports = append(reports, &report)
|
||||
continue
|
||||
|
|
@ -160,13 +161,13 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
|
|||
for _, p := range pods {
|
||||
report := entities.PodUnpauseReport{Id: p.ID()}
|
||||
errs, err := p.Unpause(ctx)
|
||||
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
|
||||
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
|
||||
report.Errs = []error{err}
|
||||
continue
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
for id, v := range errs {
|
||||
report.Errs = append(report.Errs, errors.Wrapf(v, "error unpausing container %s", id))
|
||||
report.Errs = append(report.Errs, fmt.Errorf("error unpausing container %s: %w", id, v))
}
reports = append(reports, &report)
continue

@@ -179,19 +180,19 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, options entities.PodStopOptions) ([]*entities.PodStopReport, error) {
reports := []*entities.PodStopReport{}
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchPod)) {
return nil, err
}
for _, p := range pods {
report := entities.PodStopReport{Id: p.ID()}
errs, err := p.StopWithTimeout(ctx, false, options.Timeout)
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
report.Errs = []error{err}
continue
}
if len(errs) > 0 {
for id, v := range errs {
report.Errs = append(report.Errs, errors.Wrapf(v, "error stopping container %s", id))
report.Errs = append(report.Errs, fmt.Errorf("error stopping container %s: %w", id, v))
}
reports = append(reports, &report)
continue

@@ -210,14 +211,14 @@ func (ic *ContainerEngine) PodRestart(ctx context.Context, namesOrIds []string,
for _, p := range pods {
report := entities.PodRestartReport{Id: p.ID()}
errs, err := p.Restart(ctx)
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
report.Errs = []error{err}
reports = append(reports, &report)
continue
}
if len(errs) > 0 {
for id, v := range errs {
report.Errs = append(report.Errs, errors.Wrapf(v, "error restarting container %s", id))
report.Errs = append(report.Errs, fmt.Errorf("error restarting container %s: %w", id, v))
}
reports = append(reports, &report)
continue

@@ -237,14 +238,14 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
for _, p := range pods {
report := entities.PodStartReport{Id: p.ID()}
errs, err := p.Start(ctx)
if err != nil && errors.Cause(err) != define.ErrPodPartialFail {
if err != nil && !errors.Is(err, define.ErrPodPartialFail) {
report.Errs = []error{err}
reports = append(reports, &report)
continue
}
if len(errs) > 0 {
for id, v := range errs {
report.Errs = append(report.Errs, errors.Wrapf(v, "error starting container %s", id))
report.Errs = append(report.Errs, fmt.Errorf("error starting container %s: %w", id, v))
}
reports = append(reports, &report)
continue

@@ -256,7 +257,7 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op
func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, options entities.PodRmOptions) ([]*entities.PodRmReport, error) {
pods, err := getPodsByContext(options.All, options.Latest, namesOrIds, ic.Libpod)
if err != nil && !(options.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
if err != nil && !(options.Ignore && errors.Is(err, define.ErrNoSuchPod)) {
return nil, err
}
reports := make([]*entities.PodRmReport, 0, len(pods))

@@ -393,7 +394,7 @@ func (ic *ContainerEngine) PodTop(ctx context.Context, options entities.PodTopOp
pod, err = ic.Libpod.LookupPod(options.NameOrID)
}
if err != nil {
return nil, errors.Wrap(err, "unable to look up requested container")
return nil, fmt.Errorf("unable to look up requested container: %w", err)
}

// Run Top.

@@ -505,7 +506,7 @@ func (ic *ContainerEngine) PodInspect(ctx context.Context, options entities.PodI
pod, err = ic.Libpod.LookupPod(options.NameOrID)
}
if err != nil {
return nil, errors.Wrap(err, "unable to look up requested container")
return nil, fmt.Errorf("unable to look up requested container: %w", err)
}
inspect, err := pod.Inspect()
if err != nil {
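The hunks above show the two halves of the migration: errors.Wrapf(err, "...") becomes fmt.Errorf("...: %w", err), and errors.Cause(err) == define.ErrNoSuchPod becomes errors.Is(err, define.ErrNoSuchPod). A minimal stand-alone sketch of how the two fit together; the sentinel below is a stand-in, not Podman's define package:

package main

import (
	"errors"
	"fmt"
)

// errNoSuchPod stands in for a sentinel like define.ErrNoSuchPod.
var errNoSuchPod = errors.New("no such pod")

func lookupPod(name string) error {
	// Wrap with %w so callers can still match the sentinel.
	return fmt.Errorf("error looking up pod %s: %w", name, errNoSuchPod)
}

func main() {
	err := lookupPod("web")
	// errors.Is unwraps the %w chain, so the sentinel is still detectable.
	fmt.Println(errors.Is(err, errNoSuchPod)) // true
	fmt.Println(err)                          // error looking up pod web: no such pod
}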
@@ -2,13 +2,14 @@ package abi

import (
"context"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"strings"

"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/domain/utils"
"github.com/pkg/errors"
)

func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader io.Reader, options entities.SecretCreateOptions) (*entities.SecretCreateReport, error) {

@@ -60,11 +61,11 @@ func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string
for _, nameOrID := range nameOrIDs {
secret, err := manager.Lookup(nameOrID)
if err != nil {
if errors.Cause(err).Error() == "no such secret" {
if strings.Contains(err.Error(), "no such secret") {
errs = append(errs, err)
continue
} else {
return nil, nil, errors.Wrapf(err, "error inspecting secret %s", nameOrID)
return nil, nil, fmt.Errorf("error inspecting secret %s: %w", nameOrID, err)
}
}
report := &entities.SecretInfoReport{

@@ -141,7 +142,7 @@ func (ic *ContainerEngine) SecretRm(ctx context.Context, nameOrIDs []string, opt
}
for _, nameOrID := range toRemove {
deletedID, err := manager.Delete(nameOrID)
if err == nil || errors.Cause(err).Error() == "no such secret" {
if err == nil || strings.Contains(err.Error(), "no such secret") {
reports = append(reports, &entities.SecretRmReport{
Err: err,
ID: deletedID,
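The secrets hunks switch from errors.Cause(err).Error() == "no such secret" to strings.Contains(err.Error(), "no such secret"), presumably because no exported sentinel is available to match with errors.Is at this call site. A rough sketch of that text-based fallback; the lookup helper and error text are illustrative:

package main

import (
	"fmt"
	"strings"
)

// lookupSecret stands in for a call whose failure is only identifiable
// by its message text (no exported sentinel to use with errors.Is).
func lookupSecret(name string) error {
	return fmt.Errorf("inspecting secret %s: no such secret", name)
}

func main() {
	err := lookupSecret("db-password")
	// Substring matching still works after wrapping, since both %w and %v
	// embed the inner message in err.Error().
	if strings.Contains(err.Error(), "no such secret") {
		fmt.Println("treat as not-found, continue")
	}
}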
@@ -2,6 +2,7 @@ package abi

import (
"context"
"errors"
"fmt"
"net/url"
"os"

@@ -19,7 +20,6 @@ import (
"github.com/containers/podman/v4/utils"
"github.com/containers/storage"
"github.com/containers/storage/pkg/unshare"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)

@@ -99,7 +99,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
}
pausePidPath, err := util.GetRootlessPauseProcessPidPathGivenDir(tmpDir)
if err != nil {
return errors.Wrapf(err, "could not get pause process pid file path")
return fmt.Errorf("could not get pause process pid file path: %w", err)
}

became, ret, err := rootless.TryJoinPauseProcess(pausePidPath)

@@ -134,7 +134,7 @@ func (ic *ContainerEngine) SetupRootless(_ context.Context, noMoveProcess bool)
}
}
if err != nil {
logrus.Error(errors.Wrapf(err, "invalid internal status, try resetting the pause process with %q", os.Args[0]+" system migrate"))
logrus.Error(fmt.Errorf("invalid internal status, try resetting the pause process with %q: %w", os.Args[0]+" system migrate", err))
os.Exit(1)
}
if became {

@@ -271,22 +271,22 @@ func (ic *ContainerEngine) SystemDf(ctx context.Context, options entities.System
iid, _ := c.Image()
state, err := c.State()
if err != nil {
return nil, errors.Wrapf(err, "Failed to get state of container %s", c.ID())
return nil, fmt.Errorf("failed to get state of container %s: %w", c.ID(), err)
}
conSize, err := c.RootFsSize()
if err != nil {
if errors.Cause(err) == storage.ErrContainerUnknown {
logrus.Error(errors.Wrapf(err, "Failed to get root file system size of container %s", c.ID()))
if errors.Is(err, storage.ErrContainerUnknown) {
logrus.Error(fmt.Errorf("failed to get root file system size of container %s: %w", c.ID(), err))
} else {
return nil, errors.Wrapf(err, "Failed to get root file system size of container %s", c.ID())
return nil, fmt.Errorf("failed to get root file system size of container %s: %w", c.ID(), err)
}
}
rwsize, err := c.RWSize()
if err != nil {
if errors.Cause(err) == storage.ErrContainerUnknown {
logrus.Error(errors.Wrapf(err, "Failed to get read/write size of container %s", c.ID()))
if errors.Is(err, storage.ErrContainerUnknown) {
logrus.Error(fmt.Errorf("failed to get read/write size of container %s: %w", c.ID(), err))
} else {
return nil, errors.Wrapf(err, "Failed to get read/write size of container %s", c.ID())
return nil, fmt.Errorf("failed to get read/write size of container %s: %w", c.ID(), err)
}
}
report := entities.SystemDfContainerReport{
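In the SystemDf hunks the check moves from errors.Cause(err) == storage.ErrContainerUnknown to errors.Is(err, storage.ErrContainerUnknown). errors.Is keeps matching no matter how many layers of %w wrapping sit between the call site and the sentinel, which a plain == on the returned value cannot do. A small stand-alone illustration; the sentinel name is a stand-in for storage.ErrContainerUnknown:

package main

import (
	"errors"
	"fmt"
)

var errContainerUnknown = errors.New("container not known") // stand-in sentinel

func main() {
	// Two layers of wrapping, as an error might accumulate on its way up.
	inner := fmt.Errorf("reading layer: %w", errContainerUnknown)
	outer := fmt.Errorf("failed to get root file system size of container abc: %w", inner)

	fmt.Println(outer == errContainerUnknown)          // false: == sees only the outer value
	fmt.Println(errors.Is(outer, errContainerUnknown)) // true: Is unwraps every %w layer
}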
@@ -1,6 +1,7 @@
package terminal

import (
"errors"
"os"
"syscall"

@@ -8,7 +9,6 @@ import (
"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/libpod/shutdown"
"github.com/containers/podman/v4/pkg/signal"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -39,7 +39,7 @@ func ProxySignals(ctr *libpod.Container) {
}

if err := ctr.Kill(uint(s.(syscall.Signal))); err != nil {
if errors.Cause(err) == define.ErrCtrStateInvalid {
if errors.Is(err, define.ErrCtrStateInvalid) {
logrus.Infof("Ceasing signal forwarding to container %s as it has stopped", ctr.ID())
} else {
logrus.Errorf("forwarding signal %d to container %s: %v", s, ctr.ID(), err)
@@ -2,6 +2,8 @@ package abi

import (
"context"
"errors"
"fmt"

"github.com/containers/podman/v4/libpod"
"github.com/containers/podman/v4/libpod/define"

@@ -9,7 +11,6 @@ import (
"github.com/containers/podman/v4/pkg/domain/entities/reports"
"github.com/containers/podman/v4/pkg/domain/filters"
"github.com/containers/podman/v4/pkg/domain/infra/abi/parse"
"github.com/pkg/errors"
)

func (ic *ContainerEngine) VolumeCreate(ctx context.Context, opts entities.VolumeCreateOptions) (*entities.IDOrNameResponse, error) {

@@ -91,11 +92,11 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin
for _, v := range namesOrIds {
vol, err := ic.Libpod.LookupVolume(v)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchVolume {
errs = append(errs, errors.Errorf("no such volume %s", v))
if errors.Is(err, define.ErrNoSuchVolume) {
errs = append(errs, fmt.Errorf("no such volume %s", v))
continue
} else {
return nil, nil, errors.Wrapf(err, "error inspecting volume %s", v)
return nil, nil, fmt.Errorf("error inspecting volume %s: %w", v, err)
}
}
vols = append(vols, vol)
@@ -2,6 +2,7 @@ package tunnel

import (
"context"
"errors"
"fmt"
"io"
"os"

@@ -23,7 +24,6 @@ import (
"github.com/containers/podman/v4/pkg/specgen"
"github.com/containers/podman/v4/pkg/util"
"github.com/containers/storage/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -64,7 +64,7 @@ func (ic *ContainerEngine) ContainerPause(ctx context.Context, namesOrIds []stri
reports := make([]*entities.PauseUnpauseReport, 0, len(ctrs))
for _, c := range ctrs {
err := containers.Pause(ic.ClientCtx, c.ID, nil)
if err != nil && options.All && errors.Cause(err).Error() == define.ErrCtrStateInvalid.Error() {
if err != nil && options.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
logrus.Debugf("Container %s is not running", c.ID)
continue
}

@@ -81,7 +81,7 @@ func (ic *ContainerEngine) ContainerUnpause(ctx context.Context, namesOrIds []st
}
for _, c := range ctrs {
err := containers.Unpause(ic.ClientCtx, c.ID, nil)
if err != nil && options.All && errors.Cause(err).Error() == define.ErrCtrStateInvalid.Error() {
if err != nil && options.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
logrus.Debugf("Container %s is not paused", c.ID)
continue
}

@@ -111,11 +111,11 @@ func (ic *ContainerEngine) ContainerStop(ctx context.Context, namesOrIds []strin
}
if err = containers.Stop(ic.ClientCtx, c.ID, options); err != nil {
// These first two are considered non-fatal under the right conditions
if errors.Cause(err).Error() == define.ErrCtrStopped.Error() {
if strings.Contains(err.Error(), define.ErrCtrStopped.Error()) {
logrus.Debugf("Container %s is already stopped", c.ID)
reports = append(reports, &report)
continue
} else if opts.All && errors.Cause(err).Error() == define.ErrCtrStateInvalid.Error() {
} else if opts.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
logrus.Debugf("Container %s is not running, could not stop", c.ID)
reports = append(reports, &report)
continue

@@ -146,7 +146,7 @@ func (ic *ContainerEngine) ContainerKill(ctx context.Context, namesOrIds []strin
reports := make([]*entities.KillReport, 0, len(ctrs))
for _, c := range ctrs {
err := containers.Kill(ic.ClientCtx, c.ID, options)
if err != nil && opts.All && errors.Cause(err).Error() == define.ErrCtrStateInvalid.Error() {
if err != nil && opts.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
logrus.Debugf("Container %s is not running", c.ID)
continue
}
@@ -258,7 +258,7 @@ func (ic *ContainerEngine) ContainerInspect(ctx context.Context, namesOrIds []st
return nil, nil, err
}
if errModel.ResponseCode == 404 {
errs = append(errs, errors.Errorf("no such container %q", name))
errs = append(errs, fmt.Errorf("no such container %q", name))
continue
}
return nil, nil, err

@@ -291,7 +291,7 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrID string,
if len(opts.ImageName) > 0 {
ref, err := reference.Parse(opts.ImageName)
if err != nil {
return nil, errors.Wrapf(err, "error parsing reference %q", opts.ImageName)
return nil, fmt.Errorf("error parsing reference %q: %w", opts.ImageName, err)
}
if t, ok := ref.(reference.Tagged); ok {
tag = t.Tag()

@@ -300,7 +300,7 @@ func (ic *ContainerEngine) ContainerCommit(ctx context.Context, nameOrID string,
repo = r.Name()
}
if len(repo) < 1 {
return nil, errors.Errorf("invalid image name %q", opts.ImageName)
return nil, fmt.Errorf("invalid image name %q", opts.ImageName)
}
}
options := new(containers.CommitOptions).WithAuthor(opts.Author).WithChanges(opts.Changes).WithComment(opts.Message).WithSquash(opts.Squash)

@@ -502,7 +502,7 @@ func (ic *ContainerEngine) ContainerAttach(ctx context.Context, nameOrID string,
}
ctr := ctrs[0]
if ctr.State != define.ContainerStateRunning.String() {
return errors.Errorf("you can only attach to running containers")
return fmt.Errorf("you can only attach to running containers")
}
options := new(containers.AttachOptions).WithStream(true).WithDetachKeys(opts.DetachKeys)
return containers.Attach(ic.ClientCtx, nameOrID, opts.Stdin, opts.Stdout, opts.Stderr, nil, options)

@@ -695,7 +695,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
report.ExitCode = define.ExitCode(report.Err)
report.Err = err
reports = append(reports, &report)
return reports, errors.Wrapf(report.Err, "unable to start container %s", name)
return reports, fmt.Errorf("unable to start container %s: %w", name, report.Err)
}
if ctr.AutoRemove {
// Defer the removal, so we can return early if needed and

@@ -739,7 +739,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri
reports, err := containers.Remove(ic.ClientCtx, ctr.ID, rmOptions)
logIfRmError(ctr.ID, err, reports)
}
report.Err = errors.Wrapf(err, "unable to start container %q", name)
report.Err = fmt.Errorf("unable to start container %q: %w", name, err)
report.ExitCode = define.ExitCode(err)
reports = append(reports, &report)
continue

@@ -899,7 +899,7 @@ func (ic *ContainerEngine) ContainerInit(ctx context.Context, namesOrIds []strin
err := containers.ContainerInit(ic.ClientCtx, ctr.ID, nil)
// When using all, it is NOT considered an error if a container
// has already been init'd.
if err != nil && options.All && strings.Contains(errors.Cause(err).Error(), define.ErrCtrStateInvalid.Error()) {
if err != nil && options.All && strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) {
err = nil
}
reports = append(reports, &entities.ContainerInitReport{
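The tunnel code talks to the Podman service over its REST API, so libpod's sentinel errors arrive at the client only as message text; that is presumably why these hunks compare with strings.Contains(err.Error(), define.ErrCtrStateInvalid.Error()) rather than errors.Is. A sketch of the difference; the sentinel and helper are illustrative, not the real bindings:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// errCtrStateInvalid stands in for a sentinel such as define.ErrCtrStateInvalid.
var errCtrStateInvalid = errors.New("container state improper")

// remoteErr simulates an error that crossed an API boundary: the message
// text survives, but the original error value does not.
func remoteErr() error {
	wrapped := fmt.Errorf("cannot stop container: %w", errCtrStateInvalid)
	return errors.New(wrapped.Error()) // re-created from text on the client side
}

func main() {
	err := remoteErr()
	fmt.Println(errors.Is(err, errCtrStateInvalid))                        // false: identity was lost in transit
	fmt.Println(strings.Contains(err.Error(), errCtrStateInvalid.Error())) // true: text comparison still works
}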
@@ -2,12 +2,12 @@ package tunnel

import (
"context"
"errors"

"github.com/containers/podman/v4/libpod/define"
"github.com/containers/podman/v4/pkg/bindings/pods"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/util"
"github.com/pkg/errors"
)

func (ic *ContainerEngine) PodExists(ctx context.Context, nameOrID string) (*entities.BoolReport, error) {

@@ -97,7 +97,7 @@ func (ic *ContainerEngine) PodUnpause(ctx context.Context, namesOrIds []string,
func (ic *ContainerEngine) PodStop(ctx context.Context, namesOrIds []string, opts entities.PodStopOptions) ([]*entities.PodStopReport, error) {
timeout := -1
foundPods, err := getPodsByContext(ic.ClientCtx, opts.All, namesOrIds)
if err != nil && !(opts.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
if err != nil && !(opts.Ignore && errors.Is(err, define.ErrNoSuchPod)) {
return nil, err
}
if opts.Timeout != -1 {

@@ -164,7 +164,7 @@ func (ic *ContainerEngine) PodStart(ctx context.Context, namesOrIds []string, op

func (ic *ContainerEngine) PodRm(ctx context.Context, namesOrIds []string, opts entities.PodRmOptions) ([]*entities.PodRmReport, error) {
foundPods, err := getPodsByContext(ic.ClientCtx, opts.All, namesOrIds)
if err != nil && !(opts.Ignore && errors.Cause(err) == define.ErrNoSuchPod) {
if err != nil && !(opts.Ignore && errors.Is(err, define.ErrNoSuchPod)) {
return nil, err
}
reports := make([]*entities.PodRmReport, 0, len(foundPods))
@@ -3,12 +3,12 @@ package exec
import (
"context"
"encoding/json"
"errors"
"os"
"testing"
"time"

spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)

@@ -18,13 +18,14 @@ func TestRuntimeConfigFilter(t *testing.T) {
rootUint32 := uint32(0)
binUser := int(1)
for _, tt := range []struct {
name string
contextTimeout time.Duration
hooks []spec.Hook
input *spec.Spec
expected *spec.Spec
expectedHookError string
expectedRunError error
name string
contextTimeout time.Duration
hooks []spec.Hook
input *spec.Spec
expected *spec.Spec
expectedHookError string
expectedRunError error
expectedRunErrorString string
}{
{
name: "no-op",

@@ -231,7 +232,8 @@ func TestRuntimeConfigFilter(t *testing.T) {
Path: "rootfs",
},
},
expectedRunError: unexpectedEndOfJSONInput,
expectedRunError: unexpectedEndOfJSONInput,
expectedRunErrorString: unexpectedEndOfJSONInput.Error(),
},
} {
test := tt

@@ -243,7 +245,13 @@ func TestRuntimeConfigFilter(t *testing.T) {
defer cancel()
}
hookErr, err := RuntimeConfigFilter(ctx, test.hooks, test.input, DefaultPostKillTimeout)
assert.Equal(t, test.expectedRunError, errors.Cause(err))
if test.expectedRunError != nil {
if test.expectedRunErrorString != "" {
assert.Contains(t, err.Error(), test.expectedRunErrorString)
} else {
assert.True(t, errors.Is(err, test.expectedRunError))
}
}
if test.expectedHookError == "" {
if hookErr != nil {
t.Fatal(hookErr)
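The test table now carries both expectedRunError and expectedRunErrorString, asserting with errors.Is when a comparable error value exists and falling back to a substring check when only the message is known. A stripped-down sketch of the same two assertion styles; the names are illustrative, and testify's assert package is assumed:

package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

var errSentinel = errors.New("boom") // stand-in for an exported sentinel

func doWork() error {
	return fmt.Errorf("running hook: %w", errSentinel)
}

func TestDoWork(t *testing.T) {
	err := doWork()

	// When a sentinel is available, match identity through the wrap chain.
	assert.True(t, errors.Is(err, errSentinel))

	// When only the message is known (for example an unexported error from
	// another package), fall back to a substring assertion.
	assert.Contains(t, err.Error(), "boom")
}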
@@ -8,6 +8,7 @@ import (
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/fs"
"io/ioutil"

@@ -30,7 +31,6 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/digitalocean/go-qemu/qmp"
"github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)

@@ -434,12 +434,12 @@ func (v *MachineVM) Set(_ string, opts machine.SetOptions) ([]error, error) {
if v.Name != machine.DefaultMachineName {
suffix = " " + v.Name
}
return setErrors, errors.Errorf("cannot change settings while the vm is running, run 'podman machine stop%s' first", suffix)
return setErrors, fmt.Errorf("cannot change settings while the vm is running, run 'podman machine stop%s' first", suffix)
}

if opts.Rootful != nil && v.Rootful != *opts.Rootful {
if err := v.setRootful(*opts.Rootful); err != nil {
setErrors = append(setErrors, errors.Wrapf(err, "failed to set rootful option"))
setErrors = append(setErrors, fmt.Errorf("failed to set rootful option: %w", err))
} else {
v.Rootful = *opts.Rootful
}

@@ -457,7 +457,7 @@ func (v *MachineVM) Set(_ string, opts machine.SetOptions) ([]error, error) {

if opts.DiskSize != nil && v.DiskSize != *opts.DiskSize {
if err := v.resizeDisk(*opts.DiskSize, v.DiskSize); err != nil {
setErrors = append(setErrors, errors.Wrapf(err, "failed to resize disk"))
setErrors = append(setErrors, fmt.Errorf("failed to resize disk: %w", err))
} else {
v.DiskSize = *opts.DiskSize
}

@@ -514,7 +514,7 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {

forwardSock, forwardState, err := v.startHostNetworking()
if err != nil {
return errors.Errorf("unable to start host networking: %q", err)
return fmt.Errorf("unable to start host networking: %q", err)
}

rtPath, err := getRuntimeDir()

@@ -593,7 +593,7 @@ func (v *MachineVM) Start(name string, _ machine.StartOptions) error {
}
_, err = os.StartProcess(cmd[0], cmd, attr)
if err != nil {
return errors.Wrapf(err, "unable to execute %q", cmd)
return fmt.Errorf("unable to execute %q: %w", cmd, err)
}
}
fmt.Println("Waiting for VM ...")

@@ -700,7 +700,7 @@ func (v *MachineVM) checkStatus(monitor *qmp.SocketMonitor) (machine.Status, err
}
b, err := monitor.Run(input)
if err != nil {
if errors.Cause(err) == os.ErrNotExist {
if errors.Is(err, os.ErrNotExist) {
return machine.Stopped, nil
}
return "", err
@@ -879,7 +879,7 @@ func (v *MachineVM) Remove(_ string, opts machine.RemoveOptions) (string, func()
}
if state == machine.Running {
if !opts.Force {
return "", nil, errors.Errorf("running vm %q cannot be destroyed", v.Name)
return "", nil, fmt.Errorf("running vm %q cannot be destroyed", v.Name)
}
err := v.Stop(v.Name, machine.StopOptions{})
if err != nil {

@@ -1001,7 +1001,7 @@ func (v *MachineVM) SSH(_ string, opts machine.SSHOptions) error {
return err
}
if state != machine.Running {
return errors.Errorf("vm %q is not running", v.Name)
return fmt.Errorf("vm %q is not running", v.Name)
}

username := opts.Username

@@ -1165,7 +1165,7 @@ func (p *Provider) IsValidVMName(name string) (bool, error) {
func (p *Provider) CheckExclusiveActiveVM() (bool, string, error) {
vms, err := getVMInfos()
if err != nil {
return false, "", errors.Wrap(err, "error checking VM active")
return false, "", fmt.Errorf("error checking VM active: %w", err)
}
for _, vm := range vms {
if vm.Running || vm.Starting {

@@ -1217,7 +1217,7 @@ func (v *MachineVM) startHostNetworking() (string, apiForwardingState, error) {
fmt.Println(cmd)
}
_, err = os.StartProcess(cmd[0], cmd, attr)
return forwardSock, state, errors.Wrapf(err, "unable to execute: %q", cmd)
return forwardSock, state, fmt.Errorf("unable to execute: %q: %w", cmd, err)
}

func (v *MachineVM) setupAPIForwarding(cmd []string) ([]string, string, apiForwardingState) {

@@ -1486,7 +1486,7 @@ func (v *MachineVM) update() error {
b, err := v.ConfigPath.Read()
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return errors.Wrap(machine.ErrNoSuchVM, v.Name)
return fmt.Errorf("%v: %w", v.Name, machine.ErrNoSuchVM)
}
return err
}

@@ -1562,7 +1562,7 @@ func (v *MachineVM) resizeDisk(diskSize uint64, oldSize uint64) error {
// only if the virtualdisk size is less than
// the given disk size
if diskSize < oldSize {
return errors.Errorf("new disk size must be larger than current disk size: %vGB", oldSize)
return fmt.Errorf("new disk size must be larger than current disk size: %vGB", oldSize)
}

// Find the qemu executable

@@ -1578,7 +1578,7 @@ func (v *MachineVM) resizeDisk(diskSize uint64, oldSize uint64) error {
resize.Stdout = os.Stdout
resize.Stderr = os.Stderr
if err := resize.Run(); err != nil {
return errors.Errorf("resizing image: %q", err)
return fmt.Errorf("resizing image: %q", err)
}

return nil
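The update() hunk wraps a sentinel rather than a caught error: errors.Wrap(machine.ErrNoSuchVM, v.Name) becomes fmt.Errorf("%v: %w", v.Name, machine.ErrNoSuchVM), which keeps the sentinel matchable with errors.Is. By contrast, sites that format an error with %q or %v (for example "unable to start host networking: %q") keep only the text and drop the chain. A small sketch of the difference; the sentinel is a stand-in for machine.ErrNoSuchVM:

package main

import (
	"errors"
	"fmt"
)

var errNoSuchVM = errors.New("VM does not exist") // stand-in for machine.ErrNoSuchVM

func main() {
	name := "podman-machine-default"

	// Prefix a sentinel with context; %w keeps it matchable.
	wrapped := fmt.Errorf("%v: %w", name, errNoSuchVM)
	fmt.Println(errors.Is(wrapped, errNoSuchVM)) // true

	// %q (or %v) on an error only copies its text; the chain is gone.
	flattened := fmt.Errorf("unable to start host networking: %q", errNoSuchVM)
	fmt.Println(errors.Is(flattened, errNoSuchVM)) // false
}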
23
pkg/ps/ps.go
@@ -1,6 +1,8 @@
package ps

import (
"errors"
"fmt"
"os"
"path/filepath"
"regexp"

@@ -16,7 +18,6 @@ import (
psdefine "github.com/containers/podman/v4/pkg/ps/define"
"github.com/containers/storage"
"github.com/containers/storage/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -65,7 +66,7 @@ func GetContainerLists(runtime *libpod.Runtime, options entities.ContainerListOp
for _, con := range cons {
listCon, err := ListContainerBatch(runtime, con, options)
switch {
case errors.Cause(err) == define.ErrNoSuchCtr:
case errors.Is(err, define.ErrNoSuchCtr):
continue
case err != nil:
return nil, err

@@ -108,7 +109,7 @@ func GetExternalContainerLists(runtime *libpod.Runtime) ([]entities.ListContaine
for _, con := range externCons {
listCon, err := ListStorageContainer(runtime, con)
switch {
case errors.Cause(err) == types.ErrLoadError:
case errors.Is(err, types.ErrLoadError):
continue
case err != nil:
return nil, err

@@ -138,19 +139,19 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities
batchErr := ctr.Batch(func(c *libpod.Container) error {
if opts.Sync {
if err := c.Sync(); err != nil {
return errors.Wrapf(err, "unable to update container state from OCI runtime")
return fmt.Errorf("unable to update container state from OCI runtime: %w", err)
}
}

conConfig = c.Config()
conState, err = c.State()
if err != nil {
return errors.Wrapf(err, "unable to obtain container state")
return fmt.Errorf("unable to obtain container state: %w", err)
}

exitCode, exited, err = c.ExitCode()
if err != nil {
return errors.Wrapf(err, "unable to obtain container exit code")
return fmt.Errorf("unable to obtain container exit code: %w", err)
}
startedTime, err = c.StartedTime()
if err != nil {

@@ -163,7 +164,7 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities

pid, err = c.PID()
if err != nil {
return errors.Wrapf(err, "unable to obtain container pid")
return fmt.Errorf("unable to obtain container pid: %w", err)
}

if !opts.Size && !opts.Namespace {

@@ -237,8 +238,8 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities
if opts.Pod && len(conConfig.Pod) > 0 {
podName, err := rt.GetName(conConfig.Pod)
if err != nil {
if errors.Cause(err) == define.ErrNoSuchCtr {
return entities.ListContainer{}, errors.Wrapf(define.ErrNoSuchPod, "could not find container %s pod (id %s) in state", conConfig.ID, conConfig.Pod)
if errors.Is(err, define.ErrNoSuchCtr) {
return entities.ListContainer{}, fmt.Errorf("could not find container %s pod (id %s) in state: %w", conConfig.ID, conConfig.Pod, define.ErrNoSuchPod)
}
return entities.ListContainer{}, err
}

@@ -282,7 +283,7 @@ func ListStorageContainer(rt *libpod.Runtime, ctr storage.Container) (entities.L

buildahCtr, err := rt.IsBuildahContainer(ctr.ID)
if err != nil {
return ps, errors.Wrapf(err, "error determining buildah container for container %s", ctr.ID)
return ps, fmt.Errorf("error determining buildah container for container %s: %w", ctr.ID, err)
}

if buildahCtr {

@@ -311,7 +312,7 @@ func ListStorageContainer(rt *libpod.Runtime, ctr storage.Container) (entities.L
func getNamespaceInfo(path string) (string, error) {
val, err := os.Readlink(path)
if err != nil {
return "", errors.Wrapf(err, "error getting info from %q", path)
return "", fmt.Errorf("error getting info from %q: %w", path, err)
}
return getStrFromSquareBrackets(val), nil
}
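GetContainerLists and GetExternalContainerLists now use errors.Is inside a switch to skip containers that disappear between listing and inspection. A compact stand-alone sketch of that control flow; the sentinel and lister are illustrative:

package main

import (
	"errors"
	"fmt"
)

var errNoSuchCtr = errors.New("no such container") // stand-in sentinel

func listOne(id string) (string, error) {
	if id == "gone" {
		return "", fmt.Errorf("inspecting %s: %w", id, errNoSuchCtr)
	}
	return "entry-" + id, nil
}

func main() {
	var out []string
	for _, id := range []string{"a", "gone", "b"} {
		entry, err := listOne(id)
		switch {
		case errors.Is(err, errNoSuchCtr):
			// Container disappeared between listing and inspecting it: skip it.
			continue
		case err != nil:
			panic(err)
		}
		out = append(out, entry)
	}
	fmt.Println(out) // [entry-a entry-b]
}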
@@ -560,7 +560,7 @@ var _ = Describe("Podman create", func() {
session = podmanTest.Podman([]string{"create", "--umask", "9999", "--name", "bad", ALPINE})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
Expect(session.ErrorToString()).To(ContainSubstring("Invalid umask"))
Expect(session.ErrorToString()).To(ContainSubstring("invalid umask"))
})

It("create container in pod with IP should fail", func() {

@@ -1659,7 +1659,7 @@ USER mail`, BB)
session = podmanTest.Podman([]string{"run", "--umask", "9999", "--rm", ALPINE, "sh", "-c", "umask"})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
Expect(session.ErrorToString()).To(ContainSubstring("Invalid umask"))
Expect(session.ErrorToString()).To(ContainSubstring("invalid umask"))
})

It("podman run makes workdir from image", func() {