Merge pull request #1364 from mtrmac/warnings

Another bunch of warning cleanups
This commit is contained in:
Daniel J Walsh 2022-10-17 08:38:44 -04:00 committed by GitHub
commit aba77e1c98
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
71 changed files with 1027 additions and 970 deletions

View File

@ -1,7 +1,6 @@
package main
import (
"encoding/json"
"fmt"
"io"
"os"
@ -14,11 +13,10 @@ var (
paramContainerDataFile = ""
)
func container(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func container(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
images, err := m.Images()
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
matches := []*storage.Container{}
for _, arg := range args {
@ -27,7 +25,9 @@ func container(flags *mflag.FlagSet, action string, m storage.Store, args []stri
}
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(matches)
if _, err := outputJSON(matches); err != nil {
return 1, err
}
} else {
for _, container := range matches {
fmt.Printf("ID: %s\n", container.ID)
@ -56,140 +56,125 @@ func container(flags *mflag.FlagSet, action string, m storage.Store, args []stri
}
}
if len(matches) != len(args) {
return 1
return 1, nil
}
return 0
return 0, nil
}
func listContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func listContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
container, err := m.Container(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
d, err := m.ListContainerBigData(container.ID)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(d)
} else {
for _, name := range d {
fmt.Printf("%s\n", name)
}
return outputJSON(d)
}
return 0
for _, name := range d {
fmt.Printf("%s\n", name)
}
return 0, nil
}
func getContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func getContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
container, err := m.Container(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
output := os.Stdout
if paramContainerDataFile != "" {
f, err := os.Create(paramContainerDataFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
output = f
}
b, err := m.ContainerBigData(container.ID, args[1])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if _, err := output.Write(b); err != nil {
return 1, err
}
output.Write(b)
output.Close()
return 0
return 0, nil
}
func getContainerBigDataSize(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func getContainerBigDataSize(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
container, err := m.Container(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
size, err := m.ContainerBigDataSize(container.ID, args[1])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
fmt.Fprintf(os.Stdout, "%d\n", size)
return 0
return 0, nil
}
func getContainerBigDataDigest(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func getContainerBigDataDigest(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
container, err := m.Container(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
d, err := m.ContainerBigDataDigest(container.ID, args[1])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if d.Validate() != nil {
fmt.Fprintf(os.Stderr, "%v\n", d.Validate())
return 1
if err := d.Validate(); err != nil {
return 1, err
}
fmt.Fprintf(os.Stdout, "%s\n", d.String())
return 0
return 0, nil
}
func setContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func setContainerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
container, err := m.Container(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
input := os.Stdin
if paramContainerDataFile != "" {
f, err := os.Open(paramContainerDataFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
input = f
}
b, err := io.ReadAll(input)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
err = m.SetContainerBigData(container.ID, args[1], b)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
return 0
return 0, nil
}
func getContainerDir(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func getContainerDir(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
path, err := m.ContainerDirectory(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
fmt.Printf("%s\n", path)
return 0
return 0, nil
}
func getContainerRunDir(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func getContainerRunDir(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
path, err := m.ContainerRunDirectory(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
fmt.Printf("%s\n", path)
return 0
return 0, nil
}
func containerParentOwners(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func containerParentOwners(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
matched := []*storage.Container{}
for _, arg := range args {
if container, err := m.Container(arg); err == nil {
@ -199,8 +184,7 @@ func containerParentOwners(flags *mflag.FlagSet, action string, m storage.Store,
for _, container := range matched {
uids, gids, err := m.ContainerParentOwners(container.ID)
if err != nil {
fmt.Fprintf(os.Stderr, "ContainerParentOwner: %+v\n", err)
return 1
return 1, fmt.Errorf("ContainerParentOwner: %+v", err)
}
if jsonOutput {
mappings := struct {
@ -212,7 +196,9 @@ func containerParentOwners(flags *mflag.FlagSet, action string, m storage.Store,
UIDs: uids,
GIDs: gids,
}
json.NewEncoder(os.Stdout).Encode(mappings)
if _, err := outputJSON(mappings); err != nil {
return 1, err
}
} else {
fmt.Printf("ID: %s\n", container.ID)
fmt.Printf("UIDs: %v\n", uids)
@ -220,9 +206,9 @@ func containerParentOwners(flags *mflag.FlagSet, action string, m storage.Store,
}
}
if len(matched) != len(args) {
return 1
return 1, nil
}
return 0
return 0, nil
}
func init() {

View File

@ -1,34 +1,30 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/containers/storage"
"github.com/containers/storage/pkg/mflag"
)
func containers(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func containers(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
containers, err := m.Containers()
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(containers)
} else {
for _, container := range containers {
fmt.Printf("%s\n", container.ID)
for _, name := range container.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, name := range container.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
return outputJSON(containers)
}
for _, container := range containers {
fmt.Printf("%s\n", container.ID)
for _, name := range container.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, name := range container.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
}
return 0
return 0, nil
}
func init() {

View File

@ -17,11 +17,11 @@ var (
chownOptions = ""
)
func copyContent(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func copyContent(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
var untarIDMappings *idtools.IDMappings
var chownOpts *idtools.IDPair
if len(args) < 1 {
return 1
return 1, nil
}
if len(chownOptions) > 0 {
chownParts := strings.SplitN(chownOptions, ":", 2)
@ -30,13 +30,11 @@ func copyContent(flags *mflag.FlagSet, action string, m storage.Store, args []st
}
uid, err := strconv.ParseUint(chownParts[0], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "error %q as a numeric UID: %v", chownParts[0], err)
return 1
return 1, fmt.Errorf("error %q as a numeric UID: %v", chownParts[0], err)
}
gid, err := strconv.ParseUint(chownParts[1], 10, 32)
if err != nil {
fmt.Fprintf(os.Stderr, "error %q as a numeric GID: %v", chownParts[1], err)
return 1
return 1, fmt.Errorf("error %q as a numeric GID: %v", chownParts[1], err)
}
chownOpts = &idtools.IDPair{UID: int(uid), GID: int(gid)}
}
@ -44,22 +42,24 @@ func copyContent(flags *mflag.FlagSet, action string, m storage.Store, args []st
if strings.Contains(target, ":") {
targetParts := strings.SplitN(target, ":", 2)
if len(targetParts) != 2 {
fmt.Fprintf(os.Stderr, "error parsing target location %q: only one part\n", target)
return 1
return 1, fmt.Errorf("error parsing target location %q: only one part", target)
}
targetLayer, err := m.Layer(targetParts[0])
if err != nil {
fmt.Fprintf(os.Stderr, "error finding layer %q: %+v\n", targetParts[0], err)
return 1
return 1, fmt.Errorf("error finding layer %q: %+v", targetParts[0], err)
}
untarIDMappings = idtools.NewIDMappingsFromMaps(targetLayer.UIDMap, targetLayer.GIDMap)
targetMount, err := m.Mount(targetLayer.ID, targetLayer.MountLabel)
if err != nil {
fmt.Fprintf(os.Stderr, "error mounting layer %q: %+v\n", targetLayer.ID, err)
return 1
return 1, fmt.Errorf("error mounting layer %q: %+v", targetLayer.ID, err)
}
target = filepath.Join(targetMount, targetParts[1])
defer m.Unmount(targetLayer.ID, false)
defer func() {
if _, err := m.Unmount(targetLayer.ID, false); err != nil {
fmt.Fprintf(os.Stderr, "error unmounting layer %q: %+v\n", targetLayer.ID, err)
// Does not change the exit code
}
}()
}
args = args[:len(args)-1]
for _, srcSpec := range args {
@ -68,30 +68,31 @@ func copyContent(flags *mflag.FlagSet, action string, m storage.Store, args []st
if strings.Contains(source, ":") {
sourceParts := strings.SplitN(source, ":", 2)
if len(sourceParts) != 2 {
fmt.Fprintf(os.Stderr, "error parsing source location %q: only one part\n", source)
return 1
return 1, fmt.Errorf("error parsing source location %q: only one part", source)
}
sourceLayer, err := m.Layer(sourceParts[0])
if err != nil {
fmt.Fprintf(os.Stderr, "error finding layer %q: %+v\n", sourceParts[0], err)
return 1
return 1, fmt.Errorf("error finding layer %q: %+v", sourceParts[0], err)
}
tarIDMappings = idtools.NewIDMappingsFromMaps(sourceLayer.UIDMap, sourceLayer.GIDMap)
sourceMount, err := m.Mount(sourceLayer.ID, sourceLayer.MountLabel)
if err != nil {
fmt.Fprintf(os.Stderr, "error mounting layer %q: %+v\n", sourceLayer.ID, err)
return 1
return 1, fmt.Errorf("error mounting layer %q: %+v", sourceLayer.ID, err)
}
source = filepath.Join(sourceMount, sourceParts[1])
defer m.Unmount(sourceLayer.ID, false)
defer func() {
if _, err := m.Unmount(sourceLayer.ID, false); err != nil {
fmt.Fprintf(os.Stderr, "error unmounting layer %q: %+v\n", sourceLayer.ID, err)
// Does not change the exit code
}
}()
}
archiver := chrootarchive.NewArchiverWithChown(tarIDMappings, chownOpts, untarIDMappings)
if err := archiver.CopyWithTar(source, target); err != nil {
fmt.Fprintf(os.Stderr, "error copying %q to %q: %+v\n", source, target, err)
return 1
return 1, fmt.Errorf("error copying %q to %q: %+v", source, target, err)
}
}
return 0
return 0, nil
}
func init() {

View File

@ -1,7 +1,6 @@
package main
import (
"encoding/json"
"fmt"
"io"
"os"
@ -72,34 +71,31 @@ func paramIDMapping() (*types.IDMappingOptions, error) {
return &options, nil
}
func createLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func createLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
parent := ""
if len(args) > 0 {
parent = args[0]
}
mappings, err := paramIDMapping()
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
options := &storage.LayerOptions{IDMappingOptions: *mappings}
layer, err := m.CreateLayer(paramID, parent, paramNames, paramMountLabel, !paramCreateRO, options)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(layer)
} else {
fmt.Printf("%s\n", layer.ID)
for _, name := range layer.Names {
fmt.Printf("\t%s\n", name)
}
return outputJSON(layer)
}
return 0
fmt.Printf("%s\n", layer.ID)
for _, name := range layer.Names {
fmt.Printf("\t%s\n", name)
}
return 0, nil
}
func importLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func importLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
parent := ""
if len(args) > 0 {
parent = args[0]
@ -108,45 +104,39 @@ func importLayer(flags *mflag.FlagSet, action string, m storage.Store, args []st
if applyDiffFile != "" {
f, err := os.Open(applyDiffFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
diffStream = f
defer f.Close()
}
mappings, err := paramIDMapping()
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
options := &storage.LayerOptions{IDMappingOptions: *mappings}
layer, _, err := m.PutLayer(paramID, parent, paramNames, paramMountLabel, !paramCreateRO, options, diffStream)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(layer)
} else {
fmt.Printf("%s\n", layer.ID)
for _, name := range layer.Names {
fmt.Printf("\t%s\n", name)
}
return outputJSON(layer)
}
return 0
fmt.Printf("%s\n", layer.ID)
for _, name := range layer.Names {
fmt.Printf("\t%s\n", name)
}
return 0, nil
}
func createImage(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func createImage(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if paramMetadataFile != "" {
f, err := os.Open(paramMetadataFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
b, err := io.ReadAll(f)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
paramMetadata = string(b)
}
@ -159,55 +149,48 @@ func createImage(flags *mflag.FlagSet, action string, m storage.Store, args []st
}
image, err := m.CreateImage(paramID, paramNames, layer, paramMetadata, imageOptions)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(image)
} else {
fmt.Printf("%s\n", image.ID)
for _, name := range image.Names {
fmt.Printf("\t%s\n", name)
}
return outputJSON(image)
}
return 0
fmt.Printf("%s\n", image.ID)
for _, name := range image.Names {
fmt.Printf("\t%s\n", name)
}
return 0, nil
}
func createContainer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func createContainer(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if paramMetadataFile != "" {
f, err := os.Open(paramMetadataFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
b, err := io.ReadAll(f)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
paramMetadata = string(b)
}
mappings, err := paramIDMapping()
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
options := &storage.ContainerOptions{IDMappingOptions: *mappings, Volatile: paramVolatile}
image := args[0]
container, err := m.CreateContainer(paramID, paramNames, image, paramLayer, paramMetadata, options)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(container)
} else {
fmt.Printf("%s\n", container.ID)
for _, name := range container.Names {
fmt.Printf("\t%s", name)
}
return outputJSON(container)
}
return 0
fmt.Printf("%s\n", container.ID)
for _, name := range container.Names {
fmt.Printf("\t%s", name)
}
return 0, nil
}
func init() {

View File

@ -1,7 +1,6 @@
package main
import (
"encoding/json"
"fmt"
"os"
@ -11,9 +10,9 @@ import (
var testDeleteImage = false
func deleteThing(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func deleteThing(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
deleted := make(map[string]string)
for _, what := range args {
@ -25,7 +24,9 @@ func deleteThing(flags *mflag.FlagSet, action string, m storage.Store, args []st
}
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(deleted)
if _, err := outputJSON(deleted); err != nil {
return 1, err
}
} else {
for what, err := range deleted {
if err != "" {
@ -35,15 +36,15 @@ func deleteThing(flags *mflag.FlagSet, action string, m storage.Store, args []st
}
for _, err := range deleted {
if err != "" {
return 1
return 1, nil
}
}
return 0
return 0, nil
}
func deleteLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func deleteLayer(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
deleted := make(map[string]string)
for _, what := range args {
@ -55,7 +56,9 @@ func deleteLayer(flags *mflag.FlagSet, action string, m storage.Store, args []st
}
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(deleted)
if _, err := outputJSON(deleted); err != nil {
return 1, err
}
} else {
for what, err := range deleted {
if err != "" {
@ -65,10 +68,10 @@ func deleteLayer(flags *mflag.FlagSet, action string, m storage.Store, args []st
}
for _, err := range deleted {
if err != "" {
return 1
return 1, nil
}
}
return 0
return 0, nil
}
type deletedImage struct {
@ -76,9 +79,9 @@ type deletedImage struct {
Error string `json:"error,omitempty"`
}
func deleteImage(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func deleteImage(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
deleted := make(map[string]deletedImage)
for _, what := range args {
@ -93,7 +96,9 @@ func deleteImage(flags *mflag.FlagSet, action string, m storage.Store, args []st
}
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(deleted)
if _, err := outputJSON(deleted); err != nil {
return 1, err
}
} else {
for what, record := range deleted {
if record.Error != "" {
@ -107,15 +112,15 @@ func deleteImage(flags *mflag.FlagSet, action string, m storage.Store, args []st
}
for _, record := range deleted {
if record.Error != "" {
return 1
return 1, nil
}
}
return 0
return 0, nil
}
func deleteContainer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func deleteContainer(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
deleted := make(map[string]string)
for _, what := range args {
@ -127,7 +132,9 @@ func deleteContainer(flags *mflag.FlagSet, action string, m storage.Store, args
}
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(deleted)
if _, err := outputJSON(deleted); err != nil {
return 1, err
}
} else {
for what, err := range deleted {
if err != "" {
@ -137,10 +144,10 @@ func deleteContainer(flags *mflag.FlagSet, action string, m storage.Store, args
}
for _, err := range deleted {
if err != "" {
return 1
return 1, nil
}
}
return 0
return 0, nil
}
func init() {

View File

@ -1,7 +1,6 @@
package main
import (
"encoding/json"
"fmt"
"io"
"os"
@ -20,9 +19,9 @@ var (
diffXz = false
)
func changes(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func changes(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
to := args[0]
from := ""
@ -31,31 +30,29 @@ func changes(flags *mflag.FlagSet, action string, m storage.Store, args []string
}
changes, err := m.Changes(from, to)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(changes)
} else {
for _, change := range changes {
what := "?"
switch change.Kind {
case archive.ChangeAdd:
what = "Add"
case archive.ChangeModify:
what = "Modify"
case archive.ChangeDelete:
what = "Delete"
}
fmt.Printf("%s %q\n", what, change.Path)
}
return outputJSON(changes)
}
return 0
for _, change := range changes {
what := "?"
switch change.Kind {
case archive.ChangeAdd:
what = "Add"
case archive.ChangeModify:
what = "Modify"
case archive.ChangeDelete:
what = "Delete"
}
fmt.Printf("%s %q\n", what, change.Path)
}
return 0, nil
}
func diff(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func diff(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
to := args[0]
from := ""
@ -66,8 +63,7 @@ func diff(flags *mflag.FlagSet, action string, m storage.Store, args []string) i
if diffFile != "" {
f, err := os.Create(diffFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
diffStream = f
defer f.Close()
@ -90,43 +86,39 @@ func diff(flags *mflag.FlagSet, action string, m storage.Store, args []string) i
reader, err := m.Diff(from, to, &options)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
_, err = io.Copy(diffStream, reader)
reader.Close()
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
return 0
return 0, nil
}
func applyDiff(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func applyDiff(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
diffStream := io.Reader(os.Stdin)
if applyDiffFile != "" {
f, err := os.Open(applyDiffFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
diffStream = f
defer f.Close()
}
_, err := m.ApplyDiff(args[0], diffStream)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
return 0
return 0, nil
}
func diffSize(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func diffSize(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
to := args[0]
from := ""
@ -135,11 +127,10 @@ func diffSize(flags *mflag.FlagSet, action string, m storage.Store, args []strin
}
n, err := m.DiffSize(from, to)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
fmt.Printf("%d\n", n)
return 0
return 0, nil
}
func init() {

View File

@ -1,9 +1,7 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/containers/storage"
"github.com/containers/storage/pkg/mflag"
@ -16,9 +14,9 @@ var (
existQuiet = false
)
func exist(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func exist(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
anyMissing := false
existDict := make(map[string]bool)
@ -45,7 +43,9 @@ func exist(flags *mflag.FlagSet, action string, m storage.Store, args []string)
}
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(existDict)
if _, err := outputJSON(existDict); err != nil {
return 1, err
}
} else {
if !existQuiet {
for what, exists := range existDict {
@ -54,9 +54,9 @@ func exist(flags *mflag.FlagSet, action string, m storage.Store, args []string)
}
}
if anyMissing {
return 1
return 1, nil
}
return 0
return 0, nil
}
func init() {

View File

@ -1,7 +1,6 @@
package main
import (
"encoding/json"
"fmt"
"io"
"os"
@ -15,7 +14,7 @@ var (
paramImageDataFile = ""
)
func image(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func image(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
matched := []*storage.Image{}
for _, arg := range args {
if image, err := m.Image(arg); err == nil {
@ -23,7 +22,9 @@ func image(flags *mflag.FlagSet, action string, m storage.Store, args []string)
}
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(matched)
if _, err := outputJSON(matched); err != nil {
return 1, err
}
} else {
for _, image := range matched {
fmt.Printf("ID: %s\n", image.ID)
@ -52,121 +53,108 @@ func image(flags *mflag.FlagSet, action string, m storage.Store, args []string)
}
}
if len(matched) != len(args) {
return 1
return 1, nil
}
return 0
return 0, nil
}
func listImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func listImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
image, err := m.Image(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
d, err := m.ListImageBigData(image.ID)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(d)
} else {
for _, name := range d {
fmt.Printf("%s\n", name)
}
return outputJSON(d)
}
return 0
for _, name := range d {
fmt.Printf("%s\n", name)
}
return 0, nil
}
func getImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func getImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
image, err := m.Image(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
output := os.Stdout
if paramImageDataFile != "" {
f, err := os.Create(paramImageDataFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
output = f
}
b, err := m.ImageBigData(image.ID, args[1])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if _, err := output.Write(b); err != nil {
return 1, err
}
output.Write(b)
output.Close()
return 0
return 0, nil
}
func wrongManifestDigest(b []byte) (digest.Digest, error) {
return digest.Canonical.FromBytes(b), nil
}
func getImageBigDataSize(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func getImageBigDataSize(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
image, err := m.Image(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
size, err := m.ImageBigDataSize(image.ID, args[1])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
fmt.Fprintf(os.Stdout, "%d\n", size)
return 0
return 0, nil
}
func getImageBigDataDigest(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func getImageBigDataDigest(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
image, err := m.Image(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
d, err := m.ImageBigDataDigest(image.ID, args[1])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if d.Validate() != nil {
fmt.Fprintf(os.Stderr, "%v\n", d.Validate())
return 1
if err := d.Validate(); err != nil {
return 1, err
}
fmt.Fprintf(os.Stdout, "%s\n", d.String())
return 0
return 0, nil
}
func setImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func setImageBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
image, err := m.Image(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
input := os.Stdin
if paramImageDataFile != "" {
f, err := os.Open(paramImageDataFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
input = f
}
b, err := io.ReadAll(input)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
err = m.SetImageBigData(image.ID, args[1], b, wrongManifestDigest)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
return 0
return 0, nil
}
func init() {

View File

@ -1,9 +1,7 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/containers/storage"
"github.com/containers/storage/pkg/mflag"
@ -14,69 +12,64 @@ var (
imagesQuiet = false
)
func images(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func images(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
images, err := m.Images()
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(images)
} else {
for _, image := range images {
fmt.Printf("%s\n", image.ID)
if imagesQuiet {
continue
}
for _, name := range image.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, digest := range image.Digests {
fmt.Printf("\tdigest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
return outputJSON(images)
}
for _, image := range images {
fmt.Printf("%s\n", image.ID)
if imagesQuiet {
continue
}
for _, name := range image.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, digest := range image.Digests {
fmt.Printf("\tdigest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
}
return 0
return 0, nil
}
func imagesByDigest(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func imagesByDigest(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
images := []*storage.Image{}
for _, arg := range args {
d := digest.Digest(arg)
if err := d.Validate(); err != nil {
fmt.Fprintf(os.Stderr, "%s: %v\n", arg, err)
return 1
return 1, err
}
matched, err := m.ImagesByDigest(d)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
images = append(images, matched...)
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(images)
} else {
for _, image := range images {
fmt.Printf("%s\n", image.ID)
if imagesQuiet {
continue
}
for _, name := range image.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, digest := range image.Digests {
fmt.Printf("\tdigest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
return outputJSON(images)
}
for _, image := range images {
fmt.Printf("%s\n", image.ID)
if imagesQuiet {
continue
}
for _, name := range image.Names {
fmt.Printf("\tname: %s\n", name)
}
for _, digest := range image.Digests {
fmt.Printf("\tdigest: %s\n", digest.String())
}
for _, name := range image.BigDataNames {
fmt.Printf("\tdata: %s\n", name)
}
}
return 0
return 0, nil
}
func init() {

View File

@ -1,7 +1,6 @@
package main
import (
"encoding/json"
"fmt"
"io"
"os"
@ -14,79 +13,69 @@ var (
paramLayerDataFile = ""
)
func listLayerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func listLayerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
layer, err := m.Layer(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
d, err := m.ListLayerBigData(layer.ID)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(d)
} else {
for _, name := range d {
fmt.Printf("%s\n", name)
}
return outputJSON(d)
}
return 0
for _, name := range d {
fmt.Printf("%s\n", name)
}
return 0, nil
}
func getLayerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func getLayerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
layer, err := m.Layer(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
output := os.Stdout
if paramLayerDataFile != "" {
f, err := os.Create(paramLayerDataFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
output = f
}
b, err := m.LayerBigData(layer.ID, args[1])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if _, err := io.Copy(output, b); err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
output.Close()
return 0
return 0, nil
}
func setLayerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func setLayerBigData(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
layer, err := m.Layer(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
input := os.Stdin
if paramLayerDataFile != "" {
f, err := os.Open(paramLayerDataFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
input = f
}
err = m.SetLayerBigData(layer.ID, args[1], input)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
return 0
return 0, nil
}
func layer(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func layer(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
matched := []*storage.Layer{}
for _, arg := range args {
if layer, err := m.Layer(arg); err == nil {
@ -94,7 +83,9 @@ func layer(flags *mflag.FlagSet, action string, m storage.Store, args []string)
}
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(matched)
if _, err := outputJSON(matched); err != nil {
return 1, err
}
} else {
for _, layer := range matched {
fmt.Printf("ID: %s\n", layer.ID)
@ -116,12 +107,12 @@ func layer(flags *mflag.FlagSet, action string, m storage.Store, args []string)
}
}
if len(matched) != len(args) {
return 1
return 1, nil
}
return 0
return 0, nil
}
func layerParentOwners(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func layerParentOwners(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
matched := []*storage.Layer{}
for _, arg := range args {
if layer, err := m.Layer(arg); err == nil {
@ -131,8 +122,7 @@ func layerParentOwners(flags *mflag.FlagSet, action string, m storage.Store, arg
for _, layer := range matched {
uids, gids, err := m.LayerParentOwners(layer.ID)
if err != nil {
fmt.Fprintf(os.Stderr, "LayerParentOwner: %+v\n", err)
return 1
return 1, fmt.Errorf("LayerParentOwner: %+v", err)
}
if jsonOutput {
mappings := struct {
@ -144,7 +134,9 @@ func layerParentOwners(flags *mflag.FlagSet, action string, m storage.Store, arg
UIDs: uids,
GIDs: gids,
}
json.NewEncoder(os.Stdout).Encode(mappings)
if _, err := outputJSON(mappings); err != nil {
return 1, err
}
} else {
fmt.Printf("ID: %s\n", layer.ID)
if len(uids) > 0 {
@ -156,9 +148,9 @@ func layerParentOwners(flags *mflag.FlagSet, action string, m storage.Store, arg
}
}
if len(matched) != len(args) {
return 1
return 1, nil
}
return 0
return 0, nil
}
func init() {

View File

@ -1,9 +1,7 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/containers/storage"
"github.com/containers/storage/pkg/mflag"
@ -11,15 +9,13 @@ import (
var listLayersTree = false
func layers(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func layers(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
layers, err := m.Layers()
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(layers)
return 0
return outputJSON(layers)
}
imageMap := make(map[string]*[]storage.Image)
if images, err := m.Images(); err == nil {
@ -97,7 +93,7 @@ func layers(flags *mflag.FlagSet, action string, m storage.Store, args []string)
if listLayersTree {
printTree(nodes)
}
return 0
return 0, nil
}
func init() {

View File

@ -1,6 +1,7 @@
package main
import (
"encoding/json"
"fmt"
"os"
@ -19,7 +20,7 @@ type command struct {
maxArgs int
usage string
addFlags func(*mflag.FlagSet, *command)
action func(*mflag.FlagSet, string, storage.Store, []string) int
action func(*mflag.FlagSet, string, storage.Store, []string) (int, error)
}
var (
@ -122,7 +123,11 @@ func main() {
os.Exit(1)
}
store.Free()
os.Exit(command.action(flags, cmd, store, args))
res, err := command.action(flags, cmd, store, args)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
}
os.Exit(res)
break
}
}
@ -130,3 +135,12 @@ func main() {
fmt.Printf("%s: unrecognized command.\n", cmd)
os.Exit(1)
}
// outputJSON formats its input as JSON to stdout, and returns values suitable
// for directly returning from command.action
func outputJSON(data interface{}) (int, error) {
if err := json.NewEncoder(os.Stdout).Encode(data); err != nil {
return 1, err
}
return 0, nil
}

View File

@ -1,7 +1,7 @@
package main
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
@ -13,9 +13,9 @@ import (
var metadataQuiet = false
func metadata(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func metadata(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
metadataDict := make(map[string]string)
missingAny := false
@ -27,7 +27,9 @@ func metadata(flags *mflag.FlagSet, action string, m storage.Store, args []strin
}
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(metadataDict)
if _, err := outputJSON(metadataDict); err != nil {
return 1, err
}
} else {
for _, what := range args {
if metadataQuiet {
@ -38,37 +40,33 @@ func metadata(flags *mflag.FlagSet, action string, m storage.Store, args []strin
}
}
if missingAny {
return 1
return 1, nil
}
return 0
return 0, nil
}
func setMetadata(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func setMetadata(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
if paramMetadataFile == "" && paramMetadata == "" {
fmt.Fprintf(os.Stderr, "no new metadata provided\n")
return 1
return 1, errors.New("no new metadata provided")
}
if paramMetadataFile != "" {
f, err := os.Open(paramMetadataFile)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
b, err := io.ReadAll(f)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return 1
return 1, err
}
paramMetadata = string(b)
}
if err := m.SetMetadata(args[0], paramMetadata); err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
return 0
return 0, nil
}
func init() {

View File

@ -1,7 +1,6 @@
package main
import (
"encoding/json"
"fmt"
"os"
@ -19,7 +18,7 @@ type mountPointError struct {
Error string `json:"error"`
}
func mount(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func mount(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
moes := []mountPointOrError{}
for _, arg := range args {
if paramReadOnly {
@ -39,7 +38,9 @@ func mount(flags *mflag.FlagSet, action string, m storage.Store, args []string)
}
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(moes)
if _, err := outputJSON(moes); err != nil {
return 1, err
}
} else {
for _, mountOrError := range moes {
if mountOrError.Error != "" {
@ -50,13 +51,13 @@ func mount(flags *mflag.FlagSet, action string, m storage.Store, args []string)
}
for _, mountOrErr := range moes {
if mountOrErr.Error != "" {
return 1
return 1, nil
}
}
return 0
return 0, nil
}
func unmount(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func unmount(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
mes := []mountPointError{}
errors := false
for _, arg := range args {
@ -77,7 +78,9 @@ func unmount(flags *mflag.FlagSet, action string, m storage.Store, args []string
mes = append(mes, mountPointError{arg, errText})
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(mes)
if _, err := outputJSON(mes); err != nil {
return 1, err
}
} else {
for _, me := range mes {
if me.Error != "" {
@ -86,12 +89,12 @@ func unmount(flags *mflag.FlagSet, action string, m storage.Store, args []string
}
}
if errors {
return 1
return 1, nil
}
return 0
return 0, nil
}
func mounted(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func mounted(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
mes := []mountPointError{}
errors := false
for _, arg := range args {
@ -108,7 +111,9 @@ func mounted(flags *mflag.FlagSet, action string, m storage.Store, args []string
mes = append(mes, mountPointError{arg, errText})
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(mes)
if _, err := outputJSON(mes); err != nil {
return 1, err
}
} else {
for _, me := range mes {
if me.Error != "" {
@ -117,9 +122,9 @@ func mounted(flags *mflag.FlagSet, action string, m storage.Store, args []string
}
}
if errors {
return 1
return 1, nil
}
return 0
return 0, nil
}
func init() {

View File

@ -1,52 +1,45 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/containers/storage"
"github.com/containers/storage/internal/opts"
"github.com/containers/storage/pkg/mflag"
)
func getNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func getNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
id, err := m.Lookup(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
names, err := m.Names(id)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(append([]string{}, names...))
} else {
for _, name := range names {
fmt.Printf("%s\n", name)
}
return outputJSON(append([]string{}, names...))
}
return 0
for _, name := range names {
fmt.Printf("%s\n", name)
}
return 0, nil
}
func addNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func addNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
id, err := m.Lookup(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
oldnames, err := m.Names(id)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
newNames := []string{}
if oldnames != nil {
@ -56,42 +49,41 @@ func addNames(flags *mflag.FlagSet, action string, m storage.Store, args []strin
newNames = append(newNames, paramNames...)
}
if err := m.SetNames(id, newNames); err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
names, err := m.Names(id)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(names)
if _, err := outputJSON(names); err != nil {
return 1, err
}
}
return 0
return 0, nil
}
func setNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func setNames(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
if len(args) < 1 {
return 1
return 1, nil
}
id, err := m.Lookup(args[0])
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if err := m.SetNames(id, paramNames); err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
names, err := m.Names(id)
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
return 1
return 1, err
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(names)
if _, err := outputJSON(names); err != nil {
return 1, err
}
}
return 0
return 0, nil
}
func init() {

View File

@ -1,9 +1,7 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/containers/storage"
"github.com/containers/storage/pkg/mflag"
@ -13,23 +11,19 @@ var (
forceShutdown = false
)
func shutdown(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func shutdown(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
_, err := m.Shutdown(forceShutdown)
if jsonOutput {
if err == nil {
json.NewEncoder(os.Stdout).Encode(string(""))
} else {
json.NewEncoder(os.Stdout).Encode(err)
}
} else {
if err != nil {
fmt.Fprintf(os.Stderr, "%s: %+v\n", action, err)
}
}
if err != nil {
return 1
if jsonOutput {
_, err2 := outputJSON(err)
return 1, err2 // Note that err2 is usually nil
}
return 1, fmt.Errorf("%s: %+v", action, err)
}
return 0
if jsonOutput {
return outputJSON(string(""))
}
return 0, nil
}
func init() {

View File

@ -1,7 +1,6 @@
package main
import (
"encoding/json"
"fmt"
"os"
@ -9,11 +8,10 @@ import (
"github.com/containers/storage/pkg/mflag"
)
func status(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func status(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
status, err := m.Status()
if err != nil {
fmt.Fprintf(os.Stderr, "status: %+v\n", err)
return 1
return 1, fmt.Errorf("status: %+v", err)
}
basics := [][2]string{
{"Root", m.GraphRoot()},
@ -23,13 +21,12 @@ func status(flags *mflag.FlagSet, action string, m storage.Store, args []string)
}
status = append(basics, status...)
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(status)
} else {
for _, pair := range status {
fmt.Fprintf(os.Stderr, "%s: %s\n", pair[0], pair[1])
}
return outputJSON(status)
}
return 0
for _, pair := range status {
fmt.Fprintf(os.Stderr, "%s: %s\n", pair[0], pair[1])
}
return 0, nil
}
func init() {

View File

@ -2,7 +2,7 @@ package main
import "testing"
func TestTree(*testing.T) {
func TestTree(_ *testing.T) {
nodes := []treeNode{
{"F", "H", []string{}},
{"F", "I", []string{}},

View File

@ -1,7 +1,6 @@
package main
import (
"encoding/json"
"fmt"
"os"
@ -9,20 +8,18 @@ import (
"github.com/containers/storage/pkg/mflag"
)
func version(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func version(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
version, err := m.Version()
if err != nil {
fmt.Fprintf(os.Stderr, "version: %+v\n", err)
return 1
return 1, fmt.Errorf("version: %+v", err)
}
if jsonOutput {
json.NewEncoder(os.Stdout).Encode(version)
} else {
for _, pair := range version {
fmt.Fprintf(os.Stderr, "%s: %s\n", pair[0], pair[1])
}
return outputJSON(version)
}
return 0
for _, pair := range version {
fmt.Fprintf(os.Stderr, "%s: %s\n", pair[0], pair[1])
}
return 0, nil
}
func init() {

View File

@ -1,31 +1,25 @@
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/containers/storage"
"github.com/containers/storage/pkg/mflag"
)
func wipe(flags *mflag.FlagSet, action string, m storage.Store, args []string) int {
func wipe(flags *mflag.FlagSet, action string, m storage.Store, args []string) (int, error) {
err := m.Wipe()
if jsonOutput {
if err == nil {
json.NewEncoder(os.Stdout).Encode(string(""))
} else {
json.NewEncoder(os.Stdout).Encode(err)
}
} else {
if err != nil {
fmt.Fprintf(os.Stderr, "%s: %+v\n", action, err)
}
}
if err != nil {
return 1
if jsonOutput {
_, err2 := outputJSON(err)
return 1, err2 // Note that err2 is usually nil
}
return 1, fmt.Errorf("%s: %+v", action, err)
}
return 0
if jsonOutput {
return outputJSON(string(""))
}
return 0, nil
}
func init() {

View File

@ -147,12 +147,12 @@ func (c *Container) ProcessLabel() string {
}
func (c *Container) MountOpts() []string {
switch c.Flags[mountOptsFlag].(type) {
switch value := c.Flags[mountOptsFlag].(type) {
case []string:
return c.Flags[mountOptsFlag].([]string)
return value
case []interface{}:
var mountOpts []string
for _, v := range c.Flags[mountOptsFlag].([]interface{}) {
for _, v := range value {
if flag, ok := v.(string); ok {
mountOpts = append(mountOpts, flag)
}

View File

@ -8,13 +8,12 @@ import (
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
)
// FsMagic unsigned id of the filesystem in use.

View File

@ -174,7 +174,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
defer driverPut(driver, id, &err)
defaultForceMask := os.FileMode(0700)
var forceMask *os.FileMode = nil
var forceMask *os.FileMode // = nil
if runtime.GOOS == "darwin" {
forceMask = &defaultForceMask
}

View File

@ -253,7 +253,7 @@ func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, d
}
b.StopTimer()
if bytes.Compare(c, content) != 0 {
if !bytes.Equal(c, content) {
b.Fatalf("Wrong content in file %v, expected %v", c, content)
}
b.StartTimer()

View File

@ -1,3 +1,9 @@
//go:build linux || freebsd
// +build linux freebsd
// ^^ The code is conceptually portable, but only called from within *_unix.go in this package.
// So it is excluded to avoid warnings on other platforms.
package graphtest
import (
@ -57,7 +63,7 @@ func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) e
return err
}
if bytes.Compare(fileContent, content) != 0 {
if !bytes.Equal(fileContent, content) {
return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content)
}
@ -216,7 +222,7 @@ func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64)
content := randomContent(64, seed+int64(i+j))
if bytes.Compare(fileContent, content) != 0 {
if !bytes.Equal(fileContent, content) {
return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content)
}
}
@ -305,7 +311,7 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
return err
}
if bytes.Compare(layerIDBytes, []byte(layer)) != 0 {
if !bytes.Equal(layerIDBytes, []byte(layer)) {
return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer))
}
@ -316,7 +322,7 @@ func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
if err != nil {
return err
}
if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 {
if !bytes.Equal(thisLayerIDBytes, layerIDBytes) {
return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes)
}
layerIDBytes, err = os.ReadFile(path.Join(layerDir, "parent-id"))

View File

@ -1,9 +1,8 @@
package graphdriver
import (
"github.com/sirupsen/logrus"
"github.com/containers/storage/pkg/idtools"
"github.com/sirupsen/logrus"
)
// TemplateDriver is just barely enough of a driver that we can implement a

View File

@ -194,7 +194,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
if parent != "" {
parentDir, err := d.Get(parent, graphdriver.MountOpts{})
if err != nil {
return fmt.Errorf("%s: %s", parent, err)
return fmt.Errorf("%s: %w", parent, err)
}
if err := dirCopy(parentDir, dir); err != nil {
return err

View File

@ -1,11 +1,4 @@
//go:build !linux && !freebsd
// +build !linux,!freebsd
package zfs
func checkRootdirFs(rootdir string) error {
return nil
}
func getMountpoint(id string) string {
return id
}

View File

@ -4,6 +4,9 @@ import (
"fmt"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestValidateIPAddress(t *testing.T) {
@ -32,12 +35,14 @@ func TestValidateIPAddress(t *testing.T) {
func TestMapOpts(t *testing.T) {
tmpMap := make(map[string]string)
o := NewMapOpts(tmpMap, logOptsValidator)
o.Set("max-size=1")
err := o.Set("max-size=1")
require.NoError(t, err)
if o.String() != "map[max-size:1]" {
t.Errorf("%s != [map[max-size:1]", o.String())
}
o.Set("max-file=2")
err = o.Set("max-file=2")
require.NoError(t, err)
if len(tmpMap) != 2 {
t.Errorf("map length %d != 2", len(tmpMap))
}
@ -56,15 +61,18 @@ func TestMapOpts(t *testing.T) {
func TestListOptsWithoutValidator(t *testing.T) {
o := NewListOpts(nil)
o.Set("foo")
err := o.Set("foo")
require.NoError(t, err)
if o.String() != "[foo]" {
t.Errorf("%s != [foo]", o.String())
}
o.Set("bar")
err = o.Set("bar")
require.NoError(t, err)
if o.Len() != 2 {
t.Errorf("%d != 2", o.Len())
}
o.Set("bar")
err = o.Set("bar")
require.NoError(t, err)
if o.Len() != 3 {
t.Errorf("%d != 3", o.Len())
}
@ -92,15 +100,18 @@ func TestListOptsWithoutValidator(t *testing.T) {
func TestListOptsWithValidator(t *testing.T) {
// Re-using logOptsvalidator (used by MapOpts)
o := NewListOpts(logOptsValidator)
o.Set("foo")
err := o.Set("foo")
assert.EqualError(t, err, "invalid key foo")
if o.String() != "[]" {
t.Errorf("%s != []", o.String())
}
o.Set("foo=bar")
err = o.Set("foo=bar")
assert.EqualError(t, err, "invalid key foo")
if o.String() != "[]" {
t.Errorf("%s != []", o.String())
}
o.Set("max-file=2")
err = o.Set("max-file=2")
require.NoError(t, err)
if o.Len() != 1 {
t.Errorf("%d != 1", o.Len())
}
@ -203,7 +214,8 @@ func TestNamedListOpts(t *testing.T) {
var v []string
o := NewNamedListOptsRef("foo-name", &v, nil)
o.Set("foo")
err := o.Set("foo")
require.NoError(t, err)
if o.String() != "[foo]" {
t.Errorf("%s != [foo]", o.String())
}
@ -219,7 +231,8 @@ func TestNamedMapOpts(t *testing.T) {
tmpMap := make(map[string]string)
o := NewNamedMapOpts("max-name", tmpMap, nil)
o.Set("max-size=1")
err := o.Set("max-size=1")
require.NoError(t, err)
if o.String() != "map[max-size:1]" {
t.Errorf("%s != [map[max-size:1]", o.String())
}

View File

@ -1235,8 +1235,7 @@ func (r *layerStore) deleteInternal(id string) error {
// We never unset incompleteFlag; below, we remove the entire object from r.layers.
id = layer.ID
err := r.driver.Remove(id)
if err != nil {
if err := r.driver.Remove(id); err != nil {
return err
}

View File

@ -864,7 +864,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
rebaseName := options.RebaseNames[include]
walkRoot := getWalkRoot(srcPath, include)
filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
if err := filepath.WalkDir(walkRoot, func(filePath string, d fs.DirEntry, err error) error {
if err != nil {
logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
@ -891,8 +891,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if include != relFilePath {
matches, err := pm.IsMatch(relFilePath)
if err != nil {
logrus.Errorf("Matching %s: %v", relFilePath, err)
return err
return fmt.Errorf("matching %s: %w", relFilePath, err)
}
skip = matches
}
@ -955,7 +954,10 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}
}
return nil
})
}); err != nil {
logrus.Errorf("%s", err)
return
}
}
}()

View File

@ -208,14 +208,22 @@ func TestExtensionXz(t *testing.T) {
}
}
func createEmptyFile(t *testing.T, path string) {
t.Helper()
f, err := os.Create(path)
require.NoError(t, err)
err = f.Close()
require.NoError(t, err)
}
func TestUntarPathWithInvalidDest(t *testing.T) {
tempFolder := t.TempDir()
invalidDestFolder := filepath.Join(tempFolder, "invalidDest")
// Create a src file
srcFile := filepath.Join(tempFolder, "src")
tarFile := filepath.Join(tempFolder, "src.tar")
os.Create(srcFile)
os.Create(invalidDestFolder) // being a file (not dir) should cause an error
createEmptyFile(t, srcFile)
createEmptyFile(t, invalidDestFolder) // being a file (not dir) should cause an error
// Translate back to Unix semantics as next exec.Command is run under sh
srcFileU := srcFile
@ -247,7 +255,7 @@ func TestUntarPath(t *testing.T) {
tmpFolder := t.TempDir()
srcFile := filepath.Join(tmpFolder, "src")
tarFile := filepath.Join(tmpFolder, "src.tar")
os.Create(filepath.Join(tmpFolder, "src"))
createEmptyFile(t, filepath.Join(tmpFolder, "src"))
destFolder := filepath.Join(tmpFolder, "dest")
err := os.MkdirAll(destFolder, 0740)
@ -282,7 +290,7 @@ func TestUntarPathWithDestinationFile(t *testing.T) {
tmpFolder := t.TempDir()
srcFile := filepath.Join(tmpFolder, "src")
tarFile := filepath.Join(tmpFolder, "src.tar")
os.Create(filepath.Join(tmpFolder, "src"))
createEmptyFile(t, filepath.Join(tmpFolder, "src"))
// Translate back to Unix semantics as next exec.Command is run under sh
srcFileU := srcFile
@ -297,10 +305,7 @@ func TestUntarPathWithDestinationFile(t *testing.T) {
t.Fatal(err)
}
destFile := filepath.Join(tmpFolder, "dest")
_, err = os.Create(destFile)
if err != nil {
t.Fatalf("Fail to create the destination file")
}
createEmptyFile(t, destFile)
err = defaultUntarPath(tarFile, destFile)
if err == nil {
t.Fatalf("UntarPath should throw an error if the destination if a file")
@ -314,7 +319,7 @@ func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) {
tmpFolder := t.TempDir()
srcFile := filepath.Join(tmpFolder, "src")
tarFile := filepath.Join(tmpFolder, "src.tar")
os.Create(srcFile)
createEmptyFile(t, srcFile)
// Translate back to Unix semantics as next exec.Command is run under sh
srcFileU := srcFile
@ -453,7 +458,8 @@ func TestCopyWithTarSrcFolder(t *testing.T) {
if err != nil {
t.Fatal(err)
}
os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777)
err = os.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777)
require.NoError(t, err)
err = defaultCopyWithTar(src, dest)
if err != nil {
t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
@ -483,11 +489,8 @@ func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) {
tempFolder := t.TempDir()
srcFile := filepath.Join(tempFolder, "src")
inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists")
_, err := os.Create(srcFile)
if err != nil {
t.Fatal(err)
}
err = defaultCopyFileWithTar(srcFile, inexistentDestFolder)
createEmptyFile(t, srcFile)
err := defaultCopyFileWithTar(srcFile, inexistentDestFolder)
if err != nil {
t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.")
}
@ -529,7 +532,8 @@ func TestCopyFileWithTarSrcFile(t *testing.T) {
if err != nil {
t.Fatal(err)
}
os.WriteFile(src, []byte("content"), 0777)
err = os.WriteFile(src, []byte("content"), 0777)
require.NoError(t, err)
err = defaultCopyWithTar(src, dest+"/")
if err != nil {
t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err)
@ -558,7 +562,6 @@ func TestCopySocket(t *testing.T) {
if err != nil {
t.Fatal(err)
}
os.WriteFile(src, []byte("content"), 0777)
err = defaultCopyWithTar(src, dest+"/")
if err != nil {
t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err)
@ -1125,8 +1128,10 @@ func TestUntarInvalidSymlink(t *testing.T) {
func TestTempArchiveCloseMultipleTimes(t *testing.T) {
reader := io.NopCloser(strings.NewReader("hello"))
tempArchive, err := NewTempArchive(reader, "")
require.NoError(t, err)
buf := make([]byte, 10)
n, err := tempArchive.Read(buf)
require.NoError(t, err)
if n != 5 {
t.Fatalf("Expected to read 5 bytes. Read %d instead", n)
}

View File

@ -50,8 +50,8 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
// Currently go does not fill in the major/minors
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert
hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert
hdr.Devmajor = int64(major(uint64(s.Rdev))) //nolint: unconvert
hdr.Devminor = int64(minor(uint64(s.Rdev))) //nolint: unconvert
}
}

View File

@ -141,23 +141,29 @@ func TestChangesWithChanges(t *testing.T) {
// Mock the readonly layer
layer := t.TempDir()
createSampleDir(t, layer)
os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740)
err := os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740)
require.NoError(t, err)
// Mock the RW layer
rwLayer := t.TempDir()
// Create a folder in RW layer
dir1 := path.Join(rwLayer, "dir1")
os.MkdirAll(dir1, 0740)
err = os.MkdirAll(dir1, 0740)
require.NoError(t, err)
deletedFile := path.Join(dir1, ".wh.file1-2")
os.WriteFile(deletedFile, []byte{}, 0600)
err = os.WriteFile(deletedFile, []byte{}, 0600)
require.NoError(t, err)
modifiedFile := path.Join(dir1, "file1-1")
os.WriteFile(modifiedFile, []byte{0x00}, 01444)
err = os.WriteFile(modifiedFile, []byte{0x00}, 01444)
require.NoError(t, err)
// Let's add a subfolder for a newFile
subfolder := path.Join(dir1, "subfolder")
os.MkdirAll(subfolder, 0740)
err = os.MkdirAll(subfolder, 0740)
require.NoError(t, err)
newFile := path.Join(subfolder, "newFile")
os.WriteFile(newFile, []byte{}, 0740)
err = os.WriteFile(newFile, []byte{}, 0740)
require.NoError(t, err)
changes, err := Changes([]string{layer}, rwLayer)
require.NoError(t, err)
@ -182,10 +188,12 @@ func TestChangesWithChangesGH13590(t *testing.T) {
baseLayer := t.TempDir()
dir3 := path.Join(baseLayer, "dir1/dir2/dir3")
os.MkdirAll(dir3, 07400)
err := os.MkdirAll(dir3, 07400)
require.NoError(t, err)
file := path.Join(dir3, "file.txt")
os.WriteFile(file, []byte("hello"), 0666)
err = os.WriteFile(file, []byte("hello"), 0666)
require.NoError(t, err)
layer := t.TempDir()
@ -196,7 +204,8 @@ func TestChangesWithChangesGH13590(t *testing.T) {
os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt"))
file = path.Join(layer, "dir1/dir2/dir3/file1.txt")
os.WriteFile(file, []byte("bye"), 0666)
err = os.WriteFile(file, []byte("bye"), 0666)
require.NoError(t, err)
changes, err := Changes([]string{baseLayer}, layer)
require.NoError(t, err)
@ -217,7 +226,8 @@ func TestChangesWithChangesGH13590(t *testing.T) {
}
file = path.Join(layer, "dir1/dir2/dir3/file.txt")
os.WriteFile(file, []byte("bye"), 0666)
err = os.WriteFile(file, []byte("bye"), 0666)
require.NoError(t, err)
changes, err = Changes([]string{baseLayer}, layer)
require.NoError(t, err)

View File

@ -10,6 +10,8 @@ import (
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/require"
)
var testUntarFns = map[string]func(string, io.Reader) error{
@ -61,11 +63,12 @@ func testBreakout(t *testing.T, untarFn string, headers []*tar.Header) error {
reader, writer := io.Pipe()
go func() {
t := tar.NewWriter(writer)
tw := tar.NewWriter(writer)
for _, hdr := range headers {
t.WriteHeader(hdr)
err := tw.WriteHeader(hdr)
require.NoError(t, err)
}
t.Close()
tw.Close()
}()
untar := testUntarFns[untarFn]

View File

@ -30,7 +30,8 @@ func TestGenerateEmptyFile(t *testing.T) {
}
require.NoError(t, err)
buf := new(bytes.Buffer)
buf.ReadFrom(tr)
_, err = buf.ReadFrom(tr)
require.NoError(t, err)
content := buf.String()
actualFiles = append(actualFiles, []string{hdr.Name, content})
i++
@ -71,7 +72,8 @@ func TestGenerateWithContent(t *testing.T) {
}
require.NoError(t, err)
buf := new(bytes.Buffer)
buf.ReadFrom(tr)
_, err = buf.ReadFrom(tr)
require.NoError(t, err)
content := buf.String()
actualFiles = append(actualFiles, []string{hdr.Name, content})
i++

View File

@ -82,7 +82,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
}
}
r := io.NopCloser(tarArchive)
r := tarArchive
if decompress {
decompressedArchive, err := archive.DecompressStream(tarArchive)
if err != nil {

View File

@ -6,11 +6,7 @@ import (
"github.com/containers/storage/pkg/archive"
)
func chroot(path string) error {
return nil
}
func invokeUnpack(decompressedArchive io.ReadCloser,
func invokeUnpack(decompressedArchive io.Reader,
dest string,
options *archive.TarOptions, root string) error {
return archive.Unpack(decompressedArchive, dest, options)

View File

@ -16,6 +16,7 @@ import (
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
"github.com/stretchr/testify/require"
)
const (
@ -423,7 +424,8 @@ func TestChrootUntarPath(t *testing.T) {
t.Fatal(err)
}
buf := new(bytes.Buffer)
buf.ReadFrom(stream)
_, err = buf.ReadFrom(stream)
require.NoError(t, err)
tarfile := filepath.Join(tmpdir, "src.tar")
if err := os.WriteFile(tarfile, buf.Bytes(), 0644); err != nil {
t.Fatal(err)
@ -472,7 +474,8 @@ func TestChrootUntarPathAndChown(t *testing.T) {
}
buf := new(bytes.Buffer)
buf.ReadFrom(stream)
_, err = buf.ReadFrom(stream)
require.NoError(t, err)
tarfile := filepath.Join(tmpdir, "src.tar")
if err := os.WriteFile(tarfile, buf.Bytes(), 0644); err != nil {
t.Fatal(err)

View File

@ -12,7 +12,7 @@ func chroot(path string) error {
return nil
}
func invokeUnpack(decompressedArchive io.ReadCloser,
func invokeUnpack(decompressedArchive io.Reader,
dest string,
options *archive.TarOptions, root string) error {
// Windows is different to Linux here because Windows does not support

View File

@ -27,13 +27,13 @@ func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions
tmpDir, err := os.MkdirTemp(os.Getenv("temp"), "temp-storage-extract")
if err != nil {
return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err)
return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s: %w", dest, err)
}
s, err := archive.UnpackLayer(dest, layer, options)
os.RemoveAll(tmpDir)
if err != nil {
return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %w", layer, dest, err)
}
return s, nil

View File

@ -1,3 +1,6 @@
//go:build !windows && !darwin
// +build !windows,!darwin
package chrootarchive
import jsoniter "github.com/json-iterator/go"

View File

@ -1,21 +1,10 @@
package chunked
import (
archivetar "archive/tar"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"strconv"
"github.com/containerd/stargz-snapshotter/estargz"
"github.com/containers/storage/pkg/chunked/compressor"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/vbatts/tar-split/archive/tar"
)
const (
@ -29,247 +18,6 @@ const (
TypeSymlink = internal.TypeSymlink
)
var typesToTar = map[string]byte{
TypeReg: tar.TypeReg,
TypeLink: tar.TypeLink,
TypeChar: tar.TypeChar,
TypeBlock: tar.TypeBlock,
TypeDir: tar.TypeDir,
TypeFifo: tar.TypeFifo,
TypeSymlink: tar.TypeSymlink,
}
func typeToTarType(t string) (byte, error) {
r, found := typesToTar[t]
if !found {
return 0, fmt.Errorf("unknown type: %v", t)
}
return r, nil
}
func isZstdChunkedFrameMagic(data []byte) bool {
if len(data) < 8 {
return false
}
return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8])
}
func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
footerSize := int64(51)
if blobSize <= footerSize {
return nil, 0, errors.New("blob too small")
}
chunk := ImageSourceChunk{
Offset: uint64(blobSize - footerSize),
Length: uint64(footerSize),
}
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
if err != nil {
return nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, 0, err
}
defer reader.Close()
footer := make([]byte, footerSize)
if _, err := io.ReadFull(reader, footer); err != nil {
return nil, 0, err
}
/* Read the ToC offset:
- 10 bytes gzip header
- 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
- 2 bytes Extra: SI1 = 'S', SI2 = 'G'
- 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ"))
- 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
- 5 bytes flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0
- 8 bytes gzip footer
*/
tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64)
if err != nil {
return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
}
size := int64(blobSize - footerSize - tocOffset)
// set a reasonable limit
if size > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
}
chunk = ImageSourceChunk{
Offset: uint64(tocOffset),
Length: uint64(size),
}
parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
if err != nil {
return nil, 0, err
}
var tocReader io.ReadCloser
select {
case r := <-parts:
tocReader = r
case err := <-errs:
return nil, 0, err
}
defer tocReader.Close()
r, err := pgzip.NewReader(tocReader)
if err != nil {
return nil, 0, err
}
defer r.Close()
aTar := archivetar.NewReader(r)
header, err := aTar.Next()
if err != nil {
return nil, 0, err
}
// set a reasonable limit
if header.Size > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
}
manifestUncompressed := make([]byte, header.Size)
if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
return nil, 0, err
}
manifestDigester := digest.Canonical.Digester()
manifestChecksum := manifestDigester.Hash()
if _, err := manifestChecksum.Write(manifestUncompressed); err != nil {
return nil, 0, err
}
d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
if err != nil {
return nil, 0, err
}
if manifestDigester.Digest() != d {
return nil, 0, errors.New("invalid manifest checksum")
}
return manifestUncompressed, tocOffset, nil
}
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
// be specified.
// This function uses the io.containers.zstd-chunked. annotations when specified.
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
footerSize := int64(internal.FooterSizeSupported)
if blobSize <= footerSize {
return nil, 0, errors.New("blob too small")
}
manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
if manifestChecksumAnnotation == "" {
return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
}
var offset, length, lengthUncompressed, manifestType uint64
if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
return nil, 0, err
}
} else {
chunk := ImageSourceChunk{
Offset: uint64(blobSize - footerSize),
Length: uint64(footerSize),
}
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
if err != nil {
return nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, 0, err
}
footer := make([]byte, footerSize)
if _, err := io.ReadFull(reader, footer); err != nil {
return nil, 0, err
}
offset = binary.LittleEndian.Uint64(footer[0:8])
length = binary.LittleEndian.Uint64(footer[8:16])
lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
manifestType = binary.LittleEndian.Uint64(footer[24:32])
if !isZstdChunkedFrameMagic(footer[32:40]) {
return nil, 0, errors.New("invalid magic number")
}
}
if manifestType != internal.ManifestTypeCRFS {
return nil, 0, errors.New("invalid manifest type")
}
// set a reasonable limit
if length > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
}
if lengthUncompressed > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
}
chunk := ImageSourceChunk{
Offset: offset,
Length: length,
}
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
if err != nil {
return nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, 0, err
}
manifest := make([]byte, length)
if _, err := io.ReadFull(reader, manifest); err != nil {
return nil, 0, err
}
manifestDigester := digest.Canonical.Digester()
manifestChecksum := manifestDigester.Hash()
if _, err := manifestChecksum.Write(manifest); err != nil {
return nil, 0, err
}
d, err := digest.Parse(manifestChecksumAnnotation)
if err != nil {
return nil, 0, err
}
if manifestDigester.Digest() != d {
return nil, 0, errors.New("invalid manifest checksum")
}
decoder, err := zstd.NewReader(nil)
if err != nil {
return nil, 0, err
}
defer decoder.Close()
b := make([]byte, 0, lengthUncompressed)
if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
return decoded, int64(offset), nil
}
return manifest, int64(offset), nil
}
// ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
// Deprecated: Use pkg/chunked/compressor.ZstdCompressor.
func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {

View File

@ -0,0 +1,259 @@
package chunked
import (
archivetar "archive/tar"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"strconv"
"github.com/containerd/stargz-snapshotter/estargz"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
"github.com/vbatts/tar-split/archive/tar"
)
// typesToTar maps the chunked-format type strings (TypeReg, TypeLink, ...)
// to the corresponding tar header type bytes.  It is the lookup table used
// by typeToTarType.
var typesToTar = map[string]byte{
	TypeReg:     tar.TypeReg,
	TypeLink:    tar.TypeLink,
	TypeChar:    tar.TypeChar,
	TypeBlock:   tar.TypeBlock,
	TypeDir:     tar.TypeDir,
	TypeFifo:    tar.TypeFifo,
	TypeSymlink: tar.TypeSymlink,
}
// typeToTarType converts a chunked-format type string into the matching tar
// header type byte, returning an error for any type string not present in
// typesToTar.
func typeToTarType(t string) (byte, error) {
	if tarType, ok := typesToTar[t]; ok {
		return tarType, nil
	}
	return 0, fmt.Errorf("unknown type: %v", t)
}
// isZstdChunkedFrameMagic reports whether data begins with the 8-byte
// zstd:chunked skippable-frame magic number.
func isZstdChunkedFrameMagic(data []byte) bool {
	return len(data) >= 8 && bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8])
}
// readEstargzChunkedManifest reads the estargz table of contents (TOC) from
// the seekable stream blobStream.  It locates the TOC via the fixed-size
// footer at the end of the blob, fetches and decompresses it, verifies its
// digest against the estargz TOC-digest annotation, and returns the
// uncompressed TOC JSON together with the TOC offset within the blob.
func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
	// information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md
	footerSize := int64(51)
	if blobSize <= footerSize {
		return nil, 0, errors.New("blob too small")
	}
	// Fetch only the footer: the final footerSize bytes of the blob.
	chunk := ImageSourceChunk{
		Offset: uint64(blobSize - footerSize),
		Length: uint64(footerSize),
	}
	parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
	if err != nil {
		return nil, 0, err
	}
	var reader io.ReadCloser
	select {
	case r := <-parts:
		reader = r
	case err := <-errs:
		return nil, 0, err
	}
	defer reader.Close()
	footer := make([]byte, footerSize)
	if _, err := io.ReadFull(reader, footer); err != nil {
		return nil, 0, err
	}
	/* Read the ToC offset:
	   - 10 bytes  gzip header
	   - 2  bytes  XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
	   - 2  bytes  Extra: SI1 = 'S', SI2 = 'G'
	   - 2  bytes  Extra: LEN = 22 (16 hex digits + len("STARGZ"))
	   - 22 bytes  Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
	   - 5  bytes  flate header: BFINAL = 1(last block), BTYPE = 0(non-compressed block), LEN = 0
	   - 8  bytes  gzip footer
	*/
	// The 16 hex digits of the TOC offset start at byte 16 of the footer.
	tocOffset, err := strconv.ParseInt(string(footer[16:16+22-6]), 16, 64)
	if err != nil {
		return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
	}
	// The TOC occupies everything between its offset and the footer.
	size := int64(blobSize - footerSize - tocOffset)
	// set a reasonable limit
	if size > (1<<20)*50 {
		return nil, 0, errors.New("manifest too big")
	}
	chunk = ImageSourceChunk{
		Offset: uint64(tocOffset),
		Length: uint64(size),
	}
	parts, errs, err = blobStream.GetBlobAt([]ImageSourceChunk{chunk})
	if err != nil {
		return nil, 0, err
	}
	var tocReader io.ReadCloser
	select {
	case r := <-parts:
		tocReader = r
	case err := <-errs:
		return nil, 0, err
	}
	defer tocReader.Close()
	// The TOC is a gzip stream wrapping a tar archive whose first entry is
	// the TOC JSON document.
	r, err := pgzip.NewReader(tocReader)
	if err != nil {
		return nil, 0, err
	}
	defer r.Close()
	aTar := archivetar.NewReader(r)
	header, err := aTar.Next()
	if err != nil {
		return nil, 0, err
	}
	// set a reasonable limit
	if header.Size > (1<<20)*50 {
		return nil, 0, errors.New("manifest too big")
	}
	manifestUncompressed := make([]byte, header.Size)
	if _, err := io.ReadFull(aTar, manifestUncompressed); err != nil {
		return nil, 0, err
	}
	// Verify the TOC digest against the estargz TOC-digest annotation
	// before trusting its contents.
	manifestDigester := digest.Canonical.Digester()
	manifestChecksum := manifestDigester.Hash()
	if _, err := manifestChecksum.Write(manifestUncompressed); err != nil {
		return nil, 0, err
	}
	d, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
	if err != nil {
		return nil, 0, err
	}
	if manifestDigester.Digest() != d {
		return nil, 0, errors.New("invalid manifest checksum")
	}
	return manifestUncompressed, tocOffset, nil
}
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
// be specified.
// This function uses the io.containers.zstd-chunked. annotations when specified.
//
// The manifest location is taken from the manifest-info annotation when
// present; otherwise it is read from the fixed-size footer at the end of the
// blob.  The manifest digest is verified against the manifest-checksum
// annotation, and the manifest is zstd-decompressed when possible (falling
// back to returning the raw bytes if decompression fails).  It returns the
// manifest bytes and the manifest offset within the blob.
func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
	footerSize := int64(internal.FooterSizeSupported)
	if blobSize <= footerSize {
		return nil, 0, errors.New("blob too small")
	}
	manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
	if manifestChecksumAnnotation == "" {
		return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
	}
	var offset, length, lengthUncompressed, manifestType uint64
	if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
		if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
			return nil, 0, err
		}
	} else {
		// No annotation: read the manifest location from the footer, the
		// final footerSize bytes of the blob.
		chunk := ImageSourceChunk{
			Offset: uint64(blobSize - footerSize),
			Length: uint64(footerSize),
		}
		parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
		if err != nil {
			return nil, 0, err
		}
		var reader io.ReadCloser
		select {
		case r := <-parts:
			reader = r
		case err := <-errs:
			return nil, 0, err
		}
		// Close the footer reader; it was previously leaked (compare the
		// matching defer in readEstargzChunkedManifest).
		defer reader.Close()
		footer := make([]byte, footerSize)
		if _, err := io.ReadFull(reader, footer); err != nil {
			return nil, 0, err
		}
		// Footer layout: four little-endian uint64 fields followed by the
		// 8-byte zstd:chunked frame magic number.
		offset = binary.LittleEndian.Uint64(footer[0:8])
		length = binary.LittleEndian.Uint64(footer[8:16])
		lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
		manifestType = binary.LittleEndian.Uint64(footer[24:32])
		if !isZstdChunkedFrameMagic(footer[32:40]) {
			return nil, 0, errors.New("invalid magic number")
		}
	}
	if manifestType != internal.ManifestTypeCRFS {
		return nil, 0, errors.New("invalid manifest type")
	}
	// set a reasonable limit
	if length > (1<<20)*50 {
		return nil, 0, errors.New("manifest too big")
	}
	if lengthUncompressed > (1<<20)*50 {
		return nil, 0, errors.New("manifest too big")
	}
	// Fetch the manifest itself.
	chunk := ImageSourceChunk{
		Offset: offset,
		Length: length,
	}
	parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
	if err != nil {
		return nil, 0, err
	}
	var reader io.ReadCloser
	select {
	case r := <-parts:
		reader = r
	case err := <-errs:
		return nil, 0, err
	}
	// Close the manifest reader; it was previously leaked.
	defer reader.Close()
	manifest := make([]byte, length)
	if _, err := io.ReadFull(reader, manifest); err != nil {
		return nil, 0, err
	}
	// Verify the (compressed) manifest digest against the checksum
	// annotation before trusting its contents.
	manifestDigester := digest.Canonical.Digester()
	manifestChecksum := manifestDigester.Hash()
	if _, err := manifestChecksum.Write(manifest); err != nil {
		return nil, 0, err
	}
	d, err := digest.Parse(manifestChecksumAnnotation)
	if err != nil {
		return nil, 0, err
	}
	if manifestDigester.Digest() != d {
		return nil, 0, errors.New("invalid manifest checksum")
	}
	decoder, err := zstd.NewReader(nil)
	if err != nil {
		return nil, 0, err
	}
	defer decoder.Close()
	// Best-effort decompression: on failure, return the raw manifest bytes
	// unchanged (preserving the original behavior).
	b := make([]byte, 0, lengthUncompressed)
	if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
		return decoded, int64(offset), nil
	}
	return manifest, int64(offset), nil
}

View File

@ -33,11 +33,11 @@ const (
holesFinderStateEOF
)
// ReadByte reads a single byte from the underlying reader.
// readByte reads a single byte from the underlying reader.
// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
// If there are at least f.THRESHOLD consecutive zeros, then the
// return value is (N_CONSECUTIVE_ZEROS, '\x00').
func (f *holesFinder) ReadByte() (int64, byte, error) {
func (f *holesFinder) readByte() (int64, byte, error) {
for {
switch f.state {
// reading the file stream
@ -159,7 +159,7 @@ func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
}
for i := 0; i < len(b); i++ {
holeLen, n, err := rc.reader.ReadByte()
holeLen, n, err := rc.reader.readByte()
if err != nil {
if err == io.EOF {
rc.closed = true

View File

@ -15,7 +15,7 @@ func TestHole(t *testing.T) {
reader: bufio.NewReader(bytes.NewReader(data)),
}
hole, _, err := hf.ReadByte()
hole, _, err := hf.readByte()
if err != nil {
t.Errorf("got error: %v", err)
}
@ -23,7 +23,7 @@ func TestHole(t *testing.T) {
t.Error("expected hole not found")
}
if _, _, err := hf.ReadByte(); err != io.EOF {
if _, _, err := hf.readByte(); err != io.EOF {
t.Errorf("EOF not found")
}
@ -32,18 +32,18 @@ func TestHole(t *testing.T) {
reader: bufio.NewReader(bytes.NewReader(data)),
}
for i := 0; i < 5; i++ {
hole, byte, err := hf.ReadByte()
hole, b, err := hf.readByte()
if err != nil {
t.Errorf("got error: %v", err)
}
if hole != 0 {
t.Error("hole found")
}
if byte != 0 {
if b != 0 {
t.Error("wrong read")
}
}
if _, _, err := hf.ReadByte(); err != io.EOF {
if _, _, err := hf.readByte(); err != io.EOF {
t.Error("didn't receive EOF")
}
}
@ -56,7 +56,7 @@ func TestTwoHoles(t *testing.T) {
reader: bufio.NewReader(bytes.NewReader(data)),
}
hole, _, err := hf.ReadByte()
hole, _, err := hf.readByte()
if err != nil {
t.Errorf("got error: %v", err)
}
@ -65,7 +65,7 @@ func TestTwoHoles(t *testing.T) {
}
for _, e := range []byte("FOO") {
hole, c, err := hf.ReadByte()
hole, c, err := hf.readByte()
if err != nil {
t.Errorf("got error: %v", err)
}
@ -76,7 +76,7 @@ func TestTwoHoles(t *testing.T) {
t.Errorf("wrong byte read %v instead of %v", c, e)
}
}
hole, _, err = hf.ReadByte()
hole, _, err = hf.readByte()
if err != nil {
t.Errorf("got error: %v", err)
}
@ -84,7 +84,7 @@ func TestTwoHoles(t *testing.T) {
t.Error("expected hole not found")
}
if _, _, err := hf.ReadByte(); err != io.EOF {
if _, _, err := hf.readByte(); err != io.EOF {
t.Error("didn't receive EOF")
}
}

View File

@ -114,7 +114,7 @@ func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
return err
}
var size []byte = make([]byte, 4)
size := make([]byte, 4)
binary.LittleEndian.PutUint32(size, uint32(len(data)))
if _, err := dest.Write(size); err != nil {
return err
@ -168,7 +168,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
}
// Store the offset to the manifest and its size in LE order
var manifestDataLE []byte = make([]byte, FooterSizeSupported)
manifestDataLE := make([]byte, FooterSizeSupported)
binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))

View File

@ -6,6 +6,8 @@ import (
"reflect"
"sort"
"testing"
"github.com/stretchr/testify/require"
)
// Usage of an empty directory should be 0
@ -45,7 +47,8 @@ func TestUsageNonemptyFile(t *testing.T) {
}
d := []byte{97, 98, 99, 100, 101}
file.Write(d)
_, err = file.Write(d)
require.NoError(t, err)
usage, _ := Usage(dir)
expectSizeAndInodeCount(t, "directory with one 5-byte file", usage, &DiskUsage{
@ -92,7 +95,8 @@ func TestUsageFileAndNestedDirectoryEmpty(t *testing.T) {
}
d := []byte{100, 111, 99, 107, 101, 114}
file.Write(d)
_, err = file.Write(d)
require.NoError(t, err)
usage, _ := Usage(dir)
expectSizeAndInodeCount(t, "directory with 6-byte file and empty directory", usage, &DiskUsage{
@ -117,7 +121,8 @@ func TestUsageFileAndNestedDirectoryNonempty(t *testing.T) {
}
data := []byte{100, 111, 99, 107, 101, 114}
file.Write(data)
_, err = file.Write(data)
require.NoError(t, err)
var nestedFile *os.File
if nestedFile, err = os.CreateTemp(dirNested, "file"); err != nil {
@ -125,7 +130,8 @@ func TestUsageFileAndNestedDirectoryNonempty(t *testing.T) {
}
nestedData := []byte{100, 111, 99, 107, 101, 114}
nestedFile.Write(nestedData)
_, err = nestedFile.Write(nestedData)
require.NoError(t, err)
usage, _ := Usage(dir)
expectSizeAndInodeCount(t, "directory with 6-byte file and nested directory with 6-byte file", usage, &DiskUsage{
@ -150,7 +156,8 @@ func TestMoveToSubdir(t *testing.T) {
if file, err := os.Create(filepath.Join(outerDir, fName)); err != nil {
t.Fatalf("couldn't create temp file %q: %v", fName, err)
} else {
file.WriteString(fName)
_, err = file.WriteString(fName)
require.NoError(t, err)
file.Close()
}
}

View File

@ -321,14 +321,14 @@ func ReadSymlinkedDirectory(path string) (string, error) {
var realPath string
var err error
if realPath, err = filepath.Abs(path); err != nil {
return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
return "", fmt.Errorf("unable to get absolute path for %s: %w", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
return "", fmt.Errorf("failed to canonicalise path for %s: %w", path, err)
}
realPathInfo, err := os.Stat(realPath)
if err != nil {
return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
return "", fmt.Errorf("failed to stat target '%s' of '%s': %w", realPath, path, err)
}
if !realPathInfo.Mode().IsDir() {
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)

View File

@ -2,6 +2,7 @@ package idtools
import (
"bufio"
"errors"
"fmt"
"os"
"os/user"
@ -359,7 +360,8 @@ func parseSubidFile(path, username string) (ranges, error) {
}
func checkChownErr(err error, name string, uid, gid int) error {
if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
var e *os.PathError
if errors.As(err, &e) && e.Err == syscall.EINVAL {
return fmt.Errorf("potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run podman-system-migrate: %w", uid, gid, name, err)
}
return err

View File

@ -3,6 +3,8 @@ package ioutils
import (
"bytes"
"testing"
"github.com/stretchr/testify/require"
)
func TestFixedBufferCap(t *testing.T) {
@ -17,13 +19,15 @@ func TestFixedBufferCap(t *testing.T) {
func TestFixedBufferLen(t *testing.T) {
buf := &fixedBuffer{buf: make([]byte, 0, 10)}
buf.Write([]byte("hello"))
_, err := buf.Write([]byte("hello"))
require.NoError(t, err)
l := buf.Len()
if l != 5 {
t.Fatalf("expected buffer length to be 5 bytes, got %d", l)
}
buf.Write([]byte("world"))
_, err = buf.Write([]byte("world"))
require.NoError(t, err)
l = buf.Len()
if l != 10 {
t.Fatalf("expected buffer length to be 10 bytes, got %d", l)
@ -31,7 +35,8 @@ func TestFixedBufferLen(t *testing.T) {
// read 5 bytes
b := make([]byte, 5)
buf.Read(b)
_, err = buf.Read(b)
require.NoError(t, err)
l = buf.Len()
if l != 5 {
@ -61,8 +66,10 @@ func TestFixedBufferLen(t *testing.T) {
func TestFixedBufferString(t *testing.T) {
buf := &fixedBuffer{buf: make([]byte, 0, 10)}
buf.Write([]byte("hello"))
buf.Write([]byte("world"))
_, err := buf.Write([]byte("hello"))
require.NoError(t, err)
_, err = buf.Write([]byte("world"))
require.NoError(t, err)
out := buf.String()
if out != "helloworld" {
@ -71,7 +78,8 @@ func TestFixedBufferString(t *testing.T) {
// read 5 bytes
b := make([]byte, 5)
buf.Read(b)
_, err = buf.Read(b)
require.NoError(t, err)
// test that fixedBuffer.String() only returns the part that hasn't been read
out = buf.String()

View File

@ -6,15 +6,22 @@ import (
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestBytesPipeRead(t *testing.T) {
buf := NewBytesPipe()
buf.Write([]byte("12"))
buf.Write([]byte("34"))
buf.Write([]byte("56"))
buf.Write([]byte("78"))
buf.Write([]byte("90"))
_, err := buf.Write([]byte("12"))
require.NoError(t, err)
_, err = buf.Write([]byte("34"))
require.NoError(t, err)
_, err = buf.Write([]byte("56"))
require.NoError(t, err)
_, err = buf.Write([]byte("78"))
require.NoError(t, err)
_, err = buf.Write([]byte("90"))
require.NoError(t, err)
rd := make([]byte, 4)
n, err := buf.Read(rd)
if err != nil {
@ -50,11 +57,16 @@ func TestBytesPipeRead(t *testing.T) {
func TestBytesPipeWrite(t *testing.T) {
buf := NewBytesPipe()
buf.Write([]byte("12"))
buf.Write([]byte("34"))
buf.Write([]byte("56"))
buf.Write([]byte("78"))
buf.Write([]byte("90"))
_, err := buf.Write([]byte("12"))
require.NoError(t, err)
_, err = buf.Write([]byte("34"))
require.NoError(t, err)
_, err = buf.Write([]byte("56"))
require.NoError(t, err)
_, err = buf.Write([]byte("78"))
require.NoError(t, err)
_, err = buf.Write([]byte("90"))
require.NoError(t, err)
if buf.buf[0].String() != "1234567890" {
t.Fatalf("Buffer %q, must be %q", buf.buf[0].String(), "1234567890")
}
@ -108,7 +120,8 @@ func TestBytesPipeWriteRandomChunks(t *testing.T) {
for i := 0; i < c.iterations; i++ {
for w := 0; w < c.writesPerLoop; w++ {
buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]])
_, err := buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]])
require.NoError(t, err)
}
}
buf.Close()
@ -135,7 +148,8 @@ func BenchmarkBytesPipeWrite(b *testing.B) {
}
}()
for j := 0; j < 1000; j++ {
buf.Write(testData)
_, err := buf.Write(testData)
require.NoError(b, err)
}
buf.Close()
}
@ -147,7 +161,8 @@ func BenchmarkBytesPipeRead(b *testing.B) {
b.StopTimer()
buf := NewBytesPipe()
for j := 0; j < 500; j++ {
buf.Write(make([]byte, 1024))
_, err := buf.Write(make([]byte, 1024))
require.NoError(b, err)
}
b.StartTimer()
for j := 0; j < 1000; j++ {

View File

@ -15,7 +15,7 @@ type AtomicFileWriterOptions struct {
NoSync bool
}
var defaultWriterOptions AtomicFileWriterOptions = AtomicFileWriterOptions{}
var defaultWriterOptions = AtomicFileWriterOptions{}
// SetDefaultOptions overrides the default options used when creating an
// atomic file writer.

View File

@ -4,6 +4,8 @@ import (
"bytes"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
func TestWriteCloserWrapperClose(t *testing.T) {
@ -52,8 +54,10 @@ func TestWriteCounter(t *testing.T) {
var buffer bytes.Buffer
wc := NewWriteCounter(&buffer)
reader1.WriteTo(wc)
reader2.WriteTo(wc)
_, err := reader1.WriteTo(wc)
require.NoError(t, err)
_, err = reader2.WriteTo(wc)
require.NoError(t, err)
if wc.Count != totalLength {
t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength)

View File

@ -4,6 +4,8 @@ import (
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
)
func TestLockCounter(t *testing.T) {
@ -76,7 +78,8 @@ func TestLockerUnlock(t *testing.T) {
l := New()
l.Lock("test")
l.Unlock("test")
err := l.Unlock("test")
require.NoError(t, err)
chDone := make(chan struct{})
go func() {
@ -100,7 +103,8 @@ func TestLockerConcurrency(t *testing.T) {
go func() {
l.Lock("test")
// if there is a concurrency issue, will very likely panic here
l.Unlock("test")
err := l.Unlock("test")
require.NoError(t, err)
wg.Done()
}()
}

View File

@ -3,6 +3,7 @@ package lockfile
import (
"io"
"os"
"os/exec"
"sync"
"sync/atomic"
"testing"
@ -36,8 +37,14 @@ func subTouchMain() {
}
tf.Lock()
os.Stdout.Close()
io.Copy(io.Discard, os.Stdin)
tf.Touch()
_, err = io.Copy(io.Discard, os.Stdin)
if err != nil {
logrus.Fatalf("error reading stdin: %v", err)
}
err = tf.Touch()
if err != nil {
logrus.Fatalf("error touching lock: %v", err)
}
tf.Unlock()
}
@ -46,26 +53,25 @@ func subTouchMain() {
// At that point, the child will have acquired the lock. It can then signal
// that the child should Touch() the lock by closing the WriteCloser. The
// second ReadCloser will be closed when the child has finished.
func subTouch(l *namedLocker) (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) {
// The caller must call Wait() on the returned cmd.
func subTouch(l *namedLocker) (*exec.Cmd, io.WriteCloser, io.ReadCloser, io.ReadCloser, error) {
cmd := reexec.Command("subTouch", l.name)
wc, err := cmd.StdinPipe()
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
rc, err := cmd.StdoutPipe()
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
ec, err := cmd.StderrPipe()
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, err
}
go func() {
if err = cmd.Run(); err != nil {
logrus.Errorf("Running subTouch: %v", err)
}
}()
return wc, rc, ec, nil
if err := cmd.Start(); err != nil {
return nil, nil, nil, nil, err
}
return cmd, wc, rc, ec, nil
}
// subLockMain is a child process which opens the lock file, closes stdout to
@ -81,7 +87,10 @@ func subLockMain() {
}
tf.Lock()
os.Stdout.Close()
io.Copy(io.Discard, os.Stdin)
_, err = io.Copy(io.Discard, os.Stdin)
if err != nil {
logrus.Fatalf("error reading stdin: %v", err)
}
tf.Unlock()
}
@ -89,22 +98,21 @@ func subLockMain() {
// should wait for the first ReadCloser by reading it until it receives an EOF.
// At that point, the child will have acquired the lock. It can then signal
// that the child should release the lock by closing the WriteCloser.
func subLock(l *namedLocker) (io.WriteCloser, io.ReadCloser, error) {
// The caller must call Wait() on the returned cmd.
func subLock(l *namedLocker) (*exec.Cmd, io.WriteCloser, io.ReadCloser, error) {
cmd := reexec.Command("subLock", l.name)
wc, err := cmd.StdinPipe()
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
rc, err := cmd.StdoutPipe()
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
go func() {
if err = cmd.Run(); err != nil {
logrus.Errorf("Running subLock: %v", err)
}
}()
return wc, rc, nil
if err := cmd.Start(); err != nil {
return nil, nil, nil, err
}
return cmd, wc, rc, nil
}
// subRLockMain is a child process which opens the lock file, closes stdout to
@ -120,7 +128,10 @@ func subRLockMain() {
}
tf.RLock()
os.Stdout.Close()
io.Copy(io.Discard, os.Stdin)
_, err = io.Copy(io.Discard, os.Stdin)
if err != nil {
logrus.Fatalf("error reading stdin: %v", err)
}
tf.Unlock()
}
@ -128,22 +139,21 @@ func subRLockMain() {
// should wait for the first ReadCloser by reading it until it receives an EOF.
// At that point, the child will have acquired a read lock. It can then signal
// that the child should release the lock by closing the WriteCloser.
func subRLock(l *namedLocker) (io.WriteCloser, io.ReadCloser, error) {
// The caller must call Wait() on the returned cmd.
func subRLock(l *namedLocker) (*exec.Cmd, io.WriteCloser, io.ReadCloser, error) {
cmd := reexec.Command("subRLock", l.name)
wc, err := cmd.StdinPipe()
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
rc, err := cmd.StdoutPipe()
if err != nil {
return nil, nil, err
return nil, nil, nil, err
}
go func() {
if err = cmd.Run(); err != nil {
logrus.Errorf("Running subRLock: %v", err)
}
}()
return wc, rc, nil
if err := cmd.Start(); err != nil {
return nil, nil, nil, err
}
return cmd, wc, rc, nil
}
func init() {
@ -271,12 +281,16 @@ func TestLockfileTouch(t *testing.T) {
require.Nil(t, err, "got an error from Modified()")
assert.False(t, m, "lock file mistakenly indicated that someone else has modified it")
stdin, stdout, stderr, err := subTouch(l)
cmd, stdin, stdout, stderr, err := subTouch(l)
require.Nil(t, err, "got an error starting a subprocess to touch the lockfile")
l.Unlock()
io.Copy(io.Discard, stdout)
_, err = io.Copy(io.Discard, stdout)
require.NoError(t, err)
stdin.Close()
io.Copy(io.Discard, stderr)
_, err = io.Copy(io.Discard, stderr)
require.NoError(t, err)
err = cmd.Wait()
require.NoError(t, err)
l.Lock()
m, err = l.Modified()
l.Unlock()
@ -413,19 +427,22 @@ func TestLockfileMultiprocessRead(t *testing.T) {
var rcounter, rhighest int64
var highestMutex sync.Mutex
subs := make([]struct {
cmd *exec.Cmd
stdin io.WriteCloser
stdout io.ReadCloser
}, 100)
for i := range subs {
stdin, stdout, err := subRLock(l)
cmd, stdin, stdout, err := subRLock(l)
require.Nil(t, err, "error starting subprocess %d to take a read lock", i+1)
subs[i].cmd = cmd
subs[i].stdin = stdin
subs[i].stdout = stdout
}
for i := range subs {
wg.Add(1)
go func(i int) {
io.Copy(io.Discard, subs[i].stdout)
_, err := io.Copy(io.Discard, subs[i].stdout)
require.NoError(t, err)
if testing.Verbose() {
t.Logf("\tchild %4d acquired the read lock\n", i+1)
}
@ -441,6 +458,8 @@ func TestLockfileMultiprocessRead(t *testing.T) {
t.Logf("\ttelling child %4d to release the read lock\n", i+1)
}
subs[i].stdin.Close()
err = subs[i].cmd.Wait()
require.NoError(t, err)
wg.Done()
}(i)
}
@ -456,19 +475,22 @@ func TestLockfileMultiprocessWrite(t *testing.T) {
var wcounter, whighest int64
var highestMutex sync.Mutex
subs := make([]struct {
cmd *exec.Cmd
stdin io.WriteCloser
stdout io.ReadCloser
}, 10)
for i := range subs {
stdin, stdout, err := subLock(l)
cmd, stdin, stdout, err := subLock(l)
require.Nil(t, err, "error starting subprocess %d to take a write lock", i+1)
subs[i].cmd = cmd
subs[i].stdin = stdin
subs[i].stdout = stdout
}
for i := range subs {
wg.Add(1)
go func(i int) {
io.Copy(io.Discard, subs[i].stdout)
_, err := io.Copy(io.Discard, subs[i].stdout)
require.NoError(t, err)
if testing.Verbose() {
t.Logf("\tchild %4d acquired the write lock\n", i+1)
}
@ -484,6 +506,8 @@ func TestLockfileMultiprocessWrite(t *testing.T) {
t.Logf("\ttelling child %4d to release the write lock\n", i+1)
}
subs[i].stdin.Close()
err = subs[i].cmd.Wait()
require.NoError(t, err)
wg.Done()
}(i)
}
@ -507,19 +531,22 @@ func TestLockfileMultiprocessMixed(t *testing.T) {
writer := func(i int) bool { return (i % biasQ) < biasP }
subs := make([]struct {
cmd *exec.Cmd
stdin io.WriteCloser
stdout io.ReadCloser
}, biasQ*groups)
for i := range subs {
var cmd *exec.Cmd
var stdin io.WriteCloser
var stdout io.ReadCloser
if writer(i) {
stdin, stdout, err = subLock(l)
cmd, stdin, stdout, err = subLock(l)
require.Nil(t, err, "error starting subprocess %d to take a write lock", i+1)
} else {
stdin, stdout, err = subRLock(l)
cmd, stdin, stdout, err = subRLock(l)
require.Nil(t, err, "error starting subprocess %d to take a read lock", i+1)
}
subs[i].cmd = cmd
subs[i].stdin = stdin
subs[i].stdout = stdout
}
@ -527,7 +554,8 @@ func TestLockfileMultiprocessMixed(t *testing.T) {
wg.Add(1)
go func(i int) {
// wait for the child to acquire whatever lock it wants
io.Copy(io.Discard, subs[i].stdout)
_, err := io.Copy(io.Discard, subs[i].stdout)
require.NoError(t, err)
if writer(i) {
// child acquired a write lock
if testing.Verbose() {
@ -568,6 +596,8 @@ func TestLockfileMultiprocessMixed(t *testing.T) {
}
}
subs[i].stdin.Close()
err = subs[i].cmd.Wait()
require.NoError(t, err)
wg.Done()
}(i)
}

View File

@ -78,7 +78,7 @@ func openLock(path string, ro bool) (fd int, err error) {
}
fd, err = unix.Open(path, flags, 0o644)
if err == nil {
return
return fd, nil
}
// the directory of the lockfile seems to be removed, try to create it
@ -133,7 +133,7 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
func (l *lockfile) lock(lType int16) {
lk := unix.Flock_t{
Type: lType,
Whence: int16(os.SEEK_SET),
Whence: int16(unix.SEEK_SET),
Start: 0,
Len: 0,
}
@ -184,7 +184,7 @@ func (l *lockfile) RLock() {
// Unlock unlocks the lockfile.
func (l *lockfile) Unlock() {
l.stateMutex.Lock()
if l.locked == false {
if !l.locked {
// Panic when unlocking an unlocked lock. That's a violation
// of the lock semantics and will reveal such.
panic("calling Unlock on unlocked lock")

View File

@ -233,7 +233,7 @@ func (s *stringValue) Set(val string) error {
func (s *stringValue) Get() interface{} { return string(*s) }
func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }
func (s *stringValue) String() string { return string(*s) }
// -- float64 Value
type float64Value float64

View File

@ -12,6 +12,9 @@ import (
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// ResetForTesting clears all flag state and sets the usage function as directed.
@ -76,14 +79,19 @@ func TestEverything(t *testing.T) {
}
}
// Now set all flags
Set("test_bool", "true")
Set("test_int", "1")
Set("test_int64", "1")
Set("test_uint", "1")
Set("test_uint64", "1")
Set("test_string", "1")
Set("test_float64", "1")
Set("test_duration", "1s")
for _, e := range []struct{ name, value string }{
{"test_bool", "true"},
{"test_int", "1"},
{"test_int64", "1"},
{"test_uint", "1"},
{"test_uint64", "1"},
{"test_string", "1"},
{"test_float64", "1"},
{"test_duration", "1s"},
} {
err := Set(e.name, e.value)
require.NoError(t, err)
}
desired = "1"
Visit(visitor)
if len(m) != 8 {
@ -95,9 +103,7 @@ func TestEverything(t *testing.T) {
// Now test they're visited in sort order.
var flagNames []string
Visit(func(f *Flag) {
for _, name := range f.Names {
flagNames = append(flagNames, name)
}
flagNames = append(flagNames, f.Names...)
})
if !sort.StringsAreSorted(flagNames) {
t.Errorf("flag names not sorted: %v", flagNames)
@ -276,7 +282,7 @@ func testPanic(t *testing.T, f *FlagSet) {
args := []string{
"-int", "21",
}
f.Parse(args)
_ = f.Parse(args)
}
func TestParsePanic(t *testing.T) {
@ -368,7 +374,8 @@ func TestSetOutput(t *testing.T) {
var buf bytes.Buffer
flags.SetOutput(&buf)
flags.Init("test", ContinueOnError)
flags.Parse([]string{"-unknown"})
err := flags.Parse([]string{"-unknown"})
assert.ErrorContains(t, err, "unknown")
if out := buf.String(); !strings.Contains(out, "-unknown") {
t.Logf("expected output mentioning unknown; got %q", out)
}
@ -520,7 +527,8 @@ func TestMergeFlags(t *testing.T) {
base.String([]string{"f"}, "", "")
fs := NewFlagSet("test", ContinueOnError)
Merge(fs, base)
err := Merge(fs, base)
require.NoError(t, err)
if len(fs.formal) != 1 {
t.Fatalf("FlagCount (%d) != number (1) of elements merged", len(fs.formal))
}

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package kernel
@ -41,7 +42,7 @@ func TestParseRelease(t *testing.T) {
for _, invalid := range invalids {
expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid)
if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage {
t.Fatalf("Parsing %q, got %#v", invalid, err)
}
}
}

View File

@ -1,4 +1,5 @@
// +build !linux,!solaris
//go:build freebsd || openbsd
// +build freebsd openbsd
package kernel
@ -6,13 +7,7 @@ import (
"errors"
)
// Utsname represents the system name structure.
// It is defined here to make it portable as it is available on linux but not
// on windows.
type Utsname struct {
Release [65]byte
}
// A stub called by kernel_unix.go .
func uname() (*Utsname, error) {
return nil, errors.New("Kernel version detection is available only on linux")
}

View File

@ -0,0 +1,11 @@
//go:build !linux && !solaris
// +build !linux,!solaris
package kernel
// Utsname represents the system name structure.
// It is defined here to make it portable as it is available on linux but not
// on windows.
type Utsname struct {
Release [65]byte
}

View File

@ -1,3 +1,4 @@
//go:build freebsd || darwin
// +build freebsd darwin
package operatingsystem
@ -21,5 +22,5 @@ func GetOperatingSystem() (string, error) {
// No-op on FreeBSD and Darwin, always returns false.
func IsContainerized() (bool, error) {
// TODO: Implement jail detection for freeBSD
return false, errors.New("Cannot detect if we are in container")
return false, errors.New("cannot detect if we are in container")
}

View File

@ -105,8 +105,9 @@ func TestBufioWriterPoolPutAndGet(t *testing.T) {
// Make sure we Flush all the way ?
writer.Flush()
bw.Flush()
if len(buf.Bytes()) != 6 {
t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes()))
bufContents := buf.Bytes()
if len(bufContents) != 6 {
t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", bufContents, string(bufContents))
}
// Reset the buffer
buf.Reset()

View File

@ -1,3 +1,4 @@
//go:build !windows
// +build !windows
package system
@ -14,7 +15,7 @@ import (
func Lstat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Lstat(path, s); err != nil {
return nil, &os.PathError{"Lstat", path, err}
return nil, &os.PathError{Op: "Lstat", Path: path, Err: err}
}
return fromStatT(s)
}

View File

@ -14,7 +14,7 @@ import (
var (
// ErrEmptyPrefix is an error returned if the prefix was empty.
ErrEmptyPrefix = errors.New("Prefix can't be empty")
ErrEmptyPrefix = errors.New("prefix can't be empty")
// ErrIllegalChar is returned when a space is in the ID
ErrIllegalChar = errors.New("illegal character: ' '")

View File

@ -6,6 +6,7 @@ import (
"time"
"github.com/containers/storage/pkg/stringid"
"github.com/stretchr/testify/require"
)
// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix.
@ -134,7 +135,8 @@ func assertIndexIterateDoNotPanic(t *testing.T) {
go func() {
<-iterationStarted
index.Delete("19b36c2c326ccc11e726eee6ee78a0baf166ef96")
err := index.Delete("19b36c2c326ccc11e726eee6ee78a0baf166ef96")
require.NoError(t, err)
}()
index.Iterate(func(targetId string) {

View File

@ -5,18 +5,12 @@ import (
"os"
"os/user"
"sync"
"github.com/sirupsen/logrus"
)
var (
homeDirOnce sync.Once
homeDirErr error
homeDir string
hasCapSysAdminOnce sync.Once
hasCapSysAdminRet bool
hasCapSysAdminErr error
)
// HomeDir returns the home directory for the current user.
@ -36,14 +30,3 @@ func HomeDir() (string, error) {
})
return homeDir, homeDirErr
}
func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname
if err != nil {
if format != "" {
logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
} else {
logrus.Errorf("%v", err)
}
os.Exit(1)
}
}

View File

@ -400,6 +400,12 @@ func hasFullUsersMappings() (bool, error) {
return bytes.Contains(content, []byte("4294967295")), nil
}
var (
hasCapSysAdminOnce sync.Once
hasCapSysAdminRet bool
hasCapSysAdminErr error
)
// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
isRootlessOnce.Do(func() {
@ -445,6 +451,17 @@ type Runnable interface {
Run() error
}
func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname
if err != nil {
if format != "" {
logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
} else {
logrus.Errorf("%v", err)
}
os.Exit(1)
}
}
// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
func MaybeReexecUsingUserNamespace(evenForRoot bool) {
// If we've already been through this once, no need to try again.

View File

@ -3079,6 +3079,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
return true, err
}
for _, image := range imageList {
image := image
if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) {
images = append(images, &image)
}

View File

@ -9,6 +9,7 @@ import (
"testing"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
"gotest.tools/assert"
)
@ -115,7 +116,8 @@ func TestReloadConfigurationFile(t *testing.T) {
content := bytes.NewBufferString("")
logrus.SetOutput(content)
var storageOpts StoreOptions
ReloadConfigurationFile("./storage_broken.conf", &storageOpts)
err := ReloadConfigurationFile("./storage_broken.conf", &storageOpts)
require.NoError(t, err)
assert.Equal(t, storageOpts.RunRoot, "/run/containers/test")
logrus.SetOutput(os.Stderr)