Misc. warning cleanups

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Miloslav Trmač 2022-10-01 00:24:27 +02:00
parent 02e6083c83
commit f42467020f
22 changed files with 48 additions and 35 deletions

View File

@@ -25,7 +25,7 @@ func DirCopy(srcDir, dstDir string, _ Mode, _ bool) error {
}
// CopyRegularToFile copies the content of a file to another
-func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint: revive // "func name will be used as copy.CopyRegularToFile by other packages, and that stutters"
f, err := os.Open(srcPath)
if err != nil {
return err
@@ -36,6 +36,6 @@ func CopyRegularToFile(srcPath string, dstFile *os.File, fileinfo os.FileInfo, c
}
// CopyRegular copies the content of a file to another
-func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+func CopyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { //nolint:revive // "func name will be used as copy.CopyRegular by other packages, and that stutters"
return chrootarchive.NewArchiver(nil).CopyWithTar(srcPath, dstPath)
}

View File

@@ -39,7 +39,7 @@ var (
ErrLayerUnknown = errors.New("unknown layer")
)
-//CreateOpts contains optional arguments for Create() and CreateReadWrite()
+// CreateOpts contains optional arguments for Create() and CreateReadWrite()
// methods.
type CreateOpts struct {
MountLabel string
@@ -53,8 +53,8 @@ type MountOpts struct {
// Mount label is the MAC Labels to assign to mount point (SELINUX)
MountLabel string
// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
-UidMaps []idtools.IDMap // nolint: golint
-GidMaps []idtools.IDMap // nolint: golint
+UidMaps []idtools.IDMap //nolint: golint,revive
+GidMaps []idtools.IDMap //nolint: golint
Options []string
// Volatile specifies whether the container storage can be optimized

View File

@@ -98,7 +98,7 @@ func (d *Driver) Status() [][2]string {
// Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data.
func (d *Driver) Metadata(id string) (map[string]string, error) {
-return nil, nil
+return nil, nil //nolint: nilnil
}
// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
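The nilnil linter flags returning a nil value together with a nil error, because callers then have to guard against both. Here the VFS driver genuinely has no metadata, so the return is intentional and the warning is suppressed. A hedged sketch of the two usual options, with made-up names:

    package metadata

    // Metadata returns an empty, non-nil map; this satisfies nilnil without
    // changing the signature, at the cost of a small allocation.
    func Metadata(id string) (map[string]string, error) {
        return map[string]string{}, nil
    }

    // metadataOrNil keeps the original behaviour and documents the intent
    // with a suppression instead.
    func metadataOrNil(id string) (map[string]string, error) {
        return nil, nil //nolint:nilnil // this driver has no metadata to report
    }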

View File

@@ -687,6 +687,7 @@ func TestIntervalIsZero(t *testing.T) {
// assertIntervalSame aserts `got` equals to `want` considering zero check. If the wanted interval
// is empty, we only want to assert IsZero() == true, instead of the exact number.
func assertIntervalSame(t *testing.T, got intervalset.Interval, want *interval, name string) {
+t.Helper()
if want == nil && !got.IsZero() {
t.Errorf("%v = %v, want nil", name, got)
} else if want != nil && !reflect.DeepEqual(got, *want) {
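Several test helpers in this commit gain a t.Helper() call. Marking a function as a helper makes the testing package skip its frame when reporting file and line numbers, so a failure points at the test that called the helper rather than at the helper itself. A small self-contained illustration (not from this repository):

    package example

    import "testing"

    // assertEqual fails the test; thanks to t.Helper(), the reported
    // file:line is the caller's, not this function's.
    func assertEqual(t *testing.T, got, want string) {
        t.Helper()
        if got != want {
            t.Errorf("got %q, want %q", got, want)
        }
    }

    func TestGreeting(t *testing.T) {
        assertEqual(t, "hello", "hello")
    }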

View File

@@ -9,6 +9,7 @@ import (
)
func newTestImageStore(t *testing.T) ImageStore {
+t.Helper()
store, err := newImageStore(t.TempDir())
require.Nil(t, err)
return store

View File

@@ -232,20 +232,20 @@ func (args Args) Contains(field string) bool {
return ok
}
-type invalidFilter string
+type invalidFilterError string
-func (e invalidFilter) Error() string {
+func (e invalidFilterError) Error() string {
return "Invalid filter '" + string(e) + "'"
}
-func (invalidFilter) InvalidParameter() {}
+func (invalidFilterError) InvalidParameter() {}
// Validate compared the set of accepted keys against the keys in the mapping.
// An error is returned if any mapping keys are not in the accepted set.
func (args Args) Validate(accepted map[string]bool) error {
for name := range args.fields {
if !accepted[name] {
-return invalidFilter(name)
+return invalidFilterError(name)
}
}
return nil
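The rename from invalidFilter to invalidFilterError follows the convention checked by the errname linter: error types are named xxxError, while sentinel error values are named ErrXxx. The unexported type can simply be renamed; exported types such as ErrBadRequest and ErrAmbiguousPrefix elsewhere in this commit keep their names for API compatibility and get a suppression instead. A short sketch of the naming convention with made-up identifiers:

    package filters

    import "errors"

    // notFoundError is an error type, so its name ends in "Error".
    type notFoundError string

    func (e notFoundError) Error() string { return "not found: " + string(e) }

    // ErrClosed is a sentinel error value, so its name starts with "Err".
    var ErrClosed = errors.New("store is closed")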

View File

@@ -874,7 +874,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if err != nil || (!options.IncludeSourceDir && relFilePath == "." && d.IsDir()) {
// Error getting relative path OR we are looking
// at the source directory path. Skip in both situations.
-return nil
+return nil //nolint: nilerr
}
if options.IncludeSourceDir && include == "." && relFilePath != "." {
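The nolint: nilerr annotations mark places where a non-nil error is deliberately dropped and nil is returned, for example to skip a single entry during a filesystem walk instead of aborting the whole operation. A minimal sketch of that pattern using filepath.WalkDir (the function and its behaviour are illustrative, not this repository's code):

    package walkexample

    import (
        "io/fs"
        "path/filepath"
    )

    // listAccessible collects the paths it can visit and silently skips
    // entries that produce an error, rather than failing the whole walk.
    func listAccessible(root string) ([]string, error) {
        var out []string
        err := filepath.WalkDir(root, func(path string, d fs.DirEntry, walkErr error) error {
            if walkErr != nil {
                // Intentionally ignore the error and keep walking.
                return nil //nolint:nilerr // skipping unreadable entries is the desired behaviour
            }
            out = append(out, path)
            return nil
        })
        return out, err
    }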

View File

@@ -56,7 +56,7 @@ func (change *Change) String() string {
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
-// for sort.Sort
+// changesByPath implements sort.Interface.
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }

View File

@@ -80,7 +80,7 @@ func testBreakout(t *testing.T, untarFn string, headers []*tar.Header) error {
}
// Here, untar detected the breakout.
// Let's move on verifying that indeed there was no breakout.
fmt.Printf("breakoutError: %v\n", err)
t.Logf("breakoutError: %v\n", err)
}
// Check victim folder
@@ -148,7 +148,7 @@ func testBreakout(t *testing.T, untarFn string, headers []*tar.Header) error {
}
if err != nil {
// skip file if error
-return nil
+return nil //nolint: nilerr
}
b, err := os.ReadFile(path)
if err != nil {
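Replacing fmt.Printf with t.Logf in tests keeps diagnostic output attached to the test that produced it: it is printed with file and line information, shown only when the test fails or when go test -v is used, and attributed to the right test when tests run concurrently. A small illustration (not from this repository):

    package example

    import "testing"

    func TestDiagnostics(t *testing.T) {
        // Unlike fmt.Printf, this output is buffered per test and only
        // emitted on failure or in verbose mode.
        t.Logf("temporary directory: %s", t.TempDir())
    }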

View File

@@ -142,6 +142,7 @@ func TestTarWithMaliciousSymlinks(t *testing.T) {
}
func isDataInTar(t *testing.T, tr *gotar.Reader, compare []byte, maxBytes int64) bool {
+t.Helper()
for {
h, err := tr.Next()
if err == io.EOF {

View File

@@ -17,7 +17,7 @@ type ImageSourceSeekable interface {
}
// ErrBadRequest is returned when the request is not valid
-type ErrBadRequest struct {
+type ErrBadRequest struct { //nolint: errname
}
func (e ErrBadRequest) Error() string {

View File

@@ -186,6 +186,7 @@ func TestUsageNonExistingDirectory(t *testing.T) {
// A helper function that tests expectation of inode count and dir size against
// the found usage.
func expectSizeAndInodeCount(t *testing.T, testName string, current, expected *DiskUsage) {
+t.Helper()
if current.Size != expected.Size {
t.Errorf("%s has size: %d, expected %d", testName, current.Size, expected.Size)
}

View File

@@ -63,7 +63,7 @@ func StickRuntimeDirContents(files []string) ([]string, error) {
runtimeDir, err := GetRuntimeDir()
if err != nil {
// ignore error if runtimeDir is empty
-return nil, nil
+return nil, nil //nolint: nilerr
}
runtimeDir, err = filepath.Abs(runtimeDir)
if err != nil {

View File

@@ -27,6 +27,13 @@ func SetDefaultOptions(opts AtomicFileWriterOptions) {
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (io.WriteCloser, error) {
+return newAtomicFileWriter(filename, perm, opts)
+}
+// newAtomicFileWriter returns WriteCloser so that writing to it writes to a
+// temporary file and closing it atomically changes the temporary file to
+// destination path. Writing and closing concurrently is not allowed.
+func newAtomicFileWriter(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (*atomicFileWriter, error) {
f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return nil, err
@@ -55,14 +62,14 @@ func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, err
// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
-f, err := NewAtomicFileWriter(filename, perm)
+f, err := newAtomicFileWriter(filename, perm, nil)
if err != nil {
return err
}
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
-f.(*atomicFileWriter).writeErr = err
+f.writeErr = err
}
if err1 := f.Close(); err == nil {
err = err1
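The change above keeps NewAtomicFileWriterWithOpts returning io.WriteCloser for callers outside the package, while the new unexported newAtomicFileWriter returns the concrete *atomicFileWriter, so AtomicWriteFile can set writeErr directly instead of going through a type assertion. A reduced sketch of that constructor-pair shape (types and names simplified, not the actual implementation):

    package ioutils

    import "io"

    // countingWriter stands in for the package's concrete writer type.
    type countingWriter struct {
        n int64
    }

    func (w *countingWriter) Write(p []byte) (int, error) {
        w.n += int64(len(p))
        return len(p), nil
    }

    func (w *countingWriter) Close() error { return nil }

    // NewCountingWriter is the exported constructor; it returns an interface
    // so the concrete type stays an implementation detail.
    func NewCountingWriter() io.WriteCloser { return newCountingWriter() }

    // newCountingWriter returns the concrete type, letting callers inside the
    // package reach its fields without a type assertion.
    func newCountingWriter() *countingWriter { return &countingWriter{} }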

View File

@@ -1,7 +1,6 @@
package lockfile
import (
"fmt"
"io"
"os"
"sync"
@@ -575,7 +574,7 @@ func TestLockfileMultiprocessRead(t *testing.T) {
go func(i int) {
io.Copy(io.Discard, subs[i].stdout)
if testing.Verbose() {
fmt.Printf("\tchild %4d acquired the read lock\n", i+1)
t.Logf("\tchild %4d acquired the read lock\n", i+1)
}
workingRcounter := atomic.AddInt64(&rcounter, 1)
highestMutex.Lock()
@@ -586,7 +585,7 @@ func TestLockfileMultiprocessRead(t *testing.T) {
time.Sleep(1 * time.Second)
atomic.AddInt64(&rcounter, -1)
if testing.Verbose() {
fmt.Printf("\ttelling child %4d to release the read lock\n", i+1)
t.Logf("\ttelling child %4d to release the read lock\n", i+1)
}
subs[i].stdin.Close()
wg.Done()
@@ -618,7 +617,7 @@ func TestLockfileMultiprocessWrite(t *testing.T) {
go func(i int) {
io.Copy(io.Discard, subs[i].stdout)
if testing.Verbose() {
fmt.Printf("\tchild %4d acquired the write lock\n", i+1)
t.Logf("\tchild %4d acquired the write lock\n", i+1)
}
workingWcounter := atomic.AddInt64(&wcounter, 1)
highestMutex.Lock()
@@ -629,7 +628,7 @@ func TestLockfileMultiprocessWrite(t *testing.T) {
time.Sleep(1 * time.Second)
atomic.AddInt64(&wcounter, -1)
if testing.Verbose() {
fmt.Printf("\ttelling child %4d to release the write lock\n", i+1)
t.Logf("\ttelling child %4d to release the write lock\n", i+1)
}
subs[i].stdin.Close()
wg.Done()
@@ -661,7 +660,7 @@ func TestLockfileMultiprocessRecursiveWrite(t *testing.T) {
go func(i int) {
io.Copy(io.Discard, subs[i].stdout)
if testing.Verbose() {
fmt.Printf("\tchild %4d acquired the recursive write lock\n", i+1)
t.Logf("\tchild %4d acquired the recursive write lock\n", i+1)
}
workingWcounter := atomic.AddInt64(&wcounter, 1)
highestMutex.Lock()
@@ -672,7 +671,7 @@ func TestLockfileMultiprocessRecursiveWrite(t *testing.T) {
time.Sleep(1 * time.Second)
atomic.AddInt64(&wcounter, -1)
if testing.Verbose() {
fmt.Printf("\ttelling child %4d to release the recursive write lock\n", i+1)
t.Logf("\ttelling child %4d to release the recursive write lock\n", i+1)
}
subs[i].stdin.Close()
wg.Done()
@@ -722,7 +721,7 @@ func TestLockfileMultiprocessMixed(t *testing.T) {
if writer(i) {
// child acquired a write lock
if testing.Verbose() {
fmt.Printf("\tchild %4d acquired the write lock\n", i+1)
t.Logf("\tchild %4d acquired the write lock\n", i+1)
}
workingWcounter := atomic.AddInt64(&wcounter, 1)
whighestMutex.Lock()
@@ -735,7 +734,7 @@ func TestLockfileMultiprocessMixed(t *testing.T) {
} else {
// child acquired a read lock
if testing.Verbose() {
fmt.Printf("\tchild %4d acquired the read lock\n", i+1)
t.Logf("\tchild %4d acquired the read lock\n", i+1)
}
workingRcounter := atomic.AddInt64(&rcounter, 1)
rhighestMutex.Lock()
@@ -750,12 +749,12 @@ func TestLockfileMultiprocessMixed(t *testing.T) {
if writer(i) {
atomic.AddInt64(&wcounter, -1)
if testing.Verbose() {
fmt.Printf("\ttelling child %4d to release the write lock\n", i+1)
t.Logf("\ttelling child %4d to release the write lock\n", i+1)
}
} else {
atomic.AddInt64(&rcounter, -1)
if testing.Verbose() {
fmt.Printf("\ttelling child %4d to release the read lock\n", i+1)
t.Logf("\ttelling child %4d to release the read lock\n", i+1)
}
}
subs[i].stdin.Close()

View File

@@ -43,7 +43,7 @@ func getRelease() (string, error) {
prettyNames, err := shellwords.Parse(content[1])
if err != nil {
return "", fmt.Errorf("kernel version is invalid: %s", err.Error())
return "", fmt.Errorf("kernel version is invalid: %w", err)
}
if len(prettyNames) != 2 {
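Switching the fmt.Errorf verb from %s to %w wraps the underlying error instead of flattening it into text, so callers can still examine it with errors.Is and errors.As after the extra context is added. A short illustration (the file name and messages are made up):

    package example

    import (
        "errors"
        "fmt"
        "os"
    )

    // readRelease adds context while preserving the original error chain.
    func readRelease(path string) ([]byte, error) {
        b, err := os.ReadFile(path)
        if err != nil {
            return nil, fmt.Errorf("reading release file %s: %w", path, err)
        }
        return b, nil
    }

    // isMissing works only because readRelease wrapped the error with %w.
    func isMissing(err error) bool {
        return errors.Is(err, os.ErrNotExist)
    }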

View File

@@ -8,6 +8,7 @@ import (
)
func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) {
+t.Helper()
var (
a *VersionInfo
)
@@ -46,6 +47,7 @@ func TestParseRelease(t *testing.T) {
}
func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) {
+t.Helper()
if r := CompareKernelVersion(a, b); r != result {
t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
}

View File

@@ -9,6 +9,7 @@ import (
// prepareTempFile creates a temporary file in a temporary directory.
func prepareTempFile(t *testing.T) string {
+t.Helper()
file := filepath.Join(t.TempDir(), "exist")
if err := os.WriteFile(file, []byte("hello"), 0644); err != nil {
t.Fatal(err)

View File

@@ -6,7 +6,7 @@ import (
"unsafe"
)
-// Used by chtimes
+// maxTime is used by chtimes.
var maxTime time.Time
func init() {

View File

@@ -25,7 +25,7 @@ var (
// ErrAmbiguousPrefix is returned if the prefix was ambiguous
// (multiple ids for the prefix).
-type ErrAmbiguousPrefix struct {
+type ErrAmbiguousPrefix struct { //nolint: errname
prefix string
}

View File

@@ -336,7 +336,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
}
} else {
if !os.IsNotExist(err) {
fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
logrus.Warningf("Failed to read %s %v\n", configFile, err.Error())
return err
}
}
@@ -399,7 +399,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) erro
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" {
mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup)
if err != nil {
fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
logrus.Warningf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
return err
}
storeOptions.UIDMap = mappings.UIDs()

View File

@@ -193,7 +193,7 @@ func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
fi, err := os.Stat(configFile)
if err != nil {
if !os.IsNotExist(err) {
fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
logrus.Warningf("Failed to read %s %v\n", configFile, err.Error())
}
return
}
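Routing these messages through logrus.Warningf instead of fmt.Printf lets applications that embed the library control the output: the messages honour the configured log level, formatter, and destination rather than being printed unconditionally to stdout. A minimal sketch of the same pattern (the file handling is illustrative):

    package example

    import (
        "os"

        "github.com/sirupsen/logrus"
    )

    // loadOptionalConfig reads an optional file and logs, rather than prints,
    // when it exists but cannot be read.
    func loadOptionalConfig(path string) []byte {
        b, err := os.ReadFile(path)
        if err != nil {
            if !os.IsNotExist(err) {
                logrus.Warningf("Failed to read %s: %v", path, err)
            }
            return nil
        }
        return b
    }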