Remove unneeded conversions

These are cases where the value being converted is already of the target
type (verified to be so for all OS/arch combinations).

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
This commit is contained in:
Kir Kolyshkin 2025-03-29 14:56:21 -07:00
parent 29a197522e
commit b7fb12e894
15 changed files with 27 additions and 29 deletions

View File

@ -67,8 +67,8 @@ func layers(flags *mflag.FlagSet, action string, m storage.Store, args []string)
for _, layer := range layers { for _, layer := range layers {
if listLayersTree { if listLayersTree {
node := treeNode{ node := treeNode{
left: string(layer.Parent), left: layer.Parent,
right: string(layer.ID), right: layer.ID,
notes: []string{}, notes: []string{},
} }
if node.left == "" { if node.left == "" {

View File

@ -259,7 +259,7 @@ func supportsIdmappedLowerLayers(home string) (bool, error) {
} }
defer cleanupFunc() defer cleanupFunc()
if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, int(pid)); err != nil { if err := idmap.CreateIDMappedMount(lowerDir, lowerMappedDir, pid); err != nil {
return false, fmt.Errorf("create mapped mount: %w", err) return false, fmt.Errorf("create mapped mount: %w", err)
} }
defer func() { defer func() {

View File

@ -492,7 +492,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
o.quota.Inodes = uint64(inodes) o.quota.Inodes = inodes
case "imagestore", "additionalimagestore": case "imagestore", "additionalimagestore":
logrus.Debugf("overlay: imagestore=%s", val) logrus.Debugf("overlay: imagestore=%s", val)
// Additional read only image stores to use for lower paths // Additional read only image stores to use for lower paths
@ -1163,7 +1163,7 @@ func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) e
if err != nil { if err != nil {
return err return err
} }
driver.options.quota.Inodes = uint64(inodes) driver.options.quota.Inodes = inodes
default: default:
return fmt.Errorf("unknown option %s", key) return fmt.Errorf("unknown option %s", key)
} }
@ -1551,7 +1551,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
permsKnown := false permsKnown := false
st, err := os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN))) st, err := os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
if err == nil { if err == nil {
perms = os.FileMode(st.Mode()) perms = st.Mode()
permsKnown = true permsKnown = true
} }
for err == nil { for err == nil {
@ -1566,7 +1566,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if err != nil { if err != nil {
return "", err return "", err
} }
idmappedMountProcessPid = int(pid) idmappedMountProcessPid = pid
defer cleanupFunc() defer cleanupFunc()
} }
@ -1638,7 +1638,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
lower = path.Join(p, d.name, l) lower = path.Join(p, d.name, l)
if st2, err2 := os.Stat(lower); err2 == nil { if st2, err2 := os.Stat(lower); err2 == nil {
if !permsKnown { if !permsKnown {
perms = os.FileMode(st2.Mode()) perms = st2.Mode()
permsKnown = true permsKnown = true
} }
break break
@ -1659,7 +1659,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
} }
} else { } else {
if !permsKnown { if !permsKnown {
perms = os.FileMode(st.Mode()) perms = st.Mode()
permsKnown = true permsKnown = true
} }
lower = newpath lower = newpath
@ -2505,7 +2505,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMapp
perms = *d.options.forceMask perms = *d.options.forceMask
} else { } else {
if err == nil { if err == nil {
perms = os.FileMode(st.Mode()) perms = st.Mode()
} }
} }
for err == nil { for err == nil {

View File

@ -94,7 +94,7 @@ func (d *dedupFiles) dedup(src, dst string, fiDst fs.FileInfo) (uint64, error) {
} }
err = unix.IoctlFileDedupeRange(int(srcFile.Fd()), &value) err = unix.IoctlFileDedupeRange(int(srcFile.Fd()), &value)
if err == nil { if err == nil {
return uint64(value.Info[0].Bytes_deduped), nil return value.Info[0].Bytes_deduped, nil
} }
if errors.Is(err, unix.ENOTSUP) { if errors.Is(err, unix.ENOTSUP) {

View File

@ -28,7 +28,7 @@ func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
} }
func (opts *ListOpts) String() string { func (opts *ListOpts) String() string {
return fmt.Sprintf("%v", []string((*opts.values))) return fmt.Sprintf("%v", *opts.values)
} }
// Set validates if needed the input value and adds it to the // Set validates if needed the input value and adds it to the
@ -150,7 +150,7 @@ func (opts *MapOpts) GetAll() map[string]string {
} }
func (opts *MapOpts) String() string { func (opts *MapOpts) String() string {
return fmt.Sprintf("%v", map[string]string((opts.values))) return fmt.Sprintf("%v", opts.values)
} }
// Type returns a string name for this Option type // Type returns a string name for this Option type

View File

@ -1406,7 +1406,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
} else if runtime.GOOS == darwin { } else if runtime.GOOS == darwin {
uid, gid = hdr.Uid, hdr.Gid uid, gid = hdr.Uid, hdr.Gid
if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok { if xstat, ok := hdr.PAXRecords[PaxSchilyXattr+idtools.ContainersOverrideXattr]; ok {
attrs := strings.Split(string(xstat), ":") attrs := strings.Split(xstat, ":")
if len(attrs) >= 3 { if len(attrs) >= 3 {
val, err := strconv.ParseUint(attrs[0], 10, 32) val, err := strconv.ParseUint(attrs[0], 10, 32)
if err != nil { if err != nil {

View File

@ -65,7 +65,7 @@ func (bf *bloomFilter) writeTo(writer io.Writer) error {
if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil { if err := binary.Write(writer, binary.LittleEndian, uint64(len(bf.bitArray))); err != nil {
return err return err
} }
if err := binary.Write(writer, binary.LittleEndian, uint32(bf.k)); err != nil { if err := binary.Write(writer, binary.LittleEndian, bf.k); err != nil {
return err return err
} }
if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil { if err := binary.Write(writer, binary.LittleEndian, bf.bitArray); err != nil {

View File

@ -238,7 +238,7 @@ func FuzzReadCache(f *testing.F) {
dest = nil dest = nil
f.Fuzz(func(t *testing.T, orig []byte) { f.Fuzz(func(t *testing.T, orig []byte) {
cacheRead, err := readCacheFileFromMemory([]byte(orig)) cacheRead, err := readCacheFileFromMemory(orig)
if err != nil || cacheRead == nil { if err != nil || cacheRead == nil {
return return
} }

View File

@ -87,7 +87,7 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
return nil, 0, fmt.Errorf("parse ToC offset: %w", err) return nil, 0, fmt.Errorf("parse ToC offset: %w", err)
} }
size := int64(blobSize - footerSize - tocOffset) size := blobSize - footerSize - tocOffset
// set a reasonable limit // set a reasonable limit
if size > maxTocSize { if size > maxTocSize {
// Not errFallbackCanConvert: we would still use too much memory. // Not errFallbackCanConvert: we would still use too much memory.

View File

@ -43,7 +43,7 @@ func escaped(val []byte, escape int) string {
} }
var result string var result string
for _, c := range []byte(val) { for _, c := range val {
hexEscape := false hexEscape := false
var special string var special string

View File

@ -234,7 +234,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
Offset: manifestOffset, Offset: manifestOffset,
LengthCompressed: uint64(len(compressedManifest)), LengthCompressed: uint64(len(compressedManifest)),
LengthUncompressed: uint64(len(manifest)), LengthUncompressed: uint64(len(manifest)),
OffsetTarSplit: uint64(tarSplitOffset), OffsetTarSplit: tarSplitOffset,
LengthCompressedTarSplit: uint64(len(tarSplitData.Data)), LengthCompressedTarSplit: uint64(len(tarSplitData.Data)),
LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize), LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize),
} }

View File

@ -1011,7 +1011,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
!missingParts[prevIndex].Hole && !missingParts[i].Hole && !missingParts[prevIndex].Hole && !missingParts[i].Hole &&
len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 && len(missingParts[prevIndex].Chunks) == 1 && len(missingParts[i].Chunks) == 1 &&
missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name { missingParts[prevIndex].Chunks[0].File.Name == missingParts[i].Chunks[0].File.Name {
missingParts[prevIndex].SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length missingParts[prevIndex].SourceChunk.Length += gap + missingParts[i].SourceChunk.Length
missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize missingParts[prevIndex].Chunks[0].CompressedSize += missingParts[i].Chunks[0].CompressedSize
missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize missingParts[prevIndex].Chunks[0].UncompressedSize += missingParts[i].Chunks[0].UncompressedSize
} else { } else {
@ -1069,7 +1069,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart {
} else { } else {
gap := getGap(missingParts, i) gap := getGap(missingParts, i)
prev := &newMissingParts[len(newMissingParts)-1] prev := &newMissingParts[len(newMissingParts)-1]
prev.SourceChunk.Length += uint64(gap) + missingParts[i].SourceChunk.Length prev.SourceChunk.Length += gap + missingParts[i].SourceChunk.Length
prev.Hole = false prev.Hole = false
prev.OriginFile = nil prev.OriginFile = nil
if gap > 0 { if gap > 0 {
@ -1761,7 +1761,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
// the file is missing, attempt to find individual chunks. // the file is missing, attempt to find individual chunks.
for _, chunk := range r.chunks { for _, chunk := range r.chunks {
compressedSize := int64(chunk.EndOffset - chunk.Offset) compressedSize := chunk.EndOffset - chunk.Offset
size := remainingSize size := remainingSize
if chunk.ChunkSize > 0 { if chunk.ChunkSize > 0 {
size = chunk.ChunkSize size = chunk.ChunkSize

View File

@ -42,13 +42,11 @@ func Usage(dir string) (usage *DiskUsage, err error) {
// Check inode to only count the sizes of files with multiple hard links once. // Check inode to only count the sizes of files with multiple hard links once.
inode := fileInfo.Sys().(*syscall.Stat_t).Ino inode := fileInfo.Sys().(*syscall.Stat_t).Ino
// inode is not a uint64 on all platforms. Cast it to avoid issues. if _, exists := data[inode]; exists {
if _, exists := data[uint64(inode)]; exists {
return nil return nil
} }
// inode is not a uint64 on all platforms. Cast it to avoid issues. data[inode] = struct{}{}
data[uint64(inode)] = struct{}{}
// Ignore directory sizes // Ignore directory sizes
if entry.IsDir() { if entry.IsDir() {
return nil return nil

View File

@ -38,7 +38,7 @@ func TestAtomicWriteToFile(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Error statting file: %v", err) t.Fatalf("Error statting file: %v", err)
} }
if expected := os.FileMode(testMode); st.Mode() != expected { if expected := testMode; st.Mode() != expected {
t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode())
} }
} }
@ -131,7 +131,7 @@ func TestAtomicWriteSetCommit(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Error statting file: %v", err) t.Fatalf("Error statting file: %v", err)
} }
if expected := os.FileMode(testMode); st.Mode() != expected { if expected := testMode; st.Mode() != expected {
t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode())
} }
} }

View File

@ -53,7 +53,7 @@ func FindLoopDeviceFor(file *os.File) *os.File {
} }
dev, inode, err := getLoopbackBackingFile(file) dev, inode, err := getLoopbackBackingFile(file)
if err == nil && dev == uint64(targetDevice) && inode == targetInode { if err == nil && dev == targetDevice && inode == targetInode {
return file return file
} }
file.Close() file.Close()