mirror of https://github.com/containers/podman.git
bump go to 1.22
Many dependencies started using go 1.22 which means we have to follow in order to update. Disable the now deprecated exportloopref linter as it was replaced by copyloopvar as go fixed the loop copy problem in 1.22[1] Another new change in go 1.22 is the for loop syntax over ints, the intrange linter checks for this but there are a lot of loops that have to be converted so I didn't do it here and disabled the linter for now, the old syntax is still fine. [1] https://go.dev/blog/loopvar-preview Signed-off-by: Paul Holzinger <pholzing@redhat.com>
This commit is contained in:
parent
d03e8ffc56
commit
f93fcf7dee
|
@ -8,6 +8,7 @@ linters:
|
|||
disable:
|
||||
# too many reports but requires attention
|
||||
- depguard
|
||||
- intrange # should be turned on but we have to convert each place manually as there is no auto fix function
|
||||
- tagalign
|
||||
- perfsprint
|
||||
- typecheck
|
||||
|
@ -62,6 +63,7 @@ linters:
|
|||
- exhaustruct
|
||||
# deprecated linters
|
||||
- execinquery
|
||||
- exportloopref
|
||||
linters-settings:
|
||||
errcheck:
|
||||
check-blank: false
|
||||
|
|
2
go.mod
2
go.mod
|
@ -2,7 +2,7 @@ module github.com/containers/podman/v5
|
|||
|
||||
// Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates
|
||||
|
||||
go 1.21.0
|
||||
go 1.22.0
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.4.0
|
||||
|
|
|
@ -2227,7 +2227,6 @@ func (c *Container) postDeleteHooks(ctx context.Context) error {
|
|||
return err
|
||||
}
|
||||
for i, hook := range extensionHooks {
|
||||
hook := hook
|
||||
logrus.Debugf("container %s: invoke poststop hook %d, path %s", c.ID(), i, hook.Path)
|
||||
var stderr, stdout bytes.Buffer
|
||||
hookErr, err := exec.RunWithOptions(
|
||||
|
|
|
@ -596,7 +596,6 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po
|
|||
// Deduplicate volumes, so if containers in the pod share a volume, it's only
|
||||
// listed in the volumes section once
|
||||
for _, vol := range volumes {
|
||||
vol := vol
|
||||
deDupPodVolumes[vol.Name] = &vol
|
||||
}
|
||||
}
|
||||
|
|
|
@ -93,7 +93,6 @@ func TestGetTailLog(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
file := filepath.Join(dir, "log")
|
||||
|
|
|
@ -232,7 +232,6 @@ func Test_ocicniPortsToNetTypesPorts(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := ocicniPortsToNetTypesPorts(tt.arg)
|
||||
assert.Equal(t, tt.want, result, "ports do not match")
|
||||
|
|
|
@ -150,7 +150,6 @@ data:
|
|||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
buf := bytes.NewReader([]byte(test.configMapContent))
|
||||
cm, err := readConfigMapFromFile(buf)
|
||||
|
@ -196,7 +195,6 @@ kind: Pod
|
|||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
kind, err := getKubeKind([]byte(test.kubeYAML))
|
||||
if test.expectError {
|
||||
|
@ -268,7 +266,6 @@ items:
|
|||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
docs, err := splitMultiDocYAML([]byte(test.kubeYAML))
|
||||
if test.expectError {
|
||||
|
|
|
@ -43,7 +43,6 @@ func TestCause(t *testing.T) {
|
|||
expectedErr: fmt.Errorf("0: %w", errors.New("error")),
|
||||
},
|
||||
} {
|
||||
tc := tc
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
err := Cause(tc.err())
|
||||
|
|
|
@ -44,7 +44,6 @@ func newFarmWithBuilders(_ context.Context, name string, cons []config.Connectio
|
|||
)
|
||||
// Set up the remote connections to handle the builds
|
||||
for _, con := range cons {
|
||||
con := con
|
||||
builderGroup.Go(func() error {
|
||||
fmt.Printf("Connecting to %q\n", con.Name)
|
||||
engine, err := infra.NewImageEngine(&entities.PodmanConfig{
|
||||
|
@ -115,7 +114,6 @@ func (f *Farm) Status(ctx context.Context) (map[string]error, error) {
|
|||
statusGroup multierror.Group
|
||||
)
|
||||
for _, engine := range f.builders {
|
||||
engine := engine
|
||||
statusGroup.Go(func() error {
|
||||
logrus.Debugf("getting status of %q", engine.FarmNodeName(ctx))
|
||||
defer logrus.Debugf("got status of %q", engine.FarmNodeName(ctx))
|
||||
|
@ -159,7 +157,6 @@ func (f *Farm) NativePlatforms(ctx context.Context) ([]string, error) {
|
|||
nativeGroup multierror.Group
|
||||
)
|
||||
for _, engine := range f.builders {
|
||||
engine := engine
|
||||
nativeGroup.Go(func() error {
|
||||
logrus.Debugf("getting native platform of %q\n", engine.FarmNodeName(ctx))
|
||||
defer logrus.Debugf("got native platform of %q", engine.FarmNodeName(ctx))
|
||||
|
@ -199,7 +196,6 @@ func (f *Farm) EmulatedPlatforms(ctx context.Context) ([]string, error) {
|
|||
emulatedGroup multierror.Group
|
||||
)
|
||||
for _, engine := range f.builders {
|
||||
engine := engine
|
||||
emulatedGroup.Go(func() error {
|
||||
logrus.Debugf("getting emulated platforms of %q", engine.FarmNodeName(ctx))
|
||||
defer logrus.Debugf("got emulated platforms of %q", engine.FarmNodeName(ctx))
|
||||
|
@ -260,7 +256,6 @@ func (f *Farm) Schedule(ctx context.Context, platforms []string) (Schedule, erro
|
|||
// Make notes of which platforms we can build for natively, and which
|
||||
// ones we can build for using emulation.
|
||||
for name, engine := range f.builders {
|
||||
name, engine := name, engine
|
||||
infoGroup.Go(func() error {
|
||||
inspect, err := engine.FarmNodeInspect(ctx)
|
||||
if err != nil {
|
||||
|
@ -377,7 +372,6 @@ func (f *Farm) Build(ctx context.Context, schedule Schedule, options entities.Bu
|
|||
builder entities.ImageEngine
|
||||
}
|
||||
for platform, builder := range schedule.platformBuilders {
|
||||
platform, builder := platform, builder
|
||||
outReader, outWriter := io.Pipe()
|
||||
errReader, errWriter := io.Pipe()
|
||||
go func() {
|
||||
|
|
|
@ -64,7 +64,6 @@ func (l *listLocal) build(ctx context.Context, images map[entities.BuildReport]e
|
|||
)
|
||||
refs := []string{}
|
||||
for image, engine := range images {
|
||||
image, engine := image, engine
|
||||
pushGroup.Go(func() error {
|
||||
logrus.Infof("pushing image %s", image.ID)
|
||||
defer logrus.Infof("pushed image %s", image.ID)
|
||||
|
@ -91,7 +90,6 @@ func (l *listLocal) build(ctx context.Context, images map[entities.BuildReport]e
|
|||
if engine.FarmNodeName(ctx) == entities.LocalFarmImageBuilderName {
|
||||
continue
|
||||
}
|
||||
image, engine := image, engine
|
||||
rmGroup.Go(func() error {
|
||||
_, err := engine.Remove(ctx, []string{image.ID}, entities.ImageRemoveOptions{})
|
||||
if len(err) > 0 {
|
||||
|
|
|
@ -119,7 +119,6 @@ func TestNewMachineFile(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := NewMachineFile(tt.args.path, tt.args.symlink)
|
||||
if (err != nil) != tt.wantErr {
|
||||
|
|
|
@ -224,7 +224,6 @@ func TestConfigMapVolumes(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
result, err := VolumeFromConfigMap(test.volume.ConfigMap, test.configmaps)
|
||||
if test.errorMessage == "" {
|
||||
|
@ -434,7 +433,6 @@ func TestEnvVarsFrom(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
result, err := envVarsFrom(test.envFrom, &test.options)
|
||||
assert.Equal(t, err == nil, test.succeed)
|
||||
|
@ -1027,7 +1025,6 @@ func TestEnvVarValue(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
result, err := envVarValue(test.envVar, &test.options)
|
||||
assert.Equal(t, err == nil, test.succeed)
|
||||
|
@ -1270,7 +1267,6 @@ func TestHttpLivenessProbe(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
err := setupLivenessProbe(&test.specGenerator, test.container, test.restartPolicy)
|
||||
if err == nil {
|
||||
|
@ -1393,7 +1389,6 @@ func TestTCPLivenessProbe(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
err := setupLivenessProbe(&test.specGenerator, test.container, test.restartPolicy)
|
||||
assert.Equal(t, err == nil, test.succeed)
|
||||
|
|
|
@ -212,7 +212,6 @@ func ParsePortMapping(portMappings []types.PortMapping, exposePorts map[uint16][
|
|||
|
||||
for hostIP, protoMap := range portMap {
|
||||
for protocol, ports := range protoMap {
|
||||
ports := ports
|
||||
if len(ports) == 0 {
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -433,7 +433,6 @@ func TestParsePortMappingWithHostPort(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := ParsePortMapping(tt.arg, tt.arg2)
|
||||
assert.NoError(t, err, "error is not nil")
|
||||
|
@ -668,7 +667,6 @@ func TestParsePortMappingWithoutHostPort(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := ParsePortMapping(tt.arg, tt.arg2)
|
||||
assert.NoError(t, err, "error is not nil")
|
||||
|
@ -847,7 +845,6 @@ func TestParsePortMappingMixedHostPort(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := ParsePortMapping(tt.arg, nil)
|
||||
assert.NoError(t, err, "error is not nil")
|
||||
|
@ -982,7 +979,6 @@ func TestParsePortMappingError(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
_, err := ParsePortMapping(tt.arg, nil)
|
||||
assert.EqualError(t, err, tt.err, "error does not match")
|
||||
|
|
|
@ -239,7 +239,6 @@ func TestParseNetworkFlag(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, got1, got2, err := ParseNetworkFlag(tt.args)
|
||||
if tt.err != "" {
|
||||
|
|
|
@ -71,7 +71,6 @@ func TestMatchLabelFilters(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := filters.MatchLabelFilters(tt.args.filterValues, tt.args.labels); got != tt.want {
|
||||
t.Errorf("MatchLabelFilters() = %v, want %v", got, tt.want)
|
||||
|
|
|
@ -1118,9 +1118,6 @@ var _ = Describe("Podman checkpoint", func() {
|
|||
share,
|
||||
)
|
||||
|
||||
share := share // copy into local scope, for use inside function
|
||||
index := index
|
||||
|
||||
It(testName, func() {
|
||||
podName := "test_pod"
|
||||
|
||||
|
|
|
@ -217,7 +217,6 @@ var _ = Describe("Podman healthcheck run", func() {
|
|||
// Run this test with and without healthcheck events, even without events
|
||||
// podman inspect and ps should still show accurate healthcheck results.
|
||||
for _, hcEvent := range []bool{true, false} {
|
||||
hcEvent := hcEvent
|
||||
testName := "hc_events=" + strconv.FormatBool(hcEvent)
|
||||
It("podman healthcheck single healthy result changes failed to healthy "+testName, func() {
|
||||
if !hcEvent {
|
||||
|
|
|
@ -35,8 +35,6 @@ var _ = Describe("Podman logs", func() {
|
|||
})
|
||||
|
||||
for _, log := range []string{"k8s-file", "journald", "json-file"} {
|
||||
// This is important to move the 'log' var to the correct scope under Ginkgo flow.
|
||||
log := log
|
||||
|
||||
// Flake prevention: journalctl makes no timeliness guarantees
|
||||
logTimeout := time.Millisecond
|
||||
|
|
|
@ -549,7 +549,6 @@ var _ = Describe("Podman network", func() {
|
|||
})
|
||||
|
||||
for _, opt := range []string{"-o=parent=lo", "--interface-name=lo"} {
|
||||
opt := opt
|
||||
It(fmt.Sprintf("podman network create/remove macvlan as driver (-d) with %s", opt), func() {
|
||||
net := "macvlan" + stringid.GenerateRandomID()
|
||||
nc := podmanTest.Podman([]string{"network", "create", "-d", "macvlan", opt, net})
|
||||
|
|
|
@ -417,7 +417,6 @@ var _ = Describe("Podman pod create", func() {
|
|||
|
||||
tests := []string{"", "none"}
|
||||
for _, test := range tests {
|
||||
test := test
|
||||
It("podman pod create --share="+test+" should not create an infra ctr", func() {
|
||||
session := podmanTest.Podman([]string{"pod", "create", "--share", test})
|
||||
session.WaitWithDefaultTimeout()
|
||||
|
|
|
@ -62,7 +62,6 @@ var _ = Describe("Podman run memory", func() {
|
|||
})
|
||||
|
||||
for _, limit := range []string{"0", "15", "100"} {
|
||||
limit := limit // Keep this value in a proper scope
|
||||
testName := fmt.Sprintf("podman run memory-swappiness test(%s)", limit)
|
||||
It(testName, func() {
|
||||
SkipIfCgroupV2("memory-swappiness not supported on cgroupV2")
|
||||
|
|
|
@ -548,7 +548,6 @@ EXPOSE 2004-2005/tcp`, ALPINE)
|
|||
})
|
||||
|
||||
for _, local := range []bool{true, false} {
|
||||
local := local
|
||||
testName := "HostIP"
|
||||
if local {
|
||||
testName = "127.0.0.1"
|
||||
|
|
Loading…
Reference in New Issue