Merge pull request #21954 from containers/renovate/github.com-containers-libhvee-digest

fix(deps): update github.com/containers/libhvee digest to 7cee23c
openshift-merge-bot[bot] 2024-03-06 13:58:37 +00:00 committed by GitHub
commit ef53c76b3b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
13 changed files with 532 additions and 72 deletions

go.mod

@@ -15,10 +15,10 @@ require (
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.7.3
github.com/containers/image/v5 v5.29.3-0.20240229213915-cdc68020a24f
-github.com/containers/libhvee v0.6.1-0.20240301191848-0ff33af3be2d
+github.com/containers/libhvee v0.6.1-0.20240305211055-7cee23cdfc54
github.com/containers/ocicrypt v1.1.9
github.com/containers/psgo v1.9.0
-github.com/containers/storage v1.52.1-0.20240301185114-bdd7d8188030
+github.com/containers/storage v1.53.0
github.com/containers/winquit v1.1.0
github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09
github.com/coreos/stream-metadata-go v0.4.4
@@ -49,7 +49,7 @@ require (
github.com/moby/sys/user v0.1.0
github.com/moby/term v0.5.0
github.com/nxadm/tail v1.4.11
-github.com/onsi/ginkgo/v2 v2.15.0
+github.com/onsi/ginkgo/v2 v2.16.0
github.com/onsi/gomega v1.31.1
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0

go.sum

@@ -84,8 +84,8 @@ github.com/containers/gvisor-tap-vsock v0.7.3 h1:yORnf15sP+sLFhxLNLgmB5/lOhldn9d
github.com/containers/gvisor-tap-vsock v0.7.3/go.mod h1:NI1fLMtKXQZoDrrOeqryGz7x7j/XSFWRmQILva7Fu9c=
github.com/containers/image/v5 v5.29.3-0.20240229213915-cdc68020a24f h1:DEK6PaY5/B6CYXjtdfAQGCUltHEPaoXvLb+C0PH6HiE=
github.com/containers/image/v5 v5.29.3-0.20240229213915-cdc68020a24f/go.mod h1:a48d1rhHBl2zb630MSf20QQo4eIlIQvhZTqTcVJhbpA=
-github.com/containers/libhvee v0.6.1-0.20240301191848-0ff33af3be2d h1:UapmAtc33jKPaZgjcL1+3uya/5j6Bnod8k+hokySyak=
-github.com/containers/libhvee v0.6.1-0.20240301191848-0ff33af3be2d/go.mod h1:/rNb2NTQtsH/fYU4LDd2ofIRdFC1+l6d4ZeDDz8Klyw=
+github.com/containers/libhvee v0.6.1-0.20240305211055-7cee23cdfc54 h1:3OAYvB0Ch23g7S7xF2sIgeEqGU1NoSRD5pIVJMxq3VM=
+github.com/containers/libhvee v0.6.1-0.20240305211055-7cee23cdfc54/go.mod h1:fRKB3AyIqHMvq6xaeYhTpckM2cdoq0oecolyoiuLP7M=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/luksy v0.0.0-20240129181507-b62d551ce6d8 h1:0p58QJRICjkRVCDix1nsnyrtJ3Qj4CWcGd1bOEY9sVY=
@@ -94,8 +94,8 @@ github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOj
github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
github.com/containers/psgo v1.9.0 h1:eJ74jzSaCHnWt26OlKZROSyUyRcGDf+gYBdXnxrMW4g=
github.com/containers/psgo v1.9.0/go.mod h1:0YoluUm43Mz2UnBIh1P+6V6NWcbpTL5uRtXyOcH0B5A=
-github.com/containers/storage v1.52.1-0.20240301185114-bdd7d8188030 h1:2Ksbho+rB+bkVbGMuaCAHPZnrkL9heCdisQIhnTWp+8=
-github.com/containers/storage v1.52.1-0.20240301185114-bdd7d8188030/go.mod h1:pujcoOSc+upx15Jirdkebhtd8uJiLwbSd/mYT6zDJK8=
+github.com/containers/storage v1.53.0 h1:VSES3C/u1pxjTJIXvLrSmyP7OBtDky04oGu07UvdTEA=
+github.com/containers/storage v1.53.0/go.mod h1:pujcoOSc+upx15Jirdkebhtd8uJiLwbSd/mYT6zDJK8=
github.com/containers/winquit v1.1.0 h1:jArun04BNDQvt2W0Y78kh9TazN2EIEMG5Im6/JY7+pE=
github.com/containers/winquit v1.1.0/go.mod h1:PsPeZlnbkmGGIToMPHF1zhWjBUkd8aHjMOr/vFcPxw8=
github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
@@ -441,8 +441,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY=
-github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM=
+github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM=
+github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=


@@ -1 +1 @@
-1.52.1-dev
+1.53.0


@@ -1,3 +1,22 @@
+## 2.16.0
+### Features
+- add SpecContext to reporting nodes
+### Fixes
+- merge coverages instead of combining them (#1329) (#1340) [23f0cc5]
+- core_dsl: disable Getwd() with environment variable (#1357) [cd418b7]
+### Maintenance
+- docs/index.md: Typo [2cebe8d]
+- fix docs [06de431]
+- chore: test with Go 1.22 (#1352) [898cba9]
+- Bump golang.org/x/tools from 0.16.1 to 0.17.0 (#1336) [17ae120]
+- Bump golang.org/x/sys from 0.15.0 to 0.16.0 (#1327) [5a179ed]
+- Bump github.com/go-logr/logr from 1.3.0 to 1.4.1 (#1321) [a1e6b69]
+- Bump github-pages and jekyll-feed in /docs (#1351) [d52951d]
+- Fix docs for handling failures in goroutines (#1339) [4471b2e]
## 2.15.0
### Features


@@ -292,7 +292,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
err = global.Suite.BuildTree()
exitIfErr(err)
-suitePath, err := os.Getwd()
+suitePath, err := getwd()
exitIfErr(err)
suitePath, err = filepath.Abs(suitePath)
exitIfErr(err)
@@ -345,6 +345,15 @@ func extractSuiteConfiguration(args []interface{}) Labels {
return suiteLabels
}
+func getwd() (string, error) {
+if !strings.EqualFold(os.Getenv("GINKGO_PRESERVE_CACHE"), "true") {
+// Getwd calls os.Getenv("PWD"), which breaks test caching if the cache
+// is shared between two different directories with the same test code.
+return os.Getwd()
+}
+return "", nil
+}
/*
PreviewSpecs walks the testing tree and produces a report without actually invoking the specs.
See http://onsi.github.io/ginkgo/#previewing-specs for more information.
@@ -369,7 +378,7 @@ func PreviewSpecs(description string, args ...any) Report {
err = global.Suite.BuildTree()
exitIfErr(err)
-suitePath, err := os.Getwd()
+suitePath, err := getwd()
exitIfErr(err)
suitePath, err = filepath.Abs(suitePath)
exitIfErr(err)
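For orientation (illustrative, not part of this commit): the new getwd helper only calls os.Getwd() when GINKGO_PRESERVE_CACHE is unset or not "true", matching the comment above about $PWD-derived paths breaking `go test` result caching when the same code lives in two directories. A minimal, hypothetical bootstrap showing where the variable takes effect; the package and suite name are placeholders.

package mypackage_test

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Run as: GINKGO_PRESERVE_CACHE=true go test ./...
// With the change above, RunSpecs then skips os.Getwd(), so cached results
// are not invalidated just because the suite ran from a different checkout.
func TestMyPackage(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "MyPackage Suite")
}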


@@ -0,0 +1,129 @@
// Copyright (c) 2015, Wade Simmons
// All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Package gocovmerge takes the results from multiple `go test -coverprofile`
// runs and merges them into one profile
// this file was originally taken from the gocovmerge project
// see also: https://go.shabbyrobe.org/gocovmerge
package internal
import (
"fmt"
"io"
"sort"
"golang.org/x/tools/cover"
)
func AddCoverProfile(profiles []*cover.Profile, p *cover.Profile) []*cover.Profile {
i := sort.Search(len(profiles), func(i int) bool { return profiles[i].FileName >= p.FileName })
if i < len(profiles) && profiles[i].FileName == p.FileName {
MergeCoverProfiles(profiles[i], p)
} else {
profiles = append(profiles, nil)
copy(profiles[i+1:], profiles[i:])
profiles[i] = p
}
return profiles
}
func DumpCoverProfiles(profiles []*cover.Profile, out io.Writer) error {
if len(profiles) == 0 {
return nil
}
if _, err := fmt.Fprintf(out, "mode: %s\n", profiles[0].Mode); err != nil {
return err
}
for _, p := range profiles {
for _, b := range p.Blocks {
if _, err := fmt.Fprintf(out, "%s:%d.%d,%d.%d %d %d\n", p.FileName, b.StartLine, b.StartCol, b.EndLine, b.EndCol, b.NumStmt, b.Count); err != nil {
return err
}
}
}
return nil
}
func MergeCoverProfiles(into *cover.Profile, merge *cover.Profile) error {
if into.Mode != merge.Mode {
return fmt.Errorf("cannot merge profiles with different modes")
}
// Since the blocks are sorted, we can keep track of where the last block
// was inserted and only look at the blocks after that as targets for merge
startIndex := 0
for _, b := range merge.Blocks {
var err error
startIndex, err = mergeProfileBlock(into, b, startIndex)
if err != nil {
return err
}
}
return nil
}
func mergeProfileBlock(p *cover.Profile, pb cover.ProfileBlock, startIndex int) (int, error) {
sortFunc := func(i int) bool {
pi := p.Blocks[i+startIndex]
return pi.StartLine >= pb.StartLine && (pi.StartLine != pb.StartLine || pi.StartCol >= pb.StartCol)
}
i := 0
if sortFunc(i) != true {
i = sort.Search(len(p.Blocks)-startIndex, sortFunc)
}
i += startIndex
if i < len(p.Blocks) && p.Blocks[i].StartLine == pb.StartLine && p.Blocks[i].StartCol == pb.StartCol {
if p.Blocks[i].EndLine != pb.EndLine || p.Blocks[i].EndCol != pb.EndCol {
return i, fmt.Errorf("gocovmerge: overlapping merge %v %v %v", p.FileName, p.Blocks[i], pb)
}
switch p.Mode {
case "set":
p.Blocks[i].Count |= pb.Count
case "count", "atomic":
p.Blocks[i].Count += pb.Count
default:
return i, fmt.Errorf("gocovmerge: unsupported covermode '%s'", p.Mode)
}
} else {
if i > 0 {
pa := p.Blocks[i-1]
if pa.EndLine >= pb.EndLine && (pa.EndLine != pb.EndLine || pa.EndCol > pb.EndCol) {
return i, fmt.Errorf("gocovmerge: overlap before %v %v %v", p.FileName, pa, pb)
}
}
if i < len(p.Blocks)-1 {
pa := p.Blocks[i+1]
if pa.StartLine <= pb.StartLine && (pa.StartLine != pb.StartLine || pa.StartCol < pb.StartCol) {
return i, fmt.Errorf("gocovmerge: overlap after %v %v %v", p.FileName, pa, pb)
}
}
p.Blocks = append(p.Blocks, cover.ProfileBlock{})
copy(p.Blocks[i+1:], p.Blocks[i:])
p.Blocks[i] = pb
}
return i + 1, nil
}


@@ -1,7 +1,6 @@
package internal
import (
-"bytes"
"fmt"
"os"
"os/exec"
@@ -12,6 +11,7 @@ import (
"github.com/google/pprof/profile"
"github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/ginkgo/v2/types"
+"golang.org/x/tools/cover"
)
func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string {
@@ -144,38 +144,26 @@ func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIC
return messages, nil
}
-// loads each profile, combines them, deletes them, stores them in destination
+// loads each profile, merges them, deletes them, stores them in destination
func MergeAndCleanupCoverProfiles(profiles []string, destination string) error {
-combined := &bytes.Buffer{}
-modeRegex := regexp.MustCompile(`^mode: .*\n`)
-for i, profile := range profiles {
-contents, err := os.ReadFile(profile)
-if err != nil {
-return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error())
-}
-os.Remove(profile)
-// remove the cover mode line from every file
-// except the first one
-if i > 0 {
-contents = modeRegex.ReplaceAll(contents, []byte{})
-}
-_, err = combined.Write(contents)
-// Add a newline to the end of every file if missing.
-if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' {
-_, err = combined.Write([]byte("\n"))
-}
-if err != nil {
-return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error())
-}
-}
-err := os.WriteFile(destination, combined.Bytes(), 0666)
-if err != nil {
-return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error())
-}
+var merged []*cover.Profile
+for _, file := range profiles {
+parsedProfiles, err := cover.ParseProfiles(file)
+if err != nil {
+return err
+}
+os.Remove(file)
+for _, p := range parsedProfiles {
+merged = AddCoverProfile(merged, p)
+}
+}
+dst, err := os.OpenFile(destination, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+if err != nil {
+return err
+}
+err = DumpCoverProfiles(merged, dst)
+if err != nil {
+return err
+}
return nil
}


@@ -5,9 +5,8 @@ import (
"fmt"
"reflect"
"sort"
-"time"
"sync"
+"time"
"github.com/onsi/ginkgo/v2/types"
)
@@ -16,8 +15,8 @@ var _global_node_id_counter = uint(0)
var _global_id_mutex = &sync.Mutex{}
func UniqueNodeID() uint {
-//There's a reace in the internal integration tests if we don't make
-//accessing _global_node_id_counter safe across goroutines.
+// There's a reace in the internal integration tests if we don't make
+// accessing _global_node_id_counter safe across goroutines.
_global_id_mutex.Lock()
defer _global_id_mutex.Unlock()
_global_node_id_counter += 1
@@ -44,8 +43,8 @@ type Node struct {
SynchronizedAfterSuiteProc1Body func(SpecContext)
SynchronizedAfterSuiteProc1BodyHasContext bool
-ReportEachBody func(types.SpecReport)
-ReportSuiteBody func(types.Report)
+ReportEachBody func(SpecContext, types.SpecReport)
+ReportSuiteBody func(SpecContext, types.Report)
MarkedFocus bool
MarkedPending bool
@@ -209,7 +208,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
args = unrollInterfaceSlice(args)
remainingArgs := []interface{}{}
-//First get the CodeLocation up-to-date
+// First get the CodeLocation up-to-date
for _, arg := range args {
switch v := arg.(type) {
case Offset:
@@ -225,11 +224,11 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
trackedFunctionError := false
args = remainingArgs
remainingArgs = []interface{}{}
-//now process the rest of the args
+// now process the rest of the args
for _, arg := range args {
switch t := reflect.TypeOf(arg); {
case t == reflect.TypeOf(float64(0)):
-break //ignore deprecated timeouts
+break // ignore deprecated timeouts
case t == reflect.TypeOf(Focus):
node.MarkedFocus = bool(arg.(focusType))
if !nodeType.Is(types.NodeTypesForContainerAndIt) {
@@ -325,7 +324,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
node.Body = func(SpecContext) { body() }
} else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) {
if node.ReportEachBody == nil {
-node.ReportEachBody = arg.(func(types.SpecReport))
+if fn, ok := arg.(func(types.SpecReport)); ok {
+node.ReportEachBody = func(_ SpecContext, r types.SpecReport) { fn(r) }
+} else {
+node.ReportEachBody = arg.(func(SpecContext, types.SpecReport))
+node.HasContext = true
+}
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
@@ -333,7 +337,12 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
} else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) {
if node.ReportSuiteBody == nil {
-node.ReportSuiteBody = arg.(func(types.Report))
+if fn, ok := arg.(func(types.Report)); ok {
+node.ReportSuiteBody = func(_ SpecContext, r types.Report) { fn(r) }
+} else {
+node.ReportSuiteBody = arg.(func(SpecContext, types.Report))
+node.HasContext = true
+}
} else {
appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType))
trackedFunctionError = true
@@ -395,7 +404,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy
}
}
-//validations
+// validations
if node.MarkedPending && node.MarkedFocus {
appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType))
}


@@ -594,8 +594,8 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) {
suite.writer.Truncate()
suite.outputInterceptor.StartInterceptingOutput()
report := suite.currentSpecReport
-nodes[i].Body = func(SpecContext) {
-nodes[i].ReportEachBody(report)
+nodes[i].Body = func(ctx SpecContext) {
+nodes[i].ReportEachBody(ctx, report)
}
state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i]))
@@ -762,7 +762,7 @@ func (suite *Suite) runReportSuiteNode(node Node, report types.Report) {
report = report.Add(aggregatedReport)
}
-node.Body = func(SpecContext) { node.ReportSuiteBody(report) }
+node.Body = func(ctx SpecContext) { node.ReportSuiteBody(ctx, report) }
suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "")
suite.currentSpecReport.EndTime = time.Now()
@@ -840,7 +840,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
timeoutInPlay = "node"
}
if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() {
-//we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
+// we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't
if node.NodeTimeout > 0 {
deadline = now.Add(node.NodeTimeout)
timeoutInPlay = "node"
@@ -918,9 +918,9 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
if outcomeFromRun != types.SpecStatePassed {
additionalFailure := types.AdditionalFailure{
State: outcomeFromRun,
-Failure: failure, //we make a copy - this will include all the configuration set up above...
+Failure: failure, // we make a copy - this will include all the configuration set up above...
}
-//...and then we update the failure with the details from failureFromRun
+// ...and then we update the failure with the details from failureFromRun
additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation
additionalFailure.Failure.ProgressReport = types.ProgressReport{}
if outcome == types.SpecStateTimedout {
@@ -959,7 +959,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ
// tell the spec to stop. it's important we generate the progress report first to make sure we capture where
// the spec is actually stuck
sc.cancel(fmt.Errorf("%s timeout occurred", timeoutInPlay))
-//and now we wait for the grace period
+// and now we wait for the grace period
gracePeriodChannel = time.After(gracePeriod)
case <-interruptStatus.Channel:
interruptStatus = suite.interruptHandler.Status()


@@ -74,12 +74,21 @@ func AddReportEntry(name string, args ...interface{}) {
/*
ReportBeforeEach nodes are run for each spec, even if the spec is skipped or pending. ReportBeforeEach nodes take a function that
-receives a SpecReport. They are called before the spec starts.
+receives a SpecReport or both SpecContext and Report for interruptible behavior. They are called before the spec starts.
+Example:
+ReportBeforeEach(func(report SpecReport) { // process report })
+ReportBeforeEach(func(ctx SpecContext, report SpecReport) {
+// process report
+}), NodeTimeout(1 * time.Minute))
You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure.
You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
+func ReportBeforeEach(body any, args ...any) bool {
combinedArgs := []interface{}{body}
combinedArgs = append(combinedArgs, args...)
@@ -87,13 +96,23 @@ func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool {
}
/*
-ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending. ReportAfterEach nodes take a function that
-receives a SpecReport. They are called after the spec has completed and receive the final report for the spec.
+ReportAfterEach nodes are run for each spec, even if the spec is skipped or pending.
+ReportAfterEach nodes take a function that receives a SpecReport or both SpecContext and Report for interruptible behavior.
+They are called after the spec has completed and receive the final report for the spec.
+Example:
+ReportAfterEach(func(report SpecReport) { // process report })
+ReportAfterEach(func(ctx SpecContext, report SpecReport) {
+// process report
+}), NodeTimeout(1 * time.Minute))
You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure.
You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
+func ReportAfterEach(body any, args ...any) bool {
combinedArgs := []interface{}{body}
combinedArgs = append(combinedArgs, args...)
@@ -101,7 +120,15 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool {
}
/*
-ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report.
+ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function
+that can either receive Report or both SpecContext and Report for interruptible behavior.
+Example Usage:
+ReportBeforeSuite(func(r Report) { // process report })
+ReportBeforeSuite(func(ctx SpecContext, r Report) {
+// process report
+}, NodeTimeout(1 * time.Minute))
They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite.
ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
@@ -112,18 +139,28 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportBeforeSuite(body func(Report), args ...interface{}) bool {
+func ReportBeforeSuite(body any, args ...any) bool {
combinedArgs := []interface{}{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...))
}
/*
-ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report.
+ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes execute at the suite's conclusion,
+and accept a function that can either receive Report or both SpecContext and Report for interruptible behavior.
+Example Usage:
+ReportAfterSuite("Non-interruptible ReportAfterSuite", func(r Report) { // process report })
+ReportAfterSuite("Interruptible ReportAfterSuite", func(ctx SpecContext, r Report) {
+// process report
+}, NodeTimeout(1 * time.Minute))
They are called at the end of the suite, after all specs have run and any AfterSuite or SynchronizedAfterSuite nodes, and are passed in the final report for the suite.
-ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
+ReportAfterSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node)
When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across
all parallel nodes
@@ -134,8 +171,10 @@ You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically
You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports
+You can learn about interruptible nodes here: https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes
*/
-func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool {
+func ReportAfterSuite(text string, body any, args ...interface{}) bool {
combinedArgs := []interface{}{body}
combinedArgs = append(combinedArgs, args...)
return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...))
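Taken together with the node.go change above, a plain func(Report) body still works (it is wrapped), while a func(SpecContext, Report) body becomes interruptible. A hedged sketch of why the context form is useful; ReportAfterSuite, NodeTimeout, SpecContext, Report, and GinkgoWriter come from Ginkgo, while the endpoint, payload, and timeout are invented for illustration.

package mypackage_test

import (
	"bytes"
	"encoding/json"
	"net/http"
	"time"

	. "github.com/onsi/ginkgo/v2"
)

var _ = ReportAfterSuite("upload results", func(ctx SpecContext, r Report) {
	payload, err := json.Marshal(r)
	if err != nil {
		GinkgoWriter.Printf("could not encode report: %v\n", err)
		return
	}
	// The request inherits ctx, so a NodeTimeout interrupts a slow upload
	// instead of hanging the suite's shutdown.
	req, err := http.NewRequestWithContext(ctx, http.MethodPost,
		"https://ci.example.com/results", bytes.NewReader(payload))
	if err != nil {
		GinkgoWriter.Printf("could not build request: %v\n", err)
		return
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		GinkgoWriter.Printf("failed to upload report: %v\n", err)
		return
	}
	resp.Body.Close()
}, NodeTimeout(30*time.Second))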


@@ -1,3 +1,3 @@
package types
-const VERSION = "2.15.0"
+const VERSION = "2.16.0"

vendor/golang.org/x/tools/cover/profile.go (generated, vendored; new file)

@@ -0,0 +1,266 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cover provides support for parsing coverage profiles
// generated by "go test -coverprofile=cover.out".
package cover // import "golang.org/x/tools/cover"
import (
"bufio"
"errors"
"fmt"
"io"
"math"
"os"
"sort"
"strconv"
"strings"
)
// Profile represents the profiling data for a specific file.
type Profile struct {
FileName string
Mode string
Blocks []ProfileBlock
}
// ProfileBlock represents a single block of profiling data.
type ProfileBlock struct {
StartLine, StartCol int
EndLine, EndCol int
NumStmt, Count int
}
type byFileName []*Profile
func (p byFileName) Len() int { return len(p) }
func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName }
func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// ParseProfiles parses profile data in the specified file and returns a
// Profile for each source file described therein.
func ParseProfiles(fileName string) ([]*Profile, error) {
pf, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer pf.Close()
return ParseProfilesFromReader(pf)
}
// ParseProfilesFromReader parses profile data from the Reader and
// returns a Profile for each source file described therein.
func ParseProfilesFromReader(rd io.Reader) ([]*Profile, error) {
// First line is "mode: foo", where foo is "set", "count", or "atomic".
// Rest of file is in the format
// encoding/base64/base64.go:34.44,37.40 3 1
// where the fields are: name.go:line.column,line.column numberOfStatements count
files := make(map[string]*Profile)
s := bufio.NewScanner(rd)
mode := ""
for s.Scan() {
line := s.Text()
if mode == "" {
const p = "mode: "
if !strings.HasPrefix(line, p) || line == p {
return nil, fmt.Errorf("bad mode line: %v", line)
}
mode = line[len(p):]
continue
}
fn, b, err := parseLine(line)
if err != nil {
return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, err)
}
p := files[fn]
if p == nil {
p = &Profile{
FileName: fn,
Mode: mode,
}
files[fn] = p
}
p.Blocks = append(p.Blocks, b)
}
if err := s.Err(); err != nil {
return nil, err
}
for _, p := range files {
sort.Sort(blocksByStart(p.Blocks))
// Merge samples from the same location.
j := 1
for i := 1; i < len(p.Blocks); i++ {
b := p.Blocks[i]
last := p.Blocks[j-1]
if b.StartLine == last.StartLine &&
b.StartCol == last.StartCol &&
b.EndLine == last.EndLine &&
b.EndCol == last.EndCol {
if b.NumStmt != last.NumStmt {
return nil, fmt.Errorf("inconsistent NumStmt: changed from %d to %d", last.NumStmt, b.NumStmt)
}
if mode == "set" {
p.Blocks[j-1].Count |= b.Count
} else {
p.Blocks[j-1].Count += b.Count
}
continue
}
p.Blocks[j] = b
j++
}
p.Blocks = p.Blocks[:j]
}
// Generate a sorted slice.
profiles := make([]*Profile, 0, len(files))
for _, profile := range files {
profiles = append(profiles, profile)
}
sort.Sort(byFileName(profiles))
return profiles, nil
}
// parseLine parses a line from a coverage file.
// It is equivalent to the regex
// ^(.+):([0-9]+)\.([0-9]+),([0-9]+)\.([0-9]+) ([0-9]+) ([0-9]+)$
//
// However, it is much faster: https://golang.org/cl/179377
func parseLine(l string) (fileName string, block ProfileBlock, err error) {
end := len(l)
b := ProfileBlock{}
b.Count, end, err = seekBack(l, ' ', end, "Count")
if err != nil {
return "", b, err
}
b.NumStmt, end, err = seekBack(l, ' ', end, "NumStmt")
if err != nil {
return "", b, err
}
b.EndCol, end, err = seekBack(l, '.', end, "EndCol")
if err != nil {
return "", b, err
}
b.EndLine, end, err = seekBack(l, ',', end, "EndLine")
if err != nil {
return "", b, err
}
b.StartCol, end, err = seekBack(l, '.', end, "StartCol")
if err != nil {
return "", b, err
}
b.StartLine, end, err = seekBack(l, ':', end, "StartLine")
if err != nil {
return "", b, err
}
fn := l[0:end]
if fn == "" {
return "", b, errors.New("a FileName cannot be blank")
}
return fn, b, nil
}
// seekBack searches backwards from end to find sep in l, then returns the
// value between sep and end as an integer.
// If seekBack fails, the returned error will reference what.
func seekBack(l string, sep byte, end int, what string) (value int, nextSep int, err error) {
// Since we're seeking backwards and we know only ASCII is legal for these values,
// we can ignore the possibility of non-ASCII characters.
for start := end - 1; start >= 0; start-- {
if l[start] == sep {
i, err := strconv.Atoi(l[start+1 : end])
if err != nil {
return 0, 0, fmt.Errorf("couldn't parse %q: %v", what, err)
}
if i < 0 {
return 0, 0, fmt.Errorf("negative values are not allowed for %s, found %d", what, i)
}
return i, start, nil
}
}
return 0, 0, fmt.Errorf("couldn't find a %s before %s", string(sep), what)
}
type blocksByStart []ProfileBlock
func (b blocksByStart) Len() int { return len(b) }
func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b blocksByStart) Less(i, j int) bool {
bi, bj := b[i], b[j]
return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol
}
// Boundary represents the position in a source file of the beginning or end of a
// block as reported by the coverage profile. In HTML mode, it will correspond to
// the opening or closing of a <span> tag and will be used to colorize the source
type Boundary struct {
Offset int // Location as a byte offset in the source file.
Start bool // Is this the start of a block?
Count int // Event count from the cover profile.
Norm float64 // Count normalized to [0..1].
Index int // Order in input file.
}
// Boundaries returns a Profile as a set of Boundary objects within the provided src.
func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) {
// Find maximum count.
max := 0
for _, b := range p.Blocks {
if b.Count > max {
max = b.Count
}
}
// Divisor for normalization.
divisor := math.Log(float64(max))
// boundary returns a Boundary, populating the Norm field with a normalized Count.
index := 0
boundary := func(offset int, start bool, count int) Boundary {
b := Boundary{Offset: offset, Start: start, Count: count, Index: index}
index++
if !start || count == 0 {
return b
}
if max <= 1 {
b.Norm = 0.8 // Profile is in"set" mode; we want a heat map. Use cov8 in the CSS.
} else if count > 0 {
b.Norm = math.Log(float64(count)) / divisor
}
return b
}
line, col := 1, 2 // TODO: Why is this 2?
for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); {
b := p.Blocks[bi]
if b.StartLine == line && b.StartCol == col {
boundaries = append(boundaries, boundary(si, true, b.Count))
}
if b.EndLine == line && b.EndCol == col || line > b.EndLine {
boundaries = append(boundaries, boundary(si, false, 0))
bi++
continue // Don't advance through src; maybe the next block starts here.
}
if src[si] == '\n' {
line++
col = 0
}
col++
si++
}
sort.Sort(boundariesByPos(boundaries))
return
}
type boundariesByPos []Boundary
func (b boundariesByPos) Len() int { return len(b) }
func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b boundariesByPos) Less(i, j int) bool {
if b[i].Offset == b[j].Offset {
// Boundaries at the same offset should be ordered according to
// their original position.
return b[i].Index < b[j].Index
}
return b[i].Offset < b[j].Offset
}
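As a quick illustration of the profile format documented above ("mode:" header, then name.go:line.col,line.col numberOfStatements count), a small hypothetical program that feeds an in-memory profile through ParseProfilesFromReader; the file name and numbers are made up.

package main

import (
	"fmt"
	"log"
	"strings"

	"golang.org/x/tools/cover"
)

func main() {
	// Two blocks of a hypothetical example.go, recorded in "count" mode.
	data := "mode: count\n" +
		"example.go:3.13,5.2 2 7\n" +
		"example.go:8.2,9.10 1 0\n"

	profiles, err := cover.ParseProfilesFromReader(strings.NewReader(data))
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range profiles {
		for _, b := range p.Blocks {
			fmt.Printf("%s lines %d-%d: %d statement(s), executed %d time(s)\n",
				p.FileName, b.StartLine, b.EndLine, b.NumStmt, b.Count)
		}
	}
}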

vendor/modules.txt (vendored)

@@ -315,7 +315,7 @@ github.com/containers/image/v5/transports
github.com/containers/image/v5/transports/alltransports
github.com/containers/image/v5/types
github.com/containers/image/v5/version
-# github.com/containers/libhvee v0.6.1-0.20240301191848-0ff33af3be2d
+# github.com/containers/libhvee v0.6.1-0.20240305211055-7cee23cdfc54
## explicit; go 1.18
github.com/containers/libhvee/pkg/hypervctl
github.com/containers/libhvee/pkg/kvp/ginsu
@@ -353,7 +353,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.52.1-0.20240301185114-bdd7d8188030
+# github.com/containers/storage v1.53.0
## explicit; go 1.20
github.com/containers/storage
github.com/containers/storage/drivers
@@ -839,7 +839,7 @@ github.com/nxadm/tail/winfile
# github.com/oklog/ulid v1.3.1
## explicit
github.com/oklog/ulid
-# github.com/onsi/ginkgo/v2 v2.15.0
+# github.com/onsi/ginkgo/v2 v2.16.0
## explicit; go 1.20
github.com/onsi/ginkgo/v2
github.com/onsi/ginkgo/v2/config
@@ -1254,6 +1254,7 @@ golang.org/x/time/rate
# golang.org/x/tools v0.17.0
## explicit; go 1.18
golang.org/x/tools/cmd/stringer
+golang.org/x/tools/cover
golang.org/x/tools/go/ast/inspector
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/internal/packagesdriver