update c/image and c/storage to latest

Signed-off-by: Paul Holzinger <pholzing@redhat.com>
This commit is contained in:
Paul Holzinger 2023-06-26 16:11:36 +02:00
parent 4445a5040a
commit b80fd54a56
No known key found for this signature in database
GPG Key ID: EB145DD938A3CAF2
31 changed files with 395 additions and 153 deletions

8
go.mod
View File

@ -15,11 +15,11 @@ require (
github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07
github.com/containers/common v0.53.1-0.20230626115555-370c89881624
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.25.1-0.20230613183705-07ced6137083
github.com/containers/image/v5 v5.25.1-0.20230623174242-68798a22ce3e
github.com/containers/libhvee v0.0.5
github.com/containers/ocicrypt v1.1.7
github.com/containers/psgo v1.8.0
github.com/containers/storage v1.46.2-0.20230616083707-cc0d208e5e1c
github.com/containers/storage v1.46.2-0.20230621123301-73f29956326d
github.com/coreos/go-systemd/v22 v22.5.0
github.com/coreos/stream-metadata-go v0.4.2
github.com/crc-org/vfkit v0.0.5-0.20230602131541-3d57f09010c9
@ -74,6 +74,7 @@ require (
)
require (
dario.cat/mergo v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/hcsshim v0.10.0-rc.8 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
@ -119,7 +120,6 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/copier v0.3.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
@ -151,7 +151,7 @@ require (
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sigstore/fulcio v1.3.1 // indirect
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 // indirect
github.com/sigstore/sigstore v1.6.5 // indirect
github.com/sigstore/sigstore v1.7.1 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
github.com/sylabs/sif/v2 v2.11.5 // indirect

18
go.sum
View File

@ -22,6 +22,8 @@ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIA
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
@ -243,8 +245,8 @@ github.com/containers/common v0.53.1-0.20230626115555-370c89881624 h1:YBgjfoo0G3
github.com/containers/common v0.53.1-0.20230626115555-370c89881624/go.mod h1:qE1MzGl69IoK7ZNCCH51+aLVjyQtnH0LiZe0wG32Jy0=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.25.1-0.20230613183705-07ced6137083 h1:6Pbnll97ls6G0U3DSxaTqp7Sd8Fykc4gd7BUJm7Bpn8=
github.com/containers/image/v5 v5.25.1-0.20230613183705-07ced6137083/go.mod h1:yRLIs3vw20kCSt3ZvRyX3cp4EIYjNUW6RX9uq2cZ8J8=
github.com/containers/image/v5 v5.25.1-0.20230623174242-68798a22ce3e h1:4W/7KRo29f7zRGRruc3kSf18wNZB/loR1jTygi0TvRM=
github.com/containers/image/v5 v5.25.1-0.20230623174242-68798a22ce3e/go.mod h1:3tWjWAL5TC/ZsaaBNkvTxdQqvlNJ463QF51m+oRtZwI=
github.com/containers/libhvee v0.0.5 h1:5tUiF2eVe8XbVSPD/Os4dIU1gJWoQgtkQHIjQ5X7wpE=
github.com/containers/libhvee v0.0.5/go.mod h1:AYsyMe44w9ylWWEZNW+IOzA7oZ2i/P9TChNljavhYMI=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@ -257,8 +259,8 @@ github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8Ns
github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
github.com/containers/storage v1.46.2-0.20230616083707-cc0d208e5e1c h1:hJP+UF9OzDaThxavD5isFbAFxbvb25TdFtjohAhH/dc=
github.com/containers/storage v1.46.2-0.20230616083707-cc0d208e5e1c/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCmU5a5GhTBae0=
github.com/containers/storage v1.46.2-0.20230621123301-73f29956326d h1:OzN0VtkQ8kcj6HB8QdrIq7qiVsZ6pEcmolV1trZgrgA=
github.com/containers/storage v1.46.2-0.20230621123301-73f29956326d/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCmU5a5GhTBae0=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@ -434,7 +436,7 @@ github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogB
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-rod/rod v0.113.1 h1:+Qb4K/vkR7BOhW6FhfhtLzUD3l11+0XlF4do+27sOQk=
github.com/go-rod/rod v0.113.3 h1:oLiKZW721CCMwA5g7977cWfcAKQ+FuosP47Zf1QiDrA=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
@ -608,8 +610,6 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@ -883,8 +883,8 @@ github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y
github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI=
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12/go.mod h1:8c+a8Yo7r8gKuYbIaz+c3oOdw9iMXx+tMdOg2+b+2jQ=
github.com/sigstore/sigstore v1.6.5 h1:/liHIo7YPJp6sN31DzBYDOuRPmN1xbzROMBE5DLllYM=
github.com/sigstore/sigstore v1.6.5/go.mod h1:h+EoQsf9+6UKgNYxKhBcPgo4PZeEVfzAJxKRRIYhyN4=
github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks=
github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=

View File

@ -9,4 +9,4 @@ name = "go"
enabled = true
[analyzers.meta]
import_path = "github.com/imdario/mergo"
import_path = "dario.cat/mergo"

View File

@ -46,13 +46,19 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
### Important note
### Important notes
#### 1.0.0
In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
#### 0.3.9
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
### Donations
@ -110,11 +116,11 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
## Install
go get github.com/imdario/mergo
go get dario.cat/mergo
// use in your .go code
import (
"github.com/imdario/mergo"
"dario.cat/mergo"
)
## Usage
@ -152,7 +158,7 @@ package main
import (
"fmt"
"github.com/imdario/mergo"
"dario.cat/mergo"
)
type Foo struct {
@ -188,9 +194,9 @@ package main
import (
"fmt"
"github.com/imdario/mergo"
"reflect"
"time"
"dario.cat/mergo"
"reflect"
"time"
)
type timeTransformer struct {

View File

@ -8,30 +8,36 @@ A helper to merge structs and maps in Golang. Useful for configuration default v
Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
Status
# Status
It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
Important note
# Important notes
1.0.0
In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`.
0.3.9
Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. Also, this version adds support for go modules.
Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
Install
# Install
Do your usual installation procedure:
go get github.com/imdario/mergo
go get dario.cat/mergo
// use in your .go code
import (
"github.com/imdario/mergo"
)
// use in your .go code
import (
"dario.cat/mergo"
)
Usage
# Usage
You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
@ -59,7 +65,7 @@ Here is a nice example:
import (
"fmt"
"github.com/imdario/mergo"
"dario.cat/mergo"
)
type Foo struct {
@ -81,7 +87,7 @@ Here is a nice example:
// {two 2}
}
Transformers
# Transformers
Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
@ -89,9 +95,9 @@ Transformers allow to merge specific types differently than in the default behav
import (
"fmt"
"github.com/imdario/mergo"
"reflect"
"time"
"dario.cat/mergo"
"reflect"
"time"
)
type timeTransformer struct {
@ -127,17 +133,16 @@ Transformers allow to merge specific types differently than in the default behav
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
}
Contact me
# Contact me
If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
About
# About
Written by Dario Castañé: https://da.rio.hn
License
# License
BSD 3-Clause license, as Go language.
*/
package mergo

View File

@ -256,9 +256,11 @@ func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.P
}
sigs = append(sigs, newSigs...)
c.Printf("Storing signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
if len(sigs) > 0 {
c.Printf("Storing signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
}
}
return manifestBytes, retManifestType, retManifestDigest, nil

View File

@ -17,8 +17,8 @@ import (
"strings"
"time"
"dario.cat/mergo"
"github.com/containers/storage/pkg/homedir"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
"gopkg.in/yaml.v3"

View File

@ -628,18 +628,13 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if err != nil {
return "", err
}
switch len(options.Options) {
case 0:
case 1:
if options.Options[0] == "ro" {
for _, opt := range options.Options {
if opt == "ro" {
// ignore "ro" option
break
continue
}
fallthrough
default:
return "", fmt.Errorf("btrfs driver does not support mount options")
}
if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
}

View File

@ -187,6 +187,8 @@ type DriverWithDifferOutput struct {
UncompressedDigest digest.Digest
Metadata string
BigData map[string][]byte
TarSplit []byte
TOCDigest digest.Digest
}
// Differ defines the interface for using a custom differ.

View File

@ -60,6 +60,8 @@ const (
FsMagicCephFs = FsMagic(0x00C36400)
// FsMagicCIFS filesystem id for CIFS
FsMagicCIFS = FsMagic(0xFF534D42)
// FsMagicEROFS filesystem id for EROFS
FsMagicEROFS = FsMagic(0xE0F5E1E2)
// FsMagicFHGFS filesystem id for FHGFS
FsMagicFHGFSFs = FsMagic(0x19830326)
// FsMagicIBRIX filesystem id for IBRIX
@ -106,6 +108,7 @@ var (
FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs",
FsMagicEcryptfs: "ecryptfs",
FsMagicEROFS: "erofs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicGPFS: "gpfs",

View File

@ -55,6 +55,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
options := MountOpts{
MountLabel: mountLabel,
Options: []string{"ro"},
}
layerFs, err := driver.Get(id, options)
if err != nil {

View File

@ -1952,6 +1952,9 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri
if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) {
return err
}
diffOutput.UncompressedDigest = diffOutput.TOCDigest
return os.Rename(stagingDirectory, diff)
}

View File

@ -226,15 +226,12 @@ func (d *Driver) Remove(id string) error {
// Get returns the directory for the given id.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
dir := d.dir(id)
switch len(options.Options) {
case 0:
case 1:
if options.Options[0] == "ro" {
for _, opt := range options.Options {
if opt == "ro" {
// ignore "ro" option
break
continue
}
fallthrough
default:
return "", fmt.Errorf("vfs driver does not support mount options")
}
if st, err := os.Stat(dir); err != nil {

View File

@ -2392,8 +2392,26 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
layer.UncompressedDigest = diffOutput.UncompressedDigest
layer.UncompressedSize = diffOutput.Size
layer.Metadata = diffOutput.Metadata
if err = r.saveFor(layer); err != nil {
return err
if len(diffOutput.TarSplit) != 0 {
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
if err != nil {
compressor = pgzip.NewWriter(&tsdata)
}
if err := compressor.SetConcurrency(1024*1024, 1); err != nil { // 1024*1024 is the hard-coded default; we're not changing that
logrus.Infof("setting compression concurrency threads to 1: %v; ignoring", err)
}
if _, err := compressor.Write(diffOutput.TarSplit); err != nil {
compressor.Close()
return err
}
compressor.Close()
if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0o700); err != nil {
return err
}
if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0o600); err != nil {
return err
}
}
for k, v := range diffOutput.BigData {
if err := r.SetBigData(id, k, bytes.NewReader(v)); err != nil {
@ -2403,6 +2421,9 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return err
}
}
if err = r.saveFor(layer); err != nil {
return err
}
return err
}

View File

@ -516,14 +516,14 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
if field != "entries" {
if strings.ToLower(field) != "entries" {
iter.Skip()
continue
}
for iter.ReadArray() {
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
switch field {
case "type", "name", "linkName", "digest", "chunkDigest", "chunkType":
switch strings.ToLower(field) {
case "type", "name", "linkname", "digest", "chunkdigest", "chunktype", "modtime", "accesstime", "changetime":
count += len(iter.ReadStringAsSlice())
case "xattrs":
for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
@ -548,33 +548,33 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
if field == "version" {
if strings.ToLower(field) == "version" {
toc.Version = iter.ReadInt()
continue
}
if field != "entries" {
if strings.ToLower(field) != "entries" {
iter.Skip()
continue
}
for iter.ReadArray() {
var m internal.FileMetadata
for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
switch field {
switch strings.ToLower(field) {
case "type":
m.Type = getString(iter.ReadStringAsSlice())
case "name":
m.Name = getString(iter.ReadStringAsSlice())
case "linkName":
case "linkname":
m.Linkname = getString(iter.ReadStringAsSlice())
case "mode":
m.Mode = iter.ReadInt64()
case "size":
m.Size = iter.ReadInt64()
case "UID":
case "uid":
m.UID = iter.ReadInt()
case "GID":
case "gid":
m.GID = iter.ReadInt()
case "ModTime":
case "modtime":
time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
if err != nil {
return nil, err
@ -592,23 +592,23 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
return nil, err
}
m.ChangeTime = &time
case "devMajor":
case "devmajor":
m.Devmajor = iter.ReadInt64()
case "devMinor":
case "devminor":
m.Devminor = iter.ReadInt64()
case "digest":
m.Digest = getString(iter.ReadStringAsSlice())
case "offset":
m.Offset = iter.ReadInt64()
case "endOffset":
case "endoffset":
m.EndOffset = iter.ReadInt64()
case "chunkSize":
case "chunksize":
m.ChunkSize = iter.ReadInt64()
case "chunkOffset":
case "chunkoffset":
m.ChunkOffset = iter.ReadInt64()
case "chunkDigest":
case "chunkdigest":
m.ChunkDigest = getString(iter.ReadStringAsSlice())
case "chunkType":
case "chunktype":
m.ChunkType = getString(iter.ReadStringAsSlice())
case "xattrs":
m.Xattrs = make(map[string]string)

View File

@ -150,22 +150,32 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64,
// readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must
// be specified.
// This function uses the io.github.containers.zstd-chunked. annotations when specified.
func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) {
func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) {
footerSize := int64(internal.FooterSizeSupported)
if blobSize <= footerSize {
return nil, 0, errors.New("blob too small")
return nil, nil, 0, errors.New("blob too small")
}
manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey]
if manifestChecksumAnnotation == "" {
return nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
return nil, nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey)
}
var offset, length, lengthUncompressed, manifestType uint64
var offsetTarSplit, lengthTarSplit, lengthUncompressedTarSplit uint64
tarSplitChecksumAnnotation := ""
if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" {
if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil {
return nil, 0, err
return nil, nil, 0, err
}
if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found {
if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &offsetTarSplit, &lengthTarSplit, &lengthUncompressedTarSplit); err != nil {
return nil, nil, 0, err
}
tarSplitChecksumAnnotation = annotations[internal.TarSplitChecksumKey]
}
} else {
chunk := ImageSourceChunk{
@ -174,39 +184,39 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable
}
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
if err != nil {
return nil, 0, err
return nil, nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, 0, err
return nil, nil, 0, err
}
footer := make([]byte, footerSize)
if _, err := io.ReadFull(reader, footer); err != nil {
return nil, 0, err
return nil, nil, 0, err
}
offset = binary.LittleEndian.Uint64(footer[0:8])
length = binary.LittleEndian.Uint64(footer[8:16])
lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24])
manifestType = binary.LittleEndian.Uint64(footer[24:32])
if !isZstdChunkedFrameMagic(footer[32:40]) {
return nil, 0, errors.New("invalid magic number")
if !isZstdChunkedFrameMagic(footer[48:56]) {
return nil, nil, 0, errors.New("invalid magic number")
}
}
if manifestType != internal.ManifestTypeCRFS {
return nil, 0, errors.New("invalid manifest type")
return nil, nil, 0, errors.New("invalid manifest type")
}
// set a reasonable limit
if length > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
return nil, nil, 0, errors.New("manifest too big")
}
if lengthUncompressed > (1<<20)*50 {
return nil, 0, errors.New("manifest too big")
return nil, nil, 0, errors.New("manifest too big")
}
chunk := ImageSourceChunk{
@ -214,47 +224,86 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable
Length: length,
}
parts, errs, err := blobStream.GetBlobAt([]ImageSourceChunk{chunk})
chunks := []ImageSourceChunk{chunk}
if offsetTarSplit > 0 {
chunkTarSplit := ImageSourceChunk{
Offset: offsetTarSplit,
Length: lengthTarSplit,
}
chunks = append(chunks, chunkTarSplit)
}
parts, errs, err := blobStream.GetBlobAt(chunks)
if err != nil {
return nil, 0, err
}
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, 0, err
return nil, nil, 0, err
}
manifest := make([]byte, length)
if _, err := io.ReadFull(reader, manifest); err != nil {
return nil, 0, err
readBlob := func(len uint64) ([]byte, error) {
var reader io.ReadCloser
select {
case r := <-parts:
reader = r
case err := <-errs:
return nil, err
}
blob := make([]byte, len)
if _, err := io.ReadFull(reader, blob); err != nil {
reader.Close()
return nil, err
}
if err := reader.Close(); err != nil {
return nil, err
}
return blob, nil
}
manifestDigester := digest.Canonical.Digester()
manifestChecksum := manifestDigester.Hash()
if _, err := manifestChecksum.Write(manifest); err != nil {
return nil, 0, err
}
d, err := digest.Parse(manifestChecksumAnnotation)
manifest, err := readBlob(length)
if err != nil {
return nil, 0, err
return nil, nil, 0, err
}
if manifestDigester.Digest() != d {
return nil, 0, errors.New("invalid manifest checksum")
decodedBlob, err := decodeAndValidateBlob(manifest, lengthUncompressed, manifestChecksumAnnotation)
if err != nil {
return nil, nil, 0, err
}
decodedTarSplit := []byte{}
if offsetTarSplit > 0 {
tarSplit, err := readBlob(lengthTarSplit)
if err != nil {
return nil, nil, 0, err
}
decodedTarSplit, err = decodeAndValidateBlob(tarSplit, lengthUncompressedTarSplit, tarSplitChecksumAnnotation)
if err != nil {
return nil, nil, 0, err
}
}
return decodedBlob, decodedTarSplit, int64(offset), err
}
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) {
d, err := digest.Parse(expectedUncompressedChecksum)
if err != nil {
return nil, err
}
blobDigester := d.Algorithm().Digester()
blobChecksum := blobDigester.Hash()
if _, err := blobChecksum.Write(blob); err != nil {
return nil, err
}
if blobDigester.Digest() != d {
return nil, fmt.Errorf("invalid blob checksum, expected checksum %s, got %s", d, blobDigester.Digest())
}
decoder, err := zstd.NewReader(nil) //nolint:contextcheck
if err != nil {
return nil, 0, err
return nil, err
}
defer decoder.Close()
b := make([]byte, 0, lengthUncompressed)
if decoded, err := decoder.DecodeAll(manifest, b); err == nil {
return decoded, int64(offset), nil
}
return manifest, int64(offset), nil
return decoder.DecodeAll(blob, b)
}

View File

@ -6,13 +6,17 @@ package compressor
import (
"bufio"
"bytes"
"encoding/base64"
"io"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/ioutils"
"github.com/klauspost/compress/zstd"
"github.com/opencontainers/go-digest"
"github.com/vbatts/tar-split/archive/tar"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
const (
@ -198,11 +202,55 @@ type chunk struct {
ChunkType string
}
// tarSplitData bundles the state needed to accumulate a compressed
// tar-split stream while a zstd:chunked layer is being written.
type tarSplitData struct {
	compressed          *bytes.Buffer         // zstd-compressed tar-split bytes collected in memory
	digester            digest.Digester       // digests the compressed bytes (fed via MultiWriter in newTarSplitData)
	uncompressedCounter *ioutils.WriteCounter // counts tar-split bytes before compression
	zstd                *zstd.Encoder         // encoder producing the compressed stream
	packer              storage.Packer        // tar-split JSON packer feeding the counter/encoder
}
// newTarSplitData builds a tarSplitData pipeline: the packer emits JSON
// tar-split records into an uncompressed-byte counter, which feeds a zstd
// encoder at the given compression level; the compressed output is written
// both to an in-memory buffer and to a digester, so the recorded digest
// covers the compressed representation.
func newTarSplitData(level int) (*tarSplitData, error) {
	compressed := bytes.NewBuffer(nil)
	digester := digest.Canonical.Digester()

	// The encoder's output goes to the buffer and the digest hash at once.
	zstdWriter, err := internal.ZstdWriterWithLevel(io.MultiWriter(compressed, digester.Hash()), level)
	if err != nil {
		return nil, err
	}

	uncompressedCounter := ioutils.NewWriteCounter(zstdWriter)
	metaPacker := storage.NewJSONPacker(uncompressedCounter)

	return &tarSplitData{
		compressed:          compressed,
		digester:            digester,
		uncompressedCounter: uncompressedCounter,
		zstd:                zstdWriter,
		packer:              metaPacker,
	}, nil
}
func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
// total written so far. Used to retrieve partial offsets in the file
dest := ioutils.NewWriteCounter(destFile)
tr := tar.NewReader(reader)
tarSplitData, err := newTarSplitData(level)
if err != nil {
return err
}
defer func() {
if tarSplitData.zstd != nil {
tarSplitData.zstd.Close()
}
}()
its, err := asm.NewInputTarStream(reader, tarSplitData.packer, nil)
if err != nil {
return err
}
tr := tar.NewReader(its)
tr.RawAccounting = true
buf := make([]byte, 4096)
@ -214,7 +262,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
defer func() {
if zstdWriter != nil {
zstdWriter.Close()
zstdWriter.Flush()
}
}()
@ -224,9 +271,6 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
if err := zstdWriter.Close(); err != nil {
return 0, err
}
if err := zstdWriter.Flush(); err != nil {
return 0, err
}
offset = dest.Count
zstdWriter.Reset(dest)
}
@ -373,9 +417,11 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
rawBytes := tr.RawBytes()
if _, err := zstdWriter.Write(rawBytes); err != nil {
zstdWriter.Close()
return err
}
if err := zstdWriter.Flush(); err != nil {
zstdWriter.Close()
return err
}
if err := zstdWriter.Close(); err != nil {
@ -383,7 +429,21 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
}
zstdWriter = nil
return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), metadata, level)
if err := tarSplitData.zstd.Flush(); err != nil {
return err
}
if err := tarSplitData.zstd.Close(); err != nil {
return err
}
tarSplitData.zstd = nil
ts := internal.TarSplitData{
Data: tarSplitData.compressed.Bytes(),
Digest: tarSplitData.digester.Digest(),
UncompressedSize: tarSplitData.uncompressedCounter.Count,
}
return internal.WriteZstdChunkedManifest(dest, outMetadata, uint64(dest.Count), &ts, metadata, level)
}
type zstdChunkedWriter struct {

View File

@ -90,6 +90,8 @@ func GetType(t byte) (string, error) {
const (
ManifestChecksumKey = "io.github.containers.zstd-chunked.manifest-checksum"
ManifestInfoKey = "io.github.containers.zstd-chunked.manifest-position"
TarSplitChecksumKey = "io.github.containers.zstd-chunked.tarsplit-checksum"
TarSplitInfoKey = "io.github.containers.zstd-chunked.tarsplit-position"
// ManifestTypeCRFS is a manifest file compatible with the CRFS TOC file.
ManifestTypeCRFS = 1
@ -97,7 +99,7 @@ const (
// FooterSizeSupported is the footer size supported by this implementation.
// Newer versions of the image format might increase this value, so reject
// any version that is not supported.
FooterSizeSupported = 40
FooterSizeSupported = 56
)
var (
@ -125,9 +127,16 @@ func appendZstdSkippableFrame(dest io.Writer, data []byte) error {
return nil
}
func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, metadata []FileMetadata, level int) error {
// TarSplitData carries the tar-split stream that accompanies a zstd:chunked
// layer: the compressed bytes to embed in the blob, their digest (recorded
// in the tarsplit-checksum annotation), and the stream's pre-compression size.
type TarSplitData struct {
	Data             []byte        // compressed tar-split bytes
	Digest           digest.Digest // digest of Data
	UncompressedSize int64         // size of the tar-split stream before compression
}
func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, offset uint64, tarSplitData *TarSplitData, metadata []FileMetadata, level int) error {
// 8 is the size of the zstd skippable frame header + the frame size
manifestOffset := offset + 8
const zstdSkippableFrameHeader = 8
manifestOffset := offset + zstdSkippableFrameHeader
toc := TOC{
Version: 1,
@ -167,13 +176,20 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
return err
}
outMetadata[TarSplitChecksumKey] = tarSplitData.Digest.String()
tarSplitOffset := manifestOffset + uint64(len(compressedManifest)) + zstdSkippableFrameHeader
outMetadata[TarSplitInfoKey] = fmt.Sprintf("%d:%d:%d", tarSplitOffset, len(tarSplitData.Data), tarSplitData.UncompressedSize)
if err := appendZstdSkippableFrame(dest, tarSplitData.Data); err != nil {
return err
}
// Store the offset to the manifest and its size in LE order
manifestDataLE := make([]byte, FooterSizeSupported)
binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset)
binary.LittleEndian.PutUint64(manifestDataLE[8:], uint64(len(compressedManifest)))
binary.LittleEndian.PutUint64(manifestDataLE[16:], uint64(len(manifest)))
binary.LittleEndian.PutUint64(manifestDataLE[24:], uint64(ManifestTypeCRFS))
copy(manifestDataLE[32:], ZstdChunkedFrameMagic)
binary.LittleEndian.PutUint64(manifestDataLE[8*1:], uint64(len(compressedManifest)))
binary.LittleEndian.PutUint64(manifestDataLE[8*2:], uint64(len(manifest)))
binary.LittleEndian.PutUint64(manifestDataLE[8*3:], uint64(ManifestTypeCRFS))
copy(manifestDataLE[8*4:], ZstdChunkedFrameMagic)
return appendZstdSkippableFrame(dest, manifestDataLE)
}

View File

@ -55,6 +55,7 @@ type compressedFileType int
type chunkedDiffer struct {
stream ImageSourceSeekable
manifest []byte
tarSplit []byte
layersCache *layersCache
tocOffset int64
fileType compressedFileType
@ -64,6 +65,8 @@ type chunkedDiffer struct {
gzipReader *pgzip.Reader
zstdReader *zstd.Decoder
rawReader io.Reader
tocDigest digest.Digest
}
var xattrsToIgnore = map[string]interface{}{
@ -135,6 +138,26 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us
return dstFile, st.Size(), nil
}
// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
	// Probe the estargz annotation first, then the zstd:chunked one; the
	// first key present wins. Neither key being set is not an error.
	for _, key := range []string{estargz.TOCJSONDigestAnnotation, internal.ManifestChecksumKey} {
		value, ok := annotations[key]
		if !ok {
			continue
		}
		parsed, err := digest.Parse(value)
		if err != nil {
			return nil, err
		}
		return &parsed, nil
	}
	return nil, nil
}
// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
if _, ok := annotations[internal.ManifestChecksumKey]; ok {
@ -147,7 +170,7 @@ func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotat
}
func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) {
manifest, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations)
manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations)
if err != nil {
return nil, fmt.Errorf("read zstd:chunked manifest: %w", err)
}
@ -156,13 +179,20 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
return nil, err
}
tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
if err != nil {
return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err)
}
return &chunkedDiffer{
copyBuffer: makeCopyBuffer(),
stream: iss,
manifest: manifest,
layersCache: layersCache,
tocOffset: tocOffset,
fileType: fileTypeZstdChunked,
layersCache: layersCache,
manifest: manifest,
stream: iss,
tarSplit: tarSplit,
tocOffset: tocOffset,
tocDigest: tocDigest,
}, nil
}
@ -176,6 +206,11 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
return nil, err
}
tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
if err != nil {
return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err)
}
return &chunkedDiffer{
copyBuffer: makeCopyBuffer(),
stream: iss,
@ -183,6 +218,7 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
layersCache: layersCache,
tocOffset: tocOffset,
fileType: fileTypeEstargz,
tocDigest: tocDigest,
}, nil
}
@ -363,6 +399,24 @@ func maybeDoIDRemap(manifest []internal.FileMetadata, options *archive.TarOption
return nil
}
// mapToSlice returns the keys of inputMap as a slice, in unspecified order.
// An empty map yields a nil slice, matching the zero value callers may
// compare against.
func mapToSlice(inputMap map[uint32]struct{}) []uint32 {
	if len(inputMap) == 0 {
		return nil
	}
	// Pre-size to the known key count so the loop never reallocates.
	out := make([]uint32, 0, len(inputMap))
	for value := range inputMap {
		out = append(out, value)
	}
	return out
}
// collectIDs gathers the set of distinct UIDs and GIDs referenced by entries
// and returns each set as an unordered slice.
func collectIDs(entries []internal.FileMetadata) ([]uint32, []uint32) {
	uidSet := map[uint32]struct{}{}
	gidSet := map[uint32]struct{}{}
	for i := range entries {
		uidSet[uint32(entries[i].UID)] = struct{}{}
		gidSet[uint32(entries[i].GID)] = struct{}{}
	}
	return mapToSlice(uidSet), mapToSlice(gidSet)
}
type originFile struct {
Root string
Path string
@ -1271,12 +1325,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
}()
bigData := map[string][]byte{
bigDataKey: c.manifest,
}
output := graphdriver.DriverWithDifferOutput{
Differ: c,
BigData: bigData,
Differ: c,
TarSplit: c.tarSplit,
BigData: map[string][]byte{
bigDataKey: c.manifest,
},
TOCDigest: c.tocDigest,
}
storeOpts, err := types.DefaultStoreOptionsAutoDetectUID()
@ -1305,6 +1360,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
var missingParts []missingPart
output.UIDs, output.GIDs = collectIDs(toc.Entries)
mergedEntries, totalSize, err := c.mergeTocEntries(c.fileType, toc.Entries)
if err != nil {
return output, err
@ -1579,6 +1636,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
if totalChunksSize > 0 {
logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
}
return output, nil
}

View File

@ -9,9 +9,16 @@ import (
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
digest "github.com/opencontainers/go-digest"
)
// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
// On builds without chunked-format support it always fails; the wording
// matches the GetTOCDigest stub for this platform.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
	// The block contained two consecutive return statements (the second
	// unreachable); keep only the current message.
	return nil, errors.New("format not supported on this system")
}
// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
	// Stub for builds without chunked-format support: always fails,
	// regardless of the annotations passed in.
	return nil, errors.New("format not supported on this system")
}

View File

@ -52,16 +52,32 @@ type Image struct {
// Cosign describes a container image signed using Cosign
type Cosign struct {
Image name.Digest
Annotations map[string]interface{}
Image name.Digest
// ClaimedIdentity is what the signer claims the image to be; usually a registry.com/…/repo:tag, but can also use a digest instead.
// ALMOST ALL consumers MUST verify that ClaimedIdentity in the signature is correct given how user refers to the image;
// e.g. if the user asks to access a signed image example.com/repo/mysql:3.14,
// it is ALMOST ALWAYS necessary to validate that ClaimedIdentity = example.com/repo/mysql:3.14
//
// Considerations:
// - The user might refer to an image using a digest (example.com/repo/mysql@sha256:…); in that case the registry/…/repo should still match
// - If the image is multi-arch, ClaimedIdentity usually refers to the top-level multi-arch image index also on the per-arch images
// (possibly even if ClaimedIdentity contains a digest!)
// - Older versions of cosign generate signatures where ClaimedIdentity only contains a registry/…/repo ; signature consumers should allow users
// to determine whether such images should be accepted (and, long-term, the default SHOULD be to reject them)
ClaimedIdentity string
Annotations map[string]interface{}
}
// SimpleContainerImage returns information about a container image in the github.com/containers/image/signature format
func (p Cosign) SimpleContainerImage() SimpleContainerImage {
dockerReference := p.Image.Repository.Name()
if p.ClaimedIdentity != "" {
dockerReference = p.ClaimedIdentity
}
return SimpleContainerImage{
Critical: Critical{
Identity: Identity{
DockerReference: p.Image.Repository.Name(),
DockerReference: dockerReference,
},
Image: Image{
DockerManifestDigest: p.Image.DigestStr(),
@ -98,6 +114,7 @@ func (p *Cosign) UnmarshalJSON(data []byte) error {
return fmt.Errorf("could not parse image digest string %q: %w", digestStr, err)
}
p.Image = digest
p.ClaimedIdentity = simple.Critical.Identity.DockerReference
p.Annotations = simple.Optional
return nil
}

14
vendor/modules.txt vendored
View File

@ -1,3 +1,6 @@
# dario.cat/mergo v1.0.0
## explicit; go 1.13
dario.cat/mergo
# github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161
## explicit; go 1.16
github.com/Azure/go-ansiterm
@ -183,7 +186,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.25.1-0.20230613183705-07ced6137083
# github.com/containers/image/v5 v5.25.1-0.20230623174242-68798a22ce3e
## explicit; go 1.18
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@ -290,7 +293,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
# github.com/containers/storage v1.46.2-0.20230616083707-cc0d208e5e1c
# github.com/containers/storage v1.46.2-0.20230621123301-73f29956326d
## explicit; go 1.19
github.com/containers/storage
github.com/containers/storage/drivers
@ -580,9 +583,6 @@ github.com/hashicorp/go-multierror
# github.com/hashicorp/go-retryablehttp v0.7.4
## explicit; go 1.13
github.com/hashicorp/go-retryablehttp
# github.com/imdario/mergo v0.3.16
## explicit; go 1.13
github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.1.0
## explicit; go 1.18
github.com/inconshreveable/mousetrap
@ -816,8 +816,8 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey
github.com/sigstore/rekor/pkg/generated/client/tlog
github.com/sigstore/rekor/pkg/generated/models
github.com/sigstore/rekor/pkg/util
# github.com/sigstore/sigstore v1.6.5
## explicit; go 1.18
# github.com/sigstore/sigstore v1.7.1
## explicit; go 1.19
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/oauth
github.com/sigstore/sigstore/pkg/oauthflow