mirror of https://github.com/containers/podman.git
Merge pull request #6172 from containers/dependabot/go_modules/github.com/containers/image/v5-5.4.4
Bump github.com/containers/image/v5 from 5.4.3 to 5.4.4
commit 171dd10125

go.mod (6 changed lines)
@@ -12,7 +12,7 @@ require (
 	github.com/containers/buildah v1.14.9-0.20200501175434-42a48f9373d9
 	github.com/containers/common v0.11.1
 	github.com/containers/conmon v2.0.14+incompatible
-	github.com/containers/image/v5 v5.4.3
+	github.com/containers/image/v5 v5.4.4
 	github.com/containers/psgo v1.5.0
 	github.com/containers/storage v1.19.1
 	github.com/coreos/go-systemd/v22 v22.0.0
@@ -57,10 +57,10 @@ require (
 	github.com/varlink/go v0.0.0-20190502142041-0f1d566d194b
 	github.com/vishvananda/netlink v1.1.0
 	go.etcd.io/bbolt v1.3.4
-	golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59
+	golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
 	golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
 	golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
-	golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
+	golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f
 	gopkg.in/yaml.v2 v2.2.8
 	k8s.io/api v0.18.2
 	k8s.io/apimachinery v0.18.2
go.sum (8 changed lines)
@@ -78,6 +78,8 @@ github.com/containers/conmon v2.0.14+incompatible h1:knU1O1QxXy5YxtjMQVKEyCajROa
 github.com/containers/conmon v2.0.14+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/image/v5 v5.4.3 h1:zn2HR7uu4hpvT5QQHgjqonOzKDuM1I1UHUEmzZT5sbs=
 github.com/containers/image/v5 v5.4.3/go.mod h1:pN0tvp3YbDd7BWavK2aE0mvJUqVd2HmhPjekyWSFm0U=
+github.com/containers/image/v5 v5.4.4 h1:JSanNn3v/BMd3o0MEvO4R4OKNuoJUSzVGQAI1+0FMXE=
+github.com/containers/image/v5 v5.4.4/go.mod h1:g7cxNXitiLi6pEr9/L9n/0wfazRuhDKXU15kV86N8h8=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.2 h1:Q0/IPs8ohfbXNxEfyJ2pFVmvJu5BhqJUAmc6ES9NKbo=
@@ -469,6 +471,8 @@ github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02
 github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
 github.com/vbauerster/mpb/v5 v5.0.3 h1:Ldt/azOkbThTk2loi6FrBd/3fhxGFQ24MxFAS88PoNY=
 github.com/vbauerster/mpb/v5 v5.0.3/go.mod h1:h3YxU5CSr8rZP4Q3xZPVB3jJLhWPou63lHEdr9ytH4Y=
+github.com/vbauerster/mpb/v5 v5.0.4 h1:w7l/tJfHmtIOKZkU+bhbDZOUxj1kln9jy4DUOp3Tl14=
+github.com/vbauerster/mpb/v5 v5.0.4/go.mod h1:fvzasBUyuo35UyuA6sSOlVhpLoNQsp2nBdHw7OiSUU8=
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
@@ -505,6 +509,8 @@ golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM=
 golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -573,6 +579,8 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775 h1:TC0v2RSO1u2kn1ZugjrFXkRZAEaqMN/RW+OTZkBzmLE=
 golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -798,7 +798,6 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {

 	// copyGroup is used to determine if all layers are copied
 	copyGroup := sync.WaitGroup{}
-	copyGroup.Add(numLayers)

 	// copySemaphore is used to limit the number of parallel downloads to
 	// avoid malicious images causing troubles and to be nice to servers.
@@ -850,18 +849,22 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {

 	if err := func() error { // A scope for defer
 		progressPool, progressCleanup := ic.c.newProgressPool(ctx)
-		defer progressCleanup()
+		defer func() {
+			// Wait for all layers to be copied. progressCleanup() must not be called while any of the copyLayerHelpers interact with the progressPool.
+			copyGroup.Wait()
+			progressCleanup()
+		}()

		for i, srcLayer := range srcInfos {
			err = copySemaphore.Acquire(ctx, 1)
			if err != nil {
				return errors.Wrapf(err, "Can't acquire semaphore")
			}
+			copyGroup.Add(1)
			go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool)
		}

-		// Wait for all layers to be copied
-		copyGroup.Wait()
+		// A call to copyGroup.Wait() is done at this point by the defer above.
		return nil
	}(); err != nil {
		return err
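The hunks above change copyLayers to pair every goroutine launch with copyGroup.Add(1) and to wait for the whole group inside the defer, so progressCleanup() can never run while a copyLayerHelper still touches the progress pool. A minimal, self-contained sketch of the same pattern; the names (pool, worker counts) are illustrative, not from the podman code:

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	ctx := context.Background()
	sem := semaphore.NewWeighted(2) // at most 2 workers in flight, like copySemaphore
	wg := sync.WaitGroup{}

	err := func() error { // a scope for defer, as in copyLayers
		pool := "shared resource (stands in for the progress pool)"
		defer func() {
			// Cleanup must not run while any worker still uses pool,
			// so wait inside the defer, before tearing it down.
			wg.Wait()
			fmt.Println("cleaning up:", pool)
		}()

		for i := 0; i < 5; i++ {
			if err := sem.Acquire(ctx, 1); err != nil {
				return err // the defer still waits for already-started workers
			}
			wg.Add(1) // Add before the go statement, never inside the goroutine
			go func(i int) {
				defer wg.Done()
				defer sem.Release(1)
				fmt.Println("worker", i, "using", pool)
			}(i)
		}
		return nil
	}()
	if err != nil {
		fmt.Println(err)
	}
}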
@@ -613,6 +613,9 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
 	params.Add("client_id", "containers/image")

 	authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
+	if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
+		authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
+	}
 	authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
 	logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
 	res, err := c.client.Do(authReq)
@@ -665,6 +668,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
 	if c.auth.Username != "" && c.auth.Password != "" {
 		authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
 	}
+	if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
+		authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
+	}

 	logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
 	res, err := c.client.Do(authReq)
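Both token paths now honor an optional registry user agent. A minimal sketch of the pattern; userAgent stands in for c.sys.DockerRegistryUserAgent, and the token URL is hypothetical:

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

// newTokenRequest builds a form-encoded token request, setting User-Agent
// only when the optional configuration value is present.
func newTokenRequest(tokenURL string, params url.Values, userAgent string) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodPost, tokenURL, strings.NewReader(params.Encode()))
	if err != nil {
		return nil, err
	}
	if userAgent != "" {
		req.Header.Add("User-Agent", userAgent)
	}
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	return req, nil
}

func main() {
	req, _ := newTokenRequest("https://auth.example/token",
		url.Values{"client_id": {"containers/image"}}, "skopeo/1.0")
	fmt.Println(req.Header.Get("User-Agent"))
}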
@@ -37,7 +37,7 @@ func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference

 // SourceRefFullName returns a fully expanded name for the repository this image is in.
 func (i *Image) SourceRefFullName() string {
-	return i.src.ref.ref.Name()
+	return i.src.logicalRef.ref.Name()
 }

 // GetRepositoryTags list all tags available in the repository. The tag
@@ -45,7 +45,7 @@ func (i *Image) SourceRefFullName() string {
 // backward-compatible shim method which calls the module-level
 // GetRepositoryTags)
 func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) {
-	return GetRepositoryTags(ctx, i.src.c.sys, i.src.ref)
+	return GetRepositoryTags(ctx, i.src.c.sys, i.src.logicalRef)
 }

 // GetRepositoryTags list all tags available in the repository. The tag
@@ -16,6 +16,7 @@ import (

 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/internal/iolimits"
+	"github.com/containers/image/v5/internal/uploadreader"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/blobinfocache/none"
 	"github.com/containers/image/v5/types"
@@ -162,20 +163,31 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,

 	digester := digest.Canonical.Digester()
 	sizeCounter := &sizeCounter{}
-	tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
-	res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil)
+	uploadLocation, err = func() (*url.URL, error) { // A scope for defer
+		uploadReader := uploadreader.NewUploadReader(io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)))
+		// This error text should never be user-visible, we terminate only after makeRequestToResolvedURL
+		// returns, so there isn’t a way for the error text to be provided to any of our callers.
+		defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
+		res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
+		if err != nil {
+			logrus.Debugf("Error uploading layer chunked %v", err)
+			return nil, err
+		}
+		defer res.Body.Close()
+		if !successStatus(res.StatusCode) {
+			return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer chunked")
+		}
+		uploadLocation, err := res.Location()
+		if err != nil {
+			return nil, errors.Wrap(err, "Error determining upload URL")
+		}
+		return uploadLocation, nil
+	}()
 	if err != nil {
-		logrus.Debugf("Error uploading layer chunked, response %#v", res)
 		return types.BlobInfo{}, err
 	}
-	defer res.Body.Close()
 	computedDigest := digester.Digest()

-	uploadLocation, err = res.Location()
-	if err != nil {
-		return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
-	}
-
 	// FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)

 	locationQuery := uploadLocation.Query()
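Structurally, the PutBlob rewrite is the "A scope for defer" idiom named in its own comment: an immediately invoked function literal, so that terminating the upload reader and closing the response body happen as soon as the PATCH round trip ends rather than when the whole method returns. A stripped-down sketch with stand-in names (acquire and its cleanup func are illustrative):

package main

import (
	"errors"
	"fmt"
)

func fetchLocation(acquire func() (string, func(), error)) (string, error) {
	location, err := func() (string, error) { // A scope for defer
		loc, cleanup, err := acquire()
		if err != nil {
			return "", err
		}
		defer cleanup() // runs when this literal returns, not at method end
		return loc, nil
	}()
	if err != nil {
		return "", errors.New("upload failed: " + err.Error())
	}
	// By the time we get here, all per-request resources are released.
	return location, nil
}

func main() {
	loc, err := fetchLocation(func() (string, func(), error) {
		return "/v2/uploads/1", func() { fmt.Println("cleanup ran") }, nil
	})
	fmt.Println(loc, err)
}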
@@ -469,17 +481,17 @@ func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [
 	}
 	switch {
 	case d.c.signatureBase != nil:
-		return d.putSignaturesToLookaside(signatures, instanceDigest)
+		return d.putSignaturesToLookaside(signatures, *instanceDigest)
 	case d.c.supportsSignatures:
-		return d.putSignaturesToAPIExtension(ctx, signatures, instanceDigest)
+		return d.putSignaturesToAPIExtension(ctx, signatures, *instanceDigest)
 	default:
 		return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
 	}
 }

 // putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
-// which is not nil.
-func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, instanceDigest *digest.Digest) error {
+// which is not nil, for a manifest with manifestDigest.
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, manifestDigest digest.Digest) error {
 	// FIXME? This overwrites files one at a time, definitely not atomic.
 	// A failure when updating signatures with a reordered copy could lose some of them.

@@ -490,7 +502,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, i

 	// NOTE: Keep this in sync with docs/signature-protocols.md!
 	for i, signature := range signatures {
-		url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i)
+		url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
 		if url == nil {
 			return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
 		}
@@ -505,7 +517,7 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte, i
 	// is enough for dockerImageSource to stop looking for other signatures, so that
 	// is sufficient.
 	for i := len(signatures); ; i++ {
-		url := signatureStorageURL(d.c.signatureBase, *instanceDigest, i)
+		url := signatureStorageURL(d.c.signatureBase, manifestDigest, i)
 		if url == nil {
 			return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
 		}
@@ -564,8 +576,9 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
 	}
 }

-// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
-func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
+// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension,
+// for a manifest with manifestDigest.
+func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures [][]byte, manifestDigest digest.Digest) error {
 	// Skip dealing with the manifest digest, or reading the old state, if not necessary.
 	if len(signatures) == 0 {
 		return nil
@@ -575,7 +588,7 @@ func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context
 	// always adds signatures. Eventually we should also allow removing signatures,
 	// but the X-Registry-Supports-Signatures API extension does not support that yet.

-	existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, *instanceDigest)
+	existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, manifestDigest)
 	if err != nil {
 		return err
 	}
@@ -600,7 +613,7 @@ sigExists:
 		if err != nil || n != 16 {
 			return errors.Wrapf(err, "Error generating random signature len %d", n)
 		}
-		signatureName = fmt.Sprintf("%s@%032x", instanceDigest.String(), randBytes)
+		signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
 		if _, ok := existingSigNames[signatureName]; !ok {
 			break
 		}
@@ -616,7 +629,7 @@ sigExists:
 		return err
 	}

-	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
+	path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
 	res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
 	if err != nil {
 		return err
@@ -24,8 +24,9 @@ import (
 )

 type dockerImageSource struct {
-	ref dockerReference
-	c   *dockerClient
+	logicalRef  dockerReference // The reference the user requested.
+	physicalRef dockerReference // The actual reference we are accessing (possibly a mirror)
+	c           *dockerClient
 	// State
 	cachedManifest         []byte // nil if not loaded yet
 	cachedManifestMIMEType string // Only valid if cachedManifest != nil
@@ -49,7 +50,6 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 		}
 	}

-	primaryDomain := reference.Domain(ref.ref)
 	// Check all endpoints for the manifest availability. If we find one that does
 	// contain the image, it will be used for all future pull actions. Always try the
 	// non-mirror original location last; this both transparently handles the case
@@ -66,7 +66,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 	attempts := []attempt{}
 	for _, pullSource := range pullSources {
 		logrus.Debugf("Trying to access %q", pullSource.Reference)
-		s, err := newImageSourceAttempt(ctx, sys, pullSource, primaryDomain)
+		s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
 		if err == nil {
 			return s, nil
 		}
@@ -95,32 +95,33 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 }

 // newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource.
-// Given a pullSource and primaryDomain, return a dockerImageSource if it is reachable.
+// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable.
 // The caller must call .Close() on the returned ImageSource.
-func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, pullSource sysregistriesv2.PullSource, primaryDomain string) (*dockerImageSource, error) {
-	ref, err := newReference(pullSource.Reference)
+func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource) (*dockerImageSource, error) {
+	physicalRef, err := newReference(pullSource.Reference)
 	if err != nil {
 		return nil, err
 	}

 	endpointSys := sys
 	// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
-	if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(ref.ref) != primaryDomain {
+	if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(physicalRef.ref) != reference.Domain(logicalRef.ref) {
 		copy := *endpointSys
 		copy.DockerAuthConfig = nil
 		copy.DockerBearerRegistryToken = ""
 		endpointSys = &copy
 	}

-	client, err := newDockerClientFromRef(endpointSys, ref, false, "pull")
+	client, err := newDockerClientFromRef(endpointSys, physicalRef, false, "pull")
 	if err != nil {
 		return nil, err
 	}
 	client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure

 	s := &dockerImageSource{
-		ref: ref,
-		c:   client,
+		logicalRef:  logicalRef,
+		physicalRef: physicalRef,
+		c:           client,
 	}

 	if err := s.ensureManifestIsLoaded(ctx); err != nil {
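The mirror-credential guard above shallow-copies the SystemContext and clears both credential fields rather than mutating the caller's struct, and it now compares the physical (possibly mirror) domain against the logical reference's domain. A simplified standalone sketch, with SystemContext reduced to the two relevant fields:

package main

import "fmt"

// SystemContext is a simplified stand-in for types.SystemContext.
type SystemContext struct {
	DockerAuthConfig          *string
	DockerBearerRegistryToken string
}

// scopeCredentials returns sys unchanged when pulling from the registry the
// credentials were meant for, and a credential-free shallow copy otherwise.
func scopeCredentials(sys *SystemContext, physicalDomain, logicalDomain string) *SystemContext {
	if sys != nil && sys.DockerAuthConfig != nil && physicalDomain != logicalDomain {
		copy := *sys // shallow copy; the caller's struct is left untouched
		copy.DockerAuthConfig = nil
		copy.DockerBearerRegistryToken = ""
		return &copy
	}
	return sys
}

func main() {
	auth := "secret"
	sys := &SystemContext{DockerAuthConfig: &auth}
	fmt.Println(scopeCredentials(sys, "mirror.example.com", "docker.io").DockerAuthConfig) // <nil>
	fmt.Println(scopeCredentials(sys, "docker.io", "docker.io").DockerAuthConfig != nil)   // true
}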
@@ -132,7 +133,7 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, pullSo
 // Reference returns the reference used to set up this source, _as specified by the user_
 // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
 func (s *dockerImageSource) Reference() types.ImageReference {
-	return s.ref
+	return s.logicalRef
 }

 // Close removes resources associated with an initialized ImageSource, if any.
@@ -181,7 +182,7 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig
 }

 func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
-	path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
+	path := fmt.Sprintf(manifestPath, reference.Path(s.physicalRef.ref), tagOrDigest)
 	headers := map[string][]string{
 		"Accept": manifest.DefaultRequestedManifestMIMETypes,
 	}
@@ -191,7 +192,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
 	}
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusOK {
-		return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name())
+		return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.physicalRef.ref.Name())
 	}

 	manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
@@ -213,7 +214,7 @@ func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
 		return nil
 	}

-	reference, err := s.ref.tagOrDigest()
+	reference, err := s.physicalRef.tagOrDigest()
 	if err != nil {
 		return err
 	}
@@ -271,7 +272,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 		return s.getExternalBlob(ctx, info.URLs)
 	}

-	path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
+	path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
 	logrus.Debugf("Downloading %s", path)
 	res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
 	if err != nil {
@@ -280,7 +281,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 	if err := httpResponseToError(res, "Error fetching blob"); err != nil {
 		return nil, 0, err
 	}
-	cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
+	cache.RecordKnownLocation(s.physicalRef.Transport(), bicTransportScope(s.physicalRef), info.Digest, newBICLocationReference(s.physicalRef))
 	return res.Body, getBlobSize(res), nil
 }

@@ -308,7 +309,7 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *
 	if instanceDigest != nil {
 		return *instanceDigest, nil
 	}
-	if digested, ok := s.ref.ref.(reference.Digested); ok {
+	if digested, ok := s.physicalRef.ref.(reference.Digested); ok {
 		d := digested.Digest()
 		if d.Algorithm() == digest.Canonical {
 			return d, nil
@@ -398,7 +399,7 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, i
 		return nil, err
 	}

-	parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest)
+	parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest)
 	if err != nil {
 		return nil, err
 	}
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go (generated, vendored; 91 changed lines)
@@ -115,12 +115,23 @@ func getCPUVariant(os string, arch string) string {
 	return ""
 }

+// compatibility contains, for a specified architecture, a list of known variants, in the
+// order from most capable (most restrictive) to least capable (most compatible).
+// Architectures that don’t have variants should not have an entry here.
 var compatibility = map[string][]string{
-	"arm":   {"v7", "v6", "v5"},
+	"arm":   {"v8", "v7", "v6", "v5"},
 	"arm64": {"v8"},
 }

-// Returns all compatible platforms with the platform specifics possibly overriden by user,
+// baseVariants contains, for a specified architecture, a variant that is known to be
+// supported by _all_ machines using that architecture.
+// Architectures that don’t have variants, or where there are possible versions without
+// an established variant name, should not have an entry here.
+var baseVariants = map[string]string{
+	"arm64": "v8",
+}
+
+// WantedPlatforms returns all compatible platforms with the platform specifics possibly overriden by user,
 // the most compatible platform is first.
 // If some option (arch, os, variant) is not present, a value from current platform is detected.
 func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
@@ -145,59 +156,45 @@ func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
 		wantedOS = ctx.OSChoice
 	}

-	var wantedPlatforms []imgspecv1.Platform
-	if wantedVariant != "" && compatibility[wantedArch] != nil {
-		wantedPlatforms = make([]imgspecv1.Platform, 0, len(compatibility[wantedArch]))
-		wantedIndex := -1
-		for i, v := range compatibility[wantedArch] {
-			if wantedVariant == v {
-				wantedIndex = i
-				break
-			}
-		}
-		// user wants a variant which we know nothing about - not even compatibility
-		if wantedIndex == -1 {
-			wantedPlatforms = []imgspecv1.Platform{
-				{
-					OS:           wantedOS,
-					Architecture: wantedArch,
-					Variant:      wantedVariant,
-				},
-			}
-		} else {
-			for i := wantedIndex; i < len(compatibility[wantedArch]); i++ {
-				v := compatibility[wantedArch][i]
-				wantedPlatforms = append(wantedPlatforms, imgspecv1.Platform{
-					OS:           wantedOS,
-					Architecture: wantedArch,
-					Variant:      v,
-				})
+	var variants []string = nil
+	if wantedVariant != "" {
+		if compatibility[wantedArch] != nil {
+			variantOrder := compatibility[wantedArch]
+			for i, v := range variantOrder {
+				if wantedVariant == v {
+					variants = variantOrder[i:]
+					break
+				}
 			}
 		}
+		if variants == nil {
+			// user wants a variant which we know nothing about - not even compatibility
+			variants = []string{wantedVariant}
+		}
+		variants = append(variants, "")
 	} else {
-		wantedPlatforms = []imgspecv1.Platform{
-			{
-				OS:           wantedOS,
-				Architecture: wantedArch,
-				Variant:      wantedVariant,
-			},
+		variants = append(variants, "") // No variant specified, use a “no variant specified” image if present
+		if baseVariant, ok := baseVariants[wantedArch]; ok {
+			// But also accept an image with the “base” variant for the architecture, if it exists.
+			variants = append(variants, baseVariant)
 		}
 	}

-	return wantedPlatforms, nil
+	res := make([]imgspecv1.Platform, 0, len(variants))
+	for _, v := range variants {
+		res = append(res, imgspecv1.Platform{
+			OS:           wantedOS,
+			Architecture: wantedArch,
+			Variant:      v,
+		})
+	}
+	return res, nil
 }

 // MatchesPlatform returns true if a platform descriptor from a multi-arch image matches
 // an item from the return value of WantedPlatforms.
 func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool {
-	if image.Architecture != wanted.Architecture {
-		return false
-	}
-	if image.OS != wanted.OS {
-		return false
-	}
-
-	if wanted.Variant == "" || image.Variant == wanted.Variant {
-		return true
-	}
-
-	return false
+	return image.Architecture == wanted.Architecture &&
+		image.OS == wanted.OS &&
+		image.Variant == wanted.Variant
 }
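With this rewrite, a request for arm/v6 expands to the ordered candidate list [v6 v5 ""], and MatchesPlatform now requires an exact variant match because the empty variant is an explicit candidate. A standalone sketch of the expansion, using the same map contents as the hunk above:

package main

import "fmt"

var compatibility = map[string][]string{
	"arm":   {"v8", "v7", "v6", "v5"},
	"arm64": {"v8"},
}

var baseVariants = map[string]string{"arm64": "v8"}

// wantedVariants mirrors the variant expansion in WantedPlatforms.
func wantedVariants(arch, variant string) []string {
	var variants []string
	if variant != "" {
		if order := compatibility[arch]; order != nil {
			for i, v := range order {
				if v == variant {
					variants = order[i:] // this variant and everything less capable
					break
				}
			}
		}
		if variants == nil {
			variants = []string{variant} // unknown variant: try it verbatim
		}
		return append(variants, "") // and finally a "no variant" image
	}
	variants = append(variants, "")
	if base, ok := baseVariants[arch]; ok {
		variants = append(variants, base) // e.g. arm64 images explicitly tagged v8
	}
	return variants
}

func main() {
	fmt.Println(wantedVariants("arm", "v6")) // [v6 v5 ]
	fmt.Println(wantedVariants("arm64", "")) // [ v8]
}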
vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go (new file, generated, vendored; 61 lines)
@@ -0,0 +1,61 @@
+package uploadreader
+
+import (
+	"io"
+	"sync"
+)
+
+// UploadReader is a pass-through reader for use in sending non-trivial data using the net/http
+// package (http.NewRequest, http.Post and the like).
+//
+// The net/http package uses a separate goroutine to upload data to a HTTP connection,
+// and it is possible for the server to return a response (typically an error) before consuming
+// the full body of the request. In that case http.Client.Do can return with an error while
+// the body is still being read — regardless of the cancellation, if any, of http.Request.Context().
+//
+// As a result, any data used/updated by the io.Reader() provided as the request body may be
+// used/updated even after http.Client.Do returns, causing races.
+//
+// To fix this, UploadReader provides a synchronized Terminate() method, which can block for
+// a not-completely-negligible time (for a duration of the underlying Read()), but guarantees that
+// after Terminate() returns, the underlying reader is never used any more (unlike calling
+// the cancellation callback of context.WithCancel, which returns before any recipients may have
+// reacted to the cancellation).
+type UploadReader struct {
+	mutex sync.Mutex
+	// The following members can only be used with mutex held
+	reader           io.Reader
+	terminationError error // nil if not terminated yet
+}
+
+// NewUploadReader returns an UploadReader for an "underlying" reader.
+func NewUploadReader(underlying io.Reader) *UploadReader {
+	return &UploadReader{
+		reader:           underlying,
+		terminationError: nil,
+	}
+}
+
+// Read returns the error set by Terminate, if any, or calls the underlying reader.
+// It is safe to call this from a different goroutine than Terminate.
+func (ur *UploadReader) Read(p []byte) (int, error) {
+	ur.mutex.Lock()
+	defer ur.mutex.Unlock()
+
+	if ur.terminationError != nil {
+		return 0, ur.terminationError
+	}
+	return ur.reader.Read(p)
+}
+
+// Terminate waits for in-progress Read calls, if any, to finish, and ensures that after
+// this function returns, any Read calls will fail with the provided error, and the underlying
+// reader will never be used any more.
+//
+// It is safe to call this from a different goroutine than Read.
+func (ur *UploadReader) Terminate(err error) {
+	ur.mutex.Lock() // May block for some time if ur.reader.Read() is in progress
+	defer ur.mutex.Unlock()
+
+	ur.terminationError = err
+}
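A hedged usage sketch for the new package: uploadreader lives under internal/ and is only importable from within the containers/image module itself (docker_image_dest.go above is its real caller), so treat the import as illustrative, along with the placeholder URL. The point is the lifecycle: hand the reader to net/http, and defer Terminate so that once Do returns, the transport's writer goroutine can no longer race with the caller over the underlying reader:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"net/http"

	"github.com/containers/image/v5/internal/uploadreader" // internal; compiles only inside the module
)

func upload(url string, payload []byte) error {
	body := uploadreader.NewUploadReader(bytes.NewReader(payload))
	// After Terminate returns, the underlying bytes.Reader is guaranteed
	// untouched, even if the server replied before draining the body.
	defer body.Terminate(errors.New("upload abandoned"))

	req, err := http.NewRequest(http.MethodPatch, url, body)
	if err != nil {
		return err
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	fmt.Println("status:", res.Status)
	return nil
}

func main() {
	_ = upload("http://registry.example/v2/uploads/1", []byte("layer data"))
}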
@@ -0,0 +1,118 @@
+package manifest
+
+import (
+	"fmt"
+
+	"github.com/containers/image/v5/pkg/compression"
+	"github.com/containers/image/v5/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// dupStringSlice returns a deep copy of a slice of strings, or nil if the
+// source slice is empty.
+func dupStringSlice(list []string) []string {
+	if len(list) == 0 {
+		return nil
+	}
+	dup := make([]string, len(list))
+	copy(dup, list)
+	return dup
+}
+
+// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
+// passed-in map is nil or has no keys.
+func dupStringStringMap(m map[string]string) map[string]string {
+	if len(m) == 0 {
+		return nil
+	}
+	result := make(map[string]string)
+	for k, v := range m {
+		result[k] = v
+	}
+	return result
+}
+
+// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func layerInfosToStrings(infos []LayerInfo) []string {
+	layers := make([]string, len(infos))
+	for i, info := range infos {
+		layers[i] = info.Digest.String()
+	}
+	return layers
+}
+
+// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed
+// versions of “the same kind of content”.
+// The map key is the return value of compression.Algorithm.Name(), or mtsUncompressed;
+// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported".
+type compressionMIMETypeSet map[string]string
+
+const mtsUncompressed = ""        // A key in compressionMIMETypeSet for the uncompressed variant
+const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported”
+
+// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
+// to mean "no compression"), based on variantTable.
+func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) {
+	if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
+		return "", fmt.Errorf("cannot update unknown MIME type")
+	}
+	for _, variants := range variantTable {
+		for _, mt := range variants {
+			if mt == mimeType { // Found the variant
+				name := mtsUncompressed
+				if algorithm != nil {
+					name = algorithm.Name()
+				}
+				if res, ok := variants[name]; ok {
+					if res != mtsUnsupportedMIMEType {
+						return res, nil
+					}
+					if name != mtsUncompressed {
+						return "", fmt.Errorf("%s compression is not supported", name)
+					}
+					return "", errors.New("uncompressed variant is not supported")
+				}
+				if name != mtsUncompressed {
+					return "", fmt.Errorf("unknown compression algorithm %s", name)
+				}
+				// We can't very well say “the idea of no compression is unknown”
+				return "", errors.New("uncompressed variant is not supported")
+			}
+		}
+	}
+	if algorithm != nil {
+		return "", fmt.Errorf("unsupported MIME type for compression: %s", mimeType)
+	}
+	return "", fmt.Errorf("unsupported MIME type for decompression: %s", mimeType)
+}
+
+// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
+// mimeType, based on variantTable. It may use updated.Digest for error messages.
+func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) {
+	// Note that manifests in containers-storage might be reporting the
+	// wrong media type since the original manifests are stored while layers
+	// are decompressed in storage. Hence, we need to consider the case
+	// that an already {de}compressed layer should be {de}compressed;
+	// compressionVariantMIMEType does that by not caring whether the original is
+	// {de}compressed.
+	switch updated.CompressionOperation {
+	case types.PreserveOriginal:
+		// Keep the original media type.
+		return mimeType, nil
+
+	case types.Decompress:
+		return compressionVariantMIMEType(variantTable, mimeType, nil)
+
+	case types.Compress:
+		if updated.CompressionAlgorithm == nil {
+			logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", updated.Digest)
+			return mimeType, nil
+		}
+		return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm)
+
+	default:
+		return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation)
+	}
+}
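To make the double lookup in compressionVariantMIMEType concrete, here is a small standalone rendering of the same table logic with made-up MIME types (not the real constants): first find the variant set containing the current type, then index that set by the requested algorithm name.

package main

import "fmt"

type compressionMIMETypeSet map[string]string

const mtsUncompressed = ""      // key for the uncompressed variant
const mtsUnsupportedMIMEType = "" // value meaning "recognized but unsupported"

var table = []compressionMIMETypeSet{
	{
		mtsUncompressed: "application/x.layer",
		"gzip":          "application/x.layer+gzip",
		"zstd":          mtsUnsupportedMIMEType, // recognized, but conversion unsupported
	},
}

// variantFor mirrors the core of compressionVariantMIMEType; algoName ==
// mtsUncompressed means "no compression". The real code additionally guards
// against being handed an empty mimeType.
func variantFor(mimeType, algoName string) (string, error) {
	for _, variants := range table {
		for _, mt := range variants {
			if mt == mimeType { // found the set this MIME type belongs to
				if res, ok := variants[algoName]; ok && res != mtsUnsupportedMIMEType {
					return res, nil
				}
				return "", fmt.Errorf("%q variant of %q is not supported", algoName, mimeType)
			}
		}
	}
	return "", fmt.Errorf("unsupported MIME type %q", mimeType)
}

func main() {
	fmt.Println(variantFor("application/x.layer", "gzip")) // application/x.layer+gzip <nil>
	fmt.Println(variantFor("application/x.layer", "zstd")) // error: not supported
}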
@@ -10,7 +10,6 @@ import (
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )

 // Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
@@ -213,26 +212,17 @@ func (m *Schema2) LayerInfos() []LayerInfo {
 	return blobs
 }

-// isSchema2ForeignLayer is a convenience wrapper to check if a given mime type
-// is a compressed or decompressed schema 2 foreign layer.
-func isSchema2ForeignLayer(mimeType string) bool {
-	switch mimeType {
-	case DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip:
-		return true
-	default:
-		return false
-	}
-}
-
-// isSchema2Layer is a convenience wrapper to check if a given mime type is a
-// compressed or decompressed schema 2 layer.
-func isSchema2Layer(mimeType string) bool {
-	switch mimeType {
-	case DockerV2SchemaLayerMediaTypeUncompressed, DockerV2Schema2LayerMediaType:
-		return true
-	default:
-		return false
-	}
+var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
+	{
+		mtsUncompressed:         DockerV2Schema2ForeignLayerMediaType,
+		compression.Gzip.Name(): DockerV2Schema2ForeignLayerMediaTypeGzip,
+		compression.Zstd.Name(): mtsUnsupportedMIMEType,
+	},
+	{
+		mtsUncompressed:         DockerV2SchemaLayerMediaTypeUncompressed,
+		compression.Gzip.Name(): DockerV2Schema2LayerMediaType,
+		compression.Zstd.Name(): mtsUnsupportedMIMEType,
+	},
 }

 // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
@@ -243,67 +233,16 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 	original := m.LayersDescriptors
 	m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
 	for i, info := range layerInfos {
+		mimeType := original[i].MediaType
 		// First make sure we support the media type of the original layer.
-		if err := SupportedSchema2MediaType(original[i].MediaType); err != nil {
-			return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType)
+		if err := SupportedSchema2MediaType(mimeType); err != nil {
+			return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer %q: %q", info.Digest, mimeType)
 		}
-
-		// Set the correct media types based on the specified compression
-		// operation, the desired compression algorithm AND the original media
-		// type.
-		//
-		// Note that manifests in containers-storage might be reporting the
-		// wrong media type since the original manifests are stored while layers
-		// are decompressed in storage. Hence, we need to consider the case
-		// that an already {de}compressed layer should be {de}compressed, which
-		// is being addressed in `isSchema2{Foreign}Layer`.
-		switch info.CompressionOperation {
-		case types.PreserveOriginal:
-			// Keep the original media type.
-			m.LayersDescriptors[i].MediaType = original[i].MediaType
-
-		case types.Decompress:
-			// Decompress the original media type and check if it was
-			// non-distributable one or not.
-			mimeType := original[i].MediaType
-			switch {
-			case isSchema2ForeignLayer(mimeType):
-				m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaType
-			case isSchema2Layer(mimeType):
-				m.LayersDescriptors[i].MediaType = DockerV2SchemaLayerMediaTypeUncompressed
-			default:
-				return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType)
-			}
-
-		case types.Compress:
-			if info.CompressionAlgorithm == nil {
-				logrus.Debugf("Preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
-				m.LayersDescriptors[i].MediaType = original[i].MediaType
-				break
-			}
-			// Compress the original media type and set the new one based on
-			// that type (distributable or not) and the specified compression
-			// algorithm. Throw an error if the algorithm is not supported.
-			switch info.CompressionAlgorithm.Name() {
-			case compression.Gzip.Name():
-				mimeType := original[i].MediaType
-				switch {
-				case isSchema2ForeignLayer(mimeType):
-					m.LayersDescriptors[i].MediaType = DockerV2Schema2ForeignLayerMediaTypeGzip
-				case isSchema2Layer(mimeType):
-					m.LayersDescriptors[i].MediaType = DockerV2Schema2LayerMediaType
-				default:
-					return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
-				}
-			case compression.Zstd.Name():
-				return fmt.Errorf("Error preparing updated manifest: zstd compression is not supported for docker images")
-			default:
-				return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest)
-			}
-
-		default:
-			return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
+		mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info)
+		if err != nil {
+			return errors.Wrapf(err, "Error preparing updated manifest, layer %q", info.Digest)
 		}
+		m.LayersDescriptors[i].MediaType = mimeType
 		m.LayersDescriptors[i].Digest = info.Digest
 		m.LayersDescriptors[i].Size = info.Size
 		m.LayersDescriptors[i].URLs = info.URLs
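A sketch of how a caller exercises this path through the public API, asking for the gzip variant of an uncompressed schema 2 layer. The digests are placeholders, and the example assumes the v5.4.4 API shown in this diff:

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/compression"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

func main() {
	m := &manifest.Schema2{
		LayersDescriptors: []manifest.Schema2Descriptor{{
			MediaType: manifest.DockerV2SchemaLayerMediaTypeUncompressed,
			Digest:    digest.Digest("sha256:0000000000000000000000000000000000000000000000000000000000000000"),
			Size:      1,
		}},
	}
	gzipAlgo := compression.Gzip
	err := m.UpdateLayerInfos([]types.BlobInfo{{
		Digest:               digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111"),
		Size:                 2,
		CompressionOperation: types.Compress, // ask for the gzip variant of the media type
		CompressionAlgorithm: &gzipAlgo,
	}})
	// Expect the gzip layer media type; zstd would instead fail via the
	// mtsUnsupportedMIMEType entry in schema2CompressionMIMETypeSets.
	fmt.Println(err, m.LayersDescriptors[0].MediaType)
}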
@@ -107,7 +107,7 @@ func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest
 			}
 		}
 	}
-	return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %s, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+	return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
 }

 // Serialize returns the list in a blob format.
@@ -59,30 +59,6 @@ type ListUpdate struct {
 	MediaType string
 }

-// dupStringSlice returns a deep copy of a slice of strings, or nil if the
-// source slice is empty.
-func dupStringSlice(list []string) []string {
-	if len(list) == 0 {
-		return nil
-	}
-	dup := make([]string, len(list))
-	copy(dup, list)
-	return dup
-}
-
-// dupStringStringMap returns a deep copy of a map[string]string, or nil if the
-// passed-in map is nil or has no keys.
-func dupStringStringMap(m map[string]string) map[string]string {
-	if len(m) == 0 {
-		return nil
-	}
-	result := make(map[string]string)
-	for k, v := range m {
-		result[k] = v
-	}
-	return result
-}
-
 // ListFromBlob parses a list of manifests.
 func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
 	normalized := NormalizedMIMEType(manifestMIMEType)
@@ -256,13 +256,3 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
 	// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
 	return nil, fmt.Errorf("Unimplemented manifest MIME type %s (normalized as %s)", mt, nmt)
 }
-
-// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
-// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
-func layerInfosToStrings(infos []LayerInfo) []string {
-	layers := make([]string, len(infos))
-	for i, info := range infos {
-		layers[i] = info.Digest.String()
-	}
-	return layers
-}
@@ -12,7 +12,6 @@ import (
 	"github.com/opencontainers/image-spec/specs-go"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )

 // BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
@@ -95,26 +94,17 @@ func (m *OCI1) LayerInfos() []LayerInfo {
 	return blobs
 }

-// isOCI1NonDistributableLayer is a convenience wrapper to check if a given mime
-// type is a compressed or decompressed OCI v1 non-distributable layer.
-func isOCI1NonDistributableLayer(mimeType string) bool {
-	switch mimeType {
-	case imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd:
-		return true
-	default:
-		return false
-	}
-}
-
-// isOCI1Layer is a convenience wrapper to check if a given mime type is a
-// compressed or decompressed OCI v1 layer.
-func isOCI1Layer(mimeType string) bool {
-	switch mimeType {
-	case imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd:
-		return true
-	default:
-		return false
-	}
+var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
+	{
+		mtsUncompressed:         imgspecv1.MediaTypeImageLayerNonDistributable,
+		compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerNonDistributableGzip,
+		compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerNonDistributableZstd,
+	},
+	{
+		mtsUncompressed:         imgspecv1.MediaTypeImageLayer,
+		compression.Gzip.Name(): imgspecv1.MediaTypeImageLayerGzip,
+		compression.Zstd.Name(): imgspecv1.MediaTypeImageLayerZstd,
+	},
 }

 // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
@@ -133,79 +123,19 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
 			}
 			mimeType = decMimeType
 		}

-		// Set the correct media types based on the specified compression
-		// operation, the desired compression algorithm AND the original media
-		// type.
-		//
-		// Note that manifests in containers-storage might be reporting the
-		// wrong media type since the original manifests are stored while layers
-		// are decompressed in storage. Hence, we need to consider the case
-		// that an already {de}compressed layer should be {de}compressed, which
-		// is being addressed in `isSchema2{Foreign}Layer`.
-		switch info.CompressionOperation {
-		case types.PreserveOriginal:
-			// Keep the original media type.
-			m.Layers[i].MediaType = mimeType
-
-		case types.Decompress:
-			// Decompress the original media type and check if it was
-			// non-distributable one or not.
-			switch {
-			case isOCI1NonDistributableLayer(mimeType):
-				m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
-			case isOCI1Layer(mimeType):
-				m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
-			default:
-				return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", mimeType)
-			}
-
-		case types.Compress:
-			if info.CompressionAlgorithm == nil {
-				logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
-				m.Layers[i].MediaType = mimeType
-				break
-			}
-			// Compress the original media type and set the new one based on
-			// that type (distributable or not) and the specified compression
-			// algorithm. Throw an error if the algorithm is not supported.
-			switch info.CompressionAlgorithm.Name() {
-			case compression.Gzip.Name():
-				switch {
-				case isOCI1NonDistributableLayer(mimeType):
-					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip
-				case isOCI1Layer(mimeType):
-					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerGzip
-				default:
-					return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", mimeType)
-				}
-
-			case compression.Zstd.Name():
-				switch {
-				case isOCI1NonDistributableLayer(mimeType):
-					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableZstd
-				case isOCI1Layer(mimeType):
-					m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerZstd
-				default:
-					return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", mimeType)
-				}
-
-			default:
-				return fmt.Errorf("Error preparing updated manifest: unknown compression algorithm %q for layer %q", info.CompressionAlgorithm.Name(), info.Digest)
-			}
-
-		default:
-			return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
+		mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
+		if err != nil {
+			return errors.Wrapf(err, "Error preparing updated manifest, layer %q", info.Digest)
 		}

 		if info.CryptoOperation == types.Encrypt {
-			encMediaType, err := getEncryptedMediaType(m.Layers[i].MediaType)
+			encMediaType, err := getEncryptedMediaType(mimeType)
 			if err != nil {
-				return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", m.Layers[i].MediaType)
+				return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", mimeType)
 			}
-			m.Layers[i].MediaType = encMediaType
+			mimeType = encMediaType
 		}

+		m.Layers[i].MediaType = mimeType
 		m.Layers[i].Digest = info.Digest
 		m.Layers[i].Size = info.Size
 		m.Layers[i].Annotations = info.Annotations
@@ -79,6 +79,9 @@ func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest,
 	}
 	for _, wantedPlatform := range wantedPlatforms {
 		for _, d := range index.Manifests {
+			if d.Platform == nil {
+				continue
+			}
 			imagePlatform := imgspecv1.Platform{
 				Architecture: d.Platform.Architecture,
 				OS:           d.Platform.OS,
@@ -97,7 +100,7 @@ func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest,
 				return d.Digest, nil
 			}
 		}
 	}
-	return "", fmt.Errorf("no image found in image index for architecture %s, variant %s, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
+	return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
 }

 // Serialize returns the index in a blob format.
@@ -8,7 +8,7 @@ const (
 	// VersionMinor is for functionality in a backwards-compatible manner
 	VersionMinor = 4
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 3
+	VersionPatch = 4

 	// VersionDev indicates development branch. Releases will be empty string.
 	VersionDev = ""
@@ -69,6 +69,7 @@ type bState struct {
 	trimSpace       bool
 	toComplete      bool
 	completeFlushed bool
+	ignoreComplete  bool
 	noPop           bool
 	aDecorators     []decor.Decorator
 	pDecorators     []decor.Decorator
@@ -170,17 +171,18 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
 }

 // SetTotal sets total dynamically.
-// If total is less or equal to zero it takes progress' current value.
-// If complete is true, complete event will be triggered.
+// If total is less than or equal to zero it takes progress' current value.
+// A complete flag enables or disables complete event on `current >= total`.
 func (b *Bar) SetTotal(total int64, complete bool) {
 	select {
 	case b.operateState <- func(s *bState) {
+		s.ignoreComplete = !complete
 		if total <= 0 {
 			s.total = s.current
 		} else {
 			s.total = total
 		}
-		if complete && !s.toComplete {
+		if !s.ignoreComplete && !s.toComplete {
 			s.current = s.total
 			s.toComplete = true
 			go b.refreshTillShutdown()
@@ -197,7 +199,7 @@ func (b *Bar) SetCurrent(current int64) {
 		s.iterated = true
 		s.lastN = current - s.current
 		s.current = current
-		if s.total > 0 && s.current >= s.total {
+		if !s.ignoreComplete && s.current >= s.total {
 			s.current = s.total
 			s.toComplete = true
 			go b.refreshTillShutdown()
@@ -224,7 +226,7 @@ func (b *Bar) IncrInt64(n int64) {
 		s.iterated = true
 		s.lastN = n
 		s.current += n
-		if s.total > 0 && s.current >= s.total {
+		if !s.ignoreComplete && s.current >= s.total {
 			s.current = s.total
 			s.toComplete = true
 			go b.refreshTillShutdown()
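What ignoreComplete buys callers: SetTotal(n, false) can adjust the total of a still-running bar without triggering completion, and a final SetTotal(0, true) completes it at the current value. A hedged usage sketch against the mpb v5 API:

package main

import (
	"github.com/vbauerster/mpb/v5"
	"github.com/vbauerster/mpb/v5/decor"
)

func main() {
	p := mpb.New()
	// Start with a provisional total; complete=false sets ignoreComplete,
	// so the bar keeps running even if current reaches the total.
	bar := p.AddBar(0, mpb.PrependDecorators(decor.CountersNoUnit("%d / %d")))
	bar.SetTotal(100, false)
	for i := 0; i < 100; i++ {
		bar.Increment()
	}
	// Re-enable the completion event; total<=0 means "use the current
	// value as the final total", per the doc comment above.
	bar.SetTotal(0, true)
	p.Wait()
}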
@@ -76,7 +76,7 @@ func (s *barFiller) SetReverse(reverse bool) {
 		s.flush = reverseFlush
 	} else {
 		s.tip = s.format[rTip]
-		s.flush = normalFlush
+		s.flush = regularFlush
 	}
 	s.reverse = reverse
 }
@@ -125,7 +125,7 @@ func (s *barFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
 	s.flush(w, bb)
 }

-func normalFlush(w io.Writer, bb [][]byte) {
+func regularFlush(w io.Writer, bb [][]byte) {
 	for i := 0; i < len(bb); i++ {
 		w.Write(bb[i])
 	}
@@ -3,8 +3,8 @@ module github.com/vbauerster/mpb/v5
 require (
 	github.com/VividCortex/ewma v1.1.1
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
-	golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4
-	golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 // indirect
+	golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
+	golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f // indirect
 )

 go 1.14
@@ -3,11 +3,11 @@ github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmx
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4 h1:QmwruyY+bKbDDL0BaglrbZABEali68eoMFhTZpCjYVA=
-golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU=
+golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -7,6 +7,9 @@ func Percentage(total, current int64, width int) float64 {
 	if total <= 0 {
 		return 0
 	}
+	if current >= total {
+		return float64(width)
+	}
 	return float64(int64(width)*current) / float64(total)
 }
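The added clamp matters once totals are dynamic (see the bar changes above): current can legitimately overshoot total, and without the clamp the computed fill would exceed width. A standalone check:

package main

import "fmt"

func percentage(total, current int64, width int) float64 {
	if total <= 0 {
		return 0
	}
	if current >= total { // the added clamp
		return float64(width)
	}
	return float64(int64(width)*current) / float64(total)
}

func main() {
	fmt.Println(percentage(100, 150, 80)) // 80, not 120
}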
@@ -42,10 +42,14 @@ type Cipher struct {

 	// The last len bytes of buf are leftover key stream bytes from the previous
 	// XORKeyStream invocation. The size of buf depends on how many blocks are
-	// computed at a time.
+	// computed at a time by xorKeyStreamBlocks.
 	buf [bufSize]byte
 	len int

+	// overflow is set when the counter overflowed, no more blocks can be
+	// generated, and the next XORKeyStream call should panic.
+	overflow bool
+
 	// The counter-independent results of the first round are cached after they
 	// are computed the first time.
 	precompDone bool
@@ -89,6 +93,7 @@ func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) {
 		return nil, errors.New("chacha20: wrong nonce size")
 	}

+	key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint
 	c.key = [8]uint32{
 		binary.LittleEndian.Uint32(key[0:4]),
 		binary.LittleEndian.Uint32(key[4:8]),
@@ -139,15 +144,18 @@ func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
 // SetCounter sets the Cipher counter. The next invocation of XORKeyStream will
 // behave as if (64 * counter) bytes had been encrypted so far.
 //
-// To prevent accidental counter reuse, SetCounter panics if counter is
-// less than the current value.
+// To prevent accidental counter reuse, SetCounter panics if counter is less
+// than the current value.
+//
+// Note that the execution time of XORKeyStream is not independent of the
+// counter value.
 func (s *Cipher) SetCounter(counter uint32) {
 	// Internally, s may buffer multiple blocks, which complicates this
 	// implementation slightly. When checking whether the counter has rolled
 	// back, we must use both s.counter and s.len to determine how many blocks
 	// we have already output.
 	outputCounter := s.counter - uint32(s.len)/blockSize
-	if counter < outputCounter {
+	if s.overflow || counter < outputCounter {
 		panic("chacha20: SetCounter attempted to rollback counter")
 	}

@ -196,34 +204,52 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
|
|||
dst[i] = src[i] ^ b
|
||||
}
|
||||
s.len -= len(keyStream)
|
||||
src = src[len(keyStream):]
|
||||
dst = dst[len(keyStream):]
|
||||
dst, src = dst[len(keyStream):], src[len(keyStream):]
|
||||
}
|
||||
if len(src) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
const blocksPerBuf = bufSize / blockSize
|
||||
numBufs := (uint64(len(src)) + bufSize - 1) / bufSize
|
||||
if uint64(s.counter)+numBufs*blocksPerBuf >= 1<<32 {
|
||||
// If we'd need to let the counter overflow and keep generating output,
|
||||
// panic immediately. If instead we'd only reach the last block, remember
|
||||
// not to generate any more output after the buffer is drained.
|
||||
numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize
|
||||
if s.overflow || uint64(s.counter)+numBlocks > 1<<32 {
|
||||
panic("chacha20: counter overflow")
|
||||
} else if uint64(s.counter)+numBlocks == 1<<32 {
|
||||
s.overflow = true
|
||||
}
|
||||
|
||||
// xorKeyStreamBlocks implementations expect input lengths that are a
|
||||
// multiple of bufSize. Platform-specific ones process multiple blocks at a
|
||||
// time, so have bufSizes that are a multiple of blockSize.
|
||||
|
||||
rem := len(src) % bufSize
|
||||
full := len(src) - rem
|
||||
|
||||
full := len(src) - len(src)%bufSize
|
||||
if full > 0 {
|
||||
s.xorKeyStreamBlocks(dst[:full], src[:full])
|
||||
}
|
||||
dst, src = dst[full:], src[full:]
|
||||
|
||||
// If using a multi-block xorKeyStreamBlocks would overflow, use the generic
|
||||
// one that does one block at a time.
|
||||
const blocksPerBuf = bufSize / blockSize
|
||||
if uint64(s.counter)+blocksPerBuf > 1<<32 {
|
||||
s.buf = [bufSize]byte{}
|
||||
numBlocks := (len(src) + blockSize - 1) / blockSize
|
||||
buf := s.buf[bufSize-numBlocks*blockSize:]
|
||||
copy(buf, src)
|
||||
s.xorKeyStreamBlocksGeneric(buf, buf)
|
||||
s.len = len(buf) - copy(dst, buf)
|
||||
return
|
||||
}
|
||||
|
||||
// If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and
|
||||
// keep the leftover keystream for the next XORKeyStream invocation.
|
||||
if rem > 0 {
|
||||
if len(src) > 0 {
|
||||
s.buf = [bufSize]byte{}
|
||||
copy(s.buf[:], src[full:])
|
||||
copy(s.buf[:], src)
|
||||
s.xorKeyStreamBlocks(s.buf[:], s.buf[:])
|
||||
s.len = bufSize - copy(dst[full:], s.buf[:])
|
||||
s.len = bufSize - copy(dst, s.buf[:])
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -260,7 +286,9 @@ func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) {
|
|||
s.precompDone = true
|
||||
}
|
||||
|
||||
for i := 0; i < len(src); i += blockSize {
|
||||
// A condition of len(src) > 0 would be sufficient, but this also
|
||||
// acts as a bounds check elimination hint.
|
||||
for len(src) >= 64 && len(dst) >= 64 {
|
||||
// The remainder of the first column round.
|
||||
fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter)
|
||||
|
||||
|
@ -285,49 +313,28 @@ func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) {
|
|||
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
|
||||
}
|
||||
|
||||
// Finally, add back the initial state to generate the key stream.
|
||||
x0 += c0
|
||||
x1 += c1
|
||||
x2 += c2
|
||||
x3 += c3
|
||||
x4 += c4
|
||||
x5 += c5
|
||||
x6 += c6
|
||||
x7 += c7
|
||||
x8 += c8
|
||||
x9 += c9
|
||||
x10 += c10
|
||||
x11 += c11
|
||||
x12 += s.counter
|
||||
x13 += c13
|
||||
x14 += c14
|
||||
x15 += c15
|
||||
// Add back the initial state to generate the key stream, then
|
||||
// XOR the key stream with the source and write out the result.
|
||||
addXor(dst[0:4], src[0:4], x0, c0)
|
||||
addXor(dst[4:8], src[4:8], x1, c1)
|
||||
addXor(dst[8:12], src[8:12], x2, c2)
|
||||
addXor(dst[12:16], src[12:16], x3, c3)
|
||||
addXor(dst[16:20], src[16:20], x4, c4)
|
||||
addXor(dst[20:24], src[20:24], x5, c5)
|
||||
addXor(dst[24:28], src[24:28], x6, c6)
|
||||
addXor(dst[28:32], src[28:32], x7, c7)
|
||||
addXor(dst[32:36], src[32:36], x8, c8)
|
||||
addXor(dst[36:40], src[36:40], x9, c9)
|
||||
addXor(dst[40:44], src[40:44], x10, c10)
|
||||
addXor(dst[44:48], src[44:48], x11, c11)
|
||||
addXor(dst[48:52], src[48:52], x12, s.counter)
|
||||
addXor(dst[52:56], src[52:56], x13, c13)
|
||||
addXor(dst[56:60], src[56:60], x14, c14)
|
||||
addXor(dst[60:64], src[60:64], x15, c15)
|
||||
|
||||
s.counter += 1
|
||||
if s.counter == 0 {
|
||||
panic("chacha20: internal error: counter overflow")
|
||||
}
|
||||
|
||||
in, out := src[i:], dst[i:]
|
||||
in, out = in[:blockSize], out[:blockSize] // bounds check elimination hint
|
||||
|
||||
// XOR the key stream with the source and write out the result.
|
||||
xor(out[0:], in[0:], x0)
|
||||
xor(out[4:], in[4:], x1)
|
||||
xor(out[8:], in[8:], x2)
|
||||
xor(out[12:], in[12:], x3)
|
||||
xor(out[16:], in[16:], x4)
|
||||
xor(out[20:], in[20:], x5)
|
||||
xor(out[24:], in[24:], x6)
|
||||
xor(out[28:], in[28:], x7)
|
||||
xor(out[32:], in[32:], x8)
|
||||
xor(out[36:], in[36:], x9)
|
||||
xor(out[40:], in[40:], x10)
|
||||
xor(out[44:], in[44:], x11)
|
||||
xor(out[48:], in[48:], x12)
|
||||
xor(out[52:], in[52:], x13)
|
||||
xor(out[56:], in[56:], x14)
|
||||
xor(out[60:], in[60:], x15)
|
||||
src, dst = src[blockSize:], dst[blockSize:]
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
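For context on the public API these hunks touch, a minimal usage sketch (the all-zero key and nonce are purely illustrative; never reuse a nonce under a real key):

package main

import (
	"fmt"

	"golang.org/x/crypto/chacha20"
)

func main() {
	key := make([]byte, chacha20.KeySize)     // 32 bytes
	nonce := make([]byte, chacha20.NonceSize) // 12 bytes

	c, err := chacha20.NewUnauthenticatedCipher(key, nonce)
	if err != nil {
		panic(err)
	}

	// SetCounter skips ahead: the next XORKeyStream behaves as if
	// 64*counter bytes had already been encrypted. With this update,
	// it also panics if the counter has already overflowed.
	c.SetCounter(2)

	buf := make([]byte, 64)
	c.XORKeyStream(buf, buf) // keystream for block 2 (input was all zeros)
	fmt.Printf("%x\n", buf[:8])
}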
@@ -13,10 +13,10 @@ const unaligned = runtime.GOARCH == "386" ||
 	runtime.GOARCH == "ppc64le" ||
 	runtime.GOARCH == "s390x"
 
-// xor reads a little endian uint32 from src, XORs it with u and
+// addXor reads a little endian uint32 from src, XORs it with (a + b) and
 // places the result in little endian byte order in dst.
-func xor(dst, src []byte, u uint32) {
-	_, _ = src[3], dst[3] // eliminate bounds checks
+func addXor(dst, src []byte, a, b uint32) {
+	_, _ = src[3], dst[3] // bounds check elimination hint
 	if unaligned {
 		// The compiler should optimize this code into
 		// 32-bit unaligned little endian loads and stores.

@@ -27,15 +27,16 @@ func xor(dst, src []byte, u uint32) {
 		v |= uint32(src[1]) << 8
 		v |= uint32(src[2]) << 16
 		v |= uint32(src[3]) << 24
-		v ^= u
+		v ^= a + b
 		dst[0] = byte(v)
 		dst[1] = byte(v >> 8)
 		dst[2] = byte(v >> 16)
 		dst[3] = byte(v >> 24)
 	} else {
-		dst[0] = src[0] ^ byte(u)
-		dst[1] = src[1] ^ byte(u>>8)
-		dst[2] = src[2] ^ byte(u>>16)
-		dst[3] = src[3] ^ byte(u>>24)
+		a += b
+		dst[0] = src[0] ^ byte(a)
+		dst[1] = src[1] ^ byte(a>>8)
+		dst[2] = src[2] ^ byte(a>>16)
+		dst[3] = src[3] ^ byte(a>>24)
 	}
 }
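The rename reflects the fused add-then-XOR that the generic keystream loop above now relies on. A reference equivalent using encoding/binary, for illustration only (addXorRef is a hypothetical name, not the vendored fast path):

package example

import "encoding/binary"

// addXorRef behaves like the vendored addXor: it reads a little-endian
// uint32 from src, XORs it with (a + b), and stores the result in
// little-endian byte order in dst.
func addXorRef(dst, src []byte, a, b uint32) {
	v := binary.LittleEndian.Uint32(src) ^ (a + b)
	binary.LittleEndian.PutUint32(dst, v)
}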
@@ -7,5 +7,3 @@
 package poly1305
 
 type mac struct{ macGeneric }
-
-func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} }
@@ -46,10 +46,9 @@ func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
 // two different messages with the same key allows an attacker
 // to forge messages at will.
 func New(key *[32]byte) *MAC {
-	return &MAC{
-		mac:       newMAC(key),
-		finalized: false,
-	}
+	m := &MAC{}
+	initialize(key, &m.macState)
+	return m
 }
 
 // MAC is an io.Writer computing an authentication tag

@@ -58,7 +57,7 @@ func New(key *[32]byte) *MAC {
 // MAC cannot be used like common hash.Hash implementations,
 // because using a poly1305 key twice breaks its security.
 // Therefore writing data to a running MAC after calling
-// Sum causes it to panic.
+// Sum or Verify causes it to panic.
 type MAC struct {
 	mac // platform-dependent implementation

@@ -71,10 +70,10 @@ func (h *MAC) Size() int { return TagSize }
 // Write adds more data to the running message authentication code.
 // It never returns an error.
 //
-// It must not be called after the first call of Sum.
+// It must not be called after the first call of Sum or Verify.
 func (h *MAC) Write(p []byte) (n int, err error) {
 	if h.finalized {
-		panic("poly1305: write to MAC after Sum")
+		panic("poly1305: write to MAC after Sum or Verify")
 	}
 	return h.mac.Write(p)
 }

@@ -87,3 +86,12 @@ func (h *MAC) Sum(b []byte) []byte {
 	h.finalized = true
 	return append(b, mac[:]...)
 }
+
+// Verify returns whether the authenticator of all data written to
+// the message authentication code matches the expected value.
+func (h *MAC) Verify(expected []byte) bool {
+	var mac [TagSize]byte
+	h.mac.Sum(&mac)
+	h.finalized = true
+	return subtle.ConstantTimeCompare(expected, mac[:]) == 1
+}
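The new Verify method rounds out the incremental API. A minimal sketch (the zero key is purely illustrative; a real poly1305 one-time key must be unique per message):

package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte // one-time key; must never be reused across messages
	msg := []byte("hello")

	h := poly1305.New(&key)
	h.Write(msg)
	tag := h.Sum(nil)

	// Verify finalizes the MAC and compares tags in constant time.
	h2 := poly1305.New(&key)
	h2.Write(msg)
	fmt.Println(h2.Verify(tag)) // true
}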
@@ -9,17 +9,6 @@ package poly1305
 //go:noescape
 func update(state *macState, msg []byte)
 
-func sum(out *[16]byte, m []byte, key *[32]byte) {
-	h := newMAC(key)
-	h.Write(m)
-	h.Sum(out)
-}
-
-func newMAC(key *[32]byte) (h mac) {
-	initialize(key, &h.r, &h.s)
-	return
-}
-
 // mac is a wrapper for macGeneric that redirects calls that would have gone to
 // updateGeneric to update.
 //
@@ -31,9 +31,10 @@ func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
 	h.Sum(out)
 }
 
-func newMACGeneric(key *[32]byte) (h macGeneric) {
-	initialize(key, &h.r, &h.s)
-	return
+func newMACGeneric(key *[32]byte) macGeneric {
+	m := macGeneric{}
+	initialize(key, &m.macState)
+	return m
 }
 
 // macState holds numbers in saturated 64-bit little-endian limbs. That is,

@@ -97,11 +98,12 @@ const (
 	rMask1 = 0x0FFFFFFC0FFFFFFC
 )
 
-func initialize(key *[32]byte, r, s *[2]uint64) {
-	r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
-	r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
-	s[0] = binary.LittleEndian.Uint64(key[16:24])
-	s[1] = binary.LittleEndian.Uint64(key[24:32])
+// initialize loads the 256-bit key into the two 128-bit secret values r and s.
+func initialize(key *[32]byte, m *macState) {
+	m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
+	m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
+	m.s[0] = binary.LittleEndian.Uint64(key[16:24])
+	m.s[1] = binary.LittleEndian.Uint64(key[24:32])
 }
 
 // uint128 holds a 128-bit number as two 64-bit limbs, for use with the
@@ -2,12 +2,17 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo purego
+// At this point only s390x has an assembly implementation of sum. All other
+// platforms have assembly implementations of mac, and just define sum as using
+// that through New. Once s390x is ported, this file can be deleted and the body
+// of sum moved into Sum.
+
+// +build !go1.11 !s390x gccgo purego
 
 package poly1305
 
 func sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
-	h := newMAC(key)
+	h := New(key)
 	h.Write(msg)
-	h.Sum(out)
+	h.Sum(out[:0])
 }
@@ -9,17 +9,6 @@ package poly1305
 //go:noescape
 func update(state *macState, msg []byte)
 
-func sum(out *[16]byte, m []byte, key *[32]byte) {
-	h := newMAC(key)
-	h.Write(m)
-	h.Sum(out)
-}
-
-func newMAC(key *[32]byte) (h mac) {
-	initialize(key, &h.r, &h.s)
-	return
-}
-
 // mac is a wrapper for macGeneric that redirects calls that would have gone to
 // updateGeneric to update.
 //
@@ -119,7 +119,7 @@ var cipherModes = map[string]*cipherMode{
 	chacha20Poly1305ID: {64, 0, newChaCha20Cipher},
 
 	// CBC mode is insecure and so is not included in the default config.
-	// (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
+	// (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely
 	// needed, it's possible to specify a custom Config to enable it.
 	// You should expect that an active attacker can recover plaintext if
 	// you do.
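As the comment says, CBC stays off by default. A hedged sketch of the opt-in it alludes to (cbcClientConfig is a hypothetical helper; Ciphers and SetDefaults are the ssh package's Config fields):

package example

import "golang.org/x/crypto/ssh"

// cbcClientConfig opts in to a CBC cipher despite the warning above:
// SetDefaults fills in the secure cipher list first, then the insecure
// mode is appended explicitly.
func cbcClientConfig() *ssh.ClientConfig {
	cfg := &ssh.ClientConfig{}
	cfg.SetDefaults()
	cfg.Ciphers = append(cfg.Ciphers, "aes128-cbc") // expect plaintext recovery by active attackers
	return cfg
}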
@@ -113,6 +113,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
 }
 
 const (
+	keyCtrlC = 3
 	keyCtrlD = 4
 	keyCtrlU = 21
 	keyEnter = '\r'

@@ -151,8 +152,12 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
 	switch b[0] {
 	case 1: // ^A
 		return keyHome, b[1:]
+	case 2: // ^B
+		return keyLeft, b[1:]
 	case 5: // ^E
 		return keyEnd, b[1:]
+	case 6: // ^F
+		return keyRight, b[1:]
 	case 8: // ^H
 		return keyBackspace, b[1:]
 	case 11: // ^K

@@ -738,6 +743,9 @@ func (t *Terminal) readLine() (line string, err error) {
 				return "", io.EOF
 			}
 		}
+		if key == keyCtrlC {
+			return "", io.EOF
+		}
 		if key == keyPasteStart {
 			t.pasteActive = true
 			if len(t.line) == 0 {
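With the new keyCtrlC mapping, ^C during a read now surfaces as io.EOF. A minimal sketch (echoLines is a hypothetical helper; conn is whatever raw-mode io.ReadWriter the caller already has):

package example

import (
	"fmt"
	"io"

	"golang.org/x/crypto/ssh/terminal"
)

// echoLines reads lines until EOF; after this change, Ctrl-C ends the
// loop the same way Ctrl-D on an empty line does.
func echoLines(conn io.ReadWriter) {
	t := terminal.NewTerminal(conn, "> ")
	for {
		line, err := t.ReadLine()
		if err != nil { // io.EOF on ^D at an empty line, and now on ^C
			return
		}
		fmt.Fprintln(t, "you typed:", line)
	}
}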
@@ -89,7 +89,7 @@ constants.
 
 Adding new syscall numbers is mostly done by running the build on a sufficiently
 new installation of the target OS (or updating the source checkouts for the
-new build system). However, depending on the OS, you make need to update the
+new build system). However, depending on the OS, you may need to update the
 parsing in mksysnum.
 
 ### mksyscall.go

@@ -163,7 +163,7 @@ The merge is performed in the following steps:
 
 ## Generated files
 
-### `zerror_${GOOS}_${GOARCH}.go`
+### `zerrors_${GOOS}_${GOARCH}.go`
 
 A file containing all of the system's generated error numbers, error strings,
 signal numbers, and constants. Generated by `mkerrors.sh` (see above).
@@ -486,6 +486,7 @@ ccflags="$@"
 		$2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
 		$2 ~ /^MODULE_INIT_/ ||
 		$2 !~ "NLA_TYPE_MASK" &&
+		$2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ &&
 		$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
 		$2 ~ /^SIOC/ ||
 		$2 ~ /^TIOC/ ||
@@ -216,6 +216,7 @@ const (
 	BPF_F_RDONLY = 0x8
 	BPF_F_RDONLY_PROG = 0x80
 	BPF_F_RECOMPUTE_CSUM = 0x1
+	BPF_F_REPLACE = 0x4
 	BPF_F_REUSE_STACKID = 0x400
 	BPF_F_SEQ_NUMBER = 0x8
 	BPF_F_SKIP_FIELD_MASK = 0xff

@@ -389,6 +390,7 @@ const (
 	CLONE_NEWNET = 0x40000000
 	CLONE_NEWNS = 0x20000
 	CLONE_NEWPID = 0x20000000
+	CLONE_NEWTIME = 0x80
 	CLONE_NEWUSER = 0x10000000
 	CLONE_NEWUTS = 0x4000000
 	CLONE_PARENT = 0x8000

@@ -737,6 +739,7 @@ const (
 	GENL_NAMSIZ = 0x10
 	GENL_START_ALLOC = 0x13
 	GENL_UNS_ADMIN_PERM = 0x10
+	GRND_INSECURE = 0x4
 	GRND_NONBLOCK = 0x1
 	GRND_RANDOM = 0x2
 	HDIO_DRIVE_CMD = 0x31f

@@ -1487,6 +1490,7 @@ const (
 	PR_GET_FPEMU = 0x9
 	PR_GET_FPEXC = 0xb
 	PR_GET_FP_MODE = 0x2e
+	PR_GET_IO_FLUSHER = 0x3a
 	PR_GET_KEEPCAPS = 0x7
 	PR_GET_NAME = 0x10
 	PR_GET_NO_NEW_PRIVS = 0x27

@@ -1522,6 +1526,7 @@ const (
 	PR_SET_FPEMU = 0xa
 	PR_SET_FPEXC = 0xc
 	PR_SET_FP_MODE = 0x2d
+	PR_SET_IO_FLUSHER = 0x39
 	PR_SET_KEEPCAPS = 0x8
 	PR_SET_MM = 0x23
 	PR_SET_MM_ARG_END = 0x9

@@ -1750,12 +1755,15 @@ const (
 	RTM_DELRULE = 0x21
 	RTM_DELTCLASS = 0x29
 	RTM_DELTFILTER = 0x2d
+	RTM_DELVLAN = 0x71
 	RTM_F_CLONED = 0x200
 	RTM_F_EQUALIZE = 0x400
 	RTM_F_FIB_MATCH = 0x2000
 	RTM_F_LOOKUP_TABLE = 0x1000
 	RTM_F_NOTIFY = 0x100
+	RTM_F_OFFLOAD = 0x4000
 	RTM_F_PREFIX = 0x800
+	RTM_F_TRAP = 0x8000
 	RTM_GETACTION = 0x32
 	RTM_GETADDR = 0x16
 	RTM_GETADDRLABEL = 0x4a

@@ -1777,7 +1785,8 @@ const (
 	RTM_GETSTATS = 0x5e
 	RTM_GETTCLASS = 0x2a
 	RTM_GETTFILTER = 0x2e
-	RTM_MAX = 0x6f
+	RTM_GETVLAN = 0x72
+	RTM_MAX = 0x73
 	RTM_NEWACTION = 0x30
 	RTM_NEWADDR = 0x14
 	RTM_NEWADDRLABEL = 0x48

@@ -1792,6 +1801,7 @@ const (
 	RTM_NEWNETCONF = 0x50
 	RTM_NEWNEXTHOP = 0x68
 	RTM_NEWNSID = 0x58
+	RTM_NEWNVLAN = 0x70
 	RTM_NEWPREFIX = 0x34
 	RTM_NEWQDISC = 0x24
 	RTM_NEWROUTE = 0x18

@@ -1799,8 +1809,8 @@ const (
 	RTM_NEWSTATS = 0x5c
 	RTM_NEWTCLASS = 0x28
 	RTM_NEWTFILTER = 0x2c
-	RTM_NR_FAMILIES = 0x18
-	RTM_NR_MSGTYPES = 0x60
+	RTM_NR_FAMILIES = 0x19
+	RTM_NR_MSGTYPES = 0x64
 	RTM_SETDCB = 0x4f
 	RTM_SETLINK = 0x13
 	RTM_SETNEIGHTBL = 0x43

@@ -2090,7 +2100,7 @@ const (
 	TASKSTATS_GENL_NAME = "TASKSTATS"
 	TASKSTATS_GENL_VERSION = 0x1
 	TASKSTATS_TYPE_MAX = 0x6
-	TASKSTATS_VERSION = 0x9
+	TASKSTATS_VERSION = 0xa
 	TCIFLUSH = 0x0
 	TCIOFF = 0x2
 	TCIOFLUSH = 0x2

@@ -2271,7 +2281,7 @@ const (
 	VMADDR_CID_ANY = 0xffffffff
 	VMADDR_CID_HOST = 0x2
 	VMADDR_CID_HYPERVISOR = 0x0
-	VMADDR_CID_RESERVED = 0x1
+	VMADDR_CID_LOCAL = 0x1
 	VMADDR_PORT_ANY = 0xffffffff
 	VM_SOCKETS_INVALID_VERSION = 0xffffffff
 	VQUIT = 0x1

@@ -2398,6 +2408,7 @@ const (
 	XENFS_SUPER_MAGIC = 0xabba1974
 	XFS_SUPER_MAGIC = 0x58465342
+	Z3FOLD_MAGIC = 0x33
 	ZONEFS_MAGIC = 0x5a4f4653
 	ZSMALLOC_MAGIC = 0x58295829
 )
 
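Among the additions, GRND_INSECURE is directly usable through the unix package's existing Getrandom wrapper. A hedged sketch (requires Linux 5.6+; no fallback shown):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	buf := make([]byte, 16)
	// GRND_INSECURE asks the kernel for bytes even before the entropy
	// pool is initialized; suitable only for non-cryptographic uses.
	n, err := unix.Getrandom(buf, unix.GRND_INSECURE)
	if err != nil {
		fmt.Println("getrandom:", err) // e.g. EINVAL on kernels before 5.6
		return
	}
	fmt.Printf("%d bytes: %x\n", n, buf[:n])
}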
@@ -431,4 +431,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )

@@ -353,4 +353,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )

@@ -395,4 +395,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )

@@ -298,4 +298,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )

@@ -416,4 +416,6 @@ const (
 	SYS_FSPICK = 4433
 	SYS_PIDFD_OPEN = 4434
 	SYS_CLONE3 = 4435
+	SYS_OPENAT2 = 4437
+	SYS_PIDFD_GETFD = 4438
 )

@@ -346,4 +346,6 @@ const (
 	SYS_FSPICK = 5433
 	SYS_PIDFD_OPEN = 5434
 	SYS_CLONE3 = 5435
+	SYS_OPENAT2 = 5437
+	SYS_PIDFD_GETFD = 5438
 )

@@ -346,4 +346,6 @@ const (
 	SYS_FSPICK = 5433
 	SYS_PIDFD_OPEN = 5434
 	SYS_CLONE3 = 5435
+	SYS_OPENAT2 = 5437
+	SYS_PIDFD_GETFD = 5438
 )

@@ -416,4 +416,6 @@ const (
 	SYS_FSPICK = 4433
 	SYS_PIDFD_OPEN = 4434
 	SYS_CLONE3 = 4435
+	SYS_OPENAT2 = 4437
+	SYS_PIDFD_GETFD = 4438
 )

@@ -395,4 +395,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )

@@ -395,4 +395,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )

@@ -297,4 +297,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )

@@ -360,4 +360,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )

@@ -374,4 +374,6 @@ const (
 	SYS_FSMOUNT = 432
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )
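The two added numbers are the openat2 and pidfd_getfd syscalls. A hedged sketch using the raw numbers via unix.Syscall (pidfd_open, already present above, yields the pidfd that SYS_PIDFD_GETFD operates on; needs Linux 5.6+ and ptrace access, so treat this as illustrative):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Open a pidfd for ourselves, then duplicate our own stdin through
	// SYS_PIDFD_GETFD as a toy demonstration of the new number.
	pidfd, _, errno := unix.Syscall(unix.SYS_PIDFD_OPEN, uintptr(os.Getpid()), 0, 0)
	if errno != 0 {
		fmt.Println("pidfd_open:", errno) // needs Linux 5.3+
		return
	}
	defer unix.Close(int(pidfd))

	newfd, _, errno := unix.Syscall(unix.SYS_PIDFD_GETFD, pidfd, uintptr(os.Stdin.Fd()), 0)
	if errno != 0 {
		fmt.Println("pidfd_getfd:", errno) // needs Linux 5.6+
		return
	}
	defer unix.Close(int(newfd))
	fmt.Println("duplicated stdin as fd", newfd)
}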
@@ -114,7 +114,8 @@ type FscryptKeySpecifier struct {
 type FscryptAddKeyArg struct {
 	Key_spec FscryptKeySpecifier
 	Raw_size uint32
-	_ [9]uint32
+	Key_id uint32
+	_ [8]uint32
 }
 
 type FscryptRemoveKeyArg struct {

@@ -479,7 +480,7 @@ const (
 	IFLA_NEW_IFINDEX = 0x31
 	IFLA_MIN_MTU = 0x32
 	IFLA_MAX_MTU = 0x33
-	IFLA_MAX = 0x35
+	IFLA_MAX = 0x36
 	IFLA_INFO_KIND = 0x1
 	IFLA_INFO_DATA = 0x2
 	IFLA_INFO_XSTATS = 0x3

@@ -2308,3 +2309,32 @@ type FsverityEnableArg struct {
 	Sig_ptr uint64
 	_ [11]uint64
 }
+
+type Nhmsg struct {
+	Family uint8
+	Scope uint8
+	Protocol uint8
+	Resvd uint8
+	Flags uint32
+}
+
+type NexthopGrp struct {
+	Id uint32
+	Weight uint8
+	Resvd1 uint8
+	Resvd2 uint16
+}
+
+const (
+	NHA_UNSPEC = 0x0
+	NHA_ID = 0x1
+	NHA_GROUP = 0x2
+	NHA_GROUP_TYPE = 0x3
+	NHA_BLACKHOLE = 0x4
+	NHA_OIF = 0x5
+	NHA_GATEWAY = 0x6
+	NHA_ENCAP_TYPE = 0x7
+	NHA_ENCAP = 0x8
+	NHA_GROUPS = 0x9
+	NHA_MASTER = 0xa
+)
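The new nexthop types mirror the kernel's netlink nexthop uapi structures; both pack into 8 bytes on the wire, which a quick check confirms (illustrative only, Linux-only since the types are Linux-specific):

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	// Four uint8 fields plus a uint32, and a uint32 plus two uint8 and a
	// uint16: each lays out to exactly 8 bytes with no padding.
	fmt.Println(unsafe.Sizeof(unix.Nhmsg{}))      // 8
	fmt.Println(unsafe.Sizeof(unix.NexthopGrp{})) // 8
}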
@@ -287,6 +287,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint32

@@ -298,6 +298,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint64

@@ -276,6 +276,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint32

@@ -277,6 +277,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint64

@@ -281,6 +281,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint32

@@ -280,6 +280,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint64

@@ -280,6 +280,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint64

@@ -281,6 +281,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint32

@@ -287,6 +287,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint64

@@ -287,6 +287,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint64

@@ -305,6 +305,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint64

@@ -300,6 +300,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint64

@@ -282,6 +282,7 @@ type Taskstats struct {
 	Freepages_delay_total uint64
 	Thrashing_count uint64
 	Thrashing_delay_total uint64
+	Ac_btime64 uint64
 }
 
 type cpuMask uint64
@@ -91,7 +91,7 @@ github.com/containers/common/pkg/config
 github.com/containers/common/pkg/sysinfo
 # github.com/containers/conmon v2.0.14+incompatible
 github.com/containers/conmon/runner/config
-# github.com/containers/image/v5 v5.4.3
+# github.com/containers/image/v5 v5.4.4
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
 github.com/containers/image/v5/directory/explicitfilepath

@@ -106,6 +106,7 @@ github.com/containers/image/v5/internal/iolimits
 github.com/containers/image/v5/internal/pkg/keyctl
 github.com/containers/image/v5/internal/pkg/platform
 github.com/containers/image/v5/internal/tmpdir
+github.com/containers/image/v5/internal/uploadreader
 github.com/containers/image/v5/manifest
 github.com/containers/image/v5/oci/archive
 github.com/containers/image/v5/oci/internal

@@ -520,7 +521,7 @@ github.com/varlink/go/varlink/idl
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v5 v5.0.3
+# github.com/vbauerster/mpb/v5 v5.0.4
 github.com/vbauerster/mpb/v5
 github.com/vbauerster/mpb/v5/cwriter
 github.com/vbauerster/mpb/v5/decor

@@ -546,7 +547,7 @@ go.opencensus.io/trace/internal
 go.opencensus.io/trace/tracestate
 # go.uber.org/atomic v1.4.0
 go.uber.org/atomic
-# golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59
+# golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/cast5
 golang.org/x/crypto/chacha20

@@ -583,7 +584,7 @@ golang.org/x/oauth2/internal
 # golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775
+# golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f
 golang.org/x/sys/cpu
 golang.org/x/sys/unix
 golang.org/x/sys/windows