Vendor in containers/(common, storage, image)

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Daniel J Walsh 2022-05-02 17:16:10 -04:00
parent 3ac5cec086
commit e247f02a4f
No known key found for this signature in database
GPG Key ID: A2DF901DABE2C028
49 changed files with 7445 additions and 541 deletions

go.mod
View File

@ -12,12 +12,12 @@ require (
github.com/containernetworking/cni v1.1.0
github.com/containernetworking/plugins v1.1.1
github.com/containers/buildah v1.25.2-0.20220423102655-8f2bb8876f3f
github.com/containers/common v0.47.5-0.20220429111201-21d83cf7c533
github.com/containers/common v0.48.0
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.21.1-0.20220425080628-be085685e524
github.com/containers/ocicrypt v1.1.3
github.com/containers/image/v5 v5.21.1
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
github.com/containers/psgo v1.7.2
github.com/containers/storage v1.39.1-0.20220422100603-8996869ae40b
github.com/containers/storage v1.40.0
github.com/coreos/go-systemd/v22 v22.3.2
github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
github.com/cyphar/filepath-securejoin v0.2.3
@ -66,7 +66,7 @@ require (
go.etcd.io/bbolt v1.3.6
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
golang.org/x/text v0.3.7
google.golang.org/protobuf v1.28.0

go.sum
View File

@ -361,22 +361,23 @@ github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19
github.com/containers/buildah v1.25.2-0.20220423102655-8f2bb8876f3f h1:a5Zjz9EXUDOelPdJKJiXBfDzQS1ynXL3rc16O93tcXo=
github.com/containers/buildah v1.25.2-0.20220423102655-8f2bb8876f3f/go.mod h1:fHTZF4uEZGIlR8oM0fhNvU0wYQOtDpuar8/PxTtdvR0=
github.com/containers/common v0.47.5-0.20220421111103-112a47964ddb/go.mod h1:r80nWTmJrG9EoLkuI6WfbWQDUNQVqkVuB8Oaj1VVjOA=
github.com/containers/common v0.47.5-0.20220429111201-21d83cf7c533 h1:ppbtd7X6Itf6Mq1vQ5WPLpYNmXE3W34/8O7v4NwBXs4=
github.com/containers/common v0.47.5-0.20220429111201-21d83cf7c533/go.mod h1:qhkfBkVP+96bHzTE3j0TYJC7OSqpJLxbXnVWPa5XJnc=
github.com/containers/common v0.48.0 h1:997nnXBZ+eNpfSM7L4SxhhZubQrfEyw3jRyNMTSsNlw=
github.com/containers/common v0.48.0/go.mod h1:zPLZCfLXfnd1jI0QRsD4By54fP4k1+ifQs+tulIe3o0=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.19.2-0.20220224100137-1045fb70b094/go.mod h1:XoYK6kE0dpazFNcuS+a8lra+QfbC6s8tzv+cUuCrZpE=
github.com/containers/image/v5 v5.21.1-0.20220421124950-8527e238867c/go.mod h1:qpUuaiE2mON6xMA0PRO9GteyH9+KT+C6WygZzL5RhnE=
github.com/containers/image/v5 v5.21.1-0.20220425080628-be085685e524 h1:SEar3HX8b/tYE2m5fQ6fBMbwrTXLCzSBvxqODOfjp+E=
github.com/containers/image/v5 v5.21.1-0.20220425080628-be085685e524/go.mod h1:qpUuaiE2mON6xMA0PRO9GteyH9+KT+C6WygZzL5RhnE=
github.com/containers/image/v5 v5.21.1 h1:Cr3zw2f0FZs4SCkdGlc8SN/mpcmg2AKG4OUuDbeGS/Q=
github.com/containers/image/v5 v5.21.1/go.mod h1:zl35egpcDQa79IEXIuoUe1bW+D1pdxRxYjNlyb3YiXw=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a h1:spAGlqziZjCJL25C6F1zsQY05tfCKE9F5YwtEWWe6hU=
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.3 h1:uMxn2wTb4nDR7GqG3rnZSfpJXqWURfzZ7nKydzIeKpA=
github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f h1:hffElEaoDQfREHltc2wtFPd68BqDmzW6KkEDpuSRBjs=
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/containers/psgo v1.7.2 h1:WbCvsY9w+nCv3j4der0mbD3PSRUv/W8l+G0YrZrdSDc=
github.com/containers/psgo v1.7.2/go.mod h1:SLpqxsPOHtTqRygjutCPXmeU2PoEFzV3gzJplN4BMx0=
github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
@ -384,8 +385,9 @@ github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaX
github.com/containers/storage v1.38.2/go.mod h1:INP0RPLHWBxx+pTsO5uiHlDUGHDFvWZPWprAbAlQWPQ=
github.com/containers/storage v1.38.3-0.20220301151551-d06b0f81c0aa/go.mod h1:LkkL34WRi4dI4jt9Cp+ImdZi/P5i36glSHimT5CP5zM=
github.com/containers/storage v1.39.0/go.mod h1:UAD0cKLouN4BOQRgZut/nMjrh/EnTCjSNPgp4ZuGWMs=
github.com/containers/storage v1.39.1-0.20220422100603-8996869ae40b h1:nGXmBAy71/Zjvi0K9fn8bvfZR15+IRxoxqGa0XPs774=
github.com/containers/storage v1.39.1-0.20220422100603-8996869ae40b/go.mod h1:hFiHLMgNU0r3MiUpE97hEBaEKCN8fEIuEEBXoFC9eN0=
github.com/containers/storage v1.40.0 h1:erKY3ZVgp2F8+9jldwkJKJezrToNYs1YH/gqbPuwHes=
github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@ -884,8 +886,9 @@ github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw=
github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -1329,8 +1332,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/sylabs/release-tools v0.1.0/go.mod h1:pqP/z/11/rYMQ0OM/Nn7TxGijw7KfZwW9UolD/J1TUo=
github.com/sylabs/sif/v2 v2.3.2/go.mod h1:IrLX2pzmQ2O4qgv5iy3HdKJcBNYds9DTMd9Je8A9tX4=
github.com/sylabs/sif/v2 v2.6.0 h1:nrWbtSAavp4T6gETg/QgZXxs67qTpSNEgqs2H1y228w=
github.com/sylabs/sif/v2 v2.6.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk=
github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ=
github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@ -1794,8 +1798,9 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=

View File

@ -1,4 +1,4 @@
package version
// Version is the version of the build.
const Version = "0.47.4+dev"
const Version = "0.48.0"

View File

@ -32,7 +32,6 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb/v7"
"github.com/vbauerster/mpb/v7/decor"
"golang.org/x/sync/semaphore"
"golang.org/x/term"
)
@ -712,8 +711,6 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
// If encrypted and decryption keys provided, we should try to decrypt
ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil
// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
if options.OptimizeDestinationImageAlreadyExists {
@ -1069,85 +1066,6 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
return man, manifestDigest, nil
}
// newProgressPool creates a *mpb.Progress.
// The caller must eventually call pool.Wait() after the pool will no longer be updated.
// NOTE: Every progress bar created within the progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done
// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
func (c *copier) newProgressPool() *mpb.Progress {
return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
}
// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
func customPartialBlobDecorFunc(s decor.Statistics) string {
if s.Total == 0 {
pairFmt := "%.1f / %.1f (skipped: %.1f)"
return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
}
pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
percentage := 100.0 * float64(s.Refill) / float64(s.Total)
return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
}
// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
// is io.Discard, the progress bar's output will be discarded
// NOTE: Every progress bar created within a progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done
// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
// shortDigestLen is the length of the digest used for blobs.
const shortDigestLen = 12
prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
maxPrefixLen := len("Copying blob ") + shortDigestLen
if len(prefix) > maxPrefixLen {
prefix = prefix[:maxPrefixLen]
}
// onComplete will replace prefix once the bar/spinner has completed
onComplete = prefix + " " + onComplete
// Use a normal progress bar when we know the size (i.e., size > 0).
// Otherwise, use a spinner to indicate that something's happening.
var bar *mpb.Bar
if info.Size > 0 {
if partial {
bar = pool.AddBar(info.Size,
mpb.BarFillerClearOnComplete(),
mpb.PrependDecorators(
decor.OnComplete(decor.Name(prefix), onComplete),
),
mpb.AppendDecorators(
decor.Any(customPartialBlobDecorFunc),
),
)
} else {
bar = pool.AddBar(info.Size,
mpb.BarFillerClearOnComplete(),
mpb.PrependDecorators(
decor.OnComplete(decor.Name(prefix), onComplete),
),
mpb.AppendDecorators(
decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
),
)
}
} else {
bar = pool.New(0,
mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
mpb.BarFillerClearOnComplete(),
mpb.PrependDecorators(
decor.OnComplete(decor.Name(prefix), onComplete),
),
)
}
if c.progressOutput == io.Discard {
c.Printf("Copying %s %s\n", kind, info.Digest)
}
return bar
}
// copyConfig copies config.json, if any, from src to dest.
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo()
@ -1158,22 +1076,23 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
}
defer c.concurrentBlobCopiesSemaphore.Release(1)
configBlob, err := src.ConfigBlob(ctx)
if err != nil {
return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
}
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
progressPool := c.newProgressPool()
defer progressPool.Wait()
bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
defer bar.Abort(false)
configBlob, err := src.ConfigBlob(ctx)
if err != nil {
return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
}
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
if err != nil {
return types.BlobInfo{}, err
}
bar.SetTotal(int64(len(configBlob)), true)
bar.mark100PercentComplete()
return destInfo, nil
}()
if err != nil {
@ -1218,11 +1137,15 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
// Diffs are needed if we are encrypting an image or trying to decrypt an image
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
// When encrypting or decrypting, only use the simple code path. We might be able to optimize more
// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
// but it's not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let's not.
encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
if !diffIDIsNeeded {
// Don't read the layer from the source if we already have the blob, and optimizations are acceptable.
if canAvoidProcessingCompleteLayer {
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
@ -1242,9 +1165,9 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
if reused {
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
func() { // A scope for defer
bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists")
bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists")
defer bar.Abort(false)
bar.SetTotal(0, true)
bar.mark100PercentComplete()
}()
// Throw an event that the layer has been skipped
@ -1275,7 +1198,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// of the source file are not known yet and must be fetched.
// Attempt a partial retrieval only when the source supports retrieving a blob partially and
// the destination has support for it.
if ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() && !diffIDIsNeeded {
if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
hideProgressBar := true
@ -1287,12 +1210,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
wrapped: ic.c.rawSource,
bar: bar,
}
bar.SetTotal(srcInfo.Size, false)
info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
if err == nil {
bar.SetRefill(srcInfo.Size - bar.Current())
bar.SetCurrent(srcInfo.Size)
bar.SetTotal(srcInfo.Size, true)
if srcInfo.Size != -1 {
bar.SetRefill(srcInfo.Size - bar.Current())
}
bar.mark100PercentComplete()
hideProgressBar = false
logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
return true, info
@ -1305,16 +1228,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}
// Fallback: copy the layer, computing the diffID if we need to do so
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
}
defer srcStream.Close()
return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
defer bar.Abort(false)
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
}
defer srcStream.Close()
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
if err != nil {
return types.BlobInfo{}, "", err
@ -1330,14 +1253,22 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID")
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
// Don't record any associations that involve encrypted data. This is a bit crude,
// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
// might be safe, but it's not trivially obvious, so let's be conservative for now.
// This crude approach also means we don't need to record whether a blob is encrypted
// in the blob info cache (which would probably be necessary for any more complex logic),
// and the simplicity is attractive.
if !encryptingOrDecrypting {
// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
}
diffID = diffIDResult.digest
}
}
bar.SetTotal(srcInfo.Size, true)
bar.mark100PercentComplete()
return blobInfo, diffID, nil
}()
}
@ -1347,7 +1278,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// perhaps (de/re/)compressing the stream,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
@ -1424,7 +1355,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
canModifyBlob = false
}
@ -1444,6 +1375,9 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
var destStream io.Reader = digestingReader
// === Update progress bars
destStream = bar.ProxyReader(destStream)
// === Decrypt the stream, if required.
var decrypted bool
if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
@ -1478,9 +1412,6 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name())
}
// === Update progress bars
destStream = bar.ProxyReader(destStream)
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
if getOriginalLayerCopyWriter != nil {
@ -1681,19 +1612,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
}
if digestingReader.validationSucceeded {
// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
// srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
// (because inputInfo.Digest == "", this must have been computed afresh).
switch compressionOperation {
case types.PreserveOriginal:
break // Do nothing, we have only one digest and we might not have even verified it.
case types.Compress:
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
case types.Decompress:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
default:
return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
// Don't record any associations that involve encrypted data. This is a bit crude,
// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
// might be safe, but it's not trivially obvious, so let's be conservative for now.
// This crude approach also means we don't need to record whether a blob is encrypted
// in the blob info cache (which would probably be necessary for any more complex logic),
// and the simplicity is attractive.
if !encrypted && !decrypted {
// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
// srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
// (because inputInfo.Digest == "", this must have been computed afresh).
switch compressionOperation {
case types.PreserveOriginal:
break // Do nothing, we have only one digest and we might not have even verified it.
case types.Compress:
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
case types.Decompress:
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
default:
return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
}
}
if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)

View File

@ -0,0 +1,148 @@
package copy
import (
"context"
"fmt"
"io"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
"github.com/vbauerster/mpb/v7"
"github.com/vbauerster/mpb/v7/decor"
)
// newProgressPool creates a *mpb.Progress.
// The caller must eventually call pool.Wait() after the pool will no longer be updated.
// NOTE: Every progress bar created within the progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done
// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
func (c *copier) newProgressPool() *mpb.Progress {
return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
}
// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
func customPartialBlobDecorFunc(s decor.Statistics) string {
if s.Total == 0 {
pairFmt := "%.1f / %.1f (skipped: %.1f)"
return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
}
pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
percentage := 100.0 * float64(s.Refill) / float64(s.Total)
return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
}
// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
type progressBar struct {
*mpb.Bar
originalSize int64 // or -1 if unknown
}
// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
// is io.Discard, the progress bar's output will be discarded
//
// NOTE: Every progress bar created within a progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done
// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
//
// As a convention, most users of progress bars should call mark100PercentComplete on full success;
// by convention, we don't leave progress bars in partial state when fully done
// (even if we copied much less data than anticipated).
func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
// shortDigestLen is the length of the digest used for blobs.
const shortDigestLen = 12
prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
// Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
maxPrefixLen := len("Copying blob ") + shortDigestLen
if len(prefix) > maxPrefixLen {
prefix = prefix[:maxPrefixLen]
}
// onComplete will replace prefix once the bar/spinner has completed
onComplete = prefix + " " + onComplete
// Use a normal progress bar when we know the size (i.e., size > 0).
// Otherwise, use a spinner to indicate that something's happening.
var bar *mpb.Bar
if info.Size > 0 {
if partial {
bar = pool.AddBar(info.Size,
mpb.BarFillerClearOnComplete(),
mpb.PrependDecorators(
decor.OnComplete(decor.Name(prefix), onComplete),
),
mpb.AppendDecorators(
decor.Any(customPartialBlobDecorFunc),
),
)
} else {
bar = pool.AddBar(info.Size,
mpb.BarFillerClearOnComplete(),
mpb.PrependDecorators(
decor.OnComplete(decor.Name(prefix), onComplete),
),
mpb.AppendDecorators(
decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
),
)
}
} else {
bar = pool.New(0,
mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
mpb.BarFillerClearOnComplete(),
mpb.PrependDecorators(
decor.OnComplete(decor.Name(prefix), onComplete),
),
)
}
if c.progressOutput == io.Discard {
c.Printf("Copying %s %s\n", kind, info.Digest)
}
return &progressBar{
Bar: bar,
originalSize: info.Size,
}
}
// mark100PercentComplete marks the progress bar as 100% complete;
// it may do so by possibly advancing the current state if it is below the known total.
func (bar *progressBar) mark100PercentComplete() {
if bar.originalSize > 0 {
// We can't call bar.SetTotal even if we wanted to; the total can not be changed
// after a progress bar is created with a definite total.
bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
} else {
// -1 = unknown size
// 0 is somewhat of a special case: Unlike c/image, where 0 is a definite known
// size (possible at least in theory), in mpb, zero-sized progress bars are treated
// as unknown size, in particular they are not configured to be marked as
// complete on bar.Current() reaching bar.total (because that would happen already
// when creating the progress bar).
// That means that we are both _allowed_ to call SetTotal, and we _have to_.
bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete.
}
}
// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
// with the number of received bytes.
type blobChunkAccessorProxy struct {
wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
bar *progressBar // A progress bar updated with the number of bytes read so far
}
// GetBlobAt returns a sequential channel of readers that contain data for the requested
// blob chunks, and a channel that might get a single error value.
// The specified chunks must not overlap and must be sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
if err == nil {
total := int64(0)
for _, c := range chunks {
total += int64(c.Length)
}
s.bar.IncrInt64(total)
}
return rc, errs, err
}
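
The NOTE comments above spell out the mpb contract that the new progressBar wrapper is built around: every bar must either complete or be aborted before pool.Wait(), and on success the bar is driven to 100% rather than left partial. Below is a minimal, self-contained sketch of that pattern against mpb/v7 directly (not the unexported copy helpers); the prefix string and blob size are made up for illustration.

```go
// A standalone sketch of the pool/bar lifecycle described above.
package main

import (
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

func main() {
	pool := mpb.New(mpb.WithWidth(40))
	defer pool.Wait() // Hangs unless every bar has completed or been aborted first.

	const total = int64(1 << 20) // hypothetical blob size
	bar := pool.AddBar(total,
		mpb.BarFillerClearOnComplete(),
		mpb.PrependDecorators(decor.Name("Copying blob deadbeef1234")),
	)
	defer bar.Abort(false) // No-op once the bar has already completed.

	// ... copy data, calling bar.IncrInt64(n) as bytes are transferred ...
	bar.IncrInt64(total / 2)

	// On full success, mirror mark100PercentComplete: advance to the known total.
	bar.SetCurrent(total)
}
```

Because the deferred calls run in reverse order, bar.Abort(false) always runs before pool.Wait(), which is exactly the ordering the comments require.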

View File

@ -1,16 +1,13 @@
package copy
import (
"context"
"io"
"time"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
"github.com/vbauerster/mpb/v7"
)
// progressReader is a reader that reports its progress on an interval.
// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval.
type progressReader struct {
source io.Reader
channel chan<- types.ProgressProperties
@ -80,27 +77,3 @@ func (r *progressReader) Read(p []byte) (int, error) {
}
return n, err
}
// blobChunkAccessorProxy wraps a BlobChunkAccessor and keeps track of how many bytes
// are received.
type blobChunkAccessorProxy struct {
wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
bar *mpb.Bar // A progress bar updated with the number of bytes read so far
}
// GetBlobAt returns a sequential channel of readers that contain data for the requested
// blob chunks, and a channel that might get a single error value.
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
if err == nil {
total := int64(0)
for _, c := range chunks {
total += int64(c.Length)
}
s.bar.IncrInt64(total)
}
return rc, errs, err
}

View File

@ -11,7 +11,7 @@ const (
VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"
VersionDev = ""
)
// Version is the specification version that the package types support.

View File

@ -3,3 +3,4 @@
# Github ID, Name, Email Address
lumjjb, Brandon Lum, lumjjb@gmail.com
stefanberger, Stefan Berger, stefanb@linux.ibm.com
arronwy, Arron Wang, arron.wang@intel.com

View File

@ -40,8 +40,6 @@ import (
var (
// OAEPLabel defines the label we use for OAEP encryption; this cannot be changed
OAEPLabel = []byte("")
// OAEPDefaultHash defines the default hash used for OAEP encryption; this cannot be changed
OAEPDefaultHash = "sha1"
// OAEPSha1Params describes the OAEP parameters with sha1 hash algorithm; needed by SoftHSM
OAEPSha1Params = &pkcs11.OAEPParams{
@ -69,12 +67,12 @@ func rsaPublicEncryptOAEP(pubKey *rsa.PublicKey, plaintext []byte) ([]byte, stri
)
oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
// The default is 'sha1'
// The default is sha256 (previously was sha1)
switch strings.ToLower(oaephash) {
case "sha1", "":
case "sha1":
hashfunc = sha1.New()
hashalg = "sha1"
case "sha256":
case "sha256", "":
hashfunc = sha256.New()
hashalg = "sha256"
default:
@ -283,12 +281,12 @@ func publicEncryptOAEP(pubKey *Pkcs11KeyFileObject, plaintext []byte) ([]byte, s
var oaep *pkcs11.OAEPParams
oaephash := os.Getenv("OCICRYPT_OAEP_HASHALG")
// the default is sha1
// The default is sha256 (previously was sha1)
switch strings.ToLower(oaephash) {
case "sha1", "":
case "sha1":
oaep = OAEPSha1Params
hashalg = "sha1"
case "sha256":
case "sha256", "":
oaep = OAEPSha256Params
hashalg = "sha256"
default:
@ -333,7 +331,7 @@ func privateDecryptOAEP(privKeyObj *Pkcs11KeyFileObject, ciphertext []byte, hash
var oaep *pkcs11.OAEPParams
// the default is sha1
// An empty string from the Hash in the JSON historically defaults to sha1.
switch hashalg {
case "sha1", "":
oaep = OAEPSha1Params
@ -410,9 +408,6 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
return nil, err
}
if hashalg == OAEPDefaultHash {
hashalg = ""
}
recipient := Pkcs11Recipient{
Version: 0,
Blob: base64.StdEncoding.EncodeToString(ciphertext),
@ -431,15 +426,18 @@ func EncryptMultiple(pubKeys []interface{}, data []byte) ([]byte, error) {
// {
// "version": 0,
// "blob": <base64 encoded RSA OAEP encrypted blob>,
// "hash": <hash used for OAEP other than 'sha256'>
// "hash": <hash used for OAEP other than 'sha1'>
// } ,
// {
// "version": 0,
// "blob": <base64 encoded RSA OAEP encrypted blob>,
// "hash": <hash used for OAEP other than 'sha256'>
// "hash": <hash used for OAEP other than 'sha1'>
// } ,
// [...]
// }
// Note: More recent versions of this code explicitly write 'sha1'
// while older versions left it empty when 'sha1' was used.
//
func Decrypt(privKeyObjs []*Pkcs11KeyFileObject, pkcs11blobstr []byte) ([]byte, error) {
pkcs11blob := Pkcs11Blob{}
err := json.Unmarshal(pkcs11blobstr, &pkcs11blob)
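
To make the default change above concrete: the OAEP hash now defaults to sha256, and sha1 must be requested explicitly via the OCICRYPT_OAEP_HASHALG variable that rsaPublicEncryptOAEP and publicEncryptOAEP read. A hedged sketch of a caller pinning the previous sha1 behaviour follows; the import path matches the vendored layout as I understand it, and the public-key slice is assumed to be prepared elsewhere.

```go
// Sketch: force the pre-change OAEP hash for interoperability with older consumers.
package oaepcompat

import (
	"os"

	"github.com/containers/ocicrypt/crypto/pkcs11"
)

// encryptForLegacyReaders encrypts data for consumers that only support the
// previous sha1 OAEP default. With the variable unset, sha256 is now used.
func encryptForLegacyReaders(pubKeys []interface{}, data []byte) ([]byte, error) {
	os.Setenv("OCICRYPT_OAEP_HASHALG", "sha1")
	defer os.Unsetenv("OCICRYPT_OAEP_HASHALG")
	return pkcs11.EncryptMultiple(pubKeys, data)
}
```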

View File

@ -1 +1 @@
1.39.0+dev
1.40.0

View File

@ -76,7 +76,7 @@ func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContai
UID: uid,
GID: gid,
}
mappedPair, err := toHost.ToHost(pair)
mappedPair, err := toHost.ToHostOverflow(pair)
if err != nil {
return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
}

View File

@ -1,7 +1,6 @@
package graphdriver
import (
"fmt"
"golang.org/x/sys/unix"
"github.com/containers/storage/pkg/mount"

View File

@ -939,6 +939,16 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
rootUID = int(st.UID())
rootGID = int(st.GID())
}
if _, err := system.Lstat(dir); err == nil {
logrus.Warnf("Trying to create a layer %#v while directory %q already exists; removing it first", id, dir)
// Don't just os.RemoveAll(dir) here; d.Remove also removes the link in linkDir,
// so that we can't end up with two symlinks in linkDir pointing to the same layer.
if err := d.Remove(id); err != nil {
return errors.Wrapf(err, "removing a pre-existing layer directory %q", dir)
}
}
if err := idtools.MkdirAllAndChownNew(dir, 0700, idPair); err != nil {
return err
}
@ -1389,7 +1399,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
for err == nil {
absLowers = append(absLowers, filepath.Join(dir, nameWithSuffix("diff", diffN)))
relLowers = append(relLowers, dumbJoin(string(link), "..", nameWithSuffix("diff", diffN)))
relLowers = append(relLowers, dumbJoin(linkDir, string(link), "..", nameWithSuffix("diff", diffN)))
diffN++
st, err = os.Stat(filepath.Join(dir, nameWithSuffix("diff", diffN)))
if err == nil && !permsKnown {

View File

@ -2,7 +2,6 @@ package zfs
import (
"fmt"
"strings"
"github.com/containers/storage/drivers"
"github.com/pkg/errors"
@ -26,19 +25,10 @@ func checkRootdirFs(rootdir string) error {
}
func getMountpoint(id string) string {
maxlen := 12
// we need to preserve filesystem suffix
suffix := strings.SplitN(id, "-", 2)
if len(suffix) > 1 {
return id[:maxlen] + "-" + suffix[1]
}
return id[:maxlen]
return id
}
func detachUnmount(mountpoint string) error {
// FreeBSD doesn't have an equivalent to MNT_DETACH
return unix.Unmount(mountpoint, 0)
// FreeBSD's MNT_FORCE is roughly equivalent to MNT_DETACH
return unix.Unmount(mountpoint, unix.MNT_FORCE)
}

View File

@ -12,7 +12,7 @@ require (
github.com/google/go-intervals v0.0.2
github.com/hashicorp/go-multierror v1.1.1
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.15.1
github.com/klauspost/compress v1.15.2
github.com/klauspost/pgzip v1.2.5
github.com/mattn/go-shellwords v1.0.12
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible

View File

@ -424,8 +424,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw=
github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=

View File

@ -692,11 +692,10 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
return copyLayer(layer), nil
}
func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) {
func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) {
if !r.IsReadWrite() {
return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath())
}
size = -1
if err := os.MkdirAll(r.rundir, 0700); err != nil {
return nil, -1, err
}
@ -762,6 +761,60 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
if mountLabel != "" {
label.ReserveLabel(mountLabel)
}
// Before actually creating the layer, make a persistent record of it with incompleteFlag,
// so that future processes have a chance to delete it.
layer := &Layer{
ID: id,
Parent: parent,
Names: names,
MountLabel: mountLabel,
Metadata: templateMetadata,
Created: time.Now().UTC(),
CompressedDigest: templateCompressedDigest,
CompressedSize: templateCompressedSize,
UncompressedDigest: templateUncompressedDigest,
UncompressedSize: templateUncompressedSize,
CompressionType: templateCompressionType,
UIDs: templateUIDs,
GIDs: templateGIDs,
Flags: make(map[string]interface{}),
UIDMap: copyIDMap(moreOptions.UIDMap),
GIDMap: copyIDMap(moreOptions.GIDMap),
BigDataNames: []string{},
}
r.layers = append(r.layers, layer)
r.idindex.Add(id)
r.byid[id] = layer
for _, name := range names {
r.byname[name] = layer
}
for flag, value := range flags {
layer.Flags[flag] = value
}
layer.Flags[incompleteFlag] = true
succeeded := false
cleanupFailureContext := ""
defer func() {
if !succeeded {
// On any error, try both removing the driver's data as well
// as the in-memory layer record.
if err2 := r.Delete(layer.ID); err2 != nil {
if cleanupFailureContext == "" {
cleanupFailureContext = "unknown: cleanupFailureContext not set at the failure site"
}
logrus.Errorf("While recovering from a failure (%s), error deleting layer %#v: %v", cleanupFailureContext, layer.ID, err2)
}
}
}()
err := r.Save()
if err != nil {
cleanupFailureContext = "saving incomplete layer metadata"
return nil, -1, err
}
idMappings := idtools.NewIDMappingsFromMaps(moreOptions.UIDMap, moreOptions.GIDMap)
opts := drivers.CreateOpts{
MountLabel: mountLabel,
@ -769,132 +822,67 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
IDMappings: idMappings,
}
if moreOptions.TemplateLayer != "" {
if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
if err := r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
cleanupFailureContext = "creating a layer from template"
return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
}
oldMappings = templateIDMappings
} else {
if writeable {
if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
if err := r.driver.CreateReadWrite(id, parent, &opts); err != nil {
cleanupFailureContext = "creating a read-write layer"
return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
}
} else {
if err = r.driver.Create(id, parent, &opts); err != nil {
if err := r.driver.Create(id, parent, &opts); err != nil {
cleanupFailureContext = "creating a read-only layer"
return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
}
}
oldMappings = parentMappings
}
if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2)
}
if err := r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
cleanupFailureContext = "in UpdateLayerIDMap"
return nil, -1, err
}
}
if len(templateTSdata) > 0 {
if err := os.MkdirAll(filepath.Dir(r.tspath(id)), 0o700); err != nil {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2)
}
cleanupFailureContext = "creating tar-split parent directory for a copy from template"
return nil, -1, err
}
if err = ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure creating in UpdateLayerIDMap, error deleting layer %#v: %v", id, err2)
}
if err := ioutils.AtomicWriteFile(r.tspath(id), templateTSdata, 0o600); err != nil {
cleanupFailureContext = "creating a tar-split copy from template"
return nil, -1, err
}
}
if err == nil {
layer = &Layer{
ID: id,
Parent: parent,
Names: names,
MountLabel: mountLabel,
Metadata: templateMetadata,
Created: time.Now().UTC(),
CompressedDigest: templateCompressedDigest,
CompressedSize: templateCompressedSize,
UncompressedDigest: templateUncompressedDigest,
UncompressedSize: templateUncompressedSize,
CompressionType: templateCompressionType,
UIDs: templateUIDs,
GIDs: templateGIDs,
Flags: make(map[string]interface{}),
UIDMap: copyIDMap(moreOptions.UIDMap),
GIDMap: copyIDMap(moreOptions.GIDMap),
BigDataNames: []string{},
}
r.layers = append(r.layers, layer)
r.idindex.Add(id)
r.byid[id] = layer
for _, name := range names {
r.byname[name] = layer
}
for flag, value := range flags {
layer.Flags[flag] = value
}
savedIncompleteLayer := false
if diff != nil {
layer.Flags[incompleteFlag] = true
err = r.Save()
if err != nil {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure saving incomplete layer metadata, error deleting layer %#v: %v", id, err2)
}
return nil, -1, err
}
savedIncompleteLayer = true
size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
if err != nil {
if err2 := r.Delete(layer.ID); err2 != nil {
// Either a driver error or an error saving.
// We now have a layer that's been marked for
// deletion but which we failed to remove.
logrus.Errorf("While recovering from a failure applying layer diff, error deleting layer %#v: %v", layer.ID, err2)
}
return nil, -1, err
}
delete(layer.Flags, incompleteFlag)
} else {
// applyDiffWithOptions in the `diff != nil` case handles this bit for us
if layer.CompressedDigest != "" {
r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
}
if layer.UncompressedDigest != "" {
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
}
err = r.Save()
var size int64 = -1
if diff != nil {
size, err = r.applyDiffWithOptions(layer.ID, moreOptions, diff)
if err != nil {
if savedIncompleteLayer {
if err2 := r.Delete(layer.ID); err2 != nil {
// Either a driver error or an error saving.
// We now have a layer that's been marked for
// deletion but which we failed to remove.
logrus.Errorf("While recovering from a failure saving finished layer metadata, error deleting layer %#v: %v", layer.ID, err2)
}
} else {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
if err2 := r.driver.Remove(id); err2 != nil {
logrus.Errorf("While recovering from a failure saving finished layer metadata, error deleting layer %#v in graph driver: %v", id, err2)
}
}
cleanupFailureContext = "applying layer diff"
return nil, -1, err
}
layer = copyLayer(layer)
} else {
// applyDiffWithOptions in the `diff != nil` case handles this bit for us
if layer.CompressedDigest != "" {
r.bycompressedsum[layer.CompressedDigest] = append(r.bycompressedsum[layer.CompressedDigest], layer.ID)
}
if layer.UncompressedDigest != "" {
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
}
delete(layer.Flags, incompleteFlag)
err = r.Save()
if err != nil {
cleanupFailureContext = "saving finished layer metadata"
return nil, -1, err
}
layer = copyLayer(layer)
succeeded = true
return layer, size, err
}
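
The reordering above is easy to lose in the diff, so here is a condensed, hypothetical sketch of the new control flow in Put: the layer record is persisted with incompleteFlag before any graph-driver work, and a single deferred cleanup (standing in for r.Delete plus the cleanupFailureContext message) replaces the per-error-site removal code. The types and callbacks below are stand-ins, not the real store.

```go
// Sketch of the "record incomplete first, clean up once on failure" pattern.
package sketch

import "log"

const incompleteFlag = "incomplete"

type record struct {
	ID    string
	Flags map[string]interface{}
}

func put(id string, records map[string]*record, save, create func() error) error {
	layer := &record{ID: id, Flags: map[string]interface{}{incompleteFlag: true}}
	records[id] = layer

	succeeded := false
	cleanupFailureContext := ""
	defer func() {
		if !succeeded {
			// Stands in for r.Delete(layer.ID): remove driver data and the record.
			delete(records, id)
			log.Printf("while recovering from a failure (%s), deleted layer %q", cleanupFailureContext, id)
		}
	}()

	if err := save(); err != nil { // persist the incomplete layer before creating it
		cleanupFailureContext = "saving incomplete layer metadata"
		return err
	}
	if err := create(); err != nil { // graph-driver Create/CreateReadWrite/ApplyDiff work
		cleanupFailureContext = "creating the layer"
		return err
	}

	delete(layer.Flags, incompleteFlag)
	if err := save(); err != nil {
		cleanupFailureContext = "saving finished layer metadata"
		return err
	}
	succeeded = true
	return nil
}
```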

View File

@ -3,15 +3,18 @@ package idtools
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/user"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// IDMap contains a single entry for user namespace range remapping. An array
@ -203,6 +206,67 @@ func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
return target, err
}
var (
overflowUIDOnce sync.Once
overflowGIDOnce sync.Once
overflowUID int
overflowGID int
)
// getOverflowUID returns the UID mapped to the overflow user
func getOverflowUID() int {
overflowUIDOnce.Do(func() {
// 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present
overflowUID = 65534
if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowuid"); err == nil {
if tmp, err := strconv.Atoi(string(content)); err == nil {
overflowUID = tmp
}
}
})
return overflowUID
}
// getOverflowGID returns the GID mapped to the overflow group
func getOverflowGID() int {
overflowGIDOnce.Do(func() {
// 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present
overflowGID = 65534
if content, err := ioutil.ReadFile("/proc/sys/kernel/overflowgid"); err == nil {
if tmp, err := strconv.Atoi(string(content)); err == nil {
overflowGID = tmp
}
}
})
return overflowGID
}
// ToHostOverflow returns the host UID and GID for the container uid, gid.
// Remapping is only performed if the ids aren't already the remapped root ids.
// If the mapping is not possible because the target ID is not mapped into
// the namespace, then the overflow ID is used.
func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) {
var err error
target := i.RootPair()
if pair.UID != target.UID {
target.UID, err = RawToHost(pair.UID, i.uids)
if err != nil {
target.UID = getOverflowUID()
logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID)
}
}
if pair.GID != target.GID {
target.GID, err = RawToHost(pair.GID, i.gids)
if err != nil {
target.GID = getOverflowGID()
logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID)
}
}
return target, nil
}
// ToContainer returns the container UID and GID for the host uid and gid
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
uid, err := RawToContainer(pair.UID, i.uids)
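
A small, self-contained sketch (not from this commit) of what ToHostOverflow buys callers such as LChown in chown_unix.go above: an ID outside the configured mapping no longer produces an error, it falls back to the kernel overflow IDs. The single mapping below is made up for illustration.

```go
// Sketch: out-of-range container IDs map to the overflow IDs instead of failing.
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Map container IDs 0..999 to host IDs 100000..100999.
	m := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 1000}}
	mappings := idtools.NewIDMappingsFromMaps(m, m)

	inRange, _ := mappings.ToHostOverflow(idtools.IDPair{UID: 5, GID: 5})
	fmt.Println(inRange) // {100005 100005}

	// UID/GID 4242 is not covered by the mapping; instead of an error we get
	// the overflow IDs (typically 65534, per /proc/sys/kernel/overflow{u,g}id).
	outOfRange, _ := mappings.ToHostOverflow(idtools.IDPair{UID: 4242, GID: 4242})
	fmt.Println(outOfRange)
}
```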

View File

@ -0,0 +1,48 @@
package mount
import (
"golang.org/x/sys/unix"
)
const (
// RDONLY will mount the file system read-only.
RDONLY = unix.MNT_RDONLY
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
// take effect.
NOSUID = unix.MNT_NOSUID
// NOEXEC will not allow execution of any binaries on the mounted file system.
NOEXEC = unix.MNT_NOEXEC
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
SYNCHRONOUS = unix.MNT_SYNCHRONOUS
// REMOUNT will attempt to remount an already-mounted file system. This is
// commonly used to change the mount flags for a file system, especially to
// make a readonly file system writeable. It does not change device or mount
// point.
REMOUNT = unix.MNT_UPDATE
// NOATIME will not update the file access time when reading from a file.
NOATIME = unix.MNT_NOATIME
mntDetach = unix.MNT_FORCE
NODIRATIME = 0
NODEV = 0
DIRSYNC = 0
MANDLOCK = 0
BIND = 0
RBIND = 0
UNBINDABLE = 0
RUNBINDABLE = 0
PRIVATE = 0
RPRIVATE = 0
SLAVE = 0
RSLAVE = 0
SHARED = 0
RSHARED = 0
RELATIME = 0
STRICTATIME = 0
)

View File

@ -1,4 +1,5 @@
// +build !linux
//go:build !linux && !freebsd
// +build !linux,!freebsd
package mount

View File

@ -28,14 +28,25 @@ func allocateIOVecs(options []string) []C.struct_iovec {
func mount(device, target, mType string, flag uintptr, data string) error {
isNullFS := false
xs := strings.Split(data, ",")
for _, x := range xs {
if x == "bind" {
isNullFS = true
options := []string{"fspath", target}
if data != "" {
xs := strings.Split(data, ",")
for _, x := range xs {
if x == "bind" {
isNullFS = true
continue
}
opt := strings.SplitN(x, "=", 2)
options = append(options, opt[0])
if len(opt) == 2 {
options = append(options, opt[1])
} else {
options = append(options, "")
}
}
}
options := []string{"fspath", target}
if isNullFS {
options = append(options, "fstype", "nullfs", "target", device)
} else {

View File

@ -0,0 +1,37 @@
// +build freebsd
package reexec
import (
"context"
"os"
"os/exec"
"golang.org/x/sys/unix"
)
// Self returns the path to the current process's binary.
// Uses sysctl.
func Self() string {
path, err := unix.SysctlArgs("kern.proc.pathname", -1)
if err == nil {
return path
}
return os.Args[0]
}
// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd {
cmd := exec.Command(Self())
cmd.Args = args
return cmd
}
// CommandContext returns *exec.Cmd which has Path as current binary.
func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
cmd := exec.CommandContext(ctx, Self())
cmd.Args = args
return cmd
}
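
For context, a short usage sketch (not part of this commit): reexec.Command re-runs the current binary, resolved on FreeBSD via the sysctl-based Self() above, with the argv we pass in. The "storage-helper" name is hypothetical.

```go
// Sketch: building a command that re-executes the current binary.
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/reexec"
)

func main() {
	cmd := reexec.Command("storage-helper", "--cleanup")
	// cmd.Path is the current binary; cmd.Args[0] is the name we passed in.
	fmt.Println(cmd.Path, cmd.Args)
}
```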

View File

@ -1,4 +1,5 @@
// +build freebsd solaris darwin
//go:build solaris || darwin
// +build solaris darwin
package reexec

View File

@ -23,3 +23,10 @@ _testmain.go
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe
# Linux perf files
perf.data
perf.data.old
# gdb history
.gdb_history

View File

@ -17,6 +17,13 @@ This package provides various compression algorithms.
# changelog
* Mar 11, 2022 (v1.15.1)
* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
* Mar 3, 2022 (v1.15.0)
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)

View File

@ -1,3 +1,3 @@
module github.com/klauspost/compress
go 1.15
go 1.16

View File

@ -12,14 +12,14 @@ import (
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
// go:noescape
//go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
// go:noescape
//go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8

View File

@ -0,0 +1,34 @@
// Package cpuinfo gives runtime info about the current CPU.
//
// This is a very limited module meant for use internally
// in this project. For a more versatile solution check
// https://github.com/klauspost/cpuid.
package cpuinfo
// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
func HasBMI1() bool {
return hasBMI1
}
// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
func HasBMI2() bool {
return hasBMI2
}
// DisableBMI2 will disable BMI2, for testing purposes.
// Call returned function to restore previous state.
func DisableBMI2() func() {
old := hasBMI2
hasBMI2 = false
return func() {
hasBMI2 = old
}
}
// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
func HasBMI() bool {
return HasBMI1() && HasBMI2()
}
var hasBMI1 bool
var hasBMI2 bool
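
Since the package doc above only hints at the intended use, here is an illustrative sketch of how code elsewhere in this module might gate an assembly fast path on the new checks. Note that internal/cpuinfo is only importable from within github.com/klauspost/compress, and decodeAsm/decodeGeneric are hypothetical placeholders.

```go
// Sketch: choosing an accelerated path based on cpuinfo.HasBMI2().
package decoder

import "github.com/klauspost/compress/internal/cpuinfo"

func decode(dst, src []byte) int {
	if cpuinfo.HasBMI2() {
		return decodeAsm(dst, src) // BMI2-accelerated path (hypothetical)
	}
	return decodeGeneric(dst, src) // portable fallback (hypothetical)
}

func decodeAsm(dst, src []byte) int     { return decodeGeneric(dst, src) }
func decodeGeneric(dst, src []byte) int { return copy(dst, src) }
```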

View File

@ -0,0 +1,11 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
package cpuinfo
// go:noescape
func x86extensions() (bmi1, bmi2 bool)
func init() {
hasBMI1, hasBMI2 = x86extensions()
}

View File

@ -0,0 +1,36 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
TEXT ·x86extensions(SB), NOSPLIT, $0
// 1. determine max EAX value
XORQ AX, AX
CPUID
CMPQ AX, $7
JB unsupported
// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
MOVQ $7, AX
MOVQ $0, CX
CPUID
BTQ $3, BX // bit 3 = BMI1
SETCS AL
BTQ $8, BX // bit 8 = BMI2
SETCS AH
MOVB AL, bmi1+0(FP)
MOVB AH, bmi2+1(FP)
RET
unsupported:
XORQ AX, AX
MOVB AL, bmi1+0(FP)
MOVB AL, bmi2+1(FP)
RET

View File

@ -386,47 +386,31 @@ In practice this means that concurrency is often limited to utilizing about 3 co
### Benchmarks
These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
The first two are streaming decodes and the last are smaller inputs.
Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
```
BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
Concurrent blocks, performance:
Concurrent performance:
BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
```
This reflects the performance around May 2020, but this may be out of date.
This reflects the performance around May 2022, but this may be out of date.
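For context on what the DecodeAllParallel numbers above measure, here is a minimal sketch of concurrent buffer-to-buffer decoding with this package's public API; the helper name and error handling are illustrative, and a single Decoder is shared because DecodeAll is documented as safe for concurrent use:

```go
package zstdexample

import (
	"log"
	"sync"

	"github.com/klauspost/compress/zstd"
)

// decodeAllConcurrently decompresses each blob into a fresh buffer,
// sharing one Decoder across goroutines.
func decodeAllConcurrently(blobs [][]byte) [][]byte {
	dec, err := zstd.NewReader(nil) // nil reader: DecodeAll-only usage
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	out := make([][]byte, len(blobs))
	var wg sync.WaitGroup
	for i, b := range blobs {
		wg.Add(1)
		go func(i int, b []byte) {
			defer wg.Done()
			d, err := dec.DecodeAll(b, nil)
			if err != nil {
				log.Printf("blob %d: %v", i, err)
				return
			}
			out[i] = d
		}(i, b)
	}
	wg.Wait()
	return out
}
```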
## Zstd inside ZIP files


@ -5,9 +5,14 @@
package zstd
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@ -38,6 +43,9 @@ const (
// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
maxCompressedBlockSize = 128 << 10
compressedBlockOverAlloc = 16
maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc
// Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1
@ -136,7 +144,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.Type = blockType((bh >> 1) & 3)
// find size.
cSize := int(bh >> 3)
maxSize := maxBlockSize
maxSize := maxCompressedBlockSizeAlloc
switch b.Type {
case blockTypeReserved:
return ErrReservedBlockType
@ -157,9 +165,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
println("Data size on stream:", cSize)
}
b.RLESize = 0
maxSize = maxCompressedBlockSize
maxSize = maxCompressedBlockSizeAlloc
if windowSize < maxCompressedBlockSize && b.lowMem {
maxSize = int(windowSize)
maxSize = int(windowSize) + compressedBlockOverAlloc
}
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debugDecoder {
@ -190,9 +198,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
// Read block data.
if cap(b.dataStorage) < cSize {
if b.lowMem || cSize > maxCompressedBlockSize {
b.dataStorage = make([]byte, 0, cSize)
b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else {
b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
}
}
if cap(b.dst) <= maxSize {
@ -486,10 +494,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
b.dst = append(b.dst, hist.decoders.literals...)
return nil
}
err = hist.decoders.decodeSync(hist)
before := len(hist.decoders.out)
err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
if err != nil {
return err
}
if hist.decoders.maxSyncLen > 0 {
hist.decoders.maxSyncLen += uint64(before)
hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
}
b.dst = hist.decoders.out
hist.recentOffsets = hist.decoders.prevOffset
return nil
@ -632,6 +645,22 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err)
return err
}
// Extract blocks...
if false && hist.dict == nil {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
var buf bytes.Buffer
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
buf.Write(in)
ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}
return nil
}
@ -650,6 +679,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
}
hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets
err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset
return err


@ -347,18 +347,20 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
frame.history.setDict(&dict)
}
if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded
if frame.WindowSize > d.o.maxWindowSize {
return dst, ErrWindowSizeExceeded
}
if frame.FrameContentSize < 1<<30 {
// Never preallocate more than 1 GB up front.
if frame.FrameContentSize != fcsUnknown {
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded
}
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}
if cap(dst) == 0 {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.


@ -31,7 +31,7 @@ func (o *decoderOptions) setDefault() {
if o.concurrent > 4 {
o.concurrent = 4
}
o.maxDecodedSize = 1 << 63
o.maxDecodedSize = 64 << 30
}
// WithDecoderLowmem will set whether to use a lower amount of memory,
@ -66,7 +66,7 @@ func WithDecoderConcurrency(n int) DOption {
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
// Maximum and default is 1 << 63 bytes.
// Maximum is 1 << 63 bytes. Default is 64GiB.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
if n == 0 {

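Following the option change above (the default decoder memory limit drops from effectively unlimited to 64 GiB), here is a minimal sketch of capping decoder memory explicitly with WithDecoderMaxMemory; the limit value and error handling are illustrative:

```go
package zstdexample

import (
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// decompressCapped decodes src but refuses to produce more than maxOut bytes,
// surfacing zstd.ErrDecoderSizeExceeded for oversized or hostile input.
func decompressCapped(src []byte, maxOut uint64) ([]byte, error) {
	dec, err := zstd.NewReader(nil, zstd.WithDecoderMaxMemory(maxOut))
	if err != nil {
		return nil, err
	}
	defer dec.Close()

	out, err := dec.DecodeAll(src, nil)
	if errors.Is(err, zstd.ErrDecoderSizeExceeded) {
		return nil, fmt.Errorf("input would decompress past %d bytes: %w", maxOut, err)
	}
	return out, err
}
```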

@ -326,6 +326,19 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
d.history.ignoreBuffer = len(dst)
// Store input length, so we only check new data.
crcStart := len(dst)
d.history.decoders.maxSyncLen = 0
if d.FrameContentSize != fcsUnknown {
d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
return dst, ErrDecoderSizeExceeded
}
if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
// Alloc for output
dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}
var err error
for {
err = dec.reset(d.rawInput, d.WindowSize)


@ -5,8 +5,10 @@
package zstd
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
const (
@ -182,6 +184,29 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
return s.buildDtable()
}
func (s *fseDecoder) mustReadFrom(r io.Reader) {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
// dt [maxTablesize]decSymbol // Decompression table.
// symbolLen uint16 // Length of active part of the symbol table.
// actualTableLog uint8 // Selected tablelog.
// maxBits uint8 // Maximum number of additional bits
// // used for table creation to avoid allocations.
// stateTable [256]uint16
// norm [maxSymbolValue + 1]int16
// preDefined bool
fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
}
// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.


@ -73,6 +73,7 @@ type sequenceDecs struct {
seqSize int
windowSize int
maxBits uint8
maxSyncLen uint64
}
// initialize all 3 decoders from the stream input.
@ -98,153 +99,13 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
return nil
}
// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
// Final will not read from stream.
var llB, mlB, moB uint8
ll, llB = llState.final()
ml, mlB = mlState.final()
mo, moB = ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
} else {
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
} else {
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("WARNING: temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
}
}
br.fillFast()
} else {
if br.overread() {
if debugDecoder {
printf("reading sequence %d, exceeded available data\n", i)
}
return io.ErrUnexpectedEOF
}
ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}
if debugSequences {
println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
// Evaluate.
// We might be doing this async, so do it early.
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
}
seqs[i] = seqVals{
ll: ll,
ml: ml,
mo: mo,
}
if i == len(seqs)-1 {
// This is the last sequence, so we shouldn't update state.
break
}
// Manually inlined, ~ 5-20% faster
// Update all 3 states at once. Approx 20% faster.
nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
if nBits == 0 {
llState = llTable[llState.newState()&maxTableMask]
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits >> (ofState.nbBits() & 31))
lowBits &= bitMask[mlState.nbBits()&15]
mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
}
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}
// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
if len(s.dict) == 0 {
return s.executeSimple(seqs, hist)
}
// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)
@ -341,14 +202,19 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}
// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decodeSync(history *history) error {
func (s *sequenceDecs) decodeSync(hist []byte) error {
if true {
supported, err := s.decodeSyncSimple(hist)
if supported {
return err
}
}
br := s.br
seqs := s.nSeqs
startSize := len(s.out)
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
hist := history.b[history.ignoreBuffer:]
out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
@ -433,7 +299,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
}
if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
@ -463,13 +329,13 @@ func (s *sequenceDecs) decodeSync(history *history) error {
if mo > len(out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
// we may be in dictionary.
dictO := len(s.dict) - (mo - (len(out) + len(hist)))
if dictO < 0 || dictO >= len(s.dict) {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
end := dictO + ml
if end > len(s.dict) {
@ -543,8 +409,8 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
// Check if space for literals
if len(s.literals)+len(s.out)-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
}
// Add final literals


@ -0,0 +1,350 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc
package zstd
import (
"fmt"
"github.com/klauspost/compress/internal/cpuinfo"
)
type decodeSyncAsmContext struct {
llTable []decSymbol
mlTable []decSymbol
ofTable []decSymbol
llState uint64
mlState uint64
ofState uint64
iteration int
litRemain int
out []byte
outPosition int
literals []byte
litPosition int
history []byte
windowSize int
ll int // set on error (not for all errors, please refer to _generate/gen.go)
ml int // set on error (not for all errors, please refer to _generate/gen.go)
mo int // set on error (not for all errors, please refer to _generate/gen.go)
}
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
//go:noescape
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
//go:noescape
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// decode sequences from the stream with the provided history but without a dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
if len(s.dict) > 0 {
return false, nil
}
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
return false, nil
}
useSafe := false
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
useSafe = true
}
if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
useSafe = true
}
br := s.br
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
ctx := decodeSyncAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
mlTable: s.matchLengths.fse.dt[:maxTablesize],
ofTable: s.offsets.fse.dt[:maxTablesize],
llState: uint64(s.litLengths.state.state),
mlState: uint64(s.matchLengths.state.state),
ofState: uint64(s.offsets.state.state),
iteration: s.nSeqs - 1,
litRemain: len(s.literals),
out: s.out,
outPosition: len(s.out),
literals: s.literals,
windowSize: s.windowSize,
history: hist,
}
s.seqSize = 0
startSize := len(s.out)
var errCode int
if cpuinfo.HasBMI2() {
if useSafe {
errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
} else {
errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
}
} else {
if useSafe {
errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
} else {
errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
}
}
switch errCode {
case noError:
break
case errorMatchLenOfsMismatch:
return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)
case errorMatchLenTooBig:
return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)
case errorMatchOffTooBig:
return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
ctx.mo, ctx.outPosition+len(hist)-startSize)
case errorNotEnoughLiterals:
return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
ctx.ll, ctx.litRemain+ctx.ll)
case errorNotEnoughSpace:
size := ctx.outPosition + ctx.ll + ctx.ml
if debugDecoder {
println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
}
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
default:
return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
}
s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
return true, err
}
s.literals = s.literals[ctx.litPosition:]
t := ctx.outPosition
s.out = s.out[:t]
// Add final literals
s.out = append(s.out, s.literals...)
if debugDecoder {
t += len(s.literals)
if t != len(s.out) {
panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
}
}
return true, nil
}
// --------------------------------------------------------------------------------
type decodeAsmContext struct {
llTable []decSymbol
mlTable []decSymbol
ofTable []decSymbol
llState uint64
mlState uint64
ofState uint64
iteration int
seqs []seqVals
litRemain int
}
const noError = 0
// error reported when mo == 0 && ml > 0
const errorMatchLenOfsMismatch = 1
// error reported when ml > maxMatchLen
const errorMatchLenTooBig = 2
// error reported when mo > available history or mo > s.windowSize
const errorMatchOffTooBig = 3
// error reported when the sum of literal lengths exceeds the literal buffer size
const errorNotEnoughLiterals = 4
// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
ctx := decodeAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
mlTable: s.matchLengths.fse.dt[:maxTablesize],
ofTable: s.offsets.fse.dt[:maxTablesize],
llState: uint64(s.litLengths.state.state),
mlState: uint64(s.matchLengths.state.state),
ofState: uint64(s.offsets.state.state),
seqs: seqs,
iteration: len(seqs) - 1,
litRemain: len(s.literals),
}
s.seqSize = 0
lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
var errCode int
if cpuinfo.HasBMI2() {
if lte56bits {
errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
} else {
errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
}
} else {
if lte56bits {
errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
} else {
errCode = sequenceDecs_decode_amd64(s, br, &ctx)
}
}
if errCode != 0 {
i := len(seqs) - ctx.iteration - 1
switch errCode {
case errorMatchLenOfsMismatch:
ml := ctx.seqs[i].ml
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
case errorMatchLenTooBig:
ml := ctx.seqs[i].ml
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
case errorNotEnoughLiterals:
ll := ctx.seqs[i].ll
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
}
return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
}
if ctx.litRemain < 0 {
return fmt.Errorf("literal count is too big: total available %d, total requested %d",
len(s.literals), len(s.literals)-ctx.litRemain)
}
s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}
// --------------------------------------------------------------------------------
type executeAsmContext struct {
seqs []seqVals
seqIndex int
out []byte
history []byte
literals []byte
outPosition int
litPosition int
windowSize int
}
// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
//
// Returns false if a match offset is too big.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// executeSimple handles cases when dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
s.out = append(s.out, make([]byte, addBytes)...)
s.out = s.out[:len(s.out)-addBytes]
}
if debugDecoder {
printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
}
var t = len(s.out)
out := s.out[:t+s.seqSize]
ctx := executeAsmContext{
seqs: seqs,
seqIndex: 0,
out: out,
history: hist,
outPosition: t,
litPosition: 0,
literals: s.literals,
windowSize: s.windowSize,
}
ok := sequenceDecs_executeSimple_amd64(&ctx)
if !ok {
return fmt.Errorf("match offset (%d) bigger than current history (%d)",
seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
}
s.literals = s.literals[ctx.litPosition:]
t = ctx.outPosition
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
t += len(s.literals)
if t != len(out) {
panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
}
}
s.out = out
return nil
}

vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s (generated, vendored, 3519 lines): file diff suppressed because it is too large


@ -0,0 +1,237 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm
package zstd
import (
"fmt"
"io"
)
// decode sequences from the stream with the provided history but without dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return false, nil
}
// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
// Final will not read from stream.
var llB, mlB, moB uint8
ll, llB = llState.final()
ml, mlB = mlState.final()
mo, moB = ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
} else {
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
} else {
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("WARNING: temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
}
}
br.fillFast()
} else {
if br.overread() {
if debugDecoder {
printf("reading sequence %d, exceeded available data\n", i)
}
return io.ErrUnexpectedEOF
}
ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}
if debugSequences {
println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
// Evaluate.
// We might be doing this async, so do it early.
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
}
seqs[i] = seqVals{
ll: ll,
ml: ml,
mo: mo,
}
if i == len(seqs)-1 {
// This is the last sequence, so we shouldn't update state.
break
}
// Manually inlined, ~ 5-20% faster
// Update all 3 states at once. Approx 20% faster.
nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
if nBits == 0 {
llState = llTable[llState.newState()&maxTableMask]
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits >> (ofState.nbBits() & 31))
lowBits &= bitMask[mlState.nbBits()&15]
mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
}
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}
// executeSimple handles cases when a dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)
s.out = append(s.out, make([]byte, addBytes)...)
s.out = s.out[:len(s.out)-addBytes]
}
if debugDecoder {
printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
}
var t = len(s.out)
out := s.out[:t+s.seqSize]
for _, seq := range seqs {
// Add literals
copy(out[t:], s.literals[:seq.ll])
t += seq.ll
s.literals = s.literals[seq.ll:]
// Malformed input
if seq.mo > t+len(hist) || seq.mo > s.windowSize {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
}
// Copy from history.
if v := seq.mo - t; v > 0 {
// v is the start position in history from end.
start := len(hist) - v
if seq.ml > v {
// Some goes into the current block.
// Copy remainder of history
copy(out[t:], hist[start:])
t += v
seq.ml -= v
} else {
copy(out[t:], hist[start:start+seq.ml])
t += seq.ml
continue
}
}
// We must be in the current buffer now
if seq.ml > 0 {
start := t - seq.mo
if seq.ml <= t-start {
// No overlap
copy(out[t:], out[start:start+seq.ml])
t += seq.ml
} else {
// Overlapping copy
// Extend destination slice and copy one byte at the time.
src := out[start : start+seq.ml]
dst := out[t:]
dst = dst[:len(src)]
t += len(src)
// Destination is the space we just added.
for i := range src {
dst[i] = src[i]
}
}
}
}
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
t += len(s.literals)
if t != len(out) {
panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
}
}
s.out = out
return nil
}


@ -21,23 +21,34 @@ const ZipMethodPKWare = 20
var zipReaderPool sync.Pool
// newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser {
dec, ok := zipReaderPool.Get().(*Decoder)
if ok {
dec.Reset(r)
} else {
d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
if err != nil {
panic(err)
}
dec = d
func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
pool := &zipReaderPool
if len(opts) > 0 {
opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
// Force concurrency 1
opts = append(opts, WithDecoderConcurrency(1))
// Create our own pool
pool = &sync.Pool{}
}
return func(r io.Reader) io.ReadCloser {
dec, ok := pool.Get().(*Decoder)
if ok {
dec.Reset(r)
} else {
d, err := NewReader(r, opts...)
if err != nil {
panic(err)
}
dec = d
}
return &pooledZipReader{dec: dec, pool: pool}
}
return &pooledZipReader{dec: dec}
}
type pooledZipReader struct {
mu sync.Mutex // guards Close and Read
dec *Decoder
mu sync.Mutex // guards Close and Read
pool *sync.Pool
dec *Decoder
}
func (r *pooledZipReader) Read(p []byte) (n int, err error) {
@ -48,8 +59,8 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
}
dec, err := r.dec.Read(p)
if err == io.EOF {
err = r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.dec.Reset(nil)
r.pool.Put(r.dec)
r.dec = nil
}
return dec, err
@ -61,7 +72,7 @@ func (r *pooledZipReader) Close() error {
var err error
if r.dec != nil {
err = r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.pool.Put(r.dec)
r.dec = nil
}
return err
@ -115,6 +126,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
func ZipDecompressor() func(r io.Reader) io.ReadCloser {
return newZipReader
// Options can be specified. WithDecoderConcurrency(1) is forced,
// and by default a 128MB maximum decompression window is specified.
// The window size can be overridden if required.
func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
return newZipReader(opts...)
}
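As a usage note for the option-aware ZipDecompressor above, here is a minimal sketch of registering it with archive/zip; zstd.ZipMethodWinZip is the zstd method ID defined in the same file upstream, and the file name and 64 MB window cap are illustrative assumptions:

```go
package main

import (
	"archive/zip"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	zr, err := zip.OpenReader("archive.zip") // illustrative file name
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()

	// Concurrency 1 is forced internally; here the decompression window is
	// capped at 64 MB instead of the 128 MB default mentioned above.
	zr.RegisterDecompressor(zstd.ZipMethodWinZip,
		zstd.ZipDecompressor(zstd.WithDecoderMaxWindow(64<<20)))

	for _, f := range zr.File {
		rc, err := f.Open()
		if err != nil {
			log.Fatal(err)
		}
		if _, err := io.Copy(os.Stdout, rc); err != nil {
			log.Fatal(err)
		}
		rc.Close()
	}
}
```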


@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
// +build 386 amd64 amd64p32 alpha arm arm64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
//go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh
// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh
package unix

vendor/golang.org/x/sys/unix/syscall_linux_loong64.go (generated, vendored, 191 lines, new file)

@ -0,0 +1,191 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build loong64 && linux
// +build loong64,linux
package unix
import "unsafe"
//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64
//sys Fchown(fd int, uid int, gid int) (err error)
//sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
//sys Fstatfs(fd int, buf *Statfs_t) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sysnb Getegid() (egid int)
//sysnb Geteuid() (euid int)
//sysnb Getgid() (gid int)
//sysnb Getuid() (uid int)
//sys Listen(s int, n int) (err error)
//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
var ts *Timespec
if timeout != nil {
ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
}
return Pselect(nfd, r, w, e, ts, nil)
}
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
func Stat(path string, stat *Stat_t) (err error) {
return Fstatat(AT_FDCWD, path, stat, 0)
}
func Lchown(path string, uid int, gid int) (err error) {
return Fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW)
}
func Lstat(path string, stat *Stat_t) (err error) {
return Fstatat(AT_FDCWD, path, stat, AT_SYMLINK_NOFOLLOW)
}
//sys Statfs(path string, buf *Statfs_t) (err error)
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
//sys Truncate(path string, length int64) (err error)
func Ustat(dev int, ubuf *Ustat_t) (err error) {
return ENOSYS
}
//sys accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error)
//sys bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error)
//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
//sysnb setgroups(n int, list *_Gid_t) (err error)
//sys getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error)
//sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error)
//sysnb socket(domain int, typ int, proto int) (fd int, err error)
//sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error)
//sysnb getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
//sysnb getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error)
//sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error)
//sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error)
//sys recvmsg(s int, msg *Msghdr, flags int) (n int, err error)
//sys sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
//sysnb Gettimeofday(tv *Timeval) (err error)
func setTimespec(sec, nsec int64) Timespec {
return Timespec{Sec: sec, Nsec: nsec}
}
func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: sec, Usec: usec}
}
func Getrlimit(resource int, rlim *Rlimit) (err error) {
err = Prlimit(0, resource, nil, rlim)
return
}
func Setrlimit(resource int, rlim *Rlimit) (err error) {
err = Prlimit(0, resource, rlim, nil)
return
}
func futimesat(dirfd int, path string, tv *[2]Timeval) (err error) {
if tv == nil {
return utimensat(dirfd, path, nil, 0)
}
ts := []Timespec{
NsecToTimespec(TimevalToNsec(tv[0])),
NsecToTimespec(TimevalToNsec(tv[1])),
}
return utimensat(dirfd, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
}
func Time(t *Time_t) (Time_t, error) {
var tv Timeval
err := Gettimeofday(&tv)
if err != nil {
return 0, err
}
if t != nil {
*t = Time_t(tv.Sec)
}
return Time_t(tv.Sec), nil
}
func Utime(path string, buf *Utimbuf) error {
tv := []Timeval{
{Sec: buf.Actime},
{Sec: buf.Modtime},
}
return Utimes(path, tv)
}
func utimes(path string, tv *[2]Timeval) (err error) {
if tv == nil {
return utimensat(AT_FDCWD, path, nil, 0)
}
ts := []Timespec{
NsecToTimespec(TimevalToNsec(tv[0])),
NsecToTimespec(TimevalToNsec(tv[1])),
}
return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
}
func (r *PtraceRegs) PC() uint64 { return r.Era }
func (r *PtraceRegs) SetPC(era uint64) { r.Era = era }
func (iov *Iovec) SetLen(length int) {
iov.Len = uint64(length)
}
func (msghdr *Msghdr) SetControllen(length int) {
msghdr.Controllen = uint64(length)
}
func (msghdr *Msghdr) SetIovlen(length int) {
msghdr.Iovlen = uint64(length)
}
func (cmsg *Cmsghdr) SetLen(length int) {
cmsg.Len = uint64(length)
}
func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) {
rsa.Service_name_len = uint64(length)
}
func Pause() error {
_, err := ppoll(nil, 0, nil, nil)
return err
}
func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
return Renameat2(olddirfd, oldpath, newdirfd, newpath, 0)
}
//sys kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error)
func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error {
cmdlineLen := len(cmdline)
if cmdlineLen > 0 {
// Account for the additional NULL byte added by
// BytePtrFromString in kexecFileLoad. The kexec_file_load
// syscall expects a NULL-terminated string.
cmdlineLen++
}
return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
}


@ -1310,6 +1310,7 @@ const (
KEXEC_ARCH_ARM = 0x280000
KEXEC_ARCH_DEFAULT = 0x0
KEXEC_ARCH_IA_64 = 0x320000
KEXEC_ARCH_LOONGARCH = 0x1020000
KEXEC_ARCH_MASK = 0xffff0000
KEXEC_ARCH_MIPS = 0x80000
KEXEC_ARCH_MIPS_LE = 0xa0000

vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go (generated, vendored, 818 lines, new file)

@ -0,0 +1,818 @@
// mkerrors.sh -Wall -Werror -static -I/tmp/include
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build loong64 && linux
// +build loong64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
package unix
import "syscall"
const (
B1000000 = 0x1008
B115200 = 0x1002
B1152000 = 0x1009
B1500000 = 0x100a
B2000000 = 0x100b
B230400 = 0x1003
B2500000 = 0x100c
B3000000 = 0x100d
B3500000 = 0x100e
B4000000 = 0x100f
B460800 = 0x1004
B500000 = 0x1005
B57600 = 0x1001
B576000 = 0x1006
B921600 = 0x1007
BLKBSZGET = 0x80081270
BLKBSZSET = 0x40081271
BLKFLSBUF = 0x1261
BLKFRAGET = 0x1265
BLKFRASET = 0x1264
BLKGETSIZE = 0x1260
BLKGETSIZE64 = 0x80081272
BLKPBSZGET = 0x127b
BLKRAGET = 0x1263
BLKRASET = 0x1262
BLKROGET = 0x125e
BLKROSET = 0x125d
BLKRRPART = 0x125f
BLKSECTGET = 0x1267
BLKSECTSET = 0x1266
BLKSSZGET = 0x1268
BOTHER = 0x1000
BS1 = 0x2000
BSDLY = 0x2000
CBAUD = 0x100f
CBAUDEX = 0x1000
CIBAUD = 0x100f0000
CLOCAL = 0x800
CR1 = 0x200
CR2 = 0x400
CR3 = 0x600
CRDLY = 0x600
CREAD = 0x80
CS6 = 0x10
CS7 = 0x20
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
ECHOE = 0x10
ECHOK = 0x20
ECHOKE = 0x800
ECHONL = 0x40
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x800
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
FFDLY = 0x8000
FICLONE = 0x40049409
FICLONERANGE = 0x4020940d
FLUSHO = 0x1000
FPU_CTX_MAGIC = 0x46505501
FS_IOC_ENABLE_VERITY = 0x40806685
FS_IOC_GETFLAGS = 0x80086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x8010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614
FS_IOC_SETFLAGS = 0x40086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613
F_GETLK = 0x5
F_GETLK64 = 0x5
F_GETOWN = 0x9
F_RDLCK = 0x0
F_SETLK = 0x6
F_SETLK64 = 0x6
F_SETLKW = 0x7
F_SETLKW64 = 0x7
F_SETOWN = 0x8
F_UNLCK = 0x2
F_WRLCK = 0x1
HIDIOCGRAWINFO = 0x80084803
HIDIOCGRDESC = 0x90044802
HIDIOCGRDESCSIZE = 0x80044801
HUPCL = 0x400
ICANON = 0x2
IEXTEN = 0x8000
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
IXON = 0x400
LASX_CTX_MAGIC = 0x41535801
LSX_CTX_MAGIC = 0x53580001
MAP_ANON = 0x20
MAP_ANONYMOUS = 0x20
MAP_DENYWRITE = 0x800
MAP_EXECUTABLE = 0x1000
MAP_GROWSDOWN = 0x100
MAP_HUGETLB = 0x40000
MAP_LOCKED = 0x2000
MAP_NONBLOCK = 0x10000
MAP_NORESERVE = 0x4000
MAP_POPULATE = 0x8000
MAP_STACK = 0x20000
MAP_SYNC = 0x80000
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
MEMERASE = 0x40084d02
MEMERASE64 = 0x40104d14
MEMGETBADBLOCK = 0x40084d0b
MEMGETINFO = 0x80204d01
MEMGETOOBSEL = 0x80c84d0a
MEMGETREGIONCOUNT = 0x80044d07
MEMISLOCKED = 0x80084d17
MEMLOCK = 0x40084d05
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x40084d0c
MEMUNLOCK = 0x40084d06
MEMWRITEOOB = 0xc0104d03
MTDFILEMODE = 0x4d13
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_NSTYPE = 0xb703
NS_GET_OWNER_UID = 0xb704
NS_GET_PARENT = 0xb702
NS_GET_USERNS = 0xb701
OLCUC = 0x2
ONLCR = 0x4
OTPERASE = 0x400c4d19
OTPGETREGIONCOUNT = 0x40044d0e
OTPGETREGIONINFO = 0x400c4d0f
OTPLOCK = 0x800c4d10
OTPSELECT = 0x80044d0d
O_APPEND = 0x400
O_ASYNC = 0x2000
O_CLOEXEC = 0x80000
O_CREAT = 0x40
O_DIRECT = 0x4000
O_DIRECTORY = 0x10000
O_DSYNC = 0x1000
O_EXCL = 0x80
O_FSYNC = 0x101000
O_LARGEFILE = 0x0
O_NDELAY = 0x800
O_NOATIME = 0x40000
O_NOCTTY = 0x100
O_NOFOLLOW = 0x20000
O_NONBLOCK = 0x800
O_PATH = 0x200000
O_RSYNC = 0x101000
O_SYNC = 0x101000
O_TMPFILE = 0x410000
O_TRUNC = 0x200
PARENB = 0x100
PARODD = 0x200
PENDIN = 0x4000
PERF_EVENT_IOC_DISABLE = 0x2401
PERF_EVENT_IOC_ENABLE = 0x2400
PERF_EVENT_IOC_ID = 0x80082407
PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b
PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409
PERF_EVENT_IOC_PERIOD = 0x40082404
PERF_EVENT_IOC_QUERY_BPF = 0xc008240a
PERF_EVENT_IOC_REFRESH = 0x2402
PERF_EVENT_IOC_RESET = 0x2403
PERF_EVENT_IOC_SET_BPF = 0x40042408
PERF_EVENT_IOC_SET_FILTER = 0x40082406
PERF_EVENT_IOC_SET_OUTPUT = 0x2405
PPPIOCATTACH = 0x4004743d
PPPIOCATTCHAN = 0x40047438
PPPIOCBRIDGECHAN = 0x40047435
PPPIOCCONNECT = 0x4004743a
PPPIOCDETACH = 0x4004743c
PPPIOCDISCONN = 0x7439
PPPIOCGASYNCMAP = 0x80047458
PPPIOCGCHAN = 0x80047437
PPPIOCGDEBUG = 0x80047441
PPPIOCGFLAGS = 0x8004745a
PPPIOCGIDLE = 0x8010743f
PPPIOCGIDLE32 = 0x8008743f
PPPIOCGIDLE64 = 0x8010743f
PPPIOCGL2TPSTATS = 0x80487436
PPPIOCGMRU = 0x80047453
PPPIOCGRASYNCMAP = 0x80047455
PPPIOCGUNIT = 0x80047456
PPPIOCGXASYNCMAP = 0x80207450
PPPIOCSACTIVE = 0x40107446
PPPIOCSASYNCMAP = 0x40047457
PPPIOCSCOMPRESS = 0x4010744d
PPPIOCSDEBUG = 0x40047440
PPPIOCSFLAGS = 0x40047459
PPPIOCSMAXCID = 0x40047451
PPPIOCSMRRU = 0x4004743b
PPPIOCSMRU = 0x40047452
PPPIOCSNPMODE = 0x4008744b
PPPIOCSPASS = 0x40107447
PPPIOCSRASYNCMAP = 0x40047454
PPPIOCSXASYNCMAP = 0x4020744f
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTRACE_SYSEMU = 0x1f
PTRACE_SYSEMU_SINGLESTEP = 0x20
RLIMIT_AS = 0x9
RLIMIT_MEMLOCK = 0x8
RLIMIT_NOFILE = 0x7
RLIMIT_NPROC = 0x6
RLIMIT_RSS = 0x5
RNDADDENTROPY = 0x40085203
RNDADDTOENTCNT = 0x40045201
RNDCLEARPOOL = 0x5206
RNDGETENTCNT = 0x80045200
RNDGETPOOL = 0x80085202
RNDRESEEDCRNG = 0x5207
RNDZAPENTCNT = 0x5204
RTC_AIE_OFF = 0x7002
RTC_AIE_ON = 0x7001
RTC_ALM_READ = 0x80247008
RTC_ALM_SET = 0x40247007
RTC_EPOCH_READ = 0x8008700d
RTC_EPOCH_SET = 0x4008700e
RTC_IRQP_READ = 0x8008700b
RTC_IRQP_SET = 0x4008700c
RTC_PARAM_GET = 0x40187013
RTC_PARAM_SET = 0x40187014
RTC_PIE_OFF = 0x7006
RTC_PIE_ON = 0x7005
RTC_PLL_GET = 0x80207011
RTC_PLL_SET = 0x40207012
RTC_RD_TIME = 0x80247009
RTC_SET_TIME = 0x4024700a
RTC_UIE_OFF = 0x7004
RTC_UIE_ON = 0x7003
RTC_VL_CLR = 0x7014
RTC_VL_READ = 0x80047013
RTC_WIE_OFF = 0x7010
RTC_WIE_ON = 0x700f
RTC_WKALM_RD = 0x80287010
RTC_WKALM_SET = 0x4028700f
SCM_TIMESTAMPING = 0x25
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x800
SIOCATMARK = 0x8905
SIOCGPGRP = 0x8904
SIOCGSTAMPNS_NEW = 0x80108907
SIOCGSTAMP_NEW = 0x80108906
SIOCINQ = 0x541b
SIOCOUTQ = 0x5411
SIOCSPGRP = 0x8902
SOCK_CLOEXEC = 0x80000
SOCK_DGRAM = 0x2
SOCK_NONBLOCK = 0x800
SOCK_STREAM = 0x1
SOL_SOCKET = 0x1
SO_ACCEPTCONN = 0x1e
SO_ATTACH_BPF = 0x32
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x6
SO_BSDCOMPAT = 0xe
SO_BUF_LOCK = 0x48
SO_BUSY_POLL = 0x2e
SO_BUSY_POLL_BUDGET = 0x46
SO_CNX_ADVICE = 0x35
SO_COOKIE = 0x39
SO_DETACH_REUSEPORT_BPF = 0x44
SO_DOMAIN = 0x27
SO_DONTROUTE = 0x5
SO_ERROR = 0x4
SO_INCOMING_CPU = 0x31
SO_INCOMING_NAPI_ID = 0x38
SO_KEEPALIVE = 0x9
SO_LINGER = 0xd
SO_LOCK_FILTER = 0x2c
SO_MARK = 0x24
SO_MAX_PACING_RATE = 0x2f
SO_MEMINFO = 0x37
SO_NETNS_COOKIE = 0x47
SO_NOFCS = 0x2b
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
SO_PEERGROUPS = 0x3b
SO_PEERSEC = 0x1f
SO_PREFER_BUSY_POLL = 0x45
SO_PROTOCOL = 0x26
SO_RCVBUF = 0x8
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x2
SO_REUSEPORT = 0xf
SO_RXQ_OVFL = 0x28
SO_SECURITY_AUTHENTICATION = 0x16
SO_SECURITY_ENCRYPTION_NETWORK = 0x18
SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
SO_SELECT_ERR_QUEUE = 0x2d
SO_SNDBUF = 0x7
SO_SNDBUFFORCE = 0x20
SO_SNDLOWAT = 0x13
SO_SNDTIMEO = 0x15
SO_SNDTIMEO_NEW = 0x43
SO_SNDTIMEO_OLD = 0x15
SO_TIMESTAMPING = 0x25
SO_TIMESTAMPING_NEW = 0x41
SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
SO_TXTIME = 0x3d
SO_TYPE = 0x3
SO_WIFI_STATUS = 0x29
SO_ZEROCOPY = 0x3c
TAB1 = 0x800
TAB2 = 0x1000
TAB3 = 0x1800
TABDLY = 0x1800
TCFLSH = 0x540b
TCGETA = 0x5405
TCGETS = 0x5401
TCGETS2 = 0x802c542a
TCGETX = 0x5432
TCSAFLUSH = 0x2
TCSBRK = 0x5409
TCSBRKP = 0x5425
TCSETA = 0x5406
TCSETAF = 0x5408
TCSETAW = 0x5407
TCSETS = 0x5402
TCSETS2 = 0x402c542b
TCSETSF = 0x5404
TCSETSF2 = 0x402c542d
TCSETSW = 0x5403
TCSETSW2 = 0x402c542c
TCSETX = 0x5433
TCSETXF = 0x5434
TCSETXW = 0x5435
TCXONC = 0x540a
TFD_CLOEXEC = 0x80000
TFD_NONBLOCK = 0x800
TIOCCBRK = 0x5428
TIOCCONS = 0x541d
TIOCEXCL = 0x540c
TIOCGDEV = 0x80045432
TIOCGETD = 0x5424
TIOCGEXCL = 0x80045440
TIOCGICOUNT = 0x545d
TIOCGISO7816 = 0x80285442
TIOCGLCKTRMIOS = 0x5456
TIOCGPGRP = 0x540f
TIOCGPKT = 0x80045438
TIOCGPTLCK = 0x80045439
TIOCGPTN = 0x80045430
TIOCGPTPEER = 0x5441
TIOCGRS485 = 0x542e
TIOCGSERIAL = 0x541e
TIOCGSID = 0x5429
TIOCGSOFTCAR = 0x5419
TIOCGWINSZ = 0x5413
TIOCINQ = 0x541b
TIOCLINUX = 0x541c
TIOCMBIC = 0x5417
TIOCMBIS = 0x5416
TIOCMGET = 0x5415
TIOCMIWAIT = 0x545c
TIOCMSET = 0x5418
TIOCM_CAR = 0x40
TIOCM_CD = 0x40
TIOCM_CTS = 0x20
TIOCM_DSR = 0x100
TIOCM_RI = 0x80
TIOCM_RNG = 0x80
TIOCM_SR = 0x10
TIOCM_ST = 0x8
TIOCNOTTY = 0x5422
TIOCNXCL = 0x540d
TIOCOUTQ = 0x5411
TIOCPKT = 0x5420
TIOCSBRK = 0x5427
TIOCSCTTY = 0x540e
TIOCSERCONFIG = 0x5453
TIOCSERGETLSR = 0x5459
TIOCSERGETMULTI = 0x545a
TIOCSERGSTRUCT = 0x5458
TIOCSERGWILD = 0x5454
TIOCSERSETMULTI = 0x545b
TIOCSERSWILD = 0x5455
TIOCSER_TEMT = 0x1
TIOCSETD = 0x5423
TIOCSIG = 0x40045436
TIOCSISO7816 = 0xc0285443
TIOCSLCKTRMIOS = 0x5457
TIOCSPGRP = 0x5410
TIOCSPTLCK = 0x40045431
TIOCSRS485 = 0x542f
TIOCSSERIAL = 0x541f
TIOCSSOFTCAR = 0x541a
TIOCSTI = 0x5412
TIOCSWINSZ = 0x5414
TIOCVHANGUP = 0x5437
TOSTOP = 0x100
TUNATTACHFILTER = 0x401054d5
TUNDETACHFILTER = 0x401054d6
TUNGETDEVNETNS = 0x54e3
TUNGETFEATURES = 0x800454cf
TUNGETFILTER = 0x801054db
TUNGETIFF = 0x800454d2
TUNGETSNDBUF = 0x800454d3
TUNGETVNETBE = 0x800454df
TUNGETVNETHDRSZ = 0x800454d7
TUNGETVNETLE = 0x800454dd
TUNSETCARRIER = 0x400454e2
TUNSETDEBUG = 0x400454c9
TUNSETFILTEREBPF = 0x800454e1
TUNSETGROUP = 0x400454ce
TUNSETIFF = 0x400454ca
TUNSETIFINDEX = 0x400454da
TUNSETLINK = 0x400454cd
TUNSETNOCSUM = 0x400454c8
TUNSETOFFLOAD = 0x400454d0
TUNSETOWNER = 0x400454cc
TUNSETPERSIST = 0x400454cb
TUNSETQUEUE = 0x400454d9
TUNSETSNDBUF = 0x400454d4
TUNSETSTEERINGEBPF = 0x800454e0
TUNSETTXFILTER = 0x400454d1
TUNSETVNETBE = 0x400454de
TUNSETVNETHDRSZ = 0x400454d8
TUNSETVNETLE = 0x400454dc
UBI_IOCATT = 0x40186f40
UBI_IOCDET = 0x40046f41
UBI_IOCEBCH = 0x40044f02
UBI_IOCEBER = 0x40044f01
UBI_IOCEBISMAP = 0x80044f05
UBI_IOCEBMAP = 0x40084f03
UBI_IOCEBUNMAP = 0x40044f04
UBI_IOCMKVOL = 0x40986f00
UBI_IOCRMVOL = 0x40046f01
UBI_IOCRNVOL = 0x51106f03
UBI_IOCRPEB = 0x40046f04
UBI_IOCRSVOL = 0x400c6f02
UBI_IOCSETVOLPROP = 0x40104f06
UBI_IOCSPEB = 0x40046f05
UBI_IOCVOLCRBLK = 0x40804f07
UBI_IOCVOLRMBLK = 0x4f08
UBI_IOCVOLUP = 0x40084f00
VDISCARD = 0xd
VEOF = 0x4
VEOL = 0xb
VEOL2 = 0x10
VMIN = 0x6
VREPRINT = 0xc
VSTART = 0x8
VSTOP = 0x9
VSUSP = 0xa
VSWTC = 0x7
VT1 = 0x4000
VTDLY = 0x4000
VTIME = 0x5
VWERASE = 0xe
WDIOC_GETBOOTSTATUS = 0x80045702
WDIOC_GETPRETIMEOUT = 0x80045709
WDIOC_GETSTATUS = 0x80045701
WDIOC_GETSUPPORT = 0x80285700
WDIOC_GETTEMP = 0x80045703
WDIOC_GETTIMELEFT = 0x8004570a
WDIOC_GETTIMEOUT = 0x80045707
WDIOC_KEEPALIVE = 0x80045705
WDIOC_SETOPTIONS = 0x80045704
WORDSIZE = 0x40
XCASE = 0x4
XTABS = 0x1800
_HIDIOCGRAWNAME = 0x80804804
_HIDIOCGRAWPHYS = 0x80404805
_HIDIOCGRAWUNIQ = 0x80404808
)
// Errors
const (
EADDRINUSE = syscall.Errno(0x62)
EADDRNOTAVAIL = syscall.Errno(0x63)
EADV = syscall.Errno(0x44)
EAFNOSUPPORT = syscall.Errno(0x61)
EALREADY = syscall.Errno(0x72)
EBADE = syscall.Errno(0x34)
EBADFD = syscall.Errno(0x4d)
EBADMSG = syscall.Errno(0x4a)
EBADR = syscall.Errno(0x35)
EBADRQC = syscall.Errno(0x38)
EBADSLT = syscall.Errno(0x39)
EBFONT = syscall.Errno(0x3b)
ECANCELED = syscall.Errno(0x7d)
ECHRNG = syscall.Errno(0x2c)
ECOMM = syscall.Errno(0x46)
ECONNABORTED = syscall.Errno(0x67)
ECONNREFUSED = syscall.Errno(0x6f)
ECONNRESET = syscall.Errno(0x68)
EDEADLK = syscall.Errno(0x23)
EDEADLOCK = syscall.Errno(0x23)
EDESTADDRREQ = syscall.Errno(0x59)
EDOTDOT = syscall.Errno(0x49)
EDQUOT = syscall.Errno(0x7a)
EHOSTDOWN = syscall.Errno(0x70)
EHOSTUNREACH = syscall.Errno(0x71)
EHWPOISON = syscall.Errno(0x85)
EIDRM = syscall.Errno(0x2b)
EILSEQ = syscall.Errno(0x54)
EINPROGRESS = syscall.Errno(0x73)
EISCONN = syscall.Errno(0x6a)
EISNAM = syscall.Errno(0x78)
EKEYEXPIRED = syscall.Errno(0x7f)
EKEYREJECTED = syscall.Errno(0x81)
EKEYREVOKED = syscall.Errno(0x80)
EL2HLT = syscall.Errno(0x33)
EL2NSYNC = syscall.Errno(0x2d)
EL3HLT = syscall.Errno(0x2e)
EL3RST = syscall.Errno(0x2f)
ELIBACC = syscall.Errno(0x4f)
ELIBBAD = syscall.Errno(0x50)
ELIBEXEC = syscall.Errno(0x53)
ELIBMAX = syscall.Errno(0x52)
ELIBSCN = syscall.Errno(0x51)
ELNRNG = syscall.Errno(0x30)
ELOOP = syscall.Errno(0x28)
EMEDIUMTYPE = syscall.Errno(0x7c)
EMSGSIZE = syscall.Errno(0x5a)
EMULTIHOP = syscall.Errno(0x48)
ENAMETOOLONG = syscall.Errno(0x24)
ENAVAIL = syscall.Errno(0x77)
ENETDOWN = syscall.Errno(0x64)
ENETRESET = syscall.Errno(0x66)
ENETUNREACH = syscall.Errno(0x65)
ENOANO = syscall.Errno(0x37)
ENOBUFS = syscall.Errno(0x69)
ENOCSI = syscall.Errno(0x32)
ENODATA = syscall.Errno(0x3d)
ENOKEY = syscall.Errno(0x7e)
ENOLCK = syscall.Errno(0x25)
ENOLINK = syscall.Errno(0x43)
ENOMEDIUM = syscall.Errno(0x7b)
ENOMSG = syscall.Errno(0x2a)
ENONET = syscall.Errno(0x40)
ENOPKG = syscall.Errno(0x41)
ENOPROTOOPT = syscall.Errno(0x5c)
ENOSR = syscall.Errno(0x3f)
ENOSTR = syscall.Errno(0x3c)
ENOSYS = syscall.Errno(0x26)
ENOTCONN = syscall.Errno(0x6b)
ENOTEMPTY = syscall.Errno(0x27)
ENOTNAM = syscall.Errno(0x76)
ENOTRECOVERABLE = syscall.Errno(0x83)
ENOTSOCK = syscall.Errno(0x58)
ENOTSUP = syscall.Errno(0x5f)
ENOTUNIQ = syscall.Errno(0x4c)
EOPNOTSUPP = syscall.Errno(0x5f)
EOVERFLOW = syscall.Errno(0x4b)
EOWNERDEAD = syscall.Errno(0x82)
EPFNOSUPPORT = syscall.Errno(0x60)
EPROTO = syscall.Errno(0x47)
EPROTONOSUPPORT = syscall.Errno(0x5d)
EPROTOTYPE = syscall.Errno(0x5b)
EREMCHG = syscall.Errno(0x4e)
EREMOTE = syscall.Errno(0x42)
EREMOTEIO = syscall.Errno(0x79)
ERESTART = syscall.Errno(0x55)
ERFKILL = syscall.Errno(0x84)
ESHUTDOWN = syscall.Errno(0x6c)
ESOCKTNOSUPPORT = syscall.Errno(0x5e)
ESRMNT = syscall.Errno(0x45)
ESTALE = syscall.Errno(0x74)
ESTRPIPE = syscall.Errno(0x56)
ETIME = syscall.Errno(0x3e)
ETIMEDOUT = syscall.Errno(0x6e)
ETOOMANYREFS = syscall.Errno(0x6d)
EUCLEAN = syscall.Errno(0x75)
EUNATCH = syscall.Errno(0x31)
EUSERS = syscall.Errno(0x57)
EXFULL = syscall.Errno(0x36)
)
// Signals
const (
SIGBUS = syscall.Signal(0x7)
SIGCHLD = syscall.Signal(0x11)
SIGCLD = syscall.Signal(0x11)
SIGCONT = syscall.Signal(0x12)
SIGIO = syscall.Signal(0x1d)
SIGPOLL = syscall.Signal(0x1d)
SIGPROF = syscall.Signal(0x1b)
SIGPWR = syscall.Signal(0x1e)
SIGSTKFLT = syscall.Signal(0x10)
SIGSTOP = syscall.Signal(0x13)
SIGSYS = syscall.Signal(0x1f)
SIGTSTP = syscall.Signal(0x14)
SIGTTIN = syscall.Signal(0x15)
SIGTTOU = syscall.Signal(0x16)
SIGURG = syscall.Signal(0x17)
SIGUSR1 = syscall.Signal(0xa)
SIGUSR2 = syscall.Signal(0xc)
SIGVTALRM = syscall.Signal(0x1a)
SIGWINCH = syscall.Signal(0x1c)
SIGXCPU = syscall.Signal(0x18)
SIGXFSZ = syscall.Signal(0x19)
)
// Error table
var errorList = [...]struct {
num syscall.Errno
name string
desc string
}{
{1, "EPERM", "operation not permitted"},
{2, "ENOENT", "no such file or directory"},
{3, "ESRCH", "no such process"},
{4, "EINTR", "interrupted system call"},
{5, "EIO", "input/output error"},
{6, "ENXIO", "no such device or address"},
{7, "E2BIG", "argument list too long"},
{8, "ENOEXEC", "exec format error"},
{9, "EBADF", "bad file descriptor"},
{10, "ECHILD", "no child processes"},
{11, "EAGAIN", "resource temporarily unavailable"},
{12, "ENOMEM", "cannot allocate memory"},
{13, "EACCES", "permission denied"},
{14, "EFAULT", "bad address"},
{15, "ENOTBLK", "block device required"},
{16, "EBUSY", "device or resource busy"},
{17, "EEXIST", "file exists"},
{18, "EXDEV", "invalid cross-device link"},
{19, "ENODEV", "no such device"},
{20, "ENOTDIR", "not a directory"},
{21, "EISDIR", "is a directory"},
{22, "EINVAL", "invalid argument"},
{23, "ENFILE", "too many open files in system"},
{24, "EMFILE", "too many open files"},
{25, "ENOTTY", "inappropriate ioctl for device"},
{26, "ETXTBSY", "text file busy"},
{27, "EFBIG", "file too large"},
{28, "ENOSPC", "no space left on device"},
{29, "ESPIPE", "illegal seek"},
{30, "EROFS", "read-only file system"},
{31, "EMLINK", "too many links"},
{32, "EPIPE", "broken pipe"},
{33, "EDOM", "numerical argument out of domain"},
{34, "ERANGE", "numerical result out of range"},
{35, "EDEADLK", "resource deadlock avoided"},
{36, "ENAMETOOLONG", "file name too long"},
{37, "ENOLCK", "no locks available"},
{38, "ENOSYS", "function not implemented"},
{39, "ENOTEMPTY", "directory not empty"},
{40, "ELOOP", "too many levels of symbolic links"},
{42, "ENOMSG", "no message of desired type"},
{43, "EIDRM", "identifier removed"},
{44, "ECHRNG", "channel number out of range"},
{45, "EL2NSYNC", "level 2 not synchronized"},
{46, "EL3HLT", "level 3 halted"},
{47, "EL3RST", "level 3 reset"},
{48, "ELNRNG", "link number out of range"},
{49, "EUNATCH", "protocol driver not attached"},
{50, "ENOCSI", "no CSI structure available"},
{51, "EL2HLT", "level 2 halted"},
{52, "EBADE", "invalid exchange"},
{53, "EBADR", "invalid request descriptor"},
{54, "EXFULL", "exchange full"},
{55, "ENOANO", "no anode"},
{56, "EBADRQC", "invalid request code"},
{57, "EBADSLT", "invalid slot"},
{59, "EBFONT", "bad font file format"},
{60, "ENOSTR", "device not a stream"},
{61, "ENODATA", "no data available"},
{62, "ETIME", "timer expired"},
{63, "ENOSR", "out of streams resources"},
{64, "ENONET", "machine is not on the network"},
{65, "ENOPKG", "package not installed"},
{66, "EREMOTE", "object is remote"},
{67, "ENOLINK", "link has been severed"},
{68, "EADV", "advertise error"},
{69, "ESRMNT", "srmount error"},
{70, "ECOMM", "communication error on send"},
{71, "EPROTO", "protocol error"},
{72, "EMULTIHOP", "multihop attempted"},
{73, "EDOTDOT", "RFS specific error"},
{74, "EBADMSG", "bad message"},
{75, "EOVERFLOW", "value too large for defined data type"},
{76, "ENOTUNIQ", "name not unique on network"},
{77, "EBADFD", "file descriptor in bad state"},
{78, "EREMCHG", "remote address changed"},
{79, "ELIBACC", "can not access a needed shared library"},
{80, "ELIBBAD", "accessing a corrupted shared library"},
{81, "ELIBSCN", ".lib section in a.out corrupted"},
{82, "ELIBMAX", "attempting to link in too many shared libraries"},
{83, "ELIBEXEC", "cannot exec a shared library directly"},
{84, "EILSEQ", "invalid or incomplete multibyte or wide character"},
{85, "ERESTART", "interrupted system call should be restarted"},
{86, "ESTRPIPE", "streams pipe error"},
{87, "EUSERS", "too many users"},
{88, "ENOTSOCK", "socket operation on non-socket"},
{89, "EDESTADDRREQ", "destination address required"},
{90, "EMSGSIZE", "message too long"},
{91, "EPROTOTYPE", "protocol wrong type for socket"},
{92, "ENOPROTOOPT", "protocol not available"},
{93, "EPROTONOSUPPORT", "protocol not supported"},
{94, "ESOCKTNOSUPPORT", "socket type not supported"},
{95, "ENOTSUP", "operation not supported"},
{96, "EPFNOSUPPORT", "protocol family not supported"},
{97, "EAFNOSUPPORT", "address family not supported by protocol"},
{98, "EADDRINUSE", "address already in use"},
{99, "EADDRNOTAVAIL", "cannot assign requested address"},
{100, "ENETDOWN", "network is down"},
{101, "ENETUNREACH", "network is unreachable"},
{102, "ENETRESET", "network dropped connection on reset"},
{103, "ECONNABORTED", "software caused connection abort"},
{104, "ECONNRESET", "connection reset by peer"},
{105, "ENOBUFS", "no buffer space available"},
{106, "EISCONN", "transport endpoint is already connected"},
{107, "ENOTCONN", "transport endpoint is not connected"},
{108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"},
{109, "ETOOMANYREFS", "too many references: cannot splice"},
{110, "ETIMEDOUT", "connection timed out"},
{111, "ECONNREFUSED", "connection refused"},
{112, "EHOSTDOWN", "host is down"},
{113, "EHOSTUNREACH", "no route to host"},
{114, "EALREADY", "operation already in progress"},
{115, "EINPROGRESS", "operation now in progress"},
{116, "ESTALE", "stale file handle"},
{117, "EUCLEAN", "structure needs cleaning"},
{118, "ENOTNAM", "not a XENIX named type file"},
{119, "ENAVAIL", "no XENIX semaphores available"},
{120, "EISNAM", "is a named type file"},
{121, "EREMOTEIO", "remote I/O error"},
{122, "EDQUOT", "disk quota exceeded"},
{123, "ENOMEDIUM", "no medium found"},
{124, "EMEDIUMTYPE", "wrong medium type"},
{125, "ECANCELED", "operation canceled"},
{126, "ENOKEY", "required key not available"},
{127, "EKEYEXPIRED", "key has expired"},
{128, "EKEYREVOKED", "key has been revoked"},
{129, "EKEYREJECTED", "key was rejected by service"},
{130, "EOWNERDEAD", "owner died"},
{131, "ENOTRECOVERABLE", "state not recoverable"},
{132, "ERFKILL", "operation not possible due to RF-kill"},
{133, "EHWPOISON", "memory page has hardware error"},
}
// Signal table
var signalList = [...]struct {
num syscall.Signal
name string
desc string
}{
{1, "SIGHUP", "hangup"},
{2, "SIGINT", "interrupt"},
{3, "SIGQUIT", "quit"},
{4, "SIGILL", "illegal instruction"},
{5, "SIGTRAP", "trace/breakpoint trap"},
{6, "SIGABRT", "aborted"},
{7, "SIGBUS", "bus error"},
{8, "SIGFPE", "floating point exception"},
{9, "SIGKILL", "killed"},
{10, "SIGUSR1", "user defined signal 1"},
{11, "SIGSEGV", "segmentation fault"},
{12, "SIGUSR2", "user defined signal 2"},
{13, "SIGPIPE", "broken pipe"},
{14, "SIGALRM", "alarm clock"},
{15, "SIGTERM", "terminated"},
{16, "SIGSTKFLT", "stack fault"},
{17, "SIGCHLD", "child exited"},
{18, "SIGCONT", "continued"},
{19, "SIGSTOP", "stopped (signal)"},
{20, "SIGTSTP", "stopped"},
{21, "SIGTTIN", "stopped (tty input)"},
{22, "SIGTTOU", "stopped (tty output)"},
{23, "SIGURG", "urgent I/O condition"},
{24, "SIGXCPU", "CPU time limit exceeded"},
{25, "SIGXFSZ", "file size limit exceeded"},
{26, "SIGVTALRM", "virtual timer expired"},
{27, "SIGPROF", "profiling timer expired"},
{28, "SIGWINCH", "window changed"},
{29, "SIGIO", "I/O possible"},
{30, "SIGPWR", "power failure"},
{31, "SIGSYS", "bad system call"},
}
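
Note: the errorList and signalList tables above are what golang.org/x/sys/unix consults internally to turn raw errno and signal numbers back into names and descriptions. A minimal standalone sketch of that lookup pattern, using an illustrative table of the same shape and a hypothetical lookupErrno helper (neither is part of the vendored file):

package main

import (
	"fmt"
	"sort"
	"syscall"
)

// Same shape as the vendored errorList table: entries sorted by errno number.
var errTable = []struct {
	num  syscall.Errno
	name string
	desc string
}{
	{2, "ENOENT", "no such file or directory"},
	{13, "EACCES", "permission denied"},
	{110, "ETIMEDOUT", "connection timed out"},
}

// lookupErrno binary-searches the sorted table for a matching errno.
func lookupErrno(e syscall.Errno) (name, desc string, ok bool) {
	i := sort.Search(len(errTable), func(i int) bool { return errTable[i].num >= e })
	if i < len(errTable) && errTable[i].num == e {
		return errTable[i].name, errTable[i].desc, true
	}
	return "", "", false
}

func main() {
	if name, desc, ok := lookupErrno(syscall.Errno(0x6e)); ok {
		fmt.Println(name, "-", desc) // ETIMEDOUT - connection timed out
	}
}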

552
vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go generated vendored Normal file

@@ -0,0 +1,552 @@
// go run mksyscall.go -tags linux,loong64 syscall_linux.go syscall_linux_loong64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build linux && loong64
// +build linux,loong64
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
_, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
_, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
n = int64(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
var _p0 unsafe.Pointer
if len(events) > 0 {
_p0 = unsafe.Pointer(&events[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_EPOLL_PWAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fadvise(fd int, offset int64, length int64, advice int) (err error) {
_, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchown(fd int, uid int, gid int) (err error) {
_, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatfs(fd int, buf *Statfs_t) (err error) {
_, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ftruncate(fd int, length int64) (err error) {
_, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getegid() (egid int) {
r0, _ := RawSyscallNoError(SYS_GETEGID, 0, 0, 0)
egid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Geteuid() (euid int) {
r0, _ := RawSyscallNoError(SYS_GETEUID, 0, 0, 0)
euid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getgid() (gid int) {
r0, _ := RawSyscallNoError(SYS_GETGID, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getuid() (uid int) {
r0, _ := RawSyscallNoError(SYS_GETUID, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, n int) (err error) {
_, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(n), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pread(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pwrite(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seek(fd int, offset int64, whence int) (off int64, err error) {
r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
off = int64(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
written = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setfsgid(gid int) (prev int, err error) {
r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
prev = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setfsuid(uid int) (prev int, err error) {
r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
prev = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setregid(rgid int, egid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresgid(rgid int, egid int, sgid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresuid(ruid int, euid int, suid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setreuid(ruid int, euid int) (err error) {
_, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
n = int64(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Statfs(path string, buf *Statfs_t) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
_, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Truncate(path string, length int64) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
_, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getgroups(n int, list *_Gid_t) (nn int, err error) {
r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
nn = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setgroups(n int, list *_Gid_t) (err error) {
_, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
_, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
_, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socket(domain int, typ int, proto int) (fd int, err error) {
r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
_, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
_, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
_, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset))
xaddr = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Gettimeofday(tv *Timeval) (err error) {
_, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(cmdline)
if err != nil {
return
}
_, _, e1 := Syscall6(SYS_KEXEC_FILE_LOAD, uintptr(kernelFd), uintptr(initrdFd), uintptr(cmdlineLen), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
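
Note: the exported wrappers in this generated file (Fstat, Getuid, Seek, and so on) are the functions that callers of golang.org/x/sys/unix use directly; the Syscall/RawSyscall plumbing stays internal. A small Linux-only usage sketch (the /etc/hostname path is purely illustrative):

//go:build linux

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fmt.Println("uid:", unix.Getuid()) // thin wrapper over SYS_GETUID

	fd, err := unix.Open("/etc/hostname", unix.O_RDONLY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	// Fstat fills a Stat_t via the generated SYS_FSTAT wrapper shown above.
	var st unix.Stat_t
	if err := unix.Fstat(fd, &st); err != nil {
		log.Fatal(err)
	}
	fmt.Println("size:", st.Size)
}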

313
vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go generated vendored Normal file

@@ -0,0 +1,313 @@
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build loong64 && linux
// +build loong64,linux
package unix
const (
SYS_IO_SETUP = 0
SYS_IO_DESTROY = 1
SYS_IO_SUBMIT = 2
SYS_IO_CANCEL = 3
SYS_IO_GETEVENTS = 4
SYS_SETXATTR = 5
SYS_LSETXATTR = 6
SYS_FSETXATTR = 7
SYS_GETXATTR = 8
SYS_LGETXATTR = 9
SYS_FGETXATTR = 10
SYS_LISTXATTR = 11
SYS_LLISTXATTR = 12
SYS_FLISTXATTR = 13
SYS_REMOVEXATTR = 14
SYS_LREMOVEXATTR = 15
SYS_FREMOVEXATTR = 16
SYS_GETCWD = 17
SYS_LOOKUP_DCOOKIE = 18
SYS_EVENTFD2 = 19
SYS_EPOLL_CREATE1 = 20
SYS_EPOLL_CTL = 21
SYS_EPOLL_PWAIT = 22
SYS_DUP = 23
SYS_DUP3 = 24
SYS_FCNTL = 25
SYS_INOTIFY_INIT1 = 26
SYS_INOTIFY_ADD_WATCH = 27
SYS_INOTIFY_RM_WATCH = 28
SYS_IOCTL = 29
SYS_IOPRIO_SET = 30
SYS_IOPRIO_GET = 31
SYS_FLOCK = 32
SYS_MKNODAT = 33
SYS_MKDIRAT = 34
SYS_UNLINKAT = 35
SYS_SYMLINKAT = 36
SYS_LINKAT = 37
SYS_UMOUNT2 = 39
SYS_MOUNT = 40
SYS_PIVOT_ROOT = 41
SYS_NFSSERVCTL = 42
SYS_STATFS = 43
SYS_FSTATFS = 44
SYS_TRUNCATE = 45
SYS_FTRUNCATE = 46
SYS_FALLOCATE = 47
SYS_FACCESSAT = 48
SYS_CHDIR = 49
SYS_FCHDIR = 50
SYS_CHROOT = 51
SYS_FCHMOD = 52
SYS_FCHMODAT = 53
SYS_FCHOWNAT = 54
SYS_FCHOWN = 55
SYS_OPENAT = 56
SYS_CLOSE = 57
SYS_VHANGUP = 58
SYS_PIPE2 = 59
SYS_QUOTACTL = 60
SYS_GETDENTS64 = 61
SYS_LSEEK = 62
SYS_READ = 63
SYS_WRITE = 64
SYS_READV = 65
SYS_WRITEV = 66
SYS_PREAD64 = 67
SYS_PWRITE64 = 68
SYS_PREADV = 69
SYS_PWRITEV = 70
SYS_SENDFILE = 71
SYS_PSELECT6 = 72
SYS_PPOLL = 73
SYS_SIGNALFD4 = 74
SYS_VMSPLICE = 75
SYS_SPLICE = 76
SYS_TEE = 77
SYS_READLINKAT = 78
SYS_FSTATAT = 79
SYS_FSTAT = 80
SYS_SYNC = 81
SYS_FSYNC = 82
SYS_FDATASYNC = 83
SYS_SYNC_FILE_RANGE = 84
SYS_TIMERFD_CREATE = 85
SYS_TIMERFD_SETTIME = 86
SYS_TIMERFD_GETTIME = 87
SYS_UTIMENSAT = 88
SYS_ACCT = 89
SYS_CAPGET = 90
SYS_CAPSET = 91
SYS_PERSONALITY = 92
SYS_EXIT = 93
SYS_EXIT_GROUP = 94
SYS_WAITID = 95
SYS_SET_TID_ADDRESS = 96
SYS_UNSHARE = 97
SYS_FUTEX = 98
SYS_SET_ROBUST_LIST = 99
SYS_GET_ROBUST_LIST = 100
SYS_NANOSLEEP = 101
SYS_GETITIMER = 102
SYS_SETITIMER = 103
SYS_KEXEC_LOAD = 104
SYS_INIT_MODULE = 105
SYS_DELETE_MODULE = 106
SYS_TIMER_CREATE = 107
SYS_TIMER_GETTIME = 108
SYS_TIMER_GETOVERRUN = 109
SYS_TIMER_SETTIME = 110
SYS_TIMER_DELETE = 111
SYS_CLOCK_SETTIME = 112
SYS_CLOCK_GETTIME = 113
SYS_CLOCK_GETRES = 114
SYS_CLOCK_NANOSLEEP = 115
SYS_SYSLOG = 116
SYS_PTRACE = 117
SYS_SCHED_SETPARAM = 118
SYS_SCHED_SETSCHEDULER = 119
SYS_SCHED_GETSCHEDULER = 120
SYS_SCHED_GETPARAM = 121
SYS_SCHED_SETAFFINITY = 122
SYS_SCHED_GETAFFINITY = 123
SYS_SCHED_YIELD = 124
SYS_SCHED_GET_PRIORITY_MAX = 125
SYS_SCHED_GET_PRIORITY_MIN = 126
SYS_SCHED_RR_GET_INTERVAL = 127
SYS_RESTART_SYSCALL = 128
SYS_KILL = 129
SYS_TKILL = 130
SYS_TGKILL = 131
SYS_SIGALTSTACK = 132
SYS_RT_SIGSUSPEND = 133
SYS_RT_SIGACTION = 134
SYS_RT_SIGPROCMASK = 135
SYS_RT_SIGPENDING = 136
SYS_RT_SIGTIMEDWAIT = 137
SYS_RT_SIGQUEUEINFO = 138
SYS_RT_SIGRETURN = 139
SYS_SETPRIORITY = 140
SYS_GETPRIORITY = 141
SYS_REBOOT = 142
SYS_SETREGID = 143
SYS_SETGID = 144
SYS_SETREUID = 145
SYS_SETUID = 146
SYS_SETRESUID = 147
SYS_GETRESUID = 148
SYS_SETRESGID = 149
SYS_GETRESGID = 150
SYS_SETFSUID = 151
SYS_SETFSGID = 152
SYS_TIMES = 153
SYS_SETPGID = 154
SYS_GETPGID = 155
SYS_GETSID = 156
SYS_SETSID = 157
SYS_GETGROUPS = 158
SYS_SETGROUPS = 159
SYS_UNAME = 160
SYS_SETHOSTNAME = 161
SYS_SETDOMAINNAME = 162
SYS_GETRUSAGE = 165
SYS_UMASK = 166
SYS_PRCTL = 167
SYS_GETCPU = 168
SYS_GETTIMEOFDAY = 169
SYS_SETTIMEOFDAY = 170
SYS_ADJTIMEX = 171
SYS_GETPID = 172
SYS_GETPPID = 173
SYS_GETUID = 174
SYS_GETEUID = 175
SYS_GETGID = 176
SYS_GETEGID = 177
SYS_GETTID = 178
SYS_SYSINFO = 179
SYS_MQ_OPEN = 180
SYS_MQ_UNLINK = 181
SYS_MQ_TIMEDSEND = 182
SYS_MQ_TIMEDRECEIVE = 183
SYS_MQ_NOTIFY = 184
SYS_MQ_GETSETATTR = 185
SYS_MSGGET = 186
SYS_MSGCTL = 187
SYS_MSGRCV = 188
SYS_MSGSND = 189
SYS_SEMGET = 190
SYS_SEMCTL = 191
SYS_SEMTIMEDOP = 192
SYS_SEMOP = 193
SYS_SHMGET = 194
SYS_SHMCTL = 195
SYS_SHMAT = 196
SYS_SHMDT = 197
SYS_SOCKET = 198
SYS_SOCKETPAIR = 199
SYS_BIND = 200
SYS_LISTEN = 201
SYS_ACCEPT = 202
SYS_CONNECT = 203
SYS_GETSOCKNAME = 204
SYS_GETPEERNAME = 205
SYS_SENDTO = 206
SYS_RECVFROM = 207
SYS_SETSOCKOPT = 208
SYS_GETSOCKOPT = 209
SYS_SHUTDOWN = 210
SYS_SENDMSG = 211
SYS_RECVMSG = 212
SYS_READAHEAD = 213
SYS_BRK = 214
SYS_MUNMAP = 215
SYS_MREMAP = 216
SYS_ADD_KEY = 217
SYS_REQUEST_KEY = 218
SYS_KEYCTL = 219
SYS_CLONE = 220
SYS_EXECVE = 221
SYS_MMAP = 222
SYS_FADVISE64 = 223
SYS_SWAPON = 224
SYS_SWAPOFF = 225
SYS_MPROTECT = 226
SYS_MSYNC = 227
SYS_MLOCK = 228
SYS_MUNLOCK = 229
SYS_MLOCKALL = 230
SYS_MUNLOCKALL = 231
SYS_MINCORE = 232
SYS_MADVISE = 233
SYS_REMAP_FILE_PAGES = 234
SYS_MBIND = 235
SYS_GET_MEMPOLICY = 236
SYS_SET_MEMPOLICY = 237
SYS_MIGRATE_PAGES = 238
SYS_MOVE_PAGES = 239
SYS_RT_TGSIGQUEUEINFO = 240
SYS_PERF_EVENT_OPEN = 241
SYS_ACCEPT4 = 242
SYS_RECVMMSG = 243
SYS_ARCH_SPECIFIC_SYSCALL = 244
SYS_WAIT4 = 260
SYS_PRLIMIT64 = 261
SYS_FANOTIFY_INIT = 262
SYS_FANOTIFY_MARK = 263
SYS_NAME_TO_HANDLE_AT = 264
SYS_OPEN_BY_HANDLE_AT = 265
SYS_CLOCK_ADJTIME = 266
SYS_SYNCFS = 267
SYS_SETNS = 268
SYS_SENDMMSG = 269
SYS_PROCESS_VM_READV = 270
SYS_PROCESS_VM_WRITEV = 271
SYS_KCMP = 272
SYS_FINIT_MODULE = 273
SYS_SCHED_SETATTR = 274
SYS_SCHED_GETATTR = 275
SYS_RENAMEAT2 = 276
SYS_SECCOMP = 277
SYS_GETRANDOM = 278
SYS_MEMFD_CREATE = 279
SYS_BPF = 280
SYS_EXECVEAT = 281
SYS_USERFAULTFD = 282
SYS_MEMBARRIER = 283
SYS_MLOCK2 = 284
SYS_COPY_FILE_RANGE = 285
SYS_PREADV2 = 286
SYS_PWRITEV2 = 287
SYS_PKEY_MPROTECT = 288
SYS_PKEY_ALLOC = 289
SYS_PKEY_FREE = 290
SYS_STATX = 291
SYS_IO_PGETEVENTS = 292
SYS_RSEQ = 293
SYS_KEXEC_FILE_LOAD = 294
SYS_PIDFD_SEND_SIGNAL = 424
SYS_IO_URING_SETUP = 425
SYS_IO_URING_ENTER = 426
SYS_IO_URING_REGISTER = 427
SYS_OPEN_TREE = 428
SYS_MOVE_MOUNT = 429
SYS_FSOPEN = 430
SYS_FSCONFIG = 431
SYS_FSMOUNT = 432
SYS_FSPICK = 433
SYS_PIDFD_OPEN = 434
SYS_CLONE3 = 435
SYS_CLOSE_RANGE = 436
SYS_OPENAT2 = 437
SYS_PIDFD_GETFD = 438
SYS_FACCESSAT2 = 439
SYS_PROCESS_MADVISE = 440
SYS_EPOLL_PWAIT2 = 441
SYS_MOUNT_SETATTR = 442
SYS_QUOTACTL_FD = 443
SYS_LANDLOCK_CREATE_RULESET = 444
SYS_LANDLOCK_ADD_RULE = 445
SYS_LANDLOCK_RESTRICT_SELF = 446
SYS_PROCESS_MRELEASE = 448
SYS_FUTEX_WAITV = 449
SYS_SET_MEMPOLICY_HOME_NODE = 450
)
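
Note: the SYS_* constants above are the raw linux/loong64 syscall numbers. They are normally reached through the generated wrappers, but they can also be passed straight to unix.Syscall, as in this illustrative Linux-only sketch:

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// SYS_GETPID (172 on loong64) takes no arguments and cannot fail.
	pid, _, _ := unix.Syscall(unix.SYS_GETPID, 0, 0, 0)
	fmt.Println("pid via raw syscall:", pid)

	// The same number is what the higher-level unix.Getpid() wrapper uses.
	fmt.Println("pid via wrapper:   ", unix.Getpid())
}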

679
vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go generated vendored Normal file

@@ -0,0 +1,679 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build loong64 && linux
// +build loong64,linux
package unix
const (
SizeofPtr = 0x8
SizeofLong = 0x8
)
type (
_C_long int64
)
type Timespec struct {
Sec int64
Nsec int64
}
type Timeval struct {
Sec int64
Usec int64
}
type Timex struct {
Modes uint32
Offset int64
Freq int64
Maxerror int64
Esterror int64
Status int32
Constant int64
Precision int64
Tolerance int64
Time Timeval
Tick int64
Ppsfreq int64
Jitter int64
Shift int32
Stabil int64
Jitcnt int64
Calcnt int64
Errcnt int64
Stbcnt int64
Tai int32
_ [44]byte
}
type Time_t int64
type Tms struct {
Utime int64
Stime int64
Cutime int64
Cstime int64
}
type Utimbuf struct {
Actime int64
Modtime int64
}
type Rusage struct {
Utime Timeval
Stime Timeval
Maxrss int64
Ixrss int64
Idrss int64
Isrss int64
Minflt int64
Majflt int64
Nswap int64
Inblock int64
Oublock int64
Msgsnd int64
Msgrcv int64
Nsignals int64
Nvcsw int64
Nivcsw int64
}
type Stat_t struct {
Dev uint64
Ino uint64
Mode uint32
Nlink uint32
Uid uint32
Gid uint32
Rdev uint64
_ uint64
Size int64
Blksize int32
_ int32
Blocks int64
Atim Timespec
Mtim Timespec
Ctim Timespec
_ [2]int32
}
type Dirent struct {
Ino uint64
Off int64
Reclen uint16
Type uint8
Name [256]int8
_ [5]byte
}
type Flock_t struct {
Type int16
Whence int16
Start int64
Len int64
Pid int32
_ [4]byte
}
type DmNameList struct {
Dev uint64
Next uint32
Name [0]byte
_ [4]byte
}
const (
FADV_DONTNEED = 0x4
FADV_NOREUSE = 0x5
)
type RawSockaddrNFCLLCP struct {
Sa_family uint16
Dev_idx uint32
Target_idx uint32
Nfc_protocol uint32
Dsap uint8
Ssap uint8
Service_name [63]uint8
Service_name_len uint64
}
type RawSockaddr struct {
Family uint16
Data [14]int8
}
type RawSockaddrAny struct {
Addr RawSockaddr
Pad [96]int8
}
type Iovec struct {
Base *byte
Len uint64
}
type Msghdr struct {
Name *byte
Namelen uint32
Iov *Iovec
Iovlen uint64
Control *byte
Controllen uint64
Flags int32
_ [4]byte
}
type Cmsghdr struct {
Len uint64
Level int32
Type int32
}
type ifreq struct {
Ifrn [16]byte
Ifru [24]byte
}
const (
SizeofSockaddrNFCLLCP = 0x60
SizeofIovec = 0x10
SizeofMsghdr = 0x38
SizeofCmsghdr = 0x10
)
const (
SizeofSockFprog = 0x10
)
type PtraceRegs struct {
Regs [32]uint64
Orig_a0 uint64
Era uint64
Badv uint64
Reserved [10]uint64
}
type FdSet struct {
Bits [16]int64
}
type Sysinfo_t struct {
Uptime int64
Loads [3]uint64
Totalram uint64
Freeram uint64
Sharedram uint64
Bufferram uint64
Totalswap uint64
Freeswap uint64
Procs uint16
Pad uint16
Totalhigh uint64
Freehigh uint64
Unit uint32
_ [0]int8
_ [4]byte
}
type Ustat_t struct {
Tfree int32
Tinode uint64
Fname [6]int8
Fpack [6]int8
_ [4]byte
}
type EpollEvent struct {
Events uint32
_ int32
Fd int32
Pad int32
}
const (
OPEN_TREE_CLOEXEC = 0x80000
)
const (
POLLRDHUP = 0x2000
)
type Sigset_t struct {
Val [16]uint64
}
const _C__NSIG = 0x41
type Siginfo struct {
Signo int32
Errno int32
Code int32
_ int32
_ [112]byte
}
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Line uint8
Cc [19]uint8
Ispeed uint32
Ospeed uint32
}
type Taskstats struct {
Version uint16
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
Blkio_delay_total uint64
Swapin_count uint64
Swapin_delay_total uint64
Cpu_run_real_total uint64
Cpu_run_virtual_total uint64
Ac_comm [32]int8
Ac_sched uint8
Ac_pad [3]uint8
_ [4]byte
Ac_uid uint32
Ac_gid uint32
Ac_pid uint32
Ac_ppid uint32
Ac_btime uint32
Ac_etime uint64
Ac_utime uint64
Ac_stime uint64
Ac_minflt uint64
Ac_majflt uint64
Coremem uint64
Virtmem uint64
Hiwater_rss uint64
Hiwater_vm uint64
Read_char uint64
Write_char uint64
Read_syscalls uint64
Write_syscalls uint64
Read_bytes uint64
Write_bytes uint64
Cancelled_write_bytes uint64
Nvcsw uint64
Nivcsw uint64
Ac_utimescaled uint64
Ac_stimescaled uint64
Cpu_scaled_run_real_total uint64
Freepages_count uint64
Freepages_delay_total uint64
Thrashing_count uint64
Thrashing_delay_total uint64
Ac_btime64 uint64
Compact_count uint64
Compact_delay_total uint64
}
type cpuMask uint64
const (
_NCPUBITS = 0x40
)
const (
CBitFieldMaskBit0 = 0x1
CBitFieldMaskBit1 = 0x2
CBitFieldMaskBit2 = 0x4
CBitFieldMaskBit3 = 0x8
CBitFieldMaskBit4 = 0x10
CBitFieldMaskBit5 = 0x20
CBitFieldMaskBit6 = 0x40
CBitFieldMaskBit7 = 0x80
CBitFieldMaskBit8 = 0x100
CBitFieldMaskBit9 = 0x200
CBitFieldMaskBit10 = 0x400
CBitFieldMaskBit11 = 0x800
CBitFieldMaskBit12 = 0x1000
CBitFieldMaskBit13 = 0x2000
CBitFieldMaskBit14 = 0x4000
CBitFieldMaskBit15 = 0x8000
CBitFieldMaskBit16 = 0x10000
CBitFieldMaskBit17 = 0x20000
CBitFieldMaskBit18 = 0x40000
CBitFieldMaskBit19 = 0x80000
CBitFieldMaskBit20 = 0x100000
CBitFieldMaskBit21 = 0x200000
CBitFieldMaskBit22 = 0x400000
CBitFieldMaskBit23 = 0x800000
CBitFieldMaskBit24 = 0x1000000
CBitFieldMaskBit25 = 0x2000000
CBitFieldMaskBit26 = 0x4000000
CBitFieldMaskBit27 = 0x8000000
CBitFieldMaskBit28 = 0x10000000
CBitFieldMaskBit29 = 0x20000000
CBitFieldMaskBit30 = 0x40000000
CBitFieldMaskBit31 = 0x80000000
CBitFieldMaskBit32 = 0x100000000
CBitFieldMaskBit33 = 0x200000000
CBitFieldMaskBit34 = 0x400000000
CBitFieldMaskBit35 = 0x800000000
CBitFieldMaskBit36 = 0x1000000000
CBitFieldMaskBit37 = 0x2000000000
CBitFieldMaskBit38 = 0x4000000000
CBitFieldMaskBit39 = 0x8000000000
CBitFieldMaskBit40 = 0x10000000000
CBitFieldMaskBit41 = 0x20000000000
CBitFieldMaskBit42 = 0x40000000000
CBitFieldMaskBit43 = 0x80000000000
CBitFieldMaskBit44 = 0x100000000000
CBitFieldMaskBit45 = 0x200000000000
CBitFieldMaskBit46 = 0x400000000000
CBitFieldMaskBit47 = 0x800000000000
CBitFieldMaskBit48 = 0x1000000000000
CBitFieldMaskBit49 = 0x2000000000000
CBitFieldMaskBit50 = 0x4000000000000
CBitFieldMaskBit51 = 0x8000000000000
CBitFieldMaskBit52 = 0x10000000000000
CBitFieldMaskBit53 = 0x20000000000000
CBitFieldMaskBit54 = 0x40000000000000
CBitFieldMaskBit55 = 0x80000000000000
CBitFieldMaskBit56 = 0x100000000000000
CBitFieldMaskBit57 = 0x200000000000000
CBitFieldMaskBit58 = 0x400000000000000
CBitFieldMaskBit59 = 0x800000000000000
CBitFieldMaskBit60 = 0x1000000000000000
CBitFieldMaskBit61 = 0x2000000000000000
CBitFieldMaskBit62 = 0x4000000000000000
CBitFieldMaskBit63 = 0x8000000000000000
)
type SockaddrStorage struct {
Family uint16
_ [118]int8
_ uint64
}
type HDGeometry struct {
Heads uint8
Sectors uint8
Cylinders uint16
Start uint64
}
type Statfs_t struct {
Type int64
Bsize int64
Blocks uint64
Bfree uint64
Bavail uint64
Files uint64
Ffree uint64
Fsid Fsid
Namelen int64
Frsize int64
Flags int64
Spare [4]int64
}
type TpacketHdr struct {
Status uint64
Len uint32
Snaplen uint32
Mac uint16
Net uint16
Sec uint32
Usec uint32
_ [4]byte
}
const (
SizeofTpacketHdr = 0x20
)
type RTCPLLInfo struct {
Ctrl int32
Value int32
Max int32
Min int32
Posmult int32
Negmult int32
Clock int64
}
type BlkpgPartition struct {
Start int64
Length int64
Pno int32
Devname [64]uint8
Volname [64]uint8
_ [4]byte
}
const (
BLKPG = 0x1269
)
type XDPUmemReg struct {
Addr uint64
Len uint64
Size uint32
Headroom uint32
Flags uint32
_ [4]byte
}
type CryptoUserAlg struct {
Name [64]int8
Driver_name [64]int8
Module_name [64]int8
Type uint32
Mask uint32
Refcnt uint32
Flags uint32
}
type CryptoStatAEAD struct {
Type [64]int8
Encrypt_cnt uint64
Encrypt_tlen uint64
Decrypt_cnt uint64
Decrypt_tlen uint64
Err_cnt uint64
}
type CryptoStatAKCipher struct {
Type [64]int8
Encrypt_cnt uint64
Encrypt_tlen uint64
Decrypt_cnt uint64
Decrypt_tlen uint64
Verify_cnt uint64
Sign_cnt uint64
Err_cnt uint64
}
type CryptoStatCipher struct {
Type [64]int8
Encrypt_cnt uint64
Encrypt_tlen uint64
Decrypt_cnt uint64
Decrypt_tlen uint64
Err_cnt uint64
}
type CryptoStatCompress struct {
Type [64]int8
Compress_cnt uint64
Compress_tlen uint64
Decompress_cnt uint64
Decompress_tlen uint64
Err_cnt uint64
}
type CryptoStatHash struct {
Type [64]int8
Hash_cnt uint64
Hash_tlen uint64
Err_cnt uint64
}
type CryptoStatKPP struct {
Type [64]int8
Setsecret_cnt uint64
Generate_public_key_cnt uint64
Compute_shared_secret_cnt uint64
Err_cnt uint64
}
type CryptoStatRNG struct {
Type [64]int8
Generate_cnt uint64
Generate_tlen uint64
Seed_cnt uint64
Err_cnt uint64
}
type CryptoStatLarval struct {
Type [64]int8
}
type CryptoReportLarval struct {
Type [64]int8
}
type CryptoReportHash struct {
Type [64]int8
Blocksize uint32
Digestsize uint32
}
type CryptoReportCipher struct {
Type [64]int8
Blocksize uint32
Min_keysize uint32
Max_keysize uint32
}
type CryptoReportBlkCipher struct {
Type [64]int8
Geniv [64]int8
Blocksize uint32
Min_keysize uint32
Max_keysize uint32
Ivsize uint32
}
type CryptoReportAEAD struct {
Type [64]int8
Geniv [64]int8
Blocksize uint32
Maxauthsize uint32
Ivsize uint32
}
type CryptoReportComp struct {
Type [64]int8
}
type CryptoReportRNG struct {
Type [64]int8
Seedsize uint32
}
type CryptoReportAKCipher struct {
Type [64]int8
}
type CryptoReportKPP struct {
Type [64]int8
}
type CryptoReportAcomp struct {
Type [64]int8
}
type LoopInfo struct {
Number int32
Device uint32
Inode uint64
Rdevice uint32
Offset int32
Encrypt_type int32
Encrypt_key_size int32
Flags int32
Name [64]int8
Encrypt_key [32]uint8
Init [2]uint64
Reserved [4]int8
_ [4]byte
}
type TIPCSubscr struct {
Seq TIPCServiceRange
Timeout uint32
Filter uint32
Handle [8]int8
}
type TIPCSIOCLNReq struct {
Peer uint32
Id uint32
Linkname [68]int8
}
type TIPCSIOCNodeIDReq struct {
Peer uint32
Id [16]int8
}
type PPSKInfo struct {
Assert_sequence uint32
Clear_sequence uint32
Assert_tu PPSKTime
Clear_tu PPSKTime
Current_mode int32
_ [4]byte
}
const (
PPS_GETPARAMS = 0x800870a1
PPS_SETPARAMS = 0x400870a2
PPS_GETCAP = 0x800870a3
PPS_FETCH = 0xc00870a4
)
const (
PIDFD_NONBLOCK = 0x800
)
type SysvIpcPerm struct {
Key int32
Uid uint32
Gid uint32
Cuid uint32
Cgid uint32
Mode uint32
_ [0]uint8
Seq uint16
_ uint16
_ uint64
_ uint64
}
type SysvShmDesc struct {
Perm SysvIpcPerm
Segsz uint64
Atime int64
Dtime int64
Ctime int64
Cpid int32
Lpid int32
Nattch uint64
_ uint64
_ uint64
}
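
Note: these ztypes structs mirror the kernel ABI layouts for linux/loong64 and are filled in directly by the syscall wrappers. An illustrative Linux-only sketch using two of them (Timespec via ClockGettime, Sysinfo_t via Sysinfo):

//go:build linux

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Timespec: Sec and Nsec are int64 on 64-bit targets such as loong64.
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("monotonic clock: %d.%09d\n", ts.Sec, ts.Nsec)

	// Sysinfo_t: populated by the sysinfo(2) wrapper.
	var si unix.Sysinfo_t
	if err := unix.Sysinfo(&si); err != nil {
		log.Fatal(err)
	}
	fmt.Println("uptime seconds:", si.Uptime, "total ram bytes:", si.Totalram*uint64(si.Unit))
}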

15
vendor/modules.txt vendored

@@ -109,7 +109,7 @@ github.com/containers/buildah/pkg/rusage
github.com/containers/buildah/pkg/sshagent
github.com/containers/buildah/pkg/util
github.com/containers/buildah/util
# github.com/containers/common v0.47.5-0.20220429111201-21d83cf7c533
# github.com/containers/common v0.48.0
## explicit
github.com/containers/common/libimage
github.com/containers/common/libimage/manifests
@@ -155,7 +155,7 @@ github.com/containers/common/version
# github.com/containers/conmon v2.0.20+incompatible
## explicit
github.com/containers/conmon/runner/config
# github.com/containers/image/v5 v5.21.1-0.20220425080628-be085685e524
# github.com/containers/image/v5 v5.21.1
## explicit
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -208,7 +208,7 @@ github.com/containers/image/v5/types
github.com/containers/image/v5/version
# github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a
github.com/containers/libtrust
# github.com/containers/ocicrypt v1.1.3
# github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f
## explicit
github.com/containers/ocicrypt
github.com/containers/ocicrypt/blockcipher
@@ -235,7 +235,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
# github.com/containers/storage v1.39.1-0.20220422100603-8996869ae40b
# github.com/containers/storage v1.40.0
## explicit
github.com/containers/storage
github.com/containers/storage/drivers
@@ -450,11 +450,12 @@ github.com/jinzhu/copier
# github.com/json-iterator/go v1.1.12
## explicit
github.com/json-iterator/go
# github.com/klauspost/compress v1.15.1
# github.com/klauspost/compress v1.15.2
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/internal/cpuinfo
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
@@ -643,7 +644,7 @@ github.com/stefanberger/go-pkcs11uri
## explicit
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/sylabs/sif/v2 v2.6.0
# github.com/sylabs/sif/v2 v2.7.0
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit
@@ -733,7 +734,7 @@ golang.org/x/net/trace
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20220412211240-33da011f77ad
# golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
## explicit
golang.org/x/sys/cpu
golang.org/x/sys/execabs