vendor: bump c/common and other vendors

This commit mainly bumps c/common so that netavark features can be synced with Podman; a few other vendored modules are bumped as well.

[NO NEW TESTS NEEDED]
[NO TESTS NEEDED]

Signed-off-by: Aditya R <arajan@redhat.com>

Parent: f46478c1e9
Commit: 2c492be00a
go.mod | 4 ++--
@@ -12,12 +12,12 @@ require (
 	github.com/containernetworking/cni v1.0.1
 	github.com/containernetworking/plugins v1.0.1
 	github.com/containers/buildah v1.23.1-0.20220112160421-d744ebc4b1d5
-	github.com/containers/common v0.46.1-0.20220117145719-da777f8b15b1
+	github.com/containers/common v0.46.1-0.20220119203335-0e7aca71d00a
 	github.com/containers/conmon v2.0.20+incompatible
 	github.com/containers/image/v5 v5.18.0
 	github.com/containers/ocicrypt v1.1.2
 	github.com/containers/psgo v1.7.1
-	github.com/containers/storage v1.37.1-0.20211213220314-73a749e4fec5
+	github.com/containers/storage v1.38.0
 	github.com/coreos/go-systemd/v22 v22.3.2
 	github.com/coreos/stream-metadata-go v0.0.0-20210225230131-70edb9eb47b3
 	github.com/cyphar/filepath-securejoin v0.2.3
go.sum | 15 +++++++++------
@@ -107,8 +107,9 @@ github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwT
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
 github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
-github.com/Microsoft/hcsshim v0.9.1 h1:VfDCj+QnY19ktX5TsH22JHcjaZ05RWQiwDbOyEg5ziM=
 github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM=
+github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -318,8 +319,8 @@ github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNB
 github.com/containers/buildah v1.23.1-0.20220112160421-d744ebc4b1d5 h1:J4ZMQgpzjClLNuRDCIYDY2KZE1yO9A1I3A/jEaFvtaY=
 github.com/containers/buildah v1.23.1-0.20220112160421-d744ebc4b1d5/go.mod h1:pA9nL58rY+rtoyZkzPmkv02Nwb9ifvYlChg95gKkNAY=
 github.com/containers/common v0.46.1-0.20220110165509-08c2c97e5e25/go.mod h1:hXUU9gtA8V9dSLHhizp/k/s0ZXBzrnUSScUfrsw8z2Y=
-github.com/containers/common v0.46.1-0.20220117145719-da777f8b15b1 h1:TGXTygk3STL+G4F1zGgSITdIEE5i+BgsSDLOmGuUYTY=
-github.com/containers/common v0.46.1-0.20220117145719-da777f8b15b1/go.mod h1:lJkY5VdkdU2BEDdbO5vgi3G69KWEgWBWXi6tNgm2BlM=
+github.com/containers/common v0.46.1-0.20220119203335-0e7aca71d00a h1:2f2PbMRN/Lu9sJpFFBpKv7g/8izZOUv9HObOnQWlS14=
+github.com/containers/common v0.46.1-0.20220119203335-0e7aca71d00a/go.mod h1:zIOEbgW3aBOXx9lsi91kcbX784HVH60ePkEBgbvPJ7g=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/image/v5 v5.17.1-0.20220106205022-73f80d60f0e1/go.mod h1:daAiRXgcGIf/7eD7B2EkuHHw084/8M8Kh35rzOu56y0=
@@ -337,8 +338,8 @@ github.com/containers/psgo v1.7.1/go.mod h1:mWGpFzW73qWFA+blhF6l7GuKzbrACkYgr/aj
 github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
 github.com/containers/storage v1.37.1-0.20211119174841-bf170b3ddac0/go.mod h1:XjCNlt5JUUmRuTJXhFxHb9hHGPho7DNg3o4N/14prdQ=
 github.com/containers/storage v1.37.1-0.20211122164443-82b8f06bfc08/go.mod h1:hvKpaiPRALDI7oz4Jx+AEch8iS/viRnc22HPilQROWU=
-github.com/containers/storage v1.37.1-0.20211213220314-73a749e4fec5 h1:DOpYQGCHIJfrErey3FyondnZGfZrbfGpHAN6nQssE1o=
-github.com/containers/storage v1.37.1-0.20211213220314-73a749e4fec5/go.mod h1:5qRpx96WJRTCQCsArfrWjUh398JSNCaTJG6RbOhMlqY=
+github.com/containers/storage v1.38.0 h1:QTgqmtQeb2tk1VucK0nZwCJKmlVLZGybrMMMlixedFY=
+github.com/containers/storage v1.38.0/go.mod h1:lBzt28gAk5ADZuRtwdndRJyqX22vnRaXmlF+7ktfMYc=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
@@ -790,8 +791,9 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds=
+github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1599,6 +1601,7 @@ golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -1,3 +1,6 @@
+run:
+  timeout: 8m
+
 linters:
   enable:
     - stylecheck
@@ -29,7 +29,7 @@ require (
 	go.opencensus.io v0.22.3
 	golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20210510120138-977fb7262007
+	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
 	google.golang.org/grpc v1.40.0
 )
 
@@ -812,8 +812,9 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -78,6 +78,13 @@ var (
 
 	// ErrNotSupported is an error encountered when hcs doesn't support the request
 	ErrPlatformNotSupported = errors.New("unsupported platform request")
+
+	// ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped.
+	ErrProcessAlreadyStopped = syscall.Errno(0x8037011f)
+
+	// ErrInvalidHandle is an error that can be encountrered when querying the properties of a compute system when the handle to that
+	// compute system has already been closed.
+	ErrInvalidHandle = syscall.Errno(0x6)
 )
 
 type ErrorEvent struct {
@@ -249,6 +256,14 @@ func IsNotExist(err error) bool {
 		err == ErrElementNotFound
 }
 
+// IsErrorInvalidHandle checks whether the error is the result of an operation carried
+// out on a handle that is invalid/closed. This error popped up while trying to query
+// stats on a container in the process of being stopped.
+func IsErrorInvalidHandle(err error) bool {
+	err = getInnerError(err)
+	return err == ErrInvalidHandle
+}
+
 // IsAlreadyClosed checks if an error is caused by the Container or Process having been
 // already closed by a call to the Close() method.
 func IsAlreadyClosed(err error) bool {
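The new IsErrorInvalidHandle helper pairs naturally with the existing IsAlreadyStopped check when polling a container that is shutting down. A minimal sketch of that pattern, written as if it sat next to these helpers in the same (internal) hcs package; the helper name and its use are assumptions for illustration, not part of this commit.

// ignoreShutdownErrors is a hypothetical helper: it turns the benign
// "handle already closed" / "already stopped" results that can show up while
// a container is being torn down into a nil error, and passes every other
// error through unchanged.
func ignoreShutdownErrors(err error) error {
	if err == nil {
		return nil
	}
	if IsErrorInvalidHandle(err) || IsAlreadyStopped(err) {
		return nil
	}
	return err
}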
@@ -281,6 +296,7 @@ func IsTimeout(err error) bool {
 func IsAlreadyStopped(err error) bool {
 	err = getInnerError(err)
 	return err == ErrVmcomputeAlreadyStopped ||
+		err == ErrProcessAlreadyStopped ||
 		err == ErrElementNotFound
 }
 
@@ -3,7 +3,9 @@ package hcs
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"io"
+	"os"
 	"sync"
 	"syscall"
 	"time"
@@ -16,16 +18,17 @@ import (
 
 // ContainerError is an error encountered in HCS
 type Process struct {
 	handleLock sync.RWMutex
 	handle vmcompute.HcsProcess
 	processID int
 	system *System
 	hasCachedStdio bool
 	stdioLock sync.Mutex
 	stdin io.WriteCloser
 	stdout io.ReadCloser
 	stderr io.ReadCloser
 	callbackNumber uintptr
+	killSignalDelivered bool
 
 	closedWaitOnce sync.Once
 	waitBlock chan struct{}
@@ -149,12 +152,45 @@ func (process *Process) Kill(ctx context.Context) (bool, error) {
 		return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
 	}
 
+	if process.killSignalDelivered {
+		// A kill signal has already been sent to this process. Sending a second
+		// one offers no real benefit, as processes cannot stop themselves from
+		// being terminated, once a TerminateProcess has been issued. Sending a
+		// second kill may result in a number of errors (two of which detailed bellow)
+		// and which we can avoid handling.
+		return true, nil
+	}
+
 	resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
+	if err != nil {
+		// We still need to check these two cases, as processes may still be killed by an
+		// external actor (human operator, OOM, random script etc).
+		if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) {
+			// There are two cases where it should be safe to ignore an error returned
+			// by HcsTerminateProcess. The first one is cause by the fact that
+			// HcsTerminateProcess ends up calling TerminateProcess in the context
+			// of a container. According to the TerminateProcess documentation:
+			// https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks
+			// After a process has terminated, call to TerminateProcess with open
+			// handles to the process fails with ERROR_ACCESS_DENIED (5) error code.
+			// It's safe to ignore this error here. HCS should always have permissions
+			// to kill processes inside any container. So an ERROR_ACCESS_DENIED
+			// is unlikely to be anything else than what the ending remarks in the
+			// documentation states.
+			//
+			// The second case is generated by hcs itself, if for any reason HcsTerminateProcess
+			// is called twice in a very short amount of time. In such cases, hcs may return
+			// HCS_E_PROCESS_ALREADY_STOPPED.
+			return true, nil
+		}
+	}
 	events := processHcsResult(ctx, resultJSON)
 	delivered, err := process.processSignalResult(ctx, err)
 	if err != nil {
 		err = makeProcessError(process, operation, err, events)
 	}
+
+	process.killSignalDelivered = delivered
 	return delivered, err
 }
 
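The core of the change above is making Kill idempotent: once a terminate request has been delivered, later calls short-circuit instead of re-issuing TerminateProcess. A stripped-down sketch of the same guard, in plain Go and independent of hcsshim's types:

package main

import "sync"

// proc is a toy stand-in for hcs.Process, keeping only the pieces needed to
// show the killSignalDelivered guard.
type proc struct {
	mu                  sync.Mutex
	killSignalDelivered bool
}

// Kill delivers the terminate request at most once; repeated calls report
// success without touching the process again.
func (p *proc) Kill(terminate func() error) (bool, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.killSignalDelivered {
		return true, nil
	}
	err := terminate()
	delivered := err == nil
	p.killSignalDelivered = delivered
	return delivered, err
}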
@@ -0,0 +1,44 @@
+package winapi
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
+
+// CreatePseudoConsole creates a windows pseudo console.
+func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error {
+	// We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
+	return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon)
+}
+
+// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
+func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error {
+	// We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
+	return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size))))
+}
+
+// HRESULT WINAPI CreatePseudoConsole(
+//     _In_ COORD size,
+//     _In_ HANDLE hInput,
+//     _In_ HANDLE hOutput,
+//     _In_ DWORD dwFlags,
+//     _Out_ HPCON* phPC
+// );
+//
+//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole
+
+// void WINAPI ClosePseudoConsole(
+//     _In_ HPCON hPC
+// );
+//
+//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole
+
+// HRESULT WINAPI ResizePseudoConsole(
+//     _In_ HPCON hPC ,
+//     _In_ COORD size
+// );
+//
+//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
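For context, a hedged sketch of how these wrappers are meant to be driven: two anonymous pipes feed the pseudo console's input and carry its output. It assumes golang.org/x/sys/windows for the pipe and Coord types, and is written as if it lived alongside the wrappers (the winapi package is internal to hcsshim, so outside callers cannot import it directly).

//go:build windows

package winapi

import "golang.org/x/sys/windows"

// newPseudoConsoleSketch is an illustrative sketch, not hcsshim code: it
// creates the two pipes, asks ConPTY for a console of the given size, and
// returns the console handle plus the ends the caller writes to / reads from.
// Cleanup and error unwinding are left out for brevity.
func newPseudoConsoleSketch(cols, rows int16) (hpcon, stdinW, stdoutR windows.Handle, err error) {
	var inR, inW, outR, outW windows.Handle
	if err = windows.CreatePipe(&inR, &inW, nil, 0); err != nil {
		return
	}
	if err = windows.CreatePipe(&outR, &outW, nil, 0); err != nil {
		return
	}
	// The pseudo console reads its input from inR and writes output to outW.
	err = CreatePseudoConsole(windows.Coord{X: cols, Y: rows}, inR, outW, 0, &hpcon)
	// Later: ResizePseudoConsole(hpcon, windows.Coord{X: 120, Y: 40}) and,
	// once finished, ClosePseudoConsole(hpcon).
	return hpcon, inW, outR, err
}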
@@ -2,9 +2,7 @@ package winapi
 
 const PROCESS_ALL_ACCESS uint32 = 2097151
 
-// DWORD GetProcessImageFileNameW(
-//	HANDLE hProcess,
-//	LPWSTR lpImageFileName,
-//	DWORD nSize
-// );
-//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW
+const (
+	PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
+	PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
+)
@@ -2,4 +2,4 @@
 // be thought of as an extension to golang.org/x/sys/windows.
 package winapi
 
-//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
+//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
@@ -37,12 +37,15 @@ func errnoErr(e syscall.Errno) error {
 }
 
 var (
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
 	modntdll = windows.NewLazySystemDLL("ntdll.dll")
 	modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
-	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
 	modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
 	modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
 
+	procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole")
+	procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole")
+	procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole")
 	procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation")
 	procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
 	procSearchPathW = modkernel32.NewProc("SearchPathW")
@@ -58,7 +61,6 @@ var (
 	procLogonUserW = modadvapi32.NewProc("LogonUserW")
 	procLocalAlloc = modkernel32.NewProc("LocalAlloc")
 	procLocalFree = modkernel32.NewProc("LocalFree")
-	procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW")
 	procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
 	procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
 	procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
@@ -71,6 +73,33 @@ var (
 	procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError")
 )
 
+func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) {
+	r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func ClosePseudoConsole(hpc windows.Handle) {
+	syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0)
+	return
+}
+
+func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
+	r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0)
+	if int32(r0) < 0 {
+		if r0&0x1fff0000 == 0x00070000 {
+			r0 &= 0xffff
+		}
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
 func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
 	r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
 	status = uint32(r0)
@@ -227,19 +256,6 @@ func LocalFree(ptr uintptr) {
 	return
 }
 
-func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize))
-	size = uint32(r0)
-	if size == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
 func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
 	r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
 	amount = uint32(r0)
@@ -38,4 +38,13 @@ const (
 
 	// V21H1 corresponds to Windows Server 21H1 (semi-annual channel).
 	V21H1 = 19043
+
+	// V21H2Win10 corresponds to Windows 10 (November 2021 Update).
+	V21H2Win10 = 19044
+
+	// V21H2Server corresponds to Windows Server 2022 (ltsc2022).
+	V21H2Server = 20348
+
+	// V21H2Win11 corresponds to Windows 11 (original release).
+	V21H2Win11 = 22000
 )
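The new build numbers are typically used as thresholds. A small hedged example, assuming the package's existing Build() helper, which reports the running Windows build number:

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/osversion"
)

// atLeastServer2022 reports whether the host is Windows Server 2022 (ltsc2022)
// or newer, using the V21H2Server constant introduced above.
func atLeastServer2022() bool {
	return osversion.Build() >= osversion.V21H2Server
}

func main() {
	fmt.Println("running on WS2022 or newer:", atLeastServer2022())
}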
@@ -13,6 +13,7 @@ import (
 	"github.com/containers/common/libnetwork/internal/util"
 	"github.com/containers/common/libnetwork/types"
 	"github.com/containers/storage/pkg/lockfile"
+	"github.com/containers/storage/pkg/unshare"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -21,6 +22,12 @@ type netavarkNetwork struct {
 	// networkConfigDir is directory where the network config files are stored.
 	networkConfigDir string
 
+	// networkRunDir is where temporary files are stored, i.e.the ipam db, aardvark config etc
+	networkRunDir string
+
+	// tells netavark wheather this is rootless mode or rootfull, "true" or "false"
+	networkRootless bool
+
 	// netavarkBinary is the path to the netavark binary.
 	netavarkBinary string
 
@@ -53,7 +60,7 @@ type InitConfig struct {
 	// NetavarkBinary is the path to the netavark binary.
 	NetavarkBinary string
 
-	// NetworkRunDir is where temporary files are stored, i.e.the ipam db.
+	// NetworkRunDir is where temporary files are stored, i.e.the ipam db, aardvark config
 	NetworkRunDir string
 
 	// DefaultNetwork is the name for the default network.
@@ -99,7 +106,9 @@ func NewNetworkInterface(conf *InitConfig) (types.ContainerNetwork, error) {
 
 	n := &netavarkNetwork{
 		networkConfigDir: conf.NetworkConfigDir,
+		networkRunDir: conf.NetworkRunDir,
 		netavarkBinary: conf.NetavarkBinary,
+		networkRootless: unshare.IsRootless(),
 		ipamDBPath: filepath.Join(conf.NetworkRunDir, "ipam.db"),
 		defaultNetwork: defaultNetworkName,
 		defaultSubnet: defaultNet,
@@ -5,6 +5,7 @@ package netavark
 import (
 	"encoding/json"
 	"fmt"
+	"strconv"
 
 	"github.com/containers/common/libnetwork/internal/util"
 	"github.com/containers/common/libnetwork/types"
@@ -54,7 +55,7 @@ func (n *netavarkNetwork) Setup(namespacePath string, options types.SetupOptions
 	}
 
 	result := map[string]types.StatusBlock{}
-	err = n.execNetavark([]string{"setup", namespacePath}, netavarkOpts, &result)
+	err = n.execNetavark([]string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "setup", namespacePath}, netavarkOpts, &result)
 	if err != nil {
 		// lets dealloc ips to prevent leaking
 		if err := n.deallocIPs(&options.NetworkOptions); err != nil {
@@ -94,7 +95,7 @@ func (n *netavarkNetwork) Teardown(namespacePath string, options types.TeardownO
 		return errors.Wrap(err, "failed to convert net opts")
 	}
 
-	retErr := n.execNetavark([]string{"teardown", namespacePath}, netavarkOpts, nil)
+	retErr := n.execNetavark([]string{"--config", n.networkRunDir, "--rootless=" + strconv.FormatBool(n.networkRootless), "teardown", namespacePath}, netavarkOpts, nil)
 
 	// when netavark returned an error we still free the used ips
 	// otherwise we could end up in a state where block the ips forever
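With this change every netavark invocation is prefixed with --config and --rootless before the subcommand. A small sketch of the resulting argument list; the run directory and namespace path below are illustrative values only:

package main

import (
	"fmt"
	"strconv"
)

// netavarkArgs mirrors how Setup/Teardown now assemble the argv passed to
// execNetavark: the global flags first, then the subcommand and namespace path.
func netavarkArgs(runDir string, rootless bool, subcommand, nsPath string) []string {
	return []string{"--config", runDir, "--rootless=" + strconv.FormatBool(rootless), subcommand, nsPath}
}

func main() {
	fmt.Println(netavarkArgs("/run/user/1000/containers/networks", true, "setup", "/proc/12345/ns/net"))
	// [--config /run/user/1000/containers/networks --rootless=true setup /proc/12345/ns/net]
}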
@@ -72,6 +72,8 @@ type Config struct {
 	Network NetworkConfig `toml:"network"`
 	// Secret section defines configurations for the secret management
 	Secrets SecretConfig `toml:"secrets"`
+	// ConfigMap section defines configurations for the configmaps management
+	ConfigMaps ConfigMapConfig `toml:"configmaps"`
 }
 
 // ContainersConfig represents the "containers" TOML config table
@@ -514,6 +516,17 @@ type SecretConfig struct {
 	Opts map[string]string `toml:"opts,omitempty"`
 }
 
+// ConfigMapConfig represents the "configmap" TOML config table
+type ConfigMapConfig struct {
+	// Driver specifies the configmap driver to use.
+	// Current valid value:
+	// * file
+	// * pass
+	Driver string `toml:"driver,omitempty"`
+	// Opts contains driver specific options
+	Opts map[string]string `toml:"opts,omitempty"`
+}
+
 // MachineConfig represents the "machine" TOML config table
 type MachineConfig struct {
 	// Number of CPU's a machine is created with.
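Once c/common exposes the new table, a consumer reads it like any other section of containers.conf. A hedged sketch using the package's existing Default() loader; the fields printed are exactly the two defined above:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// Load the effective containers.conf (defaults merged with system/user files).
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	// The new [configmaps] table: empty values simply mean the table is unset.
	fmt.Printf("configmap driver=%q opts=%v\n", cfg.ConfigMaps.Driver, cfg.ConfigMaps.Opts)
}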
@@ -14,9 +14,27 @@ import (
 
 // ValidateVolumeOpts validates a volume's options
 func ValidateVolumeOpts(options []string) ([]string, error) {
-	var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown int
+	var foundRootPropagation, foundRWRO, foundLabelChange, bindType, foundExec, foundDev, foundSuid, foundChown, foundUpperDir, foundWorkDir int
 	finalOpts := make([]string, 0, len(options))
 	for _, opt := range options {
+		// support advanced options like upperdir=/path, workdir=/path
+		if strings.Contains(opt, "upperdir") {
+			foundUpperDir++
+			if foundUpperDir > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 upperdir per overlay", strings.Join(options, ", "))
+			}
+			finalOpts = append(finalOpts, opt)
+			continue
+		}
+		if strings.Contains(opt, "workdir") {
+			foundWorkDir++
+			if foundWorkDir > 1 {
+				return nil, errors.Errorf("invalid options %q, can only specify 1 workdir per overlay", strings.Join(options, ", "))
+			}
+			finalOpts = append(finalOpts, opt)
+			continue
+		}
+
 		switch opt {
 		case "noexec", "exec":
 			foundExec++
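The effect of the change is that overlay-style options now pass validation but may appear at most once each. A hedged usage sketch, assuming this file lives under c/common's pkg/parse (the import path is not shown in this extract):

package main

import (
	"fmt"

	"github.com/containers/common/pkg/parse"
)

func main() {
	// Accepted: one upperdir and one workdir alongside ordinary options.
	opts, err := parse.ValidateVolumeOpts([]string{"ro", "upperdir=/tmp/upper", "workdir=/tmp/work"})
	fmt.Println(opts, err)

	// Rejected: a second upperdir on the same overlay.
	_, err = parse.ValidateVolumeOpts([]string{"upperdir=/a", "upperdir=/b"})
	fmt.Println(err)
}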
@@ -1 +1 @@
-1.37.0+dev
+1.38.0
@@ -3,22 +3,22 @@ go 1.14
 module github.com/containers/storage
 
 require (
-	github.com/BurntSushi/toml v0.4.1
+	github.com/BurntSushi/toml v1.0.0
 	github.com/Microsoft/go-winio v0.5.1
-	github.com/Microsoft/hcsshim v0.9.1
+	github.com/Microsoft/hcsshim v0.9.2
 	github.com/containerd/stargz-snapshotter/estargz v0.10.1
 	github.com/cyphar/filepath-securejoin v0.2.3
 	github.com/docker/go-units v0.4.0
 	github.com/google/go-intervals v0.0.2
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/json-iterator/go v1.1.12
-	github.com/klauspost/compress v1.13.6
+	github.com/klauspost/compress v1.14.1
 	github.com/klauspost/pgzip v1.2.5
 	github.com/mattn/go-shellwords v1.0.12
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
 	github.com/moby/sys/mountinfo v0.5.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/runc v1.0.3
+	github.com/opencontainers/runc v1.1.0
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
 	github.com/opencontainers/selinux v1.10.0
 	github.com/pkg/errors v0.9.1
@@ -29,6 +29,6 @@ require (
 	github.com/ulikunitz/xz v0.5.10
 	github.com/vbatts/tar-split v0.11.2
 	golang.org/x/net v0.0.0-20210825183410-e898025ed96a
-	golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359
+	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
 	gotest.tools v2.2.0+incompatible
 )
@@ -36,8 +36,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
 github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
-github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
+github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
@@ -57,8 +57,8 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2
 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
-github.com/Microsoft/hcsshim v0.9.1 h1:VfDCj+QnY19ktX5TsH22JHcjaZ05RWQiwDbOyEg5ziM=
-github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM=
+github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -98,6 +98,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -106,6 +107,7 @@ github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLI
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -131,6 +133,7 @@ github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
 github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
 github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
 github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -296,6 +299,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
 github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
 github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -420,8 +424,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.1 h1:hLQYb23E8/fO+1u53d02A97a8UnsddcvYzq4ERRU4ds=
+github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -515,8 +520,8 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h
 github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
 github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k=
-github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -578,6 +583,7 @@ github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiB
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
 github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -839,8 +845,11 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -0,0 +1,630 @@
package chunked

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"
	"unsafe"

	storage "github.com/containers/storage"
	"github.com/containers/storage/pkg/chunked/internal"
	"github.com/containers/storage/pkg/ioutils"
	jsoniter "github.com/json-iterator/go"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

const (
	cacheKey     = "chunked-manifest-cache"
	cacheVersion = 1
)

type metadata struct {
	tagLen    int
	digestLen int
	tags      []byte
	vdata     []byte
}

type layer struct {
	id       string
	metadata *metadata
	target   string
}

type layersCache struct {
	layers  []layer
	refs    int
	store   storage.Store
	mutex   sync.RWMutex
	created time.Time
}

var cacheMutex sync.Mutex
var cache *layersCache

func (c *layersCache) release() {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()

	c.refs--
	if c.refs == 0 {
		cache = nil
	}
}

func getLayersCacheRef(store storage.Store) *layersCache {
	cacheMutex.Lock()
	defer cacheMutex.Unlock()
	if cache != nil && cache.store == store && time.Since(cache.created).Minutes() < 10 {
		cache.refs++
		return cache
	}
	cache := &layersCache{
		store:   store,
		refs:    1,
		created: time.Now(),
	}
	return cache
}

func getLayersCache(store storage.Store) (*layersCache, error) {
	c := getLayersCacheRef(store)

	if err := c.load(); err != nil {
		c.release()
		return nil, err
	}
	return c, nil
}

func (c *layersCache) load() error {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	allLayers, err := c.store.Layers()
	if err != nil {
		return err
	}
	existingLayers := make(map[string]string)
	for _, r := range c.layers {
		existingLayers[r.id] = r.target
	}

	currentLayers := make(map[string]string)
	for _, r := range allLayers {
		currentLayers[r.ID] = r.ID
		if _, found := existingLayers[r.ID]; found {
			continue
		}

		bigData, err := c.store.LayerBigData(r.ID, cacheKey)
		if err != nil {
			if errors.Cause(err) == os.ErrNotExist {
				continue
			}
			return err
		}
		defer bigData.Close()

		metadata, err := readMetadataFromCache(bigData)
		if err != nil {
			logrus.Warningf("Error reading cache file for layer %q: %v", r.ID, err)
		}

		if metadata != nil {
			c.addLayer(r.ID, metadata)
			continue
		}

		manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
		if err != nil {
			continue
		}
		defer manifestReader.Close()
		manifest, err := ioutil.ReadAll(manifestReader)
		if err != nil {
			return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
		}

		metadata, err = writeCache(manifest, r.ID, c.store)
		if err == nil {
			c.addLayer(r.ID, metadata)
		}
	}

	var newLayers []layer
	for _, l := range c.layers {
		if _, found := currentLayers[l.id]; found {
			newLayers = append(newLayers, l)
		}
	}
	c.layers = newLayers

	return nil
}

// calculateHardLinkFingerprint calculates a hash that can be used to verify if a file
// is usable for deduplication with hardlinks.
// To calculate the digest, it uses the file payload digest, UID, GID, mode and xattrs.
func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
	digester := digest.Canonical.Digester()

	modeString := fmt.Sprintf("%d:%d:%o", f.UID, f.GID, f.Mode)
	hash := digester.Hash()

	if _, err := hash.Write([]byte(f.Digest)); err != nil {
		return "", err
	}

	if _, err := hash.Write([]byte(modeString)); err != nil {
		return "", err
	}

	if len(f.Xattrs) > 0 {
		keys := make([]string, 0, len(f.Xattrs))
		for k := range f.Xattrs {
			keys = append(keys, k)
		}
		sort.Strings(keys)

		for _, k := range keys {
			if _, err := hash.Write([]byte(k)); err != nil {
				return "", err
			}
			if _, err := hash.Write([]byte(f.Xattrs[k])); err != nil {
				return "", err
			}
		}
	}
	return string(digester.Digest()), nil
}

// generateFileLocation generates a file location in the form $OFFSET@$PATH
func generateFileLocation(path string, offset uint64) []byte {
	return []byte(fmt.Sprintf("%d@%s", offset, path))
}

// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
// the [OFFSET; LEN] points to the variable length data where the file locations
// are stored.  $DIGEST has length digestLen stored in the metadata file header.
func generateTag(digest string, offset, len uint64) string {
	return fmt.Sprintf("%s%.20d@%.20d", digest, offset, len)
}

type setBigData interface {
	// SetLayerBigData stores a (possibly large) chunk of named data
	SetLayerBigData(id, key string, data io.Reader) error
}

// writeCache write a cache for the layer ID.
// It generates a sorted list of digests with their offset to the path location and offset.
// The same cache is used to lookup files, chunks and candidates for deduplication with hard links.
// There are 3 kind of digests stored:
// - digest(file.payload))
// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
// - digest(i) for each i in chunks(file payload)
func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) {
	var vdata bytes.Buffer
	tagLen := 0
	digestLen := 0
	var tagsBuffer bytes.Buffer

	toc, err := prepareMetadata(manifest)
	if err != nil {
		return nil, err
	}

	var tags []string
	for _, k := range toc {
		if k.Digest != "" {
			location := generateFileLocation(k.Name, 0)

			off := uint64(vdata.Len())
			l := uint64(len(location))

			d := generateTag(k.Digest, off, l)
			if tagLen == 0 {
				tagLen = len(d)
			}
			if tagLen != len(d) {
				return nil, errors.New("digest with different length found")
			}
			tags = append(tags, d)

			fp, err := calculateHardLinkFingerprint(k)
			if err != nil {
				return nil, err
			}
			d = generateTag(fp, off, l)
			if tagLen != len(d) {
				return nil, errors.New("digest with different length found")
			}
			tags = append(tags, d)

			if _, err := vdata.Write(location); err != nil {
				return nil, err
			}

			digestLen = len(k.Digest)
		}
		if k.ChunkDigest != "" {
			location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
			off := uint64(vdata.Len())
			l := uint64(len(location))
			d := generateTag(k.ChunkDigest, off, l)
			if tagLen == 0 {
				tagLen = len(d)
			}
			if tagLen != len(d) {
				return nil, errors.New("digest with different length found")
			}
			tags = append(tags, d)

			if _, err := vdata.Write(location); err != nil {
				return nil, err
			}
			digestLen = len(k.ChunkDigest)
		}
	}

	sort.Strings(tags)

	for _, t := range tags {
		if _, err := tagsBuffer.Write([]byte(t)); err != nil {
			return nil, err
		}
	}

	pipeReader, pipeWriter := io.Pipe()
	errChan := make(chan error, 1)
	go func() {
		defer pipeWriter.Close()
		defer close(errChan)

		// version
		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(cacheVersion)); err != nil {
			errChan <- err
			return
		}

		// len of a tag
		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagLen)); err != nil {
			errChan <- err
			return
		}

		// len of a digest
		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(digestLen)); err != nil {
			errChan <- err
			return
		}

		// tags length
		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(tagsBuffer.Len())); err != nil {
			errChan <- err
			return
		}

		// vdata length
		if err := binary.Write(pipeWriter, binary.LittleEndian, uint64(vdata.Len())); err != nil {
			errChan <- err
			return
		}

		// tags
		if _, err := pipeWriter.Write(tagsBuffer.Bytes()); err != nil {
			errChan <- err
			return
		}

		// variable length data
		if _, err := pipeWriter.Write(vdata.Bytes()); err != nil {
			errChan <- err
			return
		}

		errChan <- nil
	}()
	defer pipeReader.Close()

	counter := ioutils.NewWriteCounter(ioutil.Discard)

	r := io.TeeReader(pipeReader, counter)

	if err := dest.SetLayerBigData(id, cacheKey, r); err != nil {
		return nil, err
	}

	if err := <-errChan; err != nil {
		return nil, err
	}

	logrus.Debugf("Written lookaside cache for layer %q with length %v", id, counter.Count)

	return &metadata{
		digestLen: digestLen,
		tagLen:    tagLen,
		tags:      tagsBuffer.Bytes(),
		vdata:     vdata.Bytes(),
	}, nil
}

func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
	var version, tagLen, digestLen, tagsLen, vdataLen uint64
	if err := binary.Read(bigData, binary.LittleEndian, &version); err != nil {
		return nil, err
	}
	if version != cacheVersion {
		return nil, nil
	}
	if err := binary.Read(bigData, binary.LittleEndian, &tagLen); err != nil {
		return nil, err
	}
	if err := binary.Read(bigData, binary.LittleEndian, &digestLen); err != nil {
		return nil, err
	}
	if err := binary.Read(bigData, binary.LittleEndian, &tagsLen); err != nil {
		return nil, err
	}
	if err := binary.Read(bigData, binary.LittleEndian, &vdataLen); err != nil {
		return nil, err
	}

	tags := make([]byte, tagsLen)
	if _, err := bigData.Read(tags); err != nil {
		return nil, err
	}

	vdata := make([]byte, vdataLen)
	if _, err := bigData.Read(vdata); err != nil {
		return nil, err
	}

	return &metadata{
		tagLen:    int(tagLen),
		digestLen: int(digestLen),
		tags:      tags,
		vdata:     vdata,
	}, nil
}

func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
	toc, err := unmarshalToc(manifest)
	if err != nil {
		// ignore errors here.  They might be caused by a different manifest format.
		return nil, nil
	}

	var r []*internal.FileMetadata
	chunkSeen := make(map[string]bool)
	for i := range toc.Entries {
		d := toc.Entries[i].Digest
		if d != "" {
			r = append(r, &toc.Entries[i])
			continue
		}

		// chunks do not use hard link dedup so keeping just one candidate is enough
		cd := toc.Entries[i].ChunkDigest
		if cd != "" && !chunkSeen[cd] {
			r = append(r, &toc.Entries[i])
			chunkSeen[cd] = true
		}
	}
	return r, nil
}

func (c *layersCache) addLayer(id string, metadata *metadata) error {
	target, err := c.store.DifferTarget(id)
	if err != nil {
		return fmt.Errorf("get checkout directory layer %q: %w", id, err)
	}

	l := layer{
		id:       id,
		metadata: metadata,
		target:   target,
	}
	c.layers = append(c.layers, l)
	return nil
}

func byteSliceAsString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

func findTag(digest string, metadata *metadata) (string, uint64, uint64) {
	if len(digest) != metadata.digestLen {
		return "", 0, 0
	}

	nElements := len(metadata.tags) / metadata.tagLen

	i := sort.Search(nElements, func(i int) bool {
		d := byteSliceAsString(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+metadata.digestLen])
		return strings.Compare(d, digest) >= 0
	})
	if i < nElements {
		d := string(metadata.tags[i*metadata.tagLen : i*metadata.tagLen+len(digest)])
		if digest == d {
			startOff := i*metadata.tagLen + metadata.digestLen
			parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@")
			off, _ := strconv.ParseInt(parts[0], 10, 64)
			len, _ := strconv.ParseInt(parts[1], 10, 64)
			return digest, uint64(off), uint64(len)
		}
	}
	return "", 0, 0
}

func (c *layersCache) findDigestInternal(digest string) (string, string, int64, error) {
	if digest == "" {
		return "", "", -1, nil
	}

	c.mutex.RLock()
	defer c.mutex.RUnlock()

	for _, layer := range c.layers {
		digest, off, len := findTag(digest, layer.metadata)
		if digest != "" {
			position := string(layer.metadata.vdata[off : off+len])
			parts := strings.SplitN(position, "@", 2)
			offFile, _ := strconv.ParseInt(parts[0], 10, 64)
			return layer.target, parts[1], offFile, nil
		}
	}

	return "", "", -1, nil
}

// findFileInOtherLayers finds the specified file in other layers.
// file is the file to look for.
func (c *layersCache) findFileInOtherLayers(file *internal.FileMetadata, useHardLinks bool) (string, string, error) {
	digest := file.Digest
	if useHardLinks {
		var err error
		digest, err = calculateHardLinkFingerprint(file)
		if err != nil {
			return "", "", err
		}
	}
	target, name, off, err := c.findDigestInternal(digest)
	if off == 0 {
		return target, name, err
	}
	return "", "", nil
}

func (c *layersCache) findChunkInOtherLayers(chunk *internal.FileMetadata) (string, string, int64, error) {
	return c.findDigestInternal(chunk.ChunkDigest)
}

func unmarshalToc(manifest []byte) (*internal.TOC, error) {
	var buf bytes.Buffer
	count := 0
	var toc internal.TOC

	iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		if field != "entries" {
			iter.Skip()
			continue
		}
		for iter.ReadArray() {
			for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
				switch field {
				case "type", "name", "linkName", "digest", "chunkDigest", "chunkType":
					count += len(iter.ReadStringAsSlice())
				case "xattrs":
					for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
						count += len(iter.ReadStringAsSlice())
					}
				default:
					iter.Skip()
				}
			}
		}
		break
	}

	buf.Grow(count)

	getString := func(b []byte) string {
		from := buf.Len()
		buf.Write(b)
		to := buf.Len()
		return byteSliceAsString(buf.Bytes()[from:to])
	}

	iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		if field == "version" {
			toc.Version = iter.ReadInt()
			continue
		}
		if field != "entries" {
			iter.Skip()
			continue
		}
		for iter.ReadArray() {
			var m internal.FileMetadata
			for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
				switch field {
				case "type":
					m.Type = getString(iter.ReadStringAsSlice())
				case "name":
					m.Name = getString(iter.ReadStringAsSlice())
				case "linkName":
					m.Linkname = getString(iter.ReadStringAsSlice())
				case "mode":
					m.Mode = iter.ReadInt64()
				case "size":
					m.Size = iter.ReadInt64()
				case "UID":
					m.UID = iter.ReadInt()
				case "GID":
					m.GID = iter.ReadInt()
				case "ModTime":
					time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
					if err != nil {
						return nil, err
					}
					m.ModTime = &time
				case "accesstime":
					time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
					if err != nil {
						return nil, err
					}
					m.AccessTime = &time
				case "changetime":
					time, err := time.Parse(time.RFC3339, byteSliceAsString(iter.ReadStringAsSlice()))
					if err != nil {
						return nil, err
					}
					m.ChangeTime = &time
				case "devMajor":
					m.Devmajor = iter.ReadInt64()
				case "devMinor":
					m.Devminor = iter.ReadInt64()
				case "digest":
					m.Digest = getString(iter.ReadStringAsSlice())
				case "offset":
					m.Offset = iter.ReadInt64()
				case "endOffset":
					m.EndOffset = iter.ReadInt64()
				case "chunkSize":
					m.ChunkSize = iter.ReadInt64()
				case "chunkOffset":
					m.ChunkOffset = iter.ReadInt64()
				case "chunkDigest":
					m.ChunkDigest = getString(iter.ReadStringAsSlice())
				case "chunkType":
					m.ChunkType = getString(iter.ReadStringAsSlice())
				case "xattrs":
					m.Xattrs = make(map[string]string)
					for key := iter.ReadObject(); key != ""; key = iter.ReadObject() {
						value := iter.ReadStringAsSlice()
						m.Xattrs[key] = getString(value)
					}
				default:
					iter.Skip()
				}
			}
			toc.Entries = append(toc.Entries, m)
		}
		break
	}
	toc.StringsBuf = buf
	return &toc, nil
}
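A minimal usage sketch (not part of the diff, and assuming it lives in the same package as the file above): this is the intended call sequence for consulting the new lookaside cache when deduplicating a file against layers that are already present in the store.

func findCandidate(store storage.Store, file *internal.FileMetadata) (string, string, error) {
	// Load (or refresh) the per-store cache; release drops our reference.
	c, err := getLayersCache(store)
	if err != nil {
		return "", "", err
	}
	defer c.release()

	// Look the file up by payload digest (pass true to match on the
	// hard-link fingerprint instead).
	target, name, err := c.findFileInOtherLayers(file, false)
	if err != nil || target == "" {
		return "", "", err
	}
	// target is the checkout directory of the owning layer, name the path inside it.
	return target, name, nil
}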
|
|
@@ -5,6 +5,7 @@ package compressor
 // larger software like the graph drivers.
 
 import (
+	"bufio"
 	"encoding/base64"
 	"io"
 	"io/ioutil"
@@ -15,6 +16,189 @@ import (
 	"github.com/vbatts/tar-split/archive/tar"
 )
 
+const RollsumBits = 16
+const holesThreshold = int64(1 << 10)
+
+type holesFinder struct {
+	reader    *bufio.Reader
+	fileOff   int64
+	zeros     int64
+	from      int64
+	threshold int64
+
+	state int
+}
+
+const (
+	holesFinderStateRead = iota
+	holesFinderStateAccumulate
+	holesFinderStateFound
+	holesFinderStateEOF
+)
+
+// ReadByte reads a single byte from the underlying reader.
+// If a single byte is read, the return value is (0, RAW-BYTE-VALUE, nil).
+// If there are at least f.THRESHOLD consecutive zeros, then the
+// return value is (N_CONSECUTIVE_ZEROS, '\x00').
+func (f *holesFinder) ReadByte() (int64, byte, error) {
+	for {
+		switch f.state {
+		// reading the file stream
+		case holesFinderStateRead:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				return 0, b, err
+			}
+
+			if b != 0 {
+				return 0, b, err
+			}
+
+			f.zeros = 1
+			if f.zeros == f.threshold {
+				f.state = holesFinderStateFound
+			} else {
+				f.state = holesFinderStateAccumulate
+			}
+		// accumulating zeros, but still didn't reach the threshold
+		case holesFinderStateAccumulate:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+					continue
+				}
+				return 0, b, err
+			}
+
+			if b == 0 {
+				f.zeros++
+				if f.zeros == f.threshold {
+					f.state = holesFinderStateFound
+				}
+			} else {
+				if f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+			}
+		// found a hole.  Number of zeros >= threshold
+		case holesFinderStateFound:
+			b, err := f.reader.ReadByte()
+			if err != nil {
+				if err == io.EOF {
+					f.state = holesFinderStateEOF
+				}
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			if b != 0 {
+				if f.reader.UnreadByte(); err != nil {
+					return 0, 0, err
+				}
+				f.state = holesFinderStateRead
+
+				holeLen := f.zeros
+				f.zeros = 0
+				return holeLen, 0, nil
+			}
+			f.zeros++
+		// reached EOF.  Flush pending zeros if any.
+		case holesFinderStateEOF:
+			if f.zeros > 0 {
+				f.zeros--
+				return 0, 0, nil
+			}
+			return 0, 0, io.EOF
+		}
+	}
+}
+
+type rollingChecksumReader struct {
+	reader      *holesFinder
+	closed      bool
+	rollsum     *RollSum
+	pendingHole int64
+
+	// WrittenOut is the total number of bytes read from
+	// the stream.
+	WrittenOut int64
+
+	// IsLastChunkZeros tells whether the last generated
+	// chunk is a hole (made of consecutive zeros).  If it
+	// is false, then the last chunk is a data chunk
+	// generated by the rolling checksum.
+	IsLastChunkZeros bool
+}
+
+func (rc *rollingChecksumReader) Read(b []byte) (bool, int, error) {
+	rc.IsLastChunkZeros = false
+
+	if rc.pendingHole > 0 {
+		toCopy := int64(len(b))
+		if rc.pendingHole < toCopy {
+			toCopy = rc.pendingHole
+		}
+		rc.pendingHole -= toCopy
+		for i := int64(0); i < toCopy; i++ {
+			b[i] = 0
+		}
+
+		rc.WrittenOut += toCopy
+
+		rc.IsLastChunkZeros = true
+
+		// if there are no other zeros left, terminate the chunk
+		return rc.pendingHole == 0, int(toCopy), nil
+	}
+
+	if rc.closed {
+		return false, 0, io.EOF
+	}
+
+	for i := 0; i < len(b); i++ {
+		holeLen, n, err := rc.reader.ReadByte()
+		if err != nil {
+			if err == io.EOF {
+				rc.closed = true
+				if i == 0 {
+					return false, 0, err
+				}
+				return false, i, nil
+			}
+			// Report any other error type
+			return false, -1, err
+		}
+		if holeLen > 0 {
+			for j := int64(0); j < holeLen; j++ {
+				rc.rollsum.Roll(0)
+			}
+			rc.pendingHole = holeLen
+			return true, i, nil
+		}
+		b[i] = n
+		rc.WrittenOut++
+		rc.rollsum.Roll(n)
+		if rc.rollsum.OnSplitWithBits(RollsumBits) {
+			return true, i + 1, nil
+		}
+	}
+	return false, len(b), nil
+}
+
+type chunk struct {
+	ChunkOffset int64
+	Offset      int64
+	Checksum    string
+	ChunkSize   int64
+	ChunkType   string
+}
+
 func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, reader io.Reader, level int) error {
 	// total written so far.  Used to retrieve partial offsets in the file
 	dest := ioutils.NewWriteCounter(destFile)
@@ -64,40 +248,78 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 		if _, err := zstdWriter.Write(rawBytes); err != nil {
 			return err
 		}
-		payloadDigester := digest.Canonical.Digester()
-		payloadChecksum := payloadDigester.Hash()
-
-		payloadDest := io.MultiWriter(payloadChecksum, zstdWriter)
+		payloadDigester := digest.Canonical.Digester()
+		chunkDigester := digest.Canonical.Digester()
 
 		// Now handle the payload, if any
-		var startOffset, endOffset int64
+		startOffset := int64(0)
+		lastOffset := int64(0)
+		lastChunkOffset := int64(0)
+
 		checksum := ""
+
+		chunks := []chunk{}
+
+		hf := &holesFinder{
+			threshold: holesThreshold,
+			reader:    bufio.NewReader(tr),
+		}
+
+		rcReader := &rollingChecksumReader{
+			reader:  hf,
+			rollsum: NewRollSum(),
+		}
+
+		payloadDest := io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
 		for {
-			read, errRead := tr.Read(buf)
+			mustSplit, read, errRead := rcReader.Read(buf)
 			if errRead != nil && errRead != io.EOF {
 				return err
 			}
-			// restart the compression only if there is
-			// a payload.
+			// restart the compression only if there is a payload.
 			if read > 0 {
 				if startOffset == 0 {
 					startOffset, err = restartCompression()
 					if err != nil {
 						return err
 					}
+					lastOffset = startOffset
 				}
-				_, err := payloadDest.Write(buf[:read])
-				if err != nil {
+
+				if _, err := payloadDest.Write(buf[:read]); err != nil {
 					return err
 				}
 			}
+			if (mustSplit || errRead == io.EOF) && startOffset > 0 {
+				off, err := restartCompression()
+				if err != nil {
+					return err
+				}
+
+				chunkSize := rcReader.WrittenOut - lastChunkOffset
+				if chunkSize > 0 {
+					chunkType := internal.ChunkTypeData
+					if rcReader.IsLastChunkZeros {
+						chunkType = internal.ChunkTypeZeros
+					}
+
+					chunks = append(chunks, chunk{
+						ChunkOffset: lastChunkOffset,
+						Offset:      lastOffset,
+						Checksum:    chunkDigester.Digest().String(),
+						ChunkSize:   chunkSize,
+						ChunkType:   chunkType,
+					})
+				}
+
+				lastOffset = off
+				lastChunkOffset = rcReader.WrittenOut
+				chunkDigester = digest.Canonical.Digester()
+				payloadDest = io.MultiWriter(payloadDigester.Hash(), chunkDigester.Hash(), zstdWriter)
+			}
 			if errRead == io.EOF {
 				if startOffset > 0 {
-					endOffset, err = restartCompression()
-					if err != nil {
-						return err
-					}
 					checksum = payloadDigester.Digest().String()
 				}
 				break
@@ -112,30 +334,42 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
 		for k, v := range hdr.Xattrs {
 			xattrs[k] = base64.StdEncoding.EncodeToString([]byte(v))
 		}
-		m := internal.FileMetadata{
-			Type:       typ,
-			Name:       hdr.Name,
-			Linkname:   hdr.Linkname,
-			Mode:       hdr.Mode,
-			Size:       hdr.Size,
-			UID:        hdr.Uid,
-			GID:        hdr.Gid,
-			ModTime:    hdr.ModTime,
-			AccessTime: hdr.AccessTime,
-			ChangeTime: hdr.ChangeTime,
-			Devmajor:   hdr.Devmajor,
-			Devminor:   hdr.Devminor,
-			Xattrs:     xattrs,
-			Digest:     checksum,
-			Offset:     startOffset,
-			EndOffset:  endOffset,
-
-			// ChunkSize is 0 for the last chunk
-			ChunkSize:   0,
-			ChunkOffset: 0,
-			ChunkDigest: checksum,
-		}
-		metadata = append(metadata, m)
+		entries := []internal.FileMetadata{
+			{
+				Type:       typ,
+				Name:       hdr.Name,
+				Linkname:   hdr.Linkname,
+				Mode:       hdr.Mode,
+				Size:       hdr.Size,
+				UID:        hdr.Uid,
+				GID:        hdr.Gid,
+				ModTime:    &hdr.ModTime,
+				AccessTime: &hdr.AccessTime,
+				ChangeTime: &hdr.ChangeTime,
+				Devmajor:   hdr.Devmajor,
+				Devminor:   hdr.Devminor,
+				Xattrs:     xattrs,
+				Digest:     checksum,
+				Offset:     startOffset,
+				EndOffset:  lastOffset,
+			},
+		}
+		for i := 1; i < len(chunks); i++ {
+			entries = append(entries, internal.FileMetadata{
+				Type:        internal.TypeChunk,
+				Name:        hdr.Name,
+				ChunkOffset: chunks[i].ChunkOffset,
+			})
+		}
+		if len(chunks) > 1 {
+			for i := range chunks {
+				entries[i].ChunkSize = chunks[i].ChunkSize
+				entries[i].Offset = chunks[i].Offset
+				entries[i].ChunkDigest = chunks[i].Checksum
+				entries[i].ChunkType = chunks[i].ChunkType
+			}
+		}
+		metadata = append(metadata, entries...)
 	}
 
 	rawBytes := tr.RawBytes()
@@ -212,7 +446,7 @@ func zstdChunkedWriterWithLevel(out io.Writer, metadata map[string]string, level
 // ZstdCompressor is a CompressorFunc for the zstd compression algorithm.
 func ZstdCompressor(r io.Writer, metadata map[string]string, level *int) (io.WriteCloser, error) {
 	if level == nil {
-		l := 3
+		l := 10
 		level = &l
 	}
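A minimal sketch (not from the diff, and assuming it sits in the same package as the compressor code above) of how the two new readers compose: holesFinder collapses long runs of zeros into holes, and rollingChecksumReader asks the rolling checksum where to cut chunks, exactly the way writeZstdChunkedStream drives them.

// countSplits drives rollingChecksumReader over an arbitrary stream and counts
// how many forced chunk boundaries (data or zero chunks) it would produce.
func countSplits(r io.Reader) (int, error) {
	hf := &holesFinder{threshold: holesThreshold, reader: bufio.NewReader(r)}
	rc := &rollingChecksumReader{reader: hf, rollsum: NewRollSum()}

	buf := make([]byte, 32*1024)
	splits := 0
	for {
		mustSplit, _, err := rc.Read(buf)
		if err != nil {
			if err == io.EOF {
				return splits, nil
			}
			return splits, err
		}
		if mustSplit {
			splits++
		}
	}
}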
|
|
81  vendor/github.com/containers/storage/pkg/chunked/compressor/rollsum.go  generated  vendored  Normal file
@@ -0,0 +1,81 @@
/*
Copyright 2011 The Perkeep Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package rollsum implements rolling checksums similar to apenwarr's bup, which
// is similar to librsync.
//
// The bup project is at https://github.com/apenwarr/bup and its splitting in
// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c
package compressor

import (
	"math/bits"
)

const windowSize = 64 // Roll assumes windowSize is a power of 2
const charOffset = 31

const blobBits = 13
const blobSize = 1 << blobBits // 8k

type RollSum struct {
	s1, s2 uint32
	window [windowSize]uint8
	wofs   int
}

func NewRollSum() *RollSum {
	return &RollSum{
		s1: windowSize * charOffset,
		s2: windowSize * (windowSize - 1) * charOffset,
	}
}

func (rs *RollSum) add(drop, add uint32) {
	s1 := rs.s1 + add - drop
	rs.s1 = s1
	rs.s2 += s1 - uint32(windowSize)*(drop+charOffset)
}

// Roll adds ch to the rolling sum.
func (rs *RollSum) Roll(ch byte) {
	wp := &rs.window[rs.wofs]
	rs.add(uint32(*wp), uint32(ch))
	*wp = ch
	rs.wofs = (rs.wofs + 1) & (windowSize - 1)
}

// OnSplit reports whether at least 13 consecutive trailing bits of
// the current checksum are set the same way.
func (rs *RollSum) OnSplit() bool {
	return (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1))
}

// OnSplitWithBits reports whether at least n consecutive trailing bits
// of the current checksum are set the same way.
func (rs *RollSum) OnSplitWithBits(n uint32) bool {
	mask := (uint32(1) << n) - 1
	return rs.s2&mask == (^uint32(0))&mask
}

func (rs *RollSum) Bits() int {
	rsum := rs.Digest() >> (blobBits + 1)
	return blobBits + bits.TrailingZeros32(^rsum)
}

func (rs *RollSum) Digest() uint32 {
	return (rs.s1 << 16) | (rs.s2 & 0xffff)
}
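For orientation (not part of the vendored file, same package assumed): content-defined chunking with this checksum amounts to rolling every byte through RollSum and cutting whenever OnSplitWithBits fires; with RollsumBits = 16 the expected chunk size is roughly 64 KiB.

// splitPoints returns the byte offsets after which a new chunk would start.
func splitPoints(data []byte) []int {
	rs := NewRollSum()
	var cuts []int
	for i, b := range data {
		rs.Roll(b)
		if rs.OnSplitWithBits(RollsumBits) {
			cuts = append(cuts, i+1) // chunk boundary after this byte
		}
	}
	return cuts
}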
|
|
@@ -8,11 +8,11 @@ import (
 	"archive/tar"
 	"bytes"
 	"encoding/binary"
-	"encoding/json"
 	"fmt"
 	"io"
 	"time"
 
+	jsoniter "github.com/json-iterator/go"
 	"github.com/klauspost/compress/zstd"
 	"github.com/opencontainers/go-digest"
 )
@@ -20,6 +20,9 @@ import (
 type TOC struct {
 	Version int            `json:"version"`
 	Entries []FileMetadata `json:"entries"`
+
+	// internal: used by unmarshalToc
+	StringsBuf bytes.Buffer `json:"-"`
 }
 
 type FileMetadata struct {
@@ -27,25 +30,33 @@ type FileMetadata struct {
 	Name     string `json:"name"`
 	Linkname string `json:"linkName,omitempty"`
 	Mode     int64  `json:"mode,omitempty"`
-	Size     int64  `json:"size"`
-	UID      int    `json:"uid"`
-	GID      int    `json:"gid"`
-	ModTime    time.Time `json:"modtime"`
-	AccessTime time.Time `json:"accesstime"`
-	ChangeTime time.Time `json:"changetime"`
-	Devmajor   int64     `json:"devMajor"`
-	Devminor   int64     `json:"devMinor"`
+	Size     int64  `json:"size,omitempty"`
+	UID      int    `json:"uid,omitempty"`
+	GID      int    `json:"gid,omitempty"`
+	ModTime    *time.Time `json:"modtime,omitempty"`
+	AccessTime *time.Time `json:"accesstime,omitempty"`
+	ChangeTime *time.Time `json:"changetime,omitempty"`
+	Devmajor   int64      `json:"devMajor,omitempty"`
+	Devminor   int64      `json:"devMinor,omitempty"`
 	Xattrs    map[string]string `json:"xattrs,omitempty"`
 	Digest    string            `json:"digest,omitempty"`
 	Offset    int64             `json:"offset,omitempty"`
 	EndOffset int64             `json:"endOffset,omitempty"`
 
-	// Currently chunking is not supported.
 	ChunkSize   int64  `json:"chunkSize,omitempty"`
 	ChunkOffset int64  `json:"chunkOffset,omitempty"`
 	ChunkDigest string `json:"chunkDigest,omitempty"`
+	ChunkType   string `json:"chunkType,omitempty"`
+
+	// internal: computed by mergeTOCEntries.
+	Chunks []*FileMetadata `json:"-"`
 }
 
+const (
+	ChunkTypeData  = ""
+	ChunkTypeZeros = "zeros"
+)
+
 const (
 	TypeReg   = "reg"
 	TypeChunk = "chunk"
@@ -123,6 +134,7 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off
 		Entries: metadata,
 	}
 
+	var json = jsoniter.ConfigCompatibleWithStandardLibrary
 	// Generate the manifest
 	manifest, err := json.Marshal(toc)
 	if err != nil {

File diff suppressed because it is too large
|
@@ -82,7 +82,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
 	if len(uidMap) == 1 && uidMap[0].Size == 1 {
 		uid = uidMap[0].HostID
 	} else {
-		uid, err = toHost(0, uidMap)
+		uid, err = RawToHost(0, uidMap)
 		if err != nil {
 			return -1, -1, err
 		}
@@ -90,7 +90,7 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
 	if len(gidMap) == 1 && gidMap[0].Size == 1 {
 		gid = gidMap[0].HostID
 	} else {
-		gid, err = toHost(0, gidMap)
+		gid, err = RawToHost(0, gidMap)
 		if err != nil {
 			return -1, -1, err
 		}
@@ -98,10 +98,14 @@ func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
 	return uid, gid, nil
 }
 
-// toContainer takes an id mapping, and uses it to translate a
-// host ID to the remapped ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id
-func toContainer(hostID int, idMap []IDMap) (int, error) {
+// RawToContainer takes an id mapping, and uses it to translate a host ID to
+// the remapped ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToContainer(hostID int, idMap []IDMap) (int, error) {
 	if idMap == nil {
 		return hostID, nil
 	}
@@ -114,10 +118,14 @@ func toContainer(hostID int, idMap []IDMap) (int, error) {
 	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
 }
 
-// toHost takes an id mapping and a remapped ID, and translates the
-// ID to the mapped host ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id #
-func toHost(contID int, idMap []IDMap) (int, error) {
+// RawToHost takes an id mapping and a remapped ID, and translates the ID to
+// the mapped host ID. If no map is provided, then the translation assumes a
+// 1-to-1 mapping and returns the passed in id.
+//
+// If you wish to map a (uid,gid) combination you should use the corresponding
+// IDMappings methods, which ensure that you are mapping the correct ID against
+// the correct mapping.
+func RawToHost(contID int, idMap []IDMap) (int, error) {
 	if idMap == nil {
 		return contID, nil
 	}
@@ -187,22 +195,22 @@ func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
 	var err error
 	var target IDPair
 
-	target.UID, err = toHost(pair.UID, i.uids)
+	target.UID, err = RawToHost(pair.UID, i.uids)
 	if err != nil {
 		return target, err
 	}
 
-	target.GID, err = toHost(pair.GID, i.gids)
+	target.GID, err = RawToHost(pair.GID, i.gids)
 	return target, err
 }
 
 // ToContainer returns the container UID and GID for the host uid and gid
 func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
-	uid, err := toContainer(pair.UID, i.uids)
+	uid, err := RawToContainer(pair.UID, i.uids)
 	if err != nil {
 		return -1, -1, err
 	}
-	gid, err := toContainer(pair.GID, i.gids)
+	gid, err := RawToContainer(pair.GID, i.gids)
 	return uid, gid, err
 }
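A small usage sketch of the newly exported helpers (not from the diff; the mapping values are hypothetical, in the style of a rootless user namespace):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Hypothetical mapping: container IDs 0-65535 start at host ID 100000.
	uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	// Container root (UID 0) translated to its host UID.
	hostUID, err := idtools.RawToHost(0, uidMap)
	if err != nil {
		panic(err)
	}
	fmt.Println(hostUID) // 100000 for this mapping
}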
|
|
@@ -12,10 +12,14 @@ import (
 #cgo LDFLAGS: -l subid
 #include <shadow/subid.h>
 #include <stdlib.h>
+#include <stdio.h>
 const char *Prog = "storage";
+FILE *shadow_logfd = NULL;
 
 struct subid_range get_range(struct subid_range *ranges, int i)
 {
-	return ranges[i];
+	shadow_logfd = stderr;
+	return ranges[i];
 }
 
 #if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4)
|
|
@@ -647,17 +647,21 @@ func GetStore(options types.StoreOptions) (Store, error) {
 	storesLock.Lock()
 	defer storesLock.Unlock()
 
+	// return if BOTH run and graph root are matched, otherwise our run-root can be overriden if the graph is found first
 	for _, s := range stores {
-		if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
+		if (s.graphRoot == options.GraphRoot) && (s.runRoot == options.RunRoot) && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
 			return s, nil
 		}
 	}
 
-	if options.GraphRoot == "" {
-		return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified")
-	}
-	if options.RunRoot == "" {
-		return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
+	// if passed a run-root or graph-root alone, the other should be defaulted only error if we have neither.
+	switch {
+	case options.RunRoot == "" && options.GraphRoot == "":
+		return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot or graphroot specified")
+	case options.GraphRoot == "":
+		options.GraphRoot = types.Options().GraphRoot
+	case options.RunRoot == "":
+		options.RunRoot = types.Options().RunRoot
 	}
 
 	if err := os.MkdirAll(options.RunRoot, 0700); err != nil {
@@ -2497,23 +2501,29 @@ func (s *store) DeleteContainer(id string) error {
 			gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
 			wg.Add(1)
 			go func() {
-				var err error
-				for attempts := 0; attempts < 50; attempts++ {
-					err = os.RemoveAll(gcpath)
-					if err == nil || !system.IsEBUSY(err) {
-						break
-					}
-					time.Sleep(time.Millisecond * 100)
+				defer wg.Done()
+				// attempt a simple rm -rf first
+				err := os.RemoveAll(gcpath)
+				if err == nil {
+					errChan <- nil
+					return
 				}
-				errChan <- err
-				wg.Done()
+				// and if it fails get to the more complicated cleanup
+				errChan <- system.EnsureRemoveAll(gcpath)
 			}()
 
 			rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
 			wg.Add(1)
 			go func() {
-				errChan <- os.RemoveAll(rcpath)
-				wg.Done()
+				defer wg.Done()
+				// attempt a simple rm -rf first
+				err := os.RemoveAll(rcpath)
+				if err == nil {
+					errChan <- nil
+					return
+				}
+				// and if it fails get to the more complicated cleanup
+				errChan <- system.EnsureRemoveAll(rcpath)
 			}()
 
 			go func() {
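A sketch of what the GetStore change above means for callers (not from the diff; the path is a hypothetical example): passing only one of the two roots no longer fails, because the missing one is now filled in from the built-in defaults.

package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/types"
)

func main() {
	// Hypothetical: only the graph root is given; RunRoot is defaulted from
	// types.Options() instead of returning ErrIncompleteOptions.
	store, err := storage.GetStore(types.StoreOptions{
		GraphRoot: "/var/lib/containers/storage",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(store.RunRoot())
}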
|
|
@@ -17,7 +17,7 @@ import (
 )
 
 // TOML-friendly explicit tables used for conversions.
-type tomlConfig struct {
+type TomlConfig struct {
 	Storage struct {
 		Driver  string `toml:"driver"`
 		RunRoot string `toml:"runroot"`
@@ -306,7 +306,7 @@ func ReloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptio
 // ReloadConfigurationFile parses the specified configuration file and overrides
 // the configuration in storeOptions.
 func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
-	config := new(tomlConfig)
+	config := new(TomlConfig)
 
 	meta, err := toml.DecodeFile(configFile, &config)
 	if err == nil {
@@ -424,3 +424,38 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
 func Options() StoreOptions {
 	return defaultStoreOptions
 }
+
+// Save overwrites the tomlConfig in storage.conf with the given conf
+func Save(conf TomlConfig, rootless bool) error {
+	configFile, err := DefaultConfigFile(rootless)
+	if err != nil {
+		return err
+	}
+	if err = os.Remove(configFile); !os.IsNotExist(err) {
+		return err
+	}
+
+	f, err := os.Open(configFile)
+	if err != nil {
+		return err
+	}
+
+	return toml.NewEncoder(f).Encode(conf)
+}
+
+// StorageConfig is used to retreive the storage.conf toml in order to overwrite it
+func StorageConfig(rootless bool) (*TomlConfig, error) {
+	config := new(TomlConfig)
+
+	configFile, err := DefaultConfigFile(rootless)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = toml.DecodeFile(configFile, &config)
+	if err != nil {
+		return nil, err
+	}
+
+	return config, nil
+}
|
|
@@ -3,6 +3,7 @@
 before:
   hooks:
     - ./gen.sh
+    - go install mvdan.cc/garble@latest
 
 builds:
   -
@@ -31,6 +32,7 @@ builds:
       - mips64le
     goarm:
      - 7
+    gobinary: garble
   -
     id: "s2d"
    binary: s2d
@@ -57,6 +59,7 @@ builds:
      - mips64le
    goarm:
      - 7
+    gobinary: garble
  -
    id: "s2sx"
    binary: s2sx
@@ -84,6 +87,7 @@ builds:
      - mips64le
    goarm:
      - 7
+    gobinary: garble
 
 archives:
   -
|
|
@@ -17,6 +17,13 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Jan 11, 2022 (v1.14.1)
+	* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
+	* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
+	* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
+	* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
+	* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+
 * Aug 30, 2021 (v1.13.5)
 	* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425)
 	* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413)
@@ -432,6 +439,13 @@ For more information see my blog post on [Fast Linear Time Compression](http://b
 
 This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
 
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+
 # license
|
|
@ -6,9 +6,13 @@
|
||||||
package flate
|
package flate
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
|
"math/bits"
|
||||||
|
|
||||||
|
comp "github.com/klauspost/compress"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@@ -37,15 +41,17 @@ const (
 	maxMatchLength = 258 // The longest match for the compressor
 	minOffsetSize  = 1   // The shortest offset that makes any sense

-	// The maximum number of tokens we put into a single flat block, just too
-	// stop things from getting too large.
-	maxFlateBlockTokens = 1 << 14
+	// The maximum number of tokens we will encode at the time.
+	// Smaller sizes usually creates less optimal blocks.
+	// Bigger can make context switching slow.
+	// We use this for levels 7-9, so we make it big.
+	maxFlateBlockTokens = 1 << 15
 	maxStoreBlockSize   = 65535
 	hashBits            = 17 // After 17 performance degrades
 	hashSize            = 1 << hashBits
 	hashMask            = (1 << hashBits) - 1
 	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
-	maxHashOffset       = 1 << 24
+	maxHashOffset       = 1 << 28

 	skipNever = math.MaxInt32

@@ -70,9 +76,9 @@ var levels = []compressionLevel{
 	{0, 0, 0, 0, 0, 6},
 	// Levels 7-9 use increasingly more lazy matching
 	// and increasingly stringent conditions for "good enough".
-	{8, 8, 24, 16, skipNever, 7},
-	{10, 16, 24, 64, skipNever, 8},
-	{32, 258, 258, 4096, skipNever, 9},
+	{6, 10, 12, 16, skipNever, 7},
+	{10, 24, 32, 64, skipNever, 8},
+	{32, 258, 258, 1024, skipNever, 9},
 }

 // advancedState contains state for the advanced levels, with bigger hash tables, etc.

@@ -93,8 +99,9 @@ type advancedState struct {
 	hashOffset int

 	// input window: unprocessed data is window[index:windowEnd]
 	index          int
+	estBitsPerByte int
 	hashMatch      [maxMatchLength + minMatchLength]uint32

 	hash uint32
 	ii   uint16 // position of last match, intended to overflow to reset.
@@ -170,7 +177,8 @@ func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
 			window = d.window[d.blockStart:index]
 		}
 		d.blockStart = index
-		d.w.writeBlock(tok, eof, window)
+		//d.w.writeBlock(tok, eof, window)
+		d.w.writeBlockDynamic(tok, eof, window, d.sync)
 		return d.w.err
 	}
 	return nil

@@ -263,7 +271,7 @@ func (d *compressor) fillWindow(b []byte) {
 // Try to find a match starting at index whose length is greater than prevSize.
 // We only look at chainCount possibilities before giving up.
 // pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
-func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (length, offset int, ok bool) {
 	minMatchLook := maxMatchLength
 	if lookahead < minMatchLook {
 		minMatchLook = lookahead
@@ -279,36 +287,43 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead, bpb int) (length, offset int, ok bool) {
 	// If we've got a match that's good enough, only look in 1/4 the chain.
 	tries := d.chain
-	length = prevLength
-	if length >= d.good {
-		tries >>= 2
-	}
+	length = minMatchLength - 1

 	wEnd := win[pos+length]
 	wPos := win[pos:]
 	minIndex := pos - windowSize
+	if minIndex < 0 {
+		minIndex = 0
+	}
+	offset = 0
+
+	// Base is 4 bytes at with an additional cost.
+	// Matches must be better than this.
+	cGain := minMatchLength*bpb - 12
 	for i := prevHead; tries > 0; tries-- {
 		if wEnd == win[i+length] {
 			n := matchLen(win[i:i+minMatchLook], wPos)
-			if n > length && (n > minMatchLength || pos-i <= 4096) {
-				length = n
-				offset = pos - i
-				ok = true
-				if n >= nice {
-					// The match is good enough that we don't try to find a better one.
-					break
-				}
-				wEnd = win[pos+n]
+			if n > length {
+				newGain := n*bpb - bits.Len32(uint32(pos-i))
+				if newGain > cGain {
+					length = n
+					offset = pos - i
+					cGain = newGain
+					ok = true
+					if n >= nice {
+						// The match is good enough that we don't try to find a better one.
+						break
+					}
+					wEnd = win[pos+n]
+				}
 			}
 		}
-		if i == minIndex {
+		if i <= minIndex {
 			// hashPrev[i & windowMask] has already been overwritten, so stop now.
 			break
 		}
 		i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
-		if i < minIndex || i < 0 {
+		if i < minIndex {
 			break
 		}
 	}
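The new findMatch no longer accepts any longer match unconditionally; it weighs the extra matched bytes against the cost of encoding a larger offset. A minimal self-contained sketch of that trade-off follows; the function name and the example bits-per-byte value are illustrative, not the library's.

package main

import (
	"fmt"
	"math/bits"
)

// betterMatch reports whether a candidate match of length n at distance dist
// is worth taking, given an estimated cost of bitsPerByte for raw literals.
// Longer offsets cost roughly log2(dist) extra bits to encode, so a slightly
// longer match far away can lose to a shorter match nearby.
func betterMatch(n, dist, bitsPerByte, bestGain int) (gain int, ok bool) {
	gain = n*bitsPerByte - bits.Len32(uint32(dist))
	return gain, gain > bestGain
}

func main() {
	// A 6-byte match 8 bytes back vs. a 7-byte match 60000 bytes back,
	// assuming roughly 3 bits per literal byte.
	g1, _ := betterMatch(6, 8, 3, 0)
	g2, _ := betterMatch(7, 60000, 3, 0)
	fmt.Println(g1, g2) // 14 5 — the nearby match scores higher
}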
@@ -327,8 +342,7 @@ func (d *compressor) writeStoredBlock(buf []byte) error {
 // of the supplied slice.
 // The caller must ensure that len(b) >= 4.
 func hash4(b []byte) uint32 {
-	b = b[:4]
-	return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
+	return hash4u(binary.LittleEndian.Uint32(b), hashBits)
 }

 // bulkHash4 will compute hashes using the same

@@ -337,11 +351,12 @@ func bulkHash4(b []byte, dst []uint32) {
 	if len(b) < 4 {
 		return
 	}
-	hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+	hb := binary.LittleEndian.Uint32(b)

 	dst[0] = hash4u(hb, hashBits)
 	end := len(b) - 4 + 1
 	for i := 1; i < end; i++ {
-		hb = (hb << 8) | uint32(b[i+3])
+		hb = (hb >> 8) | uint32(b[i+3])<<24
 		dst[i] = hash4u(hb, hashBits)
 	}
 }
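Both hash4 and bulkHash4 now read the four input bytes as a single little-endian uint32 instead of assembling them byte by byte, and the bulk loop keeps its rolling value in the same byte order. A small standalone sketch of the idea; the multiplier and shift below are illustrative, not the library's exact constants.

package main

import (
	"encoding/binary"
	"fmt"
)

const hashBits = 17
const prime4bytes = 2654435761 // illustrative Knuth-style multiplier

// hash4LE hashes the first 4 bytes of b with a multiply-shift hash.
// binary.LittleEndian.Uint32 compiles to a single load on little-endian CPUs.
func hash4LE(b []byte) uint32 {
	return (binary.LittleEndian.Uint32(b) * prime4bytes) >> (32 - hashBits)
}

// bulkHash4LE shows the matching rolling update: shift the low byte out and
// bring the next byte in at the top, so the value stays little-endian.
func bulkHash4LE(b []byte, dst []uint32) {
	if len(b) < 4 {
		return
	}
	hb := binary.LittleEndian.Uint32(b)
	dst[0] = (hb * prime4bytes) >> (32 - hashBits)
	for i := 1; i < len(b)-3; i++ {
		hb = (hb >> 8) | uint32(b[i+3])<<24
		dst[i] = (hb * prime4bytes) >> (32 - hashBits)
	}
}

func main() {
	data := []byte("hello gopher")
	dst := make([]uint32, len(data)-3)
	bulkHash4LE(data, dst)
	fmt.Println(hash4LE(data) == dst[0]) // true
}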
@@ -374,10 +389,15 @@ func (d *compressor) deflateLazy() {
 	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
 		return
 	}
+	s.estBitsPerByte = 8
+	if !d.sync {
+		s.estBitsPerByte = comp.ShannonEntropyBits(d.window[s.index:d.windowEnd])
+		s.estBitsPerByte = int(1 + float64(s.estBitsPerByte)/float64(d.windowEnd-s.index))
+	}

 	s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
 	if s.index < s.maxInsertIndex {
-		s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+		s.hash = hash4(d.window[s.index:])
 	}

 	for {

@@ -410,7 +430,7 @@ func (d *compressor) deflateLazy() {
 		}
 		if s.index < s.maxInsertIndex {
 			// Update the hash
-			s.hash = hash4(d.window[s.index : s.index+minMatchLength])
+			s.hash = hash4(d.window[s.index:])
 			ch := s.hashHead[s.hash&hashMask]
 			s.chainHead = int(ch)
 			s.hashPrev[s.index&windowMask] = ch
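deflateLazy now precomputes an estimated bits-per-byte for the pending window (via the library's ShannonEntropyBits helper) and passes it to findMatch. A rough, self-contained way to compute a similar estimate is sketched below without the library helper; the rounding is an assumption for illustration.

package main

import (
	"fmt"
	"math"
)

// estBitsPerByte returns a rounded-up Shannon-entropy estimate of how many
// bits an ideal entropy coder would spend per input byte.
func estBitsPerByte(b []byte) int {
	if len(b) == 0 {
		return 8
	}
	var hist [256]int
	for _, v := range b {
		hist[v]++
	}
	total := float64(len(b))
	bitsTotal := 0.0
	for _, n := range hist {
		if n == 0 {
			continue
		}
		p := float64(n) / total
		bitsTotal += -p * math.Log2(p) * float64(n)
	}
	return 1 + int(bitsTotal/total)
}

func main() {
	fmt.Println(estBitsPerByte([]byte("aaaaaaaaaaaaaaaa")))         // highly repetitive: 1
	fmt.Println(estBitsPerByte([]byte("abcdefghijklmnopqrstuvwx"))) // varied: about 5
}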
@@ -426,12 +446,37 @@ func (d *compressor) deflateLazy() {
 		if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
-			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
+			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead, s.estBitsPerByte); ok {
 				s.length = newLength
 				s.offset = newOffset
 			}
 		}

 		if prevLength >= minMatchLength && s.length <= prevLength {
+			// Check for better match at end...
+			//
+			// checkOff must be >=2 since we otherwise risk checking s.index
+			// Offset of 2 seems to yield best results.
+			const checkOff = 2
+			prevIndex := s.index - 1
+			if prevIndex+prevLength+checkOff < s.maxInsertIndex {
+				end := lookahead
+				if lookahead > maxMatchLength {
+					end = maxMatchLength
+				}
+				end += prevIndex
+				idx := prevIndex + prevLength - (4 - checkOff)
+				h := hash4(d.window[idx:])
+				ch2 := int(s.hashHead[h&hashMask]) - s.hashOffset - prevLength + (4 - checkOff)
+				if ch2 > minIndex {
+					length := matchLen(d.window[prevIndex:end], d.window[ch2:])
+					// It seems like a pure length metric is best.
+					if length > prevLength {
+						prevLength = length
+						prevOffset = prevIndex - ch2
+					}
+				}
+			}
 			// There was a match at the previous step, and the current match is
 			// not better. Output the previous match.
 			d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))

@@ -479,6 +524,7 @@ func (d *compressor) deflateLazy() {
 				}
 				d.tokens.Reset()
 			}
+			s.ii = 0
 		} else {
 			// Reset, if we got a match this run.
 			if s.length >= minMatchLength {

@@ -498,13 +544,12 @@ func (d *compressor) deflateLazy() {
 			// If we have a long run of no matches, skip additional bytes
 			// Resets when s.ii overflows after 64KB.
-			if s.ii > 31 {
-				n := int(s.ii >> 5)
+			if n := int(s.ii) - d.chain; n > 0 {
+				n = 1 + int(n>>6)
 				for j := 0; j < n; j++ {
 					if s.index >= d.windowEnd-1 {
 						break
 					}
 					d.tokens.AddLiteral(d.window[s.index-1])
 					if d.tokens.n == maxFlateBlockTokens {
 						if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {

@@ -512,6 +557,14 @@ func (d *compressor) deflateLazy() {
 						}
 						d.tokens.Reset()
 					}
+					// Index...
+					if s.index < s.maxInsertIndex {
+						h := hash4(d.window[s.index:])
+						ch := s.hashHead[h]
+						s.chainHead = int(ch)
+						s.hashPrev[s.index&windowMask] = ch
+						s.hashHead[h] = uint32(s.index + s.hashOffset)
+					}
 					s.index++
 				}
 				// Flush last byte
@@ -611,7 +664,9 @@ func (d *compressor) write(b []byte) (n int, err error) {
 	}
 	n = len(b)
 	for len(b) > 0 {
-		d.step(d)
+		if d.windowEnd == len(d.window) || d.sync {
+			d.step(d)
+		}
 		b = b[d.fill(d, b):]
 		if d.err != nil {
 			return 0, d.err

@@ -652,13 +707,13 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
 		level = 5
 		fallthrough
 	case level >= 1 && level <= 6:
-		d.w.logNewTablePenalty = 8
+		d.w.logNewTablePenalty = 7
 		d.fast = newFastEnc(level)
 		d.window = make([]byte, maxStoreBlockSize)
 		d.fill = (*compressor).fillBlock
 		d.step = (*compressor).storeFast
 	case 7 <= level && level <= 9:
-		d.w.logNewTablePenalty = 10
+		d.w.logNewTablePenalty = 8
 		d.state = &advancedState{}
 		d.compressionLevel = levels[level]
 		d.initDeflate()
@@ -213,11 +213,9 @@ func (e *fastGen) Reset() {
 // matchLen returns the maximum length.
 // 'a' must be the shortest of the two.
 func matchLen(a, b []byte) int {
-	b = b[:len(a)]
 	var checked int

 	for len(a) >= 8 {
+		b = b[:len(a)]
 		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
 			return checked + (bits.TrailingZeros64(diff) >> 3)
 		}
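matchLen compares eight bytes at a time and uses the trailing-zero count of the XOR to locate the first differing byte. A minimal standalone version of the same trick:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the number of leading bytes that a and b have in common.
// a must not be longer than b.
func matchLen(a, b []byte) int {
	var n int
	for len(a) >= 8 {
		// XOR the next 8 bytes of each side; a non-zero result marks the
		// first mismatching byte in its lowest set byte (little-endian).
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b = a[8:], b[8:]
		n += 8
	}
	for i := range a {
		if a[i] != b[i] {
			return n + i
		}
	}
	return n + len(a)
}

func main() {
	fmt.Println(matchLen([]byte("abcdefgh12345"), []byte("abcdefgh12399xyz"))) // 11
}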
@@ -155,37 +155,33 @@ func (w *huffmanBitWriter) reset(writer io.Writer) {
 	w.lastHuffMan = false
 }

-func (w *huffmanBitWriter) canReuse(t *tokens) (offsets, lits bool) {
-	offsets, lits = true, true
+func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
 	a := t.offHist[:offsetCodeCount]
-	b := w.offsetFreq[:len(a)]
-	for i := range a {
-		if b[i] == 0 && a[i] != 0 {
-			offsets = false
-			break
+	b := w.offsetEncoding.codes
+	b = b[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].len == 0 {
+			return false
 		}
 	}

 	a = t.extraHist[:literalCount-256]
-	b = w.literalFreq[256:literalCount]
+	b = w.literalEncoding.codes[256:literalCount]
 	b = b[:len(a)]
-	for i := range a {
-		if b[i] == 0 && a[i] != 0 {
-			lits = false
-			break
+	for i, v := range a {
+		if v != 0 && b[i].len == 0 {
+			return false
 		}
 	}

-	if lits {
-		a = t.litHist[:]
-		b = w.literalFreq[:len(a)]
-		for i := range a {
-			if b[i] == 0 && a[i] != 0 {
-				lits = false
-				break
-			}
+	a = t.litHist[:256]
+	b = w.literalEncoding.codes[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].len == 0 {
+			return false
 		}
 	}
-	return
+	return true
 }

 func (w *huffmanBitWriter) flush() {
@@ -222,7 +218,7 @@ func (w *huffmanBitWriter) write(b []byte) {
 }

 func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
-	w.bits |= uint64(b) << w.nbits
+	w.bits |= uint64(b) << (w.nbits & 63)
 	w.nbits += nb
 	if w.nbits >= 48 {
 		w.writeOutBits()

@@ -423,7 +419,7 @@ func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
 func (w *huffmanBitWriter) writeCode(c hcode) {
 	// The function does not get inlined if we "& 63" the shift.
-	w.bits |= uint64(c.code) << w.nbits
+	w.bits |= uint64(c.code) << (w.nbits & 63)
 	w.nbits += c.len
 	if w.nbits >= 48 {
 		w.writeOutBits()

@@ -566,7 +562,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
 		w.lastHeader = 0
 	}
 	numLiterals, numOffsets := w.indexTokens(tokens, false)
-	w.generate(tokens)
+	w.generate()
 	var extraBits int
 	storedSize, storable := w.storedSize(input)
 	if storable {

@@ -595,7 +591,7 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
 	}

 	// Stored bytes?
-	if storable && storedSize < size {
+	if storable && storedSize <= size {
 		w.writeStoredHeader(len(input), eof)
 		w.writeBytes(input)
 		return
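Several writer paths now mask the shift amount with & 63. The mask is a no-op for valid inputs (nbits stays below 48 before a flush), but it removes the shift-range handling the compiler would otherwise generate, keeping these tiny helpers cheap to inline. A sketch of the pattern outside the library, with hypothetical names:

package main

import "fmt"

type bitAcc struct {
	bits  uint64
	nbits uint16
}

// add pushes the low n bits of v into the accumulator. Masking the shift
// with &63 tells the compiler the shift count is always in range, so this
// method stays small enough to inline at every call site.
func (b *bitAcc) add(v uint64, n uint16) {
	b.bits |= v << (b.nbits & 63)
	b.nbits += n
}

func main() {
	var b bitAcc
	b.add(0b101, 3)
	b.add(0b11, 2)
	fmt.Printf("%05b (%d bits)\n", b.bits, b.nbits) // 11101 (5 bits)
}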
@@ -634,22 +630,39 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
 		w.lastHeader = 0
 		w.lastHuffMan = false
 	}
-	if !sync {
-		tokens.Fill()
+
+	// fillReuse enables filling of empty values.
+	// This will make encodings always reusable without testing.
+	// However, this does not appear to benefit on most cases.
+	const fillReuse = false
+
+	// Check if we can reuse...
+	if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
 	}

 	numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+	extraBits := 0
+	ssize, storable := w.storedSize(input)
+
+	const usePrefs = true
+	if storable || w.lastHeader > 0 {
+		extraBits = w.extraBitSize()
+	}

 	var size int

 	// Check if we should reuse.
 	if w.lastHeader > 0 {
 		// Estimate size for using a new table.
 		// Use the previous header size as the best estimate.
 		newSize := w.lastHeader + tokens.EstimatedBits()
-		newSize += newSize >> w.logNewTablePenalty
+		newSize += int(w.literalEncoding.codes[endBlockMarker].len) + newSize>>w.logNewTablePenalty

 		// The estimated size is calculated as an optimal table.
 		// We add a penalty to make it more realistic and re-use a bit more.
-		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize()
+		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits

 		// Check if a new table is better.
 		if newSize < reuseSize {

@@ -660,35 +673,79 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
 		} else {
 			size = reuseSize
 		}

+		if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+			// Check if we get a reasonable size decrease.
+			if storable && ssize <= size {
+				w.writeStoredHeader(len(input), eof)
+				w.writeBytes(input)
+				return
+			}
+			w.writeFixedHeader(eof)
+			if !sync {
+				tokens.AddEOB()
+			}
+			w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+			return
+		}
 		// Check if we get a reasonable size decrease.
-		if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+		if storable && ssize <= size {
 			w.writeStoredHeader(len(input), eof)
 			w.writeBytes(input)
-			w.lastHeader = 0
 			return
 		}
 	}

 	// We want a new block/table
 	if w.lastHeader == 0 {
-		w.generate(tokens)
+		if fillReuse && !sync {
+			w.fillTokens()
+			numLiterals, numOffsets = maxNumLit, maxNumDist
+		} else {
+			w.literalFreq[endBlockMarker] = 1
+		}
+
+		w.generate()
 		// Generate codegen and codegenFrequencies, which indicates how to encode
 		// the literalEncoding and the offsetEncoding.
 		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
 		w.codegenEncoding.generate(w.codegenFreq[:], 7)

 		var numCodegens int
-		size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, w.extraBitSize())
-		// Store bytes, if we don't get a reasonable improvement.
-		if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
+		if fillReuse && !sync {
+			// Reindex for accurate size...
+			w.indexTokens(tokens, true)
+		}
+		size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+		// Store predefined, if we don't get a reasonable improvement.
+		if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+			// Store bytes, if we don't get an improvement.
+			if storable && ssize <= preSize {
+				w.writeStoredHeader(len(input), eof)
+				w.writeBytes(input)
+				return
+			}
+			w.writeFixedHeader(eof)
+			if !sync {
+				tokens.AddEOB()
+			}
+			w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+			return
+		}
+
+		if storable && ssize <= size {
+			// Store bytes, if we don't get an improvement.
 			w.writeStoredHeader(len(input), eof)
 			w.writeBytes(input)
-			w.lastHeader = 0
 			return
 		}

 		// Write Huffman table.
 		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
-		w.lastHeader, _ = w.headerSize()
+		if !sync {
+			w.lastHeader, _ = w.headerSize()
+		}
 		w.lastHuffMan = false
 	}

@@ -699,6 +756,19 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
 	w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
 }

+func (w *huffmanBitWriter) fillTokens() {
+	for i, v := range w.literalFreq[:literalCount] {
+		if v == 0 {
+			w.literalFreq[i] = 1
+		}
+	}
+	for i, v := range w.offsetFreq[:offsetCodeCount] {
+		if v == 0 {
+			w.offsetFreq[i] = 1
+		}
+	}
+}
+
 // indexTokens indexes a slice of tokens, and updates
 // literalFreq and offsetFreq, and generates literalEncoding
 // and offsetEncoding.

@@ -733,7 +803,7 @@ func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
 	return
 }

-func (w *huffmanBitWriter) generate(t *tokens) {
+func (w *huffmanBitWriter) generate() {
 	w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
 	w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
 }
@@ -768,7 +838,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
 		if t < matchType {
 			//w.writeCode(lits[t.literal()])
 			c := lits[t.literal()]
-			bits |= uint64(c.code) << nbits
+			bits |= uint64(c.code) << (nbits & 63)
 			nbits += c.len
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)

@@ -796,7 +866,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
 		} else {
 			// inlined
 			c := lengths[lengthCode&31]
-			bits |= uint64(c.code) << nbits
+			bits |= uint64(c.code) << (nbits & 63)
 			nbits += c.len
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)

@@ -819,7 +889,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
 		if extraLengthBits > 0 {
 			//w.writeBits(extraLength, extraLengthBits)
 			extraLength := int32(length - lengthBase[lengthCode&31])
-			bits |= uint64(extraLength) << nbits
+			bits |= uint64(extraLength) << (nbits & 63)
 			nbits += extraLengthBits
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)

@@ -846,7 +916,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
 		} else {
 			// inlined
 			c := offs[offsetCode]
-			bits |= uint64(c.code) << nbits
+			bits |= uint64(c.code) << (nbits & 63)
 			nbits += c.len
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)

@@ -867,7 +937,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
 		offsetComb := offsetCombined[offsetCode]
 		if offsetComb > 1<<16 {
 			//w.writeBits(extraOffset, extraOffsetBits)
-			bits |= uint64(offset&matchOffsetOnlyMask-(offsetComb&0xffff)) << nbits
+			bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63)
 			nbits += uint16(offsetComb >> 16)
 			if nbits >= 48 {
 				binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
@@ -996,10 +1066,41 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	encoding := w.literalEncoding.codes[:256]
 	// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
 	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes

+	// Unroll, write 3 codes/loop.
+	// Fastest number of unrolls.
+	for len(input) > 3 {
+		// We must have at least 48 bits free.
+		if nbits >= 8 {
+			n := nbits >> 3
+			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+			bits >>= (n * 8) & 63
+			nbits -= n * 8
+			nbytes += uint8(n)
+		}
+		if nbytes >= bufferFlushSize {
+			if w.err != nil {
+				nbytes = 0
+				return
+			}
+			_, w.err = w.writer.Write(w.bytes[:nbytes])
+			nbytes = 0
+		}
+		a, b := encoding[input[0]], encoding[input[1]]
+		bits |= uint64(a.code) << (nbits & 63)
+		bits |= uint64(b.code) << ((nbits + a.len) & 63)
+		c := encoding[input[2]]
+		nbits += b.len + a.len
+		bits |= uint64(c.code) << (nbits & 63)
+		nbits += c.len
+		input = input[3:]
+	}
+
+	// Remaining...
 	for _, t := range input {
 		// Bitwriting inlined, ~30% speedup
 		c := encoding[t]
-		bits |= uint64(c.code) << nbits
+		bits |= uint64(c.code) << (nbits & 63)
 		nbits += c.len
 		if debugDeflate {
 			count += int(c.len)
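The new unrolled literal loop in writeBlockHuff drains whole bytes from the 64-bit container before each group of three codes: it writes the container with PutUint64 and then advances by only the bytes actually consumed. A condensed sketch of that flush step; the buffer handling and names here are illustrative, not the library's.

package main

import (
	"encoding/binary"
	"fmt"
)

// flushBytes moves any whole bytes held in bits out into buf, returning the
// updated container, bit count, and byte offset. Writing 8 bytes with
// PutUint64 and advancing by only nbits/8 keeps the hot path branch-light.
// buf must have at least n+8 bytes of room.
func flushBytes(buf []byte, bits uint64, nbits uint16, n int) (uint64, uint16, int) {
	if nbits >= 8 {
		k := nbits >> 3
		binary.LittleEndian.PutUint64(buf[n:], bits)
		bits >>= (k * 8) & 63
		nbits -= k * 8
		n += int(k)
	}
	return bits, nbits, n
}

func main() {
	buf := make([]byte, 16)
	bits, nbits, n := flushBytes(buf, 0x0302_01, 20, 0) // 20 bits pending
	fmt.Printf("wrote %d bytes, %d bits left over, container=%#x\n", n, nbits, bits)
	fmt.Println(buf[:n]) // [1 2]
}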
@@ -328,11 +328,17 @@ func (f *decompressor) nextBlock() {
 	switch typ {
 	case 0:
 		f.dataBlock()
+		if debugDecode {
+			fmt.Println("stored block")
+		}
 	case 1:
 		// compressed, fixed Huffman tables
 		f.hl = &fixedHuffmanDecoder
 		f.hd = nil
 		f.huffmanBlockDecoder()()
+		if debugDecode {
+			fmt.Println("predefinied huffman block")
+		}
 	case 2:
 		// compressed, dynamic Huffman tables
 		if f.err = f.readHuffman(); f.err != nil {

@@ -341,6 +347,9 @@ func (f *decompressor) nextBlock() {
 		f.hl = &f.h1
 		f.hd = &f.h2
 		f.huffmanBlockDecoder()()
+		if debugDecode {
+			fmt.Println("dynamic huffman block")
+		}
 	default:
 		// 3 is reserved.
 		if debugDecode {

@@ -20,7 +20,7 @@ type dEntrySingle struct {
 // double-symbols decoding
 type dEntryDouble struct {
-	seq   uint16
+	seq   [4]byte
 	nBits uint8
 	len   uint8
 }
@@ -753,23 +753,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			br[stream2].fillFast()

 			val := br[stream].peekBitsFast(d.actualTableLog)
-			v := single[val&tlMask]
-			br[stream].advance(uint8(v.entry))
-			buf[off+bufoff*stream] = uint8(v.entry >> 8)
-
 			val2 := br[stream2].peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask]
 			v2 := single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream] = uint8(v.entry >> 8)
 			buf[off+bufoff*stream2] = uint8(v2.entry >> 8)

 			val = br[stream].peekBitsFast(d.actualTableLog)
-			v = single[val&tlMask]
-			br[stream].advance(uint8(v.entry))
-			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-
 			val2 = br[stream2].peekBitsFast(d.actualTableLog)
+			v = single[val&tlMask]
 			v2 = single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
 			buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
 		}

@@ -780,23 +778,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
 			br[stream2].fillFast()

 			val := br[stream].peekBitsFast(d.actualTableLog)
-			v := single[val&tlMask]
-			br[stream].advance(uint8(v.entry))
-			buf[off+bufoff*stream] = uint8(v.entry >> 8)
-
 			val2 := br[stream2].peekBitsFast(d.actualTableLog)
+			v := single[val&tlMask]
 			v2 := single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream] = uint8(v.entry >> 8)
 			buf[off+bufoff*stream2] = uint8(v2.entry >> 8)

 			val = br[stream].peekBitsFast(d.actualTableLog)
-			v = single[val&tlMask]
-			br[stream].advance(uint8(v.entry))
-			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
-
 			val2 = br[stream2].peekBitsFast(d.actualTableLog)
+			v = single[val&tlMask]
 			v2 = single[val2&tlMask]
+			br[stream].advance(uint8(v.entry))
 			br[stream2].advance(uint8(v2.entry))
+			buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
 			buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
 		}
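The reordered Decompress4X body groups the two peeks, then the two table lookups, then the two advances, so work on the second stream can start before the first stream's result is needed. A toy illustration of interleaving two independent lookup streams; the table and update rule below are made up for the example.

package main

import "fmt"

func main() {
	var table [16]byte
	for i := range table {
		table[i] = byte('a' + i)
	}
	s1, s2 := uint32(0x1234), uint32(0xfedc)
	var out []byte
	for i := 0; i < 4; i++ {
		// Peek both streams first, then look up both, then advance both:
		// the two dependency chains are independent, so the CPU can overlap them.
		v1 := table[s1&15]
		v2 := table[s2&15]
		s1 >>= 4
		s2 >>= 4
		out = append(out, v1, v2)
	}
	fmt.Println(string(out))
}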
@@ -914,7 +910,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 	out := dst
 	dstEvery := (dstSize + 3) / 4

-	shift := (8 - d.actualTableLog) & 7
+	shift := (56 + (8 - d.actualTableLog)) & 63

 	const tlSize = 1 << 8
 	single := d.dt.single[:tlSize]

@@ -935,79 +931,91 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			// Interleave 2 decodes.
 			const stream = 0
 			const stream2 = 1
-			br[stream].fillFast()
-			br[stream2].fillFast()
-
-			v := single[br[stream].peekByteFast()>>shift].entry
-			buf[off+bufoff*stream] = uint8(v >> 8)
-			br[stream].advance(uint8(v))
-
-			v2 := single[br[stream2].peekByteFast()>>shift].entry
-			buf[off+bufoff*stream2] = uint8(v2 >> 8)
-			br[stream2].advance(uint8(v2))
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
+
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)

 		{
 			const stream = 2
 			const stream2 = 3
-			br[stream].fillFast()
-			br[stream2].fillFast()
+			br1 := &br[stream]
+			br2 := &br[stream2]
+			br1.fillFast()
+			br2.fillFast()
+
+			v := single[uint8(br1.value>>shift)].entry
+			v2 := single[uint8(br2.value>>shift)].entry
+			br1.bitsRead += uint8(v)
+			br1.value <<= v & 63
+			br2.bitsRead += uint8(v2)
+			br2.value <<= v2 & 63
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)

 		off += 4
@@ -1073,7 +1081,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 		}

 		// Read value and increment offset.
-		v := single[br.peekByteFast()>>shift].entry
+		v := single[uint8(br.value>>shift)].entry
 		nBits := uint8(v)
 		br.advance(nBits)
 		bitsLeft -= int(nBits)

@@ -1121,7 +1129,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 	out := dst
 	dstEvery := (dstSize + 3) / 4

-	const shift = 0
+	const shift = 56
 	const tlSize = 1 << 8
 	const tlMask = tlSize - 1
 	single := d.dt.single[:tlSize]

@@ -1145,37 +1153,41 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 			br[stream].fillFast()
 			br[stream2].fillFast()

-			v := single[br[stream].peekByteFast()>>shift].entry
-			buf[off+bufoff*stream] = uint8(v >> 8)
-			br[stream].advance(uint8(v))
-
-			v2 := single[br[stream2].peekByteFast()>>shift].entry
-			buf[off+bufoff*stream2] = uint8(v2 >> 8)
-			br[stream2].advance(uint8(v2))
+			v := single[uint8(br[stream].value>>shift)].entry
+			v2 := single[uint8(br[stream2].value>>shift)].entry
+			br[stream].bitsRead += uint8(v)
+			br[stream].value <<= v & 63
+			br[stream2].bitsRead += uint8(v2)
+			br[stream2].value <<= v2 & 63
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)

@@ -1184,37 +1196,41 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 			br[stream].fillFast()
 			br[stream2].fillFast()

-			v := single[br[stream].peekByteFast()>>shift].entry
-			buf[off+bufoff*stream] = uint8(v >> 8)
-			br[stream].advance(uint8(v))
-
-			v2 := single[br[stream2].peekByteFast()>>shift].entry
-			buf[off+bufoff*stream2] = uint8(v2 >> 8)
-			br[stream2].advance(uint8(v2))
+			v := single[uint8(br[stream].value>>shift)].entry
+			v2 := single[uint8(br[stream2].value>>shift)].entry
+			br[stream].bitsRead += uint8(v)
+			br[stream].value <<= v & 63
+			br[stream2].bitsRead += uint8(v2)
+			br[stream2].value <<= v2 & 63
+			buf[off+bufoff*stream] = uint8(v >> 8)
+			buf[off+bufoff*stream2] = uint8(v2 >> 8)

 		off += 4

@@ -1280,7 +1296,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 		}

 		// Read value and increment offset.
-		v := single[br.peekByteFast()>>shift].entry
+		v := single[br.peekByteFast()].entry
 		nBits := uint8(v)
 		br.advance(nBits)
 		bitsLeft -= int(nBits)
@@ -50,16 +50,23 @@ func (b *bitReader) getBits(n uint8) int {
 	if n == 0 /*|| b.bitsRead >= 64 */ {
 		return 0
 	}
-	return b.getBitsFast(n)
+	return int(b.get32BitsFast(n))
 }

-// getBitsFast requires that at least one bit is requested every time.
+// get32BitsFast requires that at least one bit is requested every time.
 // There are no checks if the buffer is filled.
-func (b *bitReader) getBitsFast(n uint8) int {
+func (b *bitReader) get32BitsFast(n uint8) uint32 {
 	const regMask = 64 - 1
 	v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
 	b.bitsRead += n
-	return int(v)
+	return v
+}
+
+func (b *bitReader) get16BitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return v
 }

 // fillFast() will make sure at least 32 bits are available.
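The renamed get32BitsFast (and the new get16BitsFast) read from a left-justified 64-bit container: the value is shifted left by the bits already consumed, then right so only the requested top bits remain. A standalone sketch of that peek, with hypothetical type and method names:

package main

import "fmt"

type bitReader struct {
	value    uint64 // left-justified: the next unread bit is the MSB
	bitsRead uint8
}

// peek returns the next n bits without any buffer checks, mirroring the
// "fast" readers: shift out what was already read, then keep the top n bits.
func (b *bitReader) peek(n uint8) uint32 {
	const regMask = 64 - 1
	return uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
}

func main() {
	b := bitReader{value: 0xA5<<56 | 0x0F<<48} // bit stream 10100101 00001111 ...
	fmt.Printf("%04b\n", b.peek(4)) // 1010
	b.bitsRead = 8
	fmt.Printf("%08b\n", b.peek(8)) // 00001111
}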
@@ -38,7 +38,7 @@ func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
 	b.nBits += bits
 }

-// addBits32NC will add up to 32 bits.
+// addBits32NC will add up to 31 bits.
 // It will not check if there is space for them,
 // so the caller must ensure that it has flushed recently.
 func (b *bitWriter) addBits32NC(value uint32, bits uint8) {

@@ -46,6 +46,26 @@ func (b *bitWriter) addBits32NC(value uint32, bits uint8) {
 	b.nBits += bits
 }

+// addBits64NC will add up to 64 bits.
+// There must be space for 32 bits.
+func (b *bitWriter) addBits64NC(value uint64, bits uint8) {
+	if bits <= 31 {
+		b.addBits32Clean(uint32(value), bits)
+		return
+	}
+	b.addBits32Clean(uint32(value), 32)
+	b.flush32()
+	b.addBits32Clean(uint32(value>>32), bits-32)
+}
+
+// addBits32Clean will add up to 32 bits.
+// It will not check if there is space for them.
+// The input must not contain more bits than specified.
+func (b *bitWriter) addBits32Clean(value uint32, bits uint8) {
+	b.bitContainer |= uint64(value) << (b.nBits & 63)
+	b.nBits += bits
+}
+
 // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
 // It will not check if there is space for them, so the caller must ensure that it has flushed recently.
 func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
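addBits64NC handles a value wider than the guaranteed free space by splitting it: the low 32 bits go in, the container is flushed, then the high bits follow. Below is a self-contained sketch of that split under the same "at least 32 bits free" contract; the flush here simply drains 32-bit chunks to a slice and all names are hypothetical.

package main

import "fmt"

type bw struct {
	container uint64
	nBits     uint8
	out       []byte
}

func (b *bw) add32(v uint32, bits uint8) {
	b.container |= uint64(v) << (b.nBits & 63)
	b.nBits += bits
}

// flush32 drains 32-bit chunks so at least 32 bits of the container are free.
func (b *bw) flush32() {
	for b.nBits >= 32 {
		b.out = append(b.out, byte(b.container), byte(b.container>>8), byte(b.container>>16), byte(b.container>>24))
		b.container >>= 32
		b.nBits -= 32
	}
}

// add64 adds up to 64 bits while only ever relying on 32 bits of free space,
// mirroring the split used by the new addBits64NC.
func (b *bw) add64(v uint64, bits uint8) {
	if bits <= 31 {
		b.add32(uint32(v), bits)
		return
	}
	b.add32(uint32(v), 32)
	b.flush32()
	b.add32(uint32(v>>32), bits-32)
}

func main() {
	var b bw
	b.add64(0x1_2345_6789, 33) // a 33-bit value
	b.flush32()
	fmt.Printf("%x %d\n", b.out, b.nBits) // 89674523 1
}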
@@ -51,7 +51,7 @@ func (b *blockEnc) init() {
 	if cap(b.literals) < maxCompressedBlockSize {
 		b.literals = make([]byte, 0, maxCompressedBlockSize)
 	}
-	const defSeqs = 200
+	const defSeqs = 2000
 	if cap(b.sequences) < defSeqs {
 		b.sequences = make([]seq, 0, defSeqs)
 	}

@@ -426,7 +426,7 @@ func fuzzFseEncoder(data []byte) int {
 		return 0
 	}
 	enc := fseEncoder{}
-	hist := enc.Histogram()[:256]
+	hist := enc.Histogram()
 	maxSym := uint8(0)
 	for i, v := range data {
 		v = v & 63
@@ -722,52 +722,53 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
 		println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB)
 	}
 	seq--
-	if llEnc.maxBits+mlEnc.maxBits+ofEnc.maxBits <= 32 {
-		// No need to flush (common)
-		for seq >= 0 {
-			s = b.sequences[seq]
-			wr.flush32()
-			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
-			// tabelog max is 8 for all.
-			of.encode(ofB)
-			ml.encode(mlB)
-			ll.encode(llB)
-			wr.flush32()
-
-			// We checked that all can stay within 32 bits
-			wr.addBits32NC(s.litLen, llB.outBits)
-			wr.addBits32NC(s.matchLen, mlB.outBits)
-			wr.addBits32NC(s.offset, ofB.outBits)
-
-			if debugSequences {
-				println("Encoded seq", seq, s)
-			}
-
-			seq--
-		}
-	} else {
-		for seq >= 0 {
-			s = b.sequences[seq]
-			wr.flush32()
-			llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode]
-			// tabelog max is below 8 for each.
-			of.encode(ofB)
-			ml.encode(mlB)
-			ll.encode(llB)
-			wr.flush32()
-
-			// ml+ll = max 32 bits total
-			wr.addBits32NC(s.litLen, llB.outBits)
-			wr.addBits32NC(s.matchLen, mlB.outBits)
-			wr.flush32()
-			wr.addBits32NC(s.offset, ofB.outBits)
-
-			if debugSequences {
-				println("Encoded seq", seq, s)
-			}
-
-			seq--
-		}
-	}
+	// Store sequences in reverse...
+	for seq >= 0 {
+		s = b.sequences[seq]
+
+		ofB := ofTT[s.ofCode]
+		wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
+		//of.encode(ofB)
+		nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
+		dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
+		wr.addBits16NC(of.state, uint8(nbBitsOut))
+		of.state = of.stateTable[dstState]
+
+		// Accumulate extra bits.
+		outBits := ofB.outBits & 31
+		extraBits := uint64(s.offset & bitMask32[outBits])
+		extraBitsN := outBits
+
+		mlB := mlTT[s.mlCode]
+		//ml.encode(mlB)
+		nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+		dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+		wr.addBits16NC(ml.state, uint8(nbBitsOut))
+		ml.state = ml.stateTable[dstState]
+
+		outBits = mlB.outBits & 31
+		extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+		extraBitsN += outBits
+
+		llB := llTT[s.llCode]
+		//ll.encode(llB)
+		nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+		dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+		wr.addBits16NC(ll.state, uint8(nbBitsOut))
+		ll.state = ll.stateTable[dstState]
+
+		outBits = llB.outBits & 31
+		extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+		extraBitsN += outBits
+
+		wr.flush32()
+		wr.addBits64NC(extraBits, extraBitsN)
+
+		if debugSequences {
+			println("Encoded seq", seq, s)
+		}
+
+		seq--
+	}
 	ml.flush(mlEnc.actualTableLog)
 	of.flush(ofEnc.actualTableLog)
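The rewritten sequence loop folds the offset, match-length and literal-length extra bits into one uint64 and writes them with a single 64-bit add instead of three 32-bit adds. A compact sketch of that accumulation; the field widths and values below are arbitrary examples.

package main

import "fmt"

// packExtra appends the low `width` bits of v to acc, returning the new
// accumulator and total bit count, the same way the encoder builds its
// combined extra-bits word before one 64-bit write.
func packExtra(acc uint64, n uint8, v uint32, width uint8) (uint64, uint8) {
	mask := uint32(1)<<width - 1
	return acc<<width | uint64(v&mask), n + width
}

func main() {
	var acc uint64
	var n uint8
	acc, n = packExtra(acc, n, 0x3ff, 10) // offset extra bits
	acc, n = packExtra(acc, n, 0x05, 4)   // match length extra bits
	acc, n = packExtra(acc, n, 0x02, 3)   // literal length extra bits
	fmt.Printf("%017b (%d bits)\n", acc, n) // 11111111110101010 (17 bits)
}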
@ -801,14 +802,13 @@ func (b *blockEnc) genCodes() {
|
||||||
// nothing to do
|
// nothing to do
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(b.sequences) > math.MaxUint16 {
|
if len(b.sequences) > math.MaxUint16 {
|
||||||
panic("can only encode up to 64K sequences")
|
panic("can only encode up to 64K sequences")
|
||||||
}
|
}
|
||||||
// No bounds checks after here:
|
// No bounds checks after here:
|
||||||
llH := b.coders.llEnc.Histogram()[:256]
|
llH := b.coders.llEnc.Histogram()
|
||||||
ofH := b.coders.ofEnc.Histogram()[:256]
|
ofH := b.coders.ofEnc.Histogram()
|
||||||
mlH := b.coders.mlEnc.Histogram()[:256]
|
mlH := b.coders.mlEnc.Histogram()
|
||||||
for i := range llH {
|
for i := range llH {
|
||||||
llH[i] = 0
|
llH[i] = 0
|
||||||
}
|
}
|
||||||
|
@ -820,7 +820,8 @@ func (b *blockEnc) genCodes() {
|
||||||
}
|
}
|
||||||
|
|
||||||
var llMax, ofMax, mlMax uint8
|
var llMax, ofMax, mlMax uint8
|
||||||
for i, seq := range b.sequences {
|
for i := range b.sequences {
|
||||||
|
seq := &b.sequences[i]
|
||||||
v := llCode(seq.litLen)
|
v := llCode(seq.litLen)
|
||||||
seq.llCode = v
|
seq.llCode = v
|
||||||
llH[v]++
|
llH[v]++
|
||||||
|
@ -844,7 +845,6 @@ func (b *blockEnc) genCodes() {
|
||||||
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
|
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
b.sequences[i] = seq
|
|
||||||
}
|
}
|
||||||
maxCount := func(a []uint32) int {
|
maxCount := func(a []uint32) int {
|
||||||
var max uint32
|
var max uint32
|
||||||
|
|
|
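A small change in genCodes above: the Histogram() results are no longer re-sliced with [:256], and the sequence loop now takes a pointer into the slice (for i := range b.sequences; seq := &b.sequences[i]) instead of copying each element out and writing it back with b.sequences[i] = seq. A tiny sketch of that pattern, with an illustrative struct rather than the vendored seq type:

package main

import "fmt"

// sequence stands in for the encoder's per-sequence record.
type sequence struct {
	litLen uint32
	llCode uint8
}

func main() {
	seqs := []sequence{{litLen: 5}, {litLen: 300}}

	// Iterating by index and mutating through a pointer avoids copying the
	// struct on every iteration and the extra write-back assignment.
	for i := range seqs {
		seq := &seqs[i]
		seq.llCode = uint8(seq.litLen % 36) // stand-in for the real llCode(litLen)
	}
	fmt.Println(seqs) // [{5 5} {300 12}]
}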
@ -108,11 +108,6 @@ func (e *fastBase) UseBlock(enc *blockEnc) {
|
||||||
e.blk = enc
|
e.blk = enc
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *fastBase) matchlenNoHist(s, t int32, src []byte) int32 {
|
|
||||||
// Extend the match to be as long as possible.
|
|
||||||
return int32(matchLen(src[s:], src[t:]))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
|
func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
|
||||||
if debugAsserts {
|
if debugAsserts {
|
||||||
if s < 0 {
|
if s < 0 {
|
||||||
|
@ -131,9 +126,24 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
|
||||||
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
|
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
a := src[s:]
|
||||||
|
b := src[t:]
|
||||||
|
b = b[:len(a)]
|
||||||
|
end := int32((len(a) >> 3) << 3)
|
||||||
|
for i := int32(0); i < end; i += 8 {
|
||||||
|
if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
|
||||||
|
return i + int32(bits.TrailingZeros64(diff)>>3)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Extend the match to be as long as possible.
|
a = a[end:]
|
||||||
return int32(matchLen(src[s:], src[t:]))
|
b = b[end:]
|
||||||
|
for i := range a {
|
||||||
|
if a[i] != b[i] {
|
||||||
|
return int32(i) + end
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return int32(len(a)) + end
|
||||||
}
|
}
|
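The new matchlen above compares eight bytes per iteration: it XORs two 64-bit loads and, at the first non-zero difference, TrailingZeros64>>3 gives the index of the first mismatching byte; a byte-wise loop handles the tail. A self-contained sketch of the same technique, using encoding/binary in place of the package's load6432 helper:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// commonPrefixLen returns the length of the common prefix of a and b,
// assuming len(b) >= len(a).
func commonPrefixLen(a, b []byte) int {
	b = b[:len(a)]
	i := 0
	for ; i+8 <= len(a); i += 8 {
		x := binary.LittleEndian.Uint64(a[i:])
		y := binary.LittleEndian.Uint64(b[i:])
		if diff := x ^ y; diff != 0 {
			// The lowest set bit marks the first differing byte (little endian).
			return i + bits.TrailingZeros64(diff)>>3
		}
	}
	for ; i < len(a); i++ { // byte-wise tail, as in the vendored fallback loop
		if a[i] != b[i] {
			return i
		}
	}
	return len(a)
}

func main() {
	fmt.Println(commonPrefixLen([]byte("hello, zstd!!"), []byte("hello, zstdXY"))) // 11
}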
||||||
|
|
||||||
// Reset the encoding table.
|
// Reset the encoding table.
|
||||||
|
|
|
@ -6,8 +6,6 @@ package zstd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
|
||||||
"math/bits"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -136,20 +134,7 @@ encodeLoop:
|
||||||
// Consider history as well.
|
// Consider history as well.
|
||||||
var seq seq
|
var seq seq
|
||||||
var length int32
|
var length int32
|
||||||
// length = 4 + e.matchlen(s+6, repIndex+4, src)
|
length = 4 + e.matchlen(s+6, repIndex+4, src)
|
||||||
{
|
|
||||||
a := src[s+6:]
|
|
||||||
b := src[repIndex+4:]
|
|
||||||
endI := len(a) & (math.MaxInt32 - 7)
|
|
||||||
length = int32(endI) + 4
|
|
||||||
for i := 0; i < endI; i += 8 {
|
|
||||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
|
||||||
length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
seq.matchLen = uint32(length - zstdMinMatch)
|
seq.matchLen = uint32(length - zstdMinMatch)
|
||||||
|
|
||||||
// We might be able to match backwards.
|
// We might be able to match backwards.
|
||||||
|
@ -236,20 +221,7 @@ encodeLoop:
|
||||||
}
|
}
|
||||||
|
|
||||||
// Extend the 4-byte match as long as possible.
|
// Extend the 4-byte match as long as possible.
|
||||||
//l := e.matchlen(s+4, t+4, src) + 4
|
l := e.matchlen(s+4, t+4, src) + 4
|
||||||
var l int32
|
|
||||||
{
|
|
||||||
a := src[s+4:]
|
|
||||||
b := src[t+4:]
|
|
||||||
endI := len(a) & (math.MaxInt32 - 7)
|
|
||||||
l = int32(endI) + 4
|
|
||||||
for i := 0; i < endI; i += 8 {
|
|
||||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
|
||||||
l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extend backwards
|
// Extend backwards
|
||||||
tMin := s - e.maxMatchOff
|
tMin := s - e.maxMatchOff
|
||||||
|
@ -286,20 +258,7 @@ encodeLoop:
|
||||||
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
|
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
|
||||||
// We have at least 4 byte match.
|
// We have at least 4 byte match.
|
||||||
// No need to check backwards. We come straight from a match
|
// No need to check backwards. We come straight from a match
|
||||||
//l := 4 + e.matchlen(s+4, o2+4, src)
|
l := 4 + e.matchlen(s+4, o2+4, src)
|
||||||
var l int32
|
|
||||||
{
|
|
||||||
a := src[s+4:]
|
|
||||||
b := src[o2+4:]
|
|
||||||
endI := len(a) & (math.MaxInt32 - 7)
|
|
||||||
l = int32(endI) + 4
|
|
||||||
for i := 0; i < endI; i += 8 {
|
|
||||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
|
||||||
l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store this, since we have it.
|
// Store this, since we have it.
|
||||||
nextHash := hashLen(cv, hashLog, tableFastHashLen)
|
nextHash := hashLen(cv, hashLog, tableFastHashLen)
|
||||||
|
@ -418,21 +377,7 @@ encodeLoop:
|
||||||
if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
|
if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
|
||||||
// Consider history as well.
|
// Consider history as well.
|
||||||
var seq seq
|
var seq seq
|
||||||
// length := 4 + e.matchlen(s+6, repIndex+4, src)
|
length := 4 + e.matchlen(s+6, repIndex+4, src)
|
||||||
// length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
|
|
||||||
var length int32
|
|
||||||
{
|
|
||||||
a := src[s+6:]
|
|
||||||
b := src[repIndex+4:]
|
|
||||||
endI := len(a) & (math.MaxInt32 - 7)
|
|
||||||
length = int32(endI) + 4
|
|
||||||
for i := 0; i < endI; i += 8 {
|
|
||||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
|
||||||
length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
seq.matchLen = uint32(length - zstdMinMatch)
|
seq.matchLen = uint32(length - zstdMinMatch)
|
||||||
|
|
||||||
|
@ -522,21 +467,7 @@ encodeLoop:
|
||||||
panic(fmt.Sprintf("t (%d) < 0 ", t))
|
panic(fmt.Sprintf("t (%d) < 0 ", t))
|
||||||
}
|
}
|
||||||
// Extend the 4-byte match as long as possible.
|
// Extend the 4-byte match as long as possible.
|
||||||
//l := e.matchlenNoHist(s+4, t+4, src) + 4
|
l := e.matchlen(s+4, t+4, src) + 4
|
||||||
// l := int32(matchLen(src[s+4:], src[t+4:])) + 4
|
|
||||||
var l int32
|
|
||||||
{
|
|
||||||
a := src[s+4:]
|
|
||||||
b := src[t+4:]
|
|
||||||
endI := len(a) & (math.MaxInt32 - 7)
|
|
||||||
l = int32(endI) + 4
|
|
||||||
for i := 0; i < endI; i += 8 {
|
|
||||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
|
||||||
l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extend backwards
|
// Extend backwards
|
||||||
tMin := s - e.maxMatchOff
|
tMin := s - e.maxMatchOff
|
||||||
|
@ -573,21 +504,7 @@ encodeLoop:
|
||||||
if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
|
if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
|
||||||
// We have at least 4 byte match.
|
// We have at least 4 byte match.
|
||||||
// No need to check backwards. We come straight from a match
|
// No need to check backwards. We come straight from a match
|
||||||
//l := 4 + e.matchlenNoHist(s+4, o2+4, src)
|
l := 4 + e.matchlen(s+4, o2+4, src)
|
||||||
// l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
|
|
||||||
var l int32
|
|
||||||
{
|
|
||||||
a := src[s+4:]
|
|
||||||
b := src[o2+4:]
|
|
||||||
endI := len(a) & (math.MaxInt32 - 7)
|
|
||||||
l = int32(endI) + 4
|
|
||||||
for i := 0; i < endI; i += 8 {
|
|
||||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
|
||||||
l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store this, since we have it.
|
// Store this, since we have it.
|
||||||
nextHash := hashLen(cv, hashLog, tableFastHashLen)
|
nextHash := hashLen(cv, hashLog, tableFastHashLen)
|
||||||
|
@ -731,19 +648,7 @@ encodeLoop:
|
||||||
// Consider history as well.
|
// Consider history as well.
|
||||||
var seq seq
|
var seq seq
|
||||||
var length int32
|
var length int32
|
||||||
// length = 4 + e.matchlen(s+6, repIndex+4, src)
|
length = 4 + e.matchlen(s+6, repIndex+4, src)
|
||||||
{
|
|
||||||
a := src[s+6:]
|
|
||||||
b := src[repIndex+4:]
|
|
||||||
endI := len(a) & (math.MaxInt32 - 7)
|
|
||||||
length = int32(endI) + 4
|
|
||||||
for i := 0; i < endI; i += 8 {
|
|
||||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
|
||||||
length = int32(i+bits.TrailingZeros64(diff)>>3) + 4
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
seq.matchLen = uint32(length - zstdMinMatch)
|
seq.matchLen = uint32(length - zstdMinMatch)
|
||||||
|
|
||||||
|
@ -831,20 +736,7 @@ encodeLoop:
|
||||||
}
|
}
|
||||||
|
|
||||||
// Extend the 4-byte match as long as possible.
|
// Extend the 4-byte match as long as possible.
|
||||||
//l := e.matchlen(s+4, t+4, src) + 4
|
l := e.matchlen(s+4, t+4, src) + 4
|
||||||
var l int32
|
|
||||||
{
|
|
||||||
a := src[s+4:]
|
|
||||||
b := src[t+4:]
|
|
||||||
endI := len(a) & (math.MaxInt32 - 7)
|
|
||||||
l = int32(endI) + 4
|
|
||||||
for i := 0; i < endI; i += 8 {
|
|
||||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
|
||||||
l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extend backwards
|
// Extend backwards
|
||||||
tMin := s - e.maxMatchOff
|
tMin := s - e.maxMatchOff
|
||||||
|
@ -881,20 +773,7 @@ encodeLoop:
|
||||||
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
|
if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) {
|
||||||
// We have at least 4 byte match.
|
// We have at least 4 byte match.
|
||||||
// No need to check backwards. We come straight from a match
|
// No need to check backwards. We come straight from a match
|
||||||
//l := 4 + e.matchlen(s+4, o2+4, src)
|
l := 4 + e.matchlen(s+4, o2+4, src)
|
||||||
var l int32
|
|
||||||
{
|
|
||||||
a := src[s+4:]
|
|
||||||
b := src[o2+4:]
|
|
||||||
endI := len(a) & (math.MaxInt32 - 7)
|
|
||||||
l = int32(endI) + 4
|
|
||||||
for i := 0; i < endI; i += 8 {
|
|
||||||
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
|
|
||||||
l = int32(i+bits.TrailingZeros64(diff)>>3) + 4
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store this, since we have it.
|
// Store this, since we have it.
|
||||||
nextHash := hashLen(cv, hashLog, tableFastHashLen)
|
nextHash := hashLen(cv, hashLog, tableFastHashLen)
|
||||||
|
|
|
@ -379,7 +379,7 @@ func (s decSymbol) final() (int, uint8) {
|
||||||
// This can only be used if no symbols are 0 bits.
|
// This can only be used if no symbols are 0 bits.
|
||||||
// At least tablelog bits must be available in the bit reader.
|
// At least tablelog bits must be available in the bit reader.
|
||||||
func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
|
func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
|
||||||
lowBits := uint16(br.getBitsFast(s.state.nbBits()))
|
lowBits := br.get16BitsFast(s.state.nbBits())
|
||||||
s.state = s.dt[s.state.newState()+lowBits]
|
s.state = s.dt[s.state.newState()+lowBits]
|
||||||
return s.state.baseline(), s.state.addBits()
|
return s.state.baseline(), s.state.addBits()
|
||||||
}
|
}
|
||||||
|
|
|
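getBitsFast is split into get16BitsFast and get32BitsFast, making the width of the unchecked read explicit at each call site (nextFast needs at most a table-log's worth of bits; the sequence decoder below reads up to 32). A rough sketch of such an unchecked read from a 64-bit bit buffer; this is illustrative only and not the vendored bitReader, whose fill and overread handling is omitted:

package main

import "fmt"

// bitBuf is a toy bit reader: value holds buffered bits starting at the most
// significant end, bitsRead counts how many have been consumed.
type bitBuf struct {
	value    uint64
	bitsRead uint8
}

// get16BitsFast returns the next n bits (n <= 16) without checking that enough
// bits are buffered; callers must guarantee that, which is what makes it "fast".
func (b *bitBuf) get16BitsFast(n uint8) uint16 {
	v := uint16(b.value << (b.bitsRead & 63) >> (64 - n))
	b.bitsRead += n
	return v
}

func main() {
	b := bitBuf{value: 0xABCD_0000_0000_0000}
	fmt.Printf("%#x %#x\n", b.get16BitsFast(8), b.get16BitsFast(4)) // 0xab 0xc
}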
@ -62,9 +62,8 @@ func (s symbolTransform) String() string {
|
||||||
// To indicate that you have populated the histogram call HistogramFinished
|
// To indicate that you have populated the histogram call HistogramFinished
|
||||||
// with the value of the highest populated symbol, as well as the number of entries
|
// with the value of the highest populated symbol, as well as the number of entries
|
||||||
// in the most populated entry. These are accepted at face value.
|
// in the most populated entry. These are accepted at face value.
|
||||||
// The returned slice will always be length 256.
|
func (s *fseEncoder) Histogram() *[256]uint32 {
|
||||||
func (s *fseEncoder) Histogram() []uint32 {
|
return &s.count
|
||||||
return s.count[:]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HistogramFinished can be called to indicate that the histogram has been populated.
|
// HistogramFinished can be called to indicate that the histogram has been populated.
|
||||||
|
|
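Returning *[256]uint32 instead of a slice lets callers index the histogram directly with a symbol code, and a uint8 index into a [256] array needs no bounds check, which is presumably why the genCodes hunk above drops its Histogram()[:256] re-slices. A minimal sketch of the pattern with an illustrative type:

package main

import "fmt"

// codeHistogram plays the role of fseEncoder.count.
type codeHistogram struct {
	count [256]uint32
}

// Histogram hands back a pointer to the fixed-size array; a uint8 index into
// a [256] array is provably in range, so no bounds check is emitted.
func (h *codeHistogram) Histogram() *[256]uint32 {
	return &h.count
}

func main() {
	var h codeHistogram
	hist := h.Histogram()
	for _, code := range []uint8{3, 3, 250} {
		hist[code]++
	}
	fmt.Println(hist[3], hist[250]) // 2 1
}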
189
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
generated
vendored
Normal file
189
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
generated
vendored
Normal file
|
@ -0,0 +1,189 @@
|
||||||
|
// +build gc,!purego
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// Register allocation.
|
||||||
|
#define digest R1
|
||||||
|
#define h R2 // Return value.
|
||||||
|
#define p R3 // Input pointer.
|
||||||
|
#define len R4
|
||||||
|
#define nblocks R5 // len / 32.
|
||||||
|
#define prime1 R7
|
||||||
|
#define prime2 R8
|
||||||
|
#define prime3 R9
|
||||||
|
#define prime4 R10
|
||||||
|
#define prime5 R11
|
||||||
|
#define v1 R12
|
||||||
|
#define v2 R13
|
||||||
|
#define v3 R14
|
||||||
|
#define v4 R15
|
||||||
|
#define x1 R20
|
||||||
|
#define x2 R21
|
||||||
|
#define x3 R22
|
||||||
|
#define x4 R23
|
||||||
|
|
||||||
|
#define round(acc, x) \
|
||||||
|
MADD prime2, acc, x, acc \
|
||||||
|
ROR $64-31, acc \
|
||||||
|
MUL prime1, acc \
|
||||||
|
|
||||||
|
// x = round(0, x).
|
||||||
|
#define round0(x) \
|
||||||
|
MUL prime2, x \
|
||||||
|
ROR $64-31, x \
|
||||||
|
MUL prime1, x \
|
||||||
|
|
||||||
|
#define mergeRound(x) \
|
||||||
|
round0(x) \
|
||||||
|
EOR x, h \
|
||||||
|
MADD h, prime4, prime1, h \
|
||||||
|
|
||||||
|
// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
|
||||||
|
#define blocksLoop() \
|
||||||
|
LSR $5, len, nblocks \
|
||||||
|
PCALIGN $16 \
|
||||||
|
loop: \
|
||||||
|
LDP.P 32(p), (x1, x2) \
|
||||||
|
round(v1, x1) \
|
||||||
|
LDP -16(p), (x3, x4) \
|
||||||
|
round(v2, x2) \
|
||||||
|
SUB $1, nblocks \
|
||||||
|
round(v3, x3) \
|
||||||
|
round(v4, x4) \
|
||||||
|
CBNZ nblocks, loop \
|
||||||
|
|
||||||
|
|
||||||
|
// The primes are repeated here to ensure that they're stored
|
||||||
|
// in a contiguous array, so we can load them with LDP.
|
||||||
|
DATA primes<> +0(SB)/8, $11400714785074694791
|
||||||
|
DATA primes<> +8(SB)/8, $14029467366897019727
|
||||||
|
DATA primes<>+16(SB)/8, $1609587929392839161
|
||||||
|
DATA primes<>+24(SB)/8, $9650029242287828579
|
||||||
|
DATA primes<>+32(SB)/8, $2870177450012600261
|
||||||
|
GLOBL primes<>(SB), NOPTR+RODATA, $40
|
||||||
|
|
||||||
|
|
||||||
|
// func Sum64(b []byte) uint64
|
||||||
|
TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
|
||||||
|
LDP b_base+0(FP), (p, len)
|
||||||
|
|
||||||
|
LDP primes<> +0(SB), (prime1, prime2)
|
||||||
|
LDP primes<>+16(SB), (prime3, prime4)
|
||||||
|
MOVD primes<>+32(SB), prime5
|
||||||
|
|
||||||
|
CMP $32, len
|
||||||
|
CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
|
||||||
|
BLO afterLoop
|
||||||
|
|
||||||
|
ADD prime1, prime2, v1
|
||||||
|
MOVD prime2, v2
|
||||||
|
MOVD $0, v3
|
||||||
|
NEG prime1, v4
|
||||||
|
|
||||||
|
blocksLoop()
|
||||||
|
|
||||||
|
ROR $64-1, v1, x1
|
||||||
|
ROR $64-7, v2, x2
|
||||||
|
ADD x1, x2
|
||||||
|
ROR $64-12, v3, x3
|
||||||
|
ROR $64-18, v4, x4
|
||||||
|
ADD x3, x4
|
||||||
|
ADD x2, x4, h
|
||||||
|
|
||||||
|
mergeRound(v1)
|
||||||
|
mergeRound(v2)
|
||||||
|
mergeRound(v3)
|
||||||
|
mergeRound(v4)
|
||||||
|
|
||||||
|
afterLoop:
|
||||||
|
ADD len, h
|
||||||
|
|
||||||
|
TBZ $4, len, try8
|
||||||
|
LDP.P 16(p), (x1, x2)
|
||||||
|
|
||||||
|
round0(x1)
|
||||||
|
ROR $64-27, h
|
||||||
|
EOR x1 @> 64-27, h, h
|
||||||
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
|
round0(x2)
|
||||||
|
ROR $64-27, h
|
||||||
|
EOR x2 @> 64-27, h
|
||||||
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
|
try8:
|
||||||
|
TBZ $3, len, try4
|
||||||
|
MOVD.P 8(p), x1
|
||||||
|
|
||||||
|
round0(x1)
|
||||||
|
ROR $64-27, h
|
||||||
|
EOR x1 @> 64-27, h
|
||||||
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
|
try4:
|
||||||
|
TBZ $2, len, try2
|
||||||
|
MOVWU.P 4(p), x2
|
||||||
|
|
||||||
|
MUL prime1, x2
|
||||||
|
ROR $64-23, h
|
||||||
|
EOR x2 @> 64-23, h
|
||||||
|
MADD h, prime3, prime2, h
|
||||||
|
|
||||||
|
try2:
|
||||||
|
TBZ $1, len, try1
|
||||||
|
MOVHU.P 2(p), x3
|
||||||
|
AND $255, x3, x1
|
||||||
|
LSR $8, x3, x2
|
||||||
|
|
||||||
|
MUL prime5, x1
|
||||||
|
ROR $64-11, h
|
||||||
|
EOR x1 @> 64-11, h
|
||||||
|
MUL prime1, h
|
||||||
|
|
||||||
|
MUL prime5, x2
|
||||||
|
ROR $64-11, h
|
||||||
|
EOR x2 @> 64-11, h
|
||||||
|
MUL prime1, h
|
||||||
|
|
||||||
|
try1:
|
||||||
|
TBZ $0, len, end
|
||||||
|
MOVBU (p), x4
|
||||||
|
|
||||||
|
MUL prime5, x4
|
||||||
|
ROR $64-11, h
|
||||||
|
EOR x4 @> 64-11, h
|
||||||
|
MUL prime1, h
|
||||||
|
|
||||||
|
end:
|
||||||
|
EOR h >> 33, h
|
||||||
|
MUL prime2, h
|
||||||
|
EOR h >> 29, h
|
||||||
|
MUL prime3, h
|
||||||
|
EOR h >> 32, h
|
||||||
|
|
||||||
|
MOVD h, ret+24(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
|
||||||
|
// func writeBlocks(d *Digest, b []byte) int
|
||||||
|
//
|
||||||
|
// Assumes len(b) >= 32.
|
||||||
|
TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
|
||||||
|
LDP primes<>(SB), (prime1, prime2)
|
||||||
|
|
||||||
|
// Load state. Assume v[1-4] are stored contiguously.
|
||||||
|
MOVD d+0(FP), digest
|
||||||
|
LDP 0(digest), (v1, v2)
|
||||||
|
LDP 16(digest), (v3, v4)
|
||||||
|
|
||||||
|
LDP b_base+8(FP), (p, len)
|
||||||
|
|
||||||
|
blocksLoop()
|
||||||
|
|
||||||
|
// Store updated state.
|
||||||
|
STP (v1, v2), 0(digest)
|
||||||
|
STP (v3, v4), 16(digest)
|
||||||
|
|
||||||
|
BIC $31, len
|
||||||
|
MOVD len, ret+32(FP)
|
||||||
|
RET
|
|
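The new arm64 assembly above follows the standard xxHash64 structure: the round macro (MADD prime2, acc, x, acc; ROR $64-31, acc; MUL prime1, acc) is the usual acc = rotl64(acc + x*prime2, 31) * prime1 step, with the five primes loaded pairwise via LDP from the primes<> data section. A scalar Go sketch of that round, using the same prime constants that appear in the DATA directives; the helper is illustrative, not part of the vendored package:

package main

import (
	"fmt"
	"math/bits"
)

const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
)

// round mirrors the assembly round(acc, x) macro:
// acc += x*prime2; acc = rotl64(acc, 31); acc *= prime1.
func round(acc, x uint64) uint64 {
	acc += x * prime2
	acc = bits.RotateLeft64(acc, 31)
	return acc * prime1
}

func main() {
	// v1 starts as prime1+prime2, exactly as in the ADD prime1, prime2, v1 setup.
	v1 := prime1 + prime2
	fmt.Printf("%#x\n", round(v1, 0x0123456789abcdef))
}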
@ -1,5 +1,8 @@
|
||||||
//go:build !appengine && gc && !purego
|
//go:build (amd64 || arm64) && !appengine && gc && !purego
|
||||||
// +build !appengine,gc,!purego
|
// +build amd64 arm64
|
||||||
|
// +build !appengine
|
||||||
|
// +build gc
|
||||||
|
// +build !purego
|
||||||
|
|
||||||
package xxhash
|
package xxhash
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
//go:build !amd64 || appengine || !gc || purego
|
//go:build (!amd64 && !arm64) || appengine || !gc || purego
|
||||||
// +build !amd64 appengine !gc purego
|
// +build !amd64,!arm64 appengine !gc purego
|
||||||
|
|
||||||
package xxhash
|
package xxhash
|
||||||
|
|
||||||
|
|
|
@ -278,7 +278,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
|
||||||
mlState = mlTable[mlState.newState()&maxTableMask]
|
mlState = mlTable[mlState.newState()&maxTableMask]
|
||||||
ofState = ofTable[ofState.newState()&maxTableMask]
|
ofState = ofTable[ofState.newState()&maxTableMask]
|
||||||
} else {
|
} else {
|
||||||
bits := br.getBitsFast(nBits)
|
bits := br.get32BitsFast(nBits)
|
||||||
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
|
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
|
||||||
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
|
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
|
||||||
|
|
||||||
|
@ -326,7 +326,7 @@ func (s *sequenceDecs) updateAlt(br *bitReader) {
|
||||||
s.offsets.state.state = s.offsets.state.dt[c.newState()]
|
s.offsets.state.state = s.offsets.state.dt[c.newState()]
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
bits := br.getBitsFast(nBits)
|
bits := br.get32BitsFast(nBits)
|
||||||
lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
|
lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
|
||||||
s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
|
s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ github.com/Microsoft/go-winio/backuptar
|
||||||
github.com/Microsoft/go-winio/pkg/guid
|
github.com/Microsoft/go-winio/pkg/guid
|
||||||
github.com/Microsoft/go-winio/pkg/security
|
github.com/Microsoft/go-winio/pkg/security
|
||||||
github.com/Microsoft/go-winio/vhd
|
github.com/Microsoft/go-winio/vhd
|
||||||
# github.com/Microsoft/hcsshim v0.9.1
|
# github.com/Microsoft/hcsshim v0.9.2
|
||||||
github.com/Microsoft/hcsshim
|
github.com/Microsoft/hcsshim
|
||||||
github.com/Microsoft/hcsshim/computestorage
|
github.com/Microsoft/hcsshim/computestorage
|
||||||
github.com/Microsoft/hcsshim/internal/cow
|
github.com/Microsoft/hcsshim/internal/cow
|
||||||
|
@ -109,7 +109,7 @@ github.com/containers/buildah/pkg/rusage
|
||||||
github.com/containers/buildah/pkg/sshagent
|
github.com/containers/buildah/pkg/sshagent
|
||||||
github.com/containers/buildah/pkg/util
|
github.com/containers/buildah/pkg/util
|
||||||
github.com/containers/buildah/util
|
github.com/containers/buildah/util
|
||||||
# github.com/containers/common v0.46.1-0.20220117145719-da777f8b15b1
|
# github.com/containers/common v0.46.1-0.20220119203335-0e7aca71d00a
|
||||||
## explicit
|
## explicit
|
||||||
github.com/containers/common/libimage
|
github.com/containers/common/libimage
|
||||||
github.com/containers/common/libimage/manifests
|
github.com/containers/common/libimage/manifests
|
||||||
|
@ -230,7 +230,7 @@ github.com/containers/psgo/internal/dev
|
||||||
github.com/containers/psgo/internal/host
|
github.com/containers/psgo/internal/host
|
||||||
github.com/containers/psgo/internal/proc
|
github.com/containers/psgo/internal/proc
|
||||||
github.com/containers/psgo/internal/process
|
github.com/containers/psgo/internal/process
|
||||||
# github.com/containers/storage v1.37.1-0.20211213220314-73a749e4fec5
|
# github.com/containers/storage v1.38.0
|
||||||
## explicit
|
## explicit
|
||||||
github.com/containers/storage
|
github.com/containers/storage
|
||||||
github.com/containers/storage/drivers
|
github.com/containers/storage/drivers
|
||||||
|
@ -455,7 +455,7 @@ github.com/jinzhu/copier
|
||||||
# github.com/json-iterator/go v1.1.12
|
# github.com/json-iterator/go v1.1.12
|
||||||
## explicit
|
## explicit
|
||||||
github.com/json-iterator/go
|
github.com/json-iterator/go
|
||||||
# github.com/klauspost/compress v1.13.6
|
# github.com/klauspost/compress v1.14.1
|
||||||
github.com/klauspost/compress
|
github.com/klauspost/compress
|
||||||
github.com/klauspost/compress/flate
|
github.com/klauspost/compress/flate
|
||||||
github.com/klauspost/compress/fse
|
github.com/klauspost/compress/fse
|
||||||
|
|