mirror of https://github.com/containers/podman.git

Vendor buildah 1.14.2
Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>

This commit is contained in:
parent 797da2a57b
commit 0015c376a3

go.mod | 4
@@ -8,12 +8,12 @@ require (
 	github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b
 	github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
 	github.com/containernetworking/plugins v0.8.5
-	github.com/containers/buildah v1.14.1
+	github.com/containers/buildah v1.14.2
 	github.com/containers/common v0.4.2
 	github.com/containers/conmon v2.0.10+incompatible
 	github.com/containers/image/v5 v5.2.1
 	github.com/containers/psgo v1.4.0
-	github.com/containers/storage v1.16.0
+	github.com/containers/storage v1.16.1
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f
 	github.com/cri-o/ocicni v0.1.1-0.20190920040751-deac903fd99b
 	github.com/cyphar/filepath-securejoin v0.2.2

go.sum | 6
@@ -80,6 +80,8 @@ github.com/containers/buildah v1.14.1-0.20200227103754-f0c3fd7c3d34 h1:SaK9ADT5J
 github.com/containers/buildah v1.14.1-0.20200227103754-f0c3fd7c3d34/go.mod h1:sdMVVcCTvvAj9o9dk/j6EnNJJadjxqjcI4Yy9WoWxSg=
 github.com/containers/buildah v1.14.1 h1:H0uubyWJN98xRFmwzJeJDb5NIypx+sPcJu5kCzO6hGs=
 github.com/containers/buildah v1.14.1/go.mod h1:sdMVVcCTvvAj9o9dk/j6EnNJJadjxqjcI4Yy9WoWxSg=
+github.com/containers/buildah v1.14.2 h1:rzrOVqWL3C3xA3MBmkDgWntRsBgkI3FGKODluBO+svU=
+github.com/containers/buildah v1.14.2/go.mod h1:HZ6MuZfHYq6ZMeoV9o3k9GwoCk1p3RWZOYbBXZtR7wE=
 github.com/containers/common v0.0.7 h1:eKYZLKfJ2d/RNDgecLDFv45cHb4imYzIcrQHx1Y029M=
 github.com/containers/common v0.0.7/go.mod h1:lhWV3MLhO1+KGE2x6v9+K38MxpjXGso+edmpkFnCOqI=
 github.com/containers/common v0.3.0 h1:9ysL/OfPcMls1Ac3jzFA4XZJVSD/JG7Dst3uQSwQtwA=
@@ -100,6 +102,8 @@ github.com/containers/psgo v1.4.0/go.mod h1:ENXXLQ5E1At4K0EUsGogXBJi/C28gwqkONWe
 github.com/containers/storage v1.15.8/go.mod h1:zhvjIIl/fR6wt/lgqQAC+xanHQ+8gUQ0GBVeXYN81qI=
 github.com/containers/storage v1.16.0 h1:sD+s7BmiNBh61CuHN3j8PXGCwMtV9zPVJETAlshIf3w=
 github.com/containers/storage v1.16.0/go.mod h1:nqN09JSi1/RSI1UAUwDYXPRiGSlq5FPbNkN/xb0TfG0=
+github.com/containers/storage v1.16.1 h1:gVLVqbqaoyopLJbcQ9PQdsnm8SzVy6Vw24fofwMgkE0=
+github.com/containers/storage v1.16.1/go.mod h1:toFp72SLn/iyJ6YbrnrZ0bW63aH2Qw3dA8JVwL4ADPo=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5 h1:DpHb9vJrZQEFMcVLFKAAGMUVX0XoRC0ptCthinRYm38=
@@ -270,6 +274,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y=
 github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0=
+github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
 github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=

@@ -2,6 +2,15 @@
 
 # Changelog
 
+## v1.14.2 (2020-03-03)
+    Add Buildah pull request template
+    Bump to containers/storage v1.16.1
+    run_linux: fix tight loop if file is not pollable
+    Bump github.com/opencontainers/selinux from 1.3.2 to 1.3.3
+    Bump github.com/containers/common from 0.4.1 to 0.4.2
+    Bump back to v1.15.0-dev
+    Add Containerfile to build a versioned stable image on quay.io
+
 ## v1.14.1 (2020-02-27)
     Search for local runtime per values in containers.conf
     Set correct ownership on working directory

@@ -27,7 +27,7 @@ const (
 	Package = "buildah"
 	// Version for the Package.  Bump version in contrib/rpm/buildah.spec
 	// too.
-	Version = "1.14.1"
+	Version = "1.14.2"
 	// The value we use to identify what type of information, currently a
 	// serialized Builder structure, we are using as per-container state.
 	// This should only be changed when we make incompatible changes to

@@ -1,3 +1,12 @@
+- Changelog for v1.14.2 (2020-03-03)
+  * Add Buildah pull request template
+  * Bump to containers/storage v1.16.1
+  * run_linux: fix tight loop if file is not pollable
+  * Bump github.com/opencontainers/selinux from 1.3.2 to 1.3.3
+  * Bump github.com/containers/common from 0.4.1 to 0.4.2
+  * Bump back to v1.15.0-dev
+  * Add Containerfile to build a versioned stable image on quay.io
+
 - Changelog for v1.14.1 (2020-02-27)
   * Search for local runtime per values in containers.conf
   * Set correct ownership on working directory

@@ -4,9 +4,9 @@ go 1.12
 
 require (
 	github.com/containernetworking/cni v0.7.2-0.20190904153231-83439463f784
-	github.com/containers/common v0.4.1
+	github.com/containers/common v0.4.2
 	github.com/containers/image/v5 v5.2.1
-	github.com/containers/storage v1.16.0
+	github.com/containers/storage v1.16.1
 	github.com/cyphar/filepath-securejoin v0.2.2
 	github.com/docker/distribution v2.7.1+incompatible
 	github.com/docker/go-metrics v0.0.1 // indirect
@@ -25,7 +25,7 @@ require (
 	github.com/opencontainers/runc v1.0.0-rc9
 	github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7
 	github.com/opencontainers/runtime-tools v0.9.0
-	github.com/opencontainers/selinux v1.3.2
+	github.com/opencontainers/selinux v1.3.3
 	github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316
 	github.com/openshift/imagebuilder v1.1.1
 	github.com/pkg/errors v0.9.1

@@ -101,6 +101,8 @@ github.com/containers/common v0.4.0 h1:LpX2J19cZKSpn4PBtbLX/tTk3JzTtaqRWbaEoX5YG
 github.com/containers/common v0.4.0/go.mod h1:AiPCv0ZcBOVshnup/X6MuaqkySZQZ3iBWfInjJFIl40=
 github.com/containers/common v0.4.1 h1:Uu7f2ZDM/5xsqOkZwIEVKSjUI3YxKjvNIY5x57kjaKo=
 github.com/containers/common v0.4.1/go.mod h1:m62kenckrWi5rZx32kaLje2Og0hpf6NsaTBn6+b+Oys=
+github.com/containers/common v0.4.2 h1:O5d1gj/xdpQdZi0MEivRQ/7AeRaVeHdbSP/bvShw458=
+github.com/containers/common v0.4.2/go.mod h1:m62kenckrWi5rZx32kaLje2Og0hpf6NsaTBn6+b+Oys=
 github.com/containers/conmon v2.0.10+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
 github.com/containers/image/v4 v4.0.1 h1:idNGHChj0Pyv3vLrxul2oSVMZLeFqpoq3CjLeVgapSQ=
 github.com/containers/image/v4 v4.0.1/go.mod h1:0ASJH1YgJiX/eqFZObqepgsvIA4XjCgpyfwn9pDGafA=
@@ -145,6 +147,8 @@ github.com/containers/storage v1.15.8 h1:ef7OfUMTpyq0PIVAhV7qfufEI92gAldk25nItri
 github.com/containers/storage v1.15.8/go.mod h1:zhvjIIl/fR6wt/lgqQAC+xanHQ+8gUQ0GBVeXYN81qI=
 github.com/containers/storage v1.16.0 h1:sD+s7BmiNBh61CuHN3j8PXGCwMtV9zPVJETAlshIf3w=
 github.com/containers/storage v1.16.0/go.mod h1:nqN09JSi1/RSI1UAUwDYXPRiGSlq5FPbNkN/xb0TfG0=
+github.com/containers/storage v1.16.1 h1:gVLVqbqaoyopLJbcQ9PQdsnm8SzVy6Vw24fofwMgkE0=
+github.com/containers/storage v1.16.1/go.mod h1:toFp72SLn/iyJ6YbrnrZ0bW63aH2Qw3dA8JVwL4ADPo=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
@@ -363,6 +367,8 @@ github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82
 github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y=
 github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0=
+github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@@ -487,6 +493,8 @@ github.com/opencontainers/selinux v1.3.1 h1:dn2Rc3wTEvTB6iVqoFrKKeMb0uZ38ZheeyMu
 github.com/opencontainers/selinux v1.3.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
 github.com/opencontainers/selinux v1.3.2 h1:DR4lL9SYVjgcTZKEZIncvDU06fKSc/eygjmNGOA3E1s=
 github.com/opencontainers/selinux v1.3.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
+github.com/opencontainers/selinux v1.3.3 h1:RX0wAeqtvVSYQcr017X3pFXPkLEtB6V4NjRD7gVQgg4=
+github.com/opencontainers/selinux v1.3.3/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
 github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316 h1:enQG2QUGwug4fR1yM6hL0Fjzx6Km/exZY6RbSPwMu3o=
 github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316/go.mod h1:dv+J0b/HWai0QnMVb37/H0v36klkLBi2TNpPeWDxX10=
 github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible h1:s55wx8JIG/CKnewev892HifTBrtKzMdvgB3rm4rxC2s=

@@ -1203,6 +1203,13 @@ func runCopyStdio(stdio *sync.WaitGroup, copyPipes bool, stdioPipe [][]int, copy
 	runCopyStdioPassData(copyPipes, stdioPipe, finishCopy, relayMap, relayBuffer, readDesc, writeDesc)
 }
 
+func canRetry(err error) bool {
+	if errno, isErrno := err.(syscall.Errno); isErrno {
+		return errno == syscall.EINTR || errno == syscall.EAGAIN
+	}
+	return false
+}
+
 func runCopyStdioPassData(copyPipes bool, stdioPipe [][]int, finishCopy []int, relayMap map[int]int, relayBuffer map[int]*bytes.Buffer, readDesc map[int]string, writeDesc map[int]string) {
 	closeStdin := false
 
@@ -1250,7 +1257,7 @@ func runCopyStdioPassData(copyPipes bool, stdioPipe [][]int, finishCopy []int, r
 				// If it's zero-length on our stdin and we're
 				// using pipes, it's an EOF, so close the stdin
 				// pipe's writing end.
-				if n == 0 && copyPipes && int(pollFd.Fd) == unix.Stdin {
+				if n == 0 && !canRetry(err) && int(pollFd.Fd) == unix.Stdin {
 					removes[int(pollFd.Fd)] = struct{}{}
 				} else if n > 0 {
 					// Buffer the data in case we get blocked on where they need to go.

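The canRetry helper added above classifies read errors so that a zero-byte read accompanied by EINTR or EAGAIN is no longer treated as end-of-input on stdin; that is the "fix tight loop if file is not pollable" change named in the changelog. A minimal, runnable illustration of the same classification (the demo program below is mine, not Buildah code):

package main

import (
	"fmt"
	"syscall"
)

// canRetry mirrors the vendored helper above: only EINTR and EAGAIN are
// transient poll/read conditions; any other errno (or non-errno error) is not.
func canRetry(err error) bool {
	if errno, ok := err.(syscall.Errno); ok {
		return errno == syscall.EINTR || errno == syscall.EAGAIN
	}
	return false
}

func main() {
	// EINTR and EAGAIN are worth retrying; EBADF is a real failure.
	for _, err := range []error{syscall.EINTR, syscall.EAGAIN, syscall.EBADF} {
		fmt.Printf("%-35v retryable=%v\n", err, canRetry(err))
	}
}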
@@ -19,7 +19,7 @@ env:
     ####
     # GCE project where images live
     IMAGE_PROJECT: "libpod-218412"
-    _BUILT_IMAGE_SUFFIX: "libpod-6228273469587456"
+    _BUILT_IMAGE_SUFFIX: "libpod-5874660151656448"
     FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}"
     PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-30-${_BUILT_IMAGE_SUFFIX}"
     UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}"
@@ -50,32 +50,51 @@ gce_instance:
     disk: 200
     image_name: "${FEDORA_CACHE_IMAGE_NAME}"
 
+
 testing_task:
+
     depends_on:
         - lint
+
+    # Not all $TEST_DRIVER combinations are valid for all OS types.
+    # Note: Nested-variable resolution happens at runtime, not eval. time.
+    # Use verbose logic for ease of reading/maintaining.
+    only_if: >-
+        ( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "vfs" ) ||
+        ( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "aufs" ) ||
+        ( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "overlay" ) ||
+        ( $VM_IMAGE =~ '.*UBUNTU.*' && $TEST_DRIVER == "fuse-overlay" ) ||
+        ( $VM_IMAGE =~ '.*FEDORA.*' && $TEST_DRIVER != "aufs" )
+
+    allow_failures: $TEST_DRIVER == "devicemapper"
+
+    env:
+        matrix:
+            VM_IMAGE: "${FEDORA_CACHE_IMAGE_NAME}"
+            VM_IMAGE: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
+            VM_IMAGE: "${UBUNTU_CACHE_IMAGE_NAME}"
+            # VM_IMAGE: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"  # No fuse3 support
+        matrix:  # See ./contrib/cirrus/build_and_test.sh
+            TEST_DRIVER: "vfs"
+            TEST_DRIVER: "aufs"
+            TEST_DRIVER: "overlay"
+            TEST_DRIVER: "fuse-overlay"
+            TEST_DRIVER: "devicemapper"
+            TEST_DRIVER: "fuse-overlay-whiteout"
+
     gce_instance:  # Only need to specify differences from defaults (above)
-        matrix:  # Duplicate this task for each matrix product.
-            image_name: "${FEDORA_CACHE_IMAGE_NAME}"
-            image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
-            image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
-            # image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"  # No fuse3 support
+        image_name: "${VM_IMAGE}"
 
     # Separate scripts for separate outputs, makes debugging easier.
     setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
     build_and_test_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/build_and_test.sh |& ${_TIMESTAMP}'
 
-    # Log collection when job was successful
+    always:
         df_script: '${_DFCMD} || true'
         rh_audit_log_script: '${_RAUDITCMD} || true'
         ubuntu_audit_log_script: '${_UAUDITCMD} || true'
         journal_log_script: '${_JOURNALCMD} || true'
 
-    on_failure:  # Script names must be different from above
-        failure_df_script: '${_DFCMD} || true'
-        failure_rh_audit_log_script: '${_RAUDITCMD} || true'
-        failure_ubuntu_audit_log_script: '${_UAUDITCMD} || true'
-        failure_journal_log_script: '${_JOURNALCMD} || true'
-
 lint_task:
     env:
         CIRRUS_WORKING_DIR: "/go/src/github.com/containers/storage"
@@ -94,7 +113,7 @@ lint_task:
 meta_task:
 
     container:
-        image: "quay.io/libpod/imgts:latest"  # see contrib/imgts
+        image: "quay.io/libpod/imgts:master"
         cpu: 1
         memory: 1
 

@@ -1,62 +0,0 @@
----
-
-sudo: required
-
-# N/B: host go env. not actually used, see .run_ci_tests.sh
-language: go
-go:
-    - master
-
-services:
-    - docker
-
-env:
-    # Ubuntu
-    - GO_VERSION="stable"
-      DISTRO="ubuntu"
-
-    - GO_VERSION="1.12.12"
-      DISTRO="ubuntu"
-
-    # Fedora
-    - GO_VERSION="stable"
-      DISTRO="fedora"
-
-    - GO_VERSION="1.12.12"
-      DISTRO="fedora"
-
-    # CentOS
-    - GO_VERSION="stable"
-      DISTRO="centos"
-
-    - GO_VERSION="1.12.12"
-      DISTRO="centos"
-
-# GO_VERSION="stable" builds successfully, but tests fail on all platforms.
-# Run the tests, but ignore the result (for now)
-matrix:
-    allow_failures:
-        - env: GO_VERSION="stable" DISTRO="ubuntu"
-        - env: GO_VERSION="stable" DISTRO="fedora"
-        - env: GO_VERSION="stable" DISTRO="centos"
-
-before_install:
-    - sudo apt-get -qq update
-    - sudo apt-get -qq install realpath
-
-script:
-    - echo "Travis/host environment:"
-    - export TRAVIS_ENV="-e TRAVIS=$TRAVIS
-                         -e CI=$CI
-                         -e TRAVIS_COMMIT=$TRAVIS_COMMIT
-                         -e TRAVIS_COMMIT_RANGE=$TRAVIS_COMMIT_RANGE
-                         -e TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG
-                         -e TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST
-                         -e TRAVIS_PULL_REQUEST_SHA=$TRAVIS_PULL_REQUEST_SHA
-                         -e TRAVIS_PULL_REQUEST_SLUG=$TRAVIS_PULL_REQUEST_SLUG
-                         -e TRAVIS_BRANCH=$TRAVIS_BRANCH
-                         -e TRAVIS_JOB_ID=$TRAVIS_JOB_ID
-                         -e TRAVIS_BUILD_DIR=$TRAVIS_BUILD_DIR"
-    - env
-    - echo "Running tests in SPC using ./hack/run_ci_tests.sh"
-    - ./hack/run_ci_tests.sh

@@ -1 +1 @@
-1.16.0
+1.16.1

@@ -7,19 +7,19 @@ require (
 	github.com/Microsoft/hcsshim v0.8.7
 	github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect
 	github.com/docker/go-units v0.4.0
-	github.com/klauspost/compress v1.10.0
+	github.com/klauspost/compress v1.10.2
 	github.com/klauspost/cpuid v1.2.1 // indirect
 	github.com/klauspost/pgzip v1.2.1
 	github.com/mattn/go-shellwords v1.0.10
 	github.com/mistifyio/go-zfs v2.1.1+incompatible
 	github.com/opencontainers/go-digest v1.0.0-rc1
 	github.com/opencontainers/runc v1.0.0-rc9
-	github.com/opencontainers/selinux v1.3.1
+	github.com/opencontainers/selinux v1.3.3
 	github.com/pkg/errors v0.9.1
 	github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
 	github.com/sirupsen/logrus v1.4.2
 	github.com/spf13/pflag v1.0.3 // indirect
-	github.com/stretchr/testify v1.4.0
+	github.com/stretchr/testify v1.5.1
 	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
 	github.com/tchap/go-patricia v2.3.0+incompatible
 	github.com/vbatts/tar-split v0.11.1

@@ -81,6 +81,10 @@ github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82
 github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y=
 github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.1 h1:a/QY0o9S6wCi0XhxaMX/QmusicNUqCqFugR6WKPOSoQ=
+github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0=
+github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@@ -121,6 +125,10 @@ github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqG
 github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
 github.com/opencontainers/selinux v1.3.1 h1:dn2Rc3wTEvTB6iVqoFrKKeMb0uZ38ZheeyMu2h5C1TI=
 github.com/opencontainers/selinux v1.3.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
+github.com/opencontainers/selinux v1.3.2 h1:DR4lL9SYVjgcTZKEZIncvDU06fKSc/eygjmNGOA3E1s=
+github.com/opencontainers/selinux v1.3.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
+github.com/opencontainers/selinux v1.3.3 h1:RX0wAeqtvVSYQcr017X3pFXPkLEtB6V4NjRD7gVQgg4=
+github.com/opencontainers/selinux v1.3.3/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -151,6 +159,10 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.0 h1:DMOzIV76tmoDNE9pX6RSN0aDtCYeCg5VueieJaAo1uw=
+github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=

@@ -13,6 +13,10 @@ runroot = "/var/run/containers/storage"
 # Primary Read/Write location of container storage
 graphroot = "/var/lib/containers/storage"
 
+# Storage path for rootless users
+#
+# rootless_storage_path = "$HOME/.local/share/containers/storage"
+
 [storage.options]
 # Storage options to be passed to underlying storage drivers
 
@@ -107,7 +111,7 @@ mountopt = "nodev"
 # Value 0% disables
 # min_free_space = "10%"
 
-# mkfsarg specifies extra mkfs arguments to be used when creating the base.
+# mkfsarg specifies extra mkfs arguments to be used when creating the base
 # device.
 # mkfsarg = ""
 

@@ -139,6 +139,9 @@ type StoreOptions struct {
 	// GraphRoot is the filesystem path under which we will store the
 	// contents of layers, images, and containers.
 	GraphRoot string `json:"root,omitempty"`
+	// RootlessStoragePath is the storage path for rootless users
+	// default $HOME/.local/share/containers/storage
+	RootlessStoragePath string `toml:"rootless_storage_path"`
 	// GraphDriverName is the underlying storage driver that we'll be
 	// using.  It only needs to be specified the first time a Store is
 	// initialized for a given RunRoot and GraphRoot.
@@ -3291,6 +3294,7 @@ type tomlConfig struct {
 		Driver              string            `toml:"driver"`
 		RunRoot             string            `toml:"runroot"`
 		GraphRoot           string            `toml:"graphroot"`
+		RootlessStoragePath string            `toml:"rootless_storage_path"`
 		Options             cfg.OptionsConfig `toml:"options"`
 	} `toml:"storage"`
 }
@@ -3312,6 +3316,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
 		fmt.Printf("Failed to parse %s %v\n", configFile, err.Error())
 		return
 	}
+	if os.Getenv("STORAGE_DRIVER") != "" {
+		config.Storage.Driver = os.Getenv("STORAGE_DRIVER")
+	}
 	if config.Storage.Driver != "" {
 		storeOptions.GraphDriverName = config.Storage.Driver
 	}
@@ -3321,6 +3328,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
 	if config.Storage.GraphRoot != "" {
 		storeOptions.GraphRoot = config.Storage.GraphRoot
 	}
+	if config.Storage.RootlessStoragePath != "" {
+		storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath
+	}
 	for _, s := range config.Storage.Options.AdditionalImageStores {
 		storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
 	}
@@ -3364,11 +3374,8 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
 	} else {
 		storeOptions.GIDMap = append(storeOptions.GIDMap, gidmap...)
 	}
-	if os.Getenv("STORAGE_DRIVER") != "" {
-		storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
-	}
 
-	storeOptions.GraphDriverOptions = cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)
+	storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...)
 
 	if os.Getenv("STORAGE_OPTS") != "" {
 		storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...)

@@ -4,7 +4,9 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"os/user"
 	"path/filepath"
+	"regexp"
 	"strconv"
 	"strings"
 
@@ -146,6 +148,7 @@ func getRootlessStorageOpts(rootlessUID int) (StoreOptions, error) {
 	}
 	opts.RunRoot = rootlessRuntime
 	opts.GraphRoot = filepath.Join(dataDir, "containers", "storage")
+	opts.RootlessStoragePath = opts.GraphRoot
 	if path, err := exec.LookPath("fuse-overlayfs"); err == nil {
 		opts.GraphDriverName = "overlay"
 		opts.GraphDriverOptions = []string{fmt.Sprintf("overlay.mount_program=%s", path)}
@@ -161,6 +164,7 @@ func getTomlStorage(storeOptions *StoreOptions) *tomlConfig {
 	config.Storage.Driver = storeOptions.GraphDriverName
 	config.Storage.RunRoot = storeOptions.RunRoot
 	config.Storage.GraphRoot = storeOptions.GraphRoot
+	config.Storage.RootlessStoragePath = storeOptions.RootlessStoragePath
 	for _, i := range storeOptions.GraphDriverOptions {
 		s := strings.Split(i, "=")
 		if s[0] == "overlay.mount_program" {
@@ -227,6 +231,19 @@ func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
 			if storageOpts.GraphRoot == "" {
 				storageOpts.GraphRoot = defaultRootlessGraphRoot
 			}
+			if storageOpts.RootlessStoragePath != "" {
+				if err = validRootlessStoragePathFormat(storageOpts.RootlessStoragePath); err != nil {
+					return storageOpts, err
+				}
+				rootlessStoragePath := strings.Replace(storageOpts.RootlessStoragePath, "$HOME", homedir.Get(), -1)
+				rootlessStoragePath = strings.Replace(rootlessStoragePath, "$UID", strconv.Itoa(rootlessUID), -1)
+				usr, err := user.LookupId(strconv.Itoa(rootlessUID))
+				if err != nil {
+					return storageOpts, err
+				}
+				rootlessStoragePath = strings.Replace(rootlessStoragePath, "$USER", usr.Username, -1)
+				storageOpts.GraphRoot = rootlessStoragePath
+			}
 		} else {
 			if err := os.MkdirAll(filepath.Dir(storageConf), 0755); err != nil {
 				return storageOpts, errors.Wrapf(err, "cannot make directory %s", filepath.Dir(storageConf))
@@ -248,3 +265,21 @@ func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
 	}
 	return storageOpts, nil
 }
+
+// validRootlessStoragePathFormat checks if the environments contained in the path are accepted
+func validRootlessStoragePathFormat(path string) error {
+	if !strings.Contains(path, "$") {
+		return nil
+	}
+
+	splitPaths := strings.SplitAfter(path, "$")
+	validEnv := regexp.MustCompile(`^(HOME|USER|UID)([^a-zA-Z]|$)`).MatchString
+	if len(splitPaths) > 1 {
+		for _, p := range splitPaths[1:] {
+			if !validEnv(p) {
+				return errors.Errorf("Unrecognized environment variable")
+			}
+		}
+	}
+	return nil
+}

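The rootless_storage_path plumbing above accepts only the $HOME, $UID and $USER variables and expands them before the value replaces the default rootless graph root. Below is a self-contained sketch of that expansion and validation; the package layout, helper names, and the example uid/home values are illustrative, not the containers/storage API.

package main

import (
	"fmt"
	"os/user"
	"regexp"
	"strconv"
	"strings"
)

// validStoragePathVars rejects any $VARIABLE other than $HOME, $USER or $UID,
// the same rule validRootlessStoragePathFormat enforces in the diff above.
func validStoragePathVars(path string) error {
	if !strings.Contains(path, "$") {
		return nil
	}
	valid := regexp.MustCompile(`^(HOME|USER|UID)([^a-zA-Z]|$)`).MatchString
	for _, p := range strings.SplitAfter(path, "$")[1:] {
		if !valid(p) {
			return fmt.Errorf("unrecognized environment variable in %q", path)
		}
	}
	return nil
}

// expandStoragePath substitutes $HOME, $UID and $USER for the given user,
// roughly as DefaultStoreOptions does before using the value as GraphRoot.
func expandStoragePath(path string, uid int, home string) (string, error) {
	if err := validStoragePathVars(path); err != nil {
		return "", err
	}
	out := strings.Replace(path, "$HOME", home, -1)
	out = strings.Replace(out, "$UID", strconv.Itoa(uid), -1)
	if strings.Contains(out, "$USER") {
		// Only resolve the user name when it is actually referenced.
		u, err := user.LookupId(strconv.Itoa(uid))
		if err != nil {
			return "", err
		}
		out = strings.Replace(out, "$USER", u.Username, -1)
	}
	return out, nil
}

func main() {
	// Example value taken from the storage.conf comment added above;
	// the uid and home directory are made up for the demo.
	p, err := expandStoragePath("$HOME/.local/share/containers/storage", 1000, "/home/example")
	fmt.Println(p, err)
}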
@@ -48,6 +48,8 @@ const (
 	maxHashOffset       = 1 << 24
 
 	skipNever = math.MaxInt32
+
+	debugDeflate = false
 )
 
 type compressionLevel struct {
@@ -59,15 +61,13 @@ type compressionLevel struct {
 // See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
 var levels = []compressionLevel{
 	{}, // 0
-	// Level 1-4 uses specialized algorithm - values not used
+	// Level 1-6 uses specialized algorithm - values not used
 	{0, 0, 0, 0, 0, 1},
 	{0, 0, 0, 0, 0, 2},
 	{0, 0, 0, 0, 0, 3},
 	{0, 0, 0, 0, 0, 4},
-	// For levels 5-6 we don't bother trying with lazy matches.
-	// Lazy matching is at least 30% slower, with 1.5% increase.
-	{6, 0, 12, 8, 12, 5},
-	{8, 0, 24, 16, 16, 6},
+	{0, 0, 0, 0, 0, 5},
+	{0, 0, 0, 0, 0, 6},
 	// Levels 7-9 use increasingly more lazy matching
 	// and increasingly stringent conditions for "good enough".
 	{8, 8, 24, 16, skipNever, 7},
@@ -203,9 +203,8 @@ func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
 // This is much faster than doing a full encode.
 // Should only be used after a start/reset.
 func (d *compressor) fillWindow(b []byte) {
-	// Do not fill window if we are in store-only mode,
-	// use constant or Snappy compression.
-	if d.level == 0 {
+	// Do not fill window if we are in store-only or huffman mode.
+	if d.level <= 0 {
 		return
 	}
 	if d.fast != nil {
@@ -368,7 +367,7 @@ func (d *compressor) deflateLazy() {
 	// Sanity enables additional runtime tests.
 	// It's intended to be used during development
 	// to supplement the currently ad-hoc unit tests.
-	const sanity = false
+	const sanity = debugDeflate
 
 	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
 		return
@@ -667,6 +666,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
 	default:
 		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
 	}
+	d.level = level
 	return nil
 }
 
@@ -720,6 +720,7 @@ func (d *compressor) close() error {
 		return d.w.err
 	}
 	d.w.flush()
+	d.w.reset(nil)
 	return d.w.err
 }
 
@@ -750,8 +751,7 @@ func NewWriter(w io.Writer, level int) (*Writer, error) {
 // can only be decompressed by a Reader initialized with the
 // same dictionary.
 func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
-	dw := &dictWriter{w}
-	zw, err := NewWriter(dw, level)
+	zw, err := NewWriter(w, level)
 	if err != nil {
 		return nil, err
 	}
@@ -760,14 +760,6 @@ func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
 	return zw, err
 }
 
-type dictWriter struct {
-	w io.Writer
-}
-
-func (w *dictWriter) Write(b []byte) (n int, err error) {
-	return w.w.Write(b)
-}
-
 // A Writer takes data written to it and writes the compressed
 // form of that data to an underlying writer (see NewWriter).
 type Writer struct {
@@ -805,11 +797,12 @@ func (w *Writer) Close() error {
 // the result of NewWriter or NewWriterDict called with dst
 // and w's level and dictionary.
 func (w *Writer) Reset(dst io.Writer) {
-	if dw, ok := w.d.w.writer.(*dictWriter); ok {
+	if len(w.dict) > 0 {
 		// w was created with NewWriterDict
-		dw.w = dst
-		w.d.reset(dw)
+		w.d.reset(dst)
+		if dst != nil {
 			w.d.fillWindow(w.dict)
+		}
 	} else {
 		// w was created with NewWriter
 		w.d.reset(dst)

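The dictWriter wrapper removed above existed only so Reset could tell whether a Writer had been created with a preset dictionary; tracking the dictionary directly lets Reset refill the window without the indirection. As a usage illustration, the NewWriterDict/Reset round trip below uses the standard library's compress/flate, which this vendored package is documented as a drop-in replacement for; the sample dictionary and payload are made up for the demo.

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	dict := []byte("containers/storage containers/image containers/buildah")
	payload := []byte("containers/buildah was bumped to v1.14.2")

	// A writer created with a dictionary must be paired with a reader
	// using the same dictionary.
	var first bytes.Buffer
	zw, err := flate.NewWriterDict(&first, flate.BestCompression, dict)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write(payload); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Reset reuses the writer for a new stream; for a dictionary writer the
	// window is refilled from the dictionary, which is the path the diff
	// above simplifies.
	var second bytes.Buffer
	zw.Reset(&second)
	if _, err := zw.Write(payload); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	zr := flate.NewReaderDict(&second, dict)
	out, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("round-tripped %d bytes after Reset: %s\n", len(out), out)
}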
@@ -35,16 +35,16 @@ func newFastEnc(level int) fastEnc {
 }
 
 const (
-	tableBits       = 16             // Bits used in the table
+	tableBits       = 15             // Bits used in the table
 	tableSize       = 1 << tableBits // Size of the table
 	tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
 	baseMatchOffset = 1              // The smallest match offset
 	baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
 	maxMatchOffset  = 1 << 15        // The largest match offset
 
-	bTableBits   = 18                                               // Bits used in the big tables
+	bTableBits   = 17                                               // Bits used in the big tables
 	bTableSize   = 1 << bTableBits                                  // Size of the table
-	allocHistory = maxStoreBlockSize * 20                           // Size to preallocate for history.
+	allocHistory = maxStoreBlockSize * 10                           // Size to preallocate for history.
 	bufferReset  = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
 )
 
@@ -92,7 +92,6 @@ func hash(u uint32) uint32 {
 }
 
 type tableEntry struct {
-	val    uint32
 	offset int32
 }
 

@@ -0,0 +1,274 @@
+// +build generate
+
+//go:generate go run $GOFILE && gofmt -w inflate_gen.go
+
+package main
+
+import (
+	"os"
+	"strings"
+)
+
+func main() {
+	f, err := os.Create("inflate_gen.go")
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+	types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"}
+	names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"}
+	imports := []string{"bytes", "bufio", "io", "strings", "math/bits"}
+	f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT.
+
+package flate
+
+import (
+`)
+
+	for _, imp := range imports {
+		f.WriteString("\t\"" + imp + "\"\n")
+	}
+	f.WriteString(")\n\n")
+
+	template := `
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) $FUNCNAME$() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.($TYPE$)
+	moreBits := func() error {
+		c, err := fr.ReadByte()
+		if err != nil {
+			return noEOF(err)
+		}
+		f.roffset++
+		f.b |= uint32(c) << f.nb
+		f.nb += 8
+		return nil
+	}
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			nb, b := f.nb, f.b
+			for {
+				for nb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b = b
+						f.nb = nb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					b |= uint32(c) << (nb & 31)
+					nb += 8
+				}
+				chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= nb {
+					if n == 0 {
+						f.b = b
+						f.nb = nb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					f.b = b >> (n & 31)
+					f.nb = nb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var n uint // number of bits extra
+		var length int
+		var err error
+		switch {
+		case v < 256:
+			f.dict.writeByte(byte(v))
+			if f.dict.availWrite() == 0 {
+				f.toRead = f.dict.readFlush()
+				f.step = (*decompressor).$FUNCNAME$
+				f.stepState = stateInit
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+			n = 0
+		case v < 269:
+			length = v*2 - (265*2 - 11)
+			n = 1
+		case v < 273:
+			length = v*4 - (269*4 - 19)
+			n = 2
+		case v < 277:
+			length = v*8 - (273*8 - 35)
+			n = 3
+		case v < 281:
+			length = v*16 - (277*16 - 67)
+			n = 4
+		case v < 285:
+			length = v*32 - (281*32 - 131)
+			n = 5
+		case v < maxNumLit:
+			length = 258
+			n = 0
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+		if n > 0 {
+			for f.nb < n {
+				if err = moreBits(); err != nil {
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+			}
+			length += int(f.b & uint32(1<<n-1))
+			f.b >>= n
+			f.nb -= n
+		}
+
+		var dist int
+		if f.hd == nil {
+			for f.nb < 5 {
+				if err = moreBits(); err != nil {
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+			}
+			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3)))
+			f.b >>= 5
+			f.nb -= 5
+		} else {
+			if dist, err = f.huffSym(f.hd); err != nil {
+				if debugDecode {
+					fmt.Println("huffsym:", err)
+				}
+				f.err = err
+				return
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << nb
+			for f.nb < nb {
+				if err = moreBits(); err != nil {
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			extra |= int(f.b & uint32(1<<nb-1)) | ||||||
|  | 			f.b >>= nb | ||||||
|  | 			f.nb -= nb | ||||||
|  | 			dist = 1<<(nb+1) + 1 + extra | ||||||
|  | 		default: | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println("dist too big:", dist, maxNumDist) | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// No check on length; encoding can be prescient.
 | ||||||
|  | 		if dist > f.dict.histSize() { | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		f.copyLen, f.copyDist = length, dist | ||||||
|  | 		goto copyHistory | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | copyHistory: | ||||||
|  | 	// Perform a backwards copy according to RFC section 3.2.3.
 | ||||||
|  | 	{ | ||||||
|  | 		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) | ||||||
|  | 		if cnt == 0 { | ||||||
|  | 			cnt = f.dict.writeCopy(f.copyDist, f.copyLen) | ||||||
|  | 		} | ||||||
|  | 		f.copyLen -= cnt | ||||||
|  | 
 | ||||||
|  | 		if f.dict.availWrite() == 0 || f.copyLen > 0 { | ||||||
|  | 			f.toRead = f.dict.readFlush() | ||||||
|  | 			f.step = (*decompressor).$FUNCNAME$ // We need to continue this work
 | ||||||
|  | 			f.stepState = stateDict | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		goto readLiteral | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | ` | ||||||
|  | 	for i, t := range types { | ||||||
|  | 		s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1) | ||||||
|  | 		s = strings.Replace(s, "$TYPE$", t, -1) | ||||||
|  | 		f.WriteString(s) | ||||||
|  | 	} | ||||||
|  | 	f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n") | ||||||
|  | 	f.WriteString("\tswitch f.r.(type) {\n") | ||||||
|  | 	for i, t := range types { | ||||||
|  | 		f.WriteString("\t\tcase " + t + ":\n") | ||||||
|  | 		f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n") | ||||||
|  | 	} | ||||||
|  | 	f.WriteString("\t\tdefault:\n") | ||||||
|  | 	f.WriteString("\t\t\treturn f.huffmanBlockGeneric") | ||||||
|  | 	f.WriteString("\t}\n}\n") | ||||||
|  | } | ||||||
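Editor's note: the generator above builds each specialized decoder by plain string substitution on a shared template and then emits a type-switch dispatcher. For readers unfamiliar with this go:generate pattern, here is a minimal, self-contained sketch of the same idea; the decoder/blockDecoder/decodeGeneric names are made up for illustration and are not part of the vendored code.

package main

import (
	"fmt"
	"strings"
)

// specialize replaces the placeholders in a shared body for one concrete type.
func specialize(tmpl, funcName, typ string) string {
	s := strings.Replace(tmpl, "$FUNCNAME$", funcName, -1)
	return strings.Replace(s, "$TYPE$", typ, -1)
}

func main() {
	const tmpl = `func (d *decoder) $FUNCNAME$() {
	r := d.r.($TYPE$)
	_ = r // a specialized body would use r directly, avoiding interface-call overhead
}
`
	types := []string{"*bytes.Buffer", "*bufio.Reader"}
	names := []string{"BytesBuffer", "BufioReader"}

	var out strings.Builder
	for i, t := range types {
		out.WriteString(specialize(tmpl, "decode"+names[i], t))
	}
	// Emit a dispatcher that picks the specialized method by dynamic type.
	out.WriteString("func (d *decoder) blockDecoder() func() {\n\tswitch d.r.(type) {\n")
	for i, t := range types {
		out.WriteString("\tcase " + t + ":\n\t\treturn d.decode" + names[i] + "\n")
	}
	out.WriteString("\tdefault:\n\t\treturn d.decodeGeneric\n\t}\n}\n")
	fmt.Print(out.String())
}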
|  | @ -484,6 +484,9 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
|  | // writeStoredHeader will write a stored header.
 | ||||||
|  | // If the stored block is only used for EOF,
 | ||||||
|  | // it is replaced with a fixed huffman block.
 | ||||||
| func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { | func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { | ||||||
| 	if w.err != nil { | 	if w.err != nil { | ||||||
| 		return | 		return | ||||||
|  | @ -493,6 +496,16 @@ func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { | ||||||
| 		w.writeCode(w.literalEncoding.codes[endBlockMarker]) | 		w.writeCode(w.literalEncoding.codes[endBlockMarker]) | ||||||
| 		w.lastHeader = 0 | 		w.lastHeader = 0 | ||||||
| 	} | 	} | ||||||
|  | 
 | ||||||
|  | 	// To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
 | ||||||
|  | 	if length == 0 && isEof { | ||||||
|  | 		w.writeFixedHeader(isEof) | ||||||
|  | 		// EOB: 7 bits, value: 0
 | ||||||
|  | 		w.writeBits(0, 7) | ||||||
|  | 		w.flush() | ||||||
|  | 		return | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
| 	var flag int32 | 	var flag int32 | ||||||
| 	if isEof { | 	if isEof { | ||||||
| 		flag = 1 | 		flag = 1 | ||||||
|  |  | ||||||
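Editor's note: for context on the "10 bits instead of 5 bytes" comment in the hunk above, an empty stored block costs the 3-bit block header, padding to the next byte boundary, and the 4-byte LEN/NLEN pair, while an empty fixed-Huffman block needs only the 3-bit header plus the 7-bit end-of-block code. A rough size comparison under the assumption that the header starts byte-aligned (values computed here, not taken from the vendored sources):

package main

import "fmt"

func main() {
	// Empty stored block: 3 header bits, padding to the next byte boundary
	// (5 bits when the header starts byte-aligned), then LEN and NLEN as two
	// 16-bit little-endian values.
	storedBits := 3 + 5 + 16 + 16 // 40 bits, i.e. 5 bytes

	// Empty fixed-Huffman block: 3 header bits plus the 7-bit EOB code (value 0).
	fixedBits := 3 + 7 // 10 bits

	fmt.Printf("stored EOF block: %d bits, fixed EOF block: %d bits\n",
		storedBits, fixedBits)
}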
|  | @ -109,8 +109,8 @@ func generateFixedOffsetEncoding() *huffmanEncoder { | ||||||
| 	return h | 	return h | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() | var fixedLiteralEncoding = generateFixedLiteralEncoding() | ||||||
| var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() | var fixedOffsetEncoding = generateFixedOffsetEncoding() | ||||||
| 
 | 
 | ||||||
| func (h *huffmanEncoder) bitLength(freq []uint16) int { | func (h *huffmanEncoder) bitLength(freq []uint16) int { | ||||||
| 	var total int | 	var total int | ||||||
|  |  | ||||||
|  | @ -106,7 +106,7 @@ const ( | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| type huffmanDecoder struct { | type huffmanDecoder struct { | ||||||
| 	min      int                       // the minimum code length
 | 	maxRead  int                       // the maximum number of bits we can read and not overread
 | ||||||
| 	chunks   *[huffmanNumChunks]uint16 // chunks as described above
 | 	chunks   *[huffmanNumChunks]uint16 // chunks as described above
 | ||||||
| 	links    [][]uint16                // overflow links
 | 	links    [][]uint16                // overflow links
 | ||||||
| 	linkMask uint32                    // mask the width of the link table
 | 	linkMask uint32                    // mask the width of the link table
 | ||||||
|  | @ -126,12 +126,12 @@ func (h *huffmanDecoder) init(lengths []int) bool { | ||||||
| 	if h.chunks == nil { | 	if h.chunks == nil { | ||||||
| 		h.chunks = &[huffmanNumChunks]uint16{} | 		h.chunks = &[huffmanNumChunks]uint16{} | ||||||
| 	} | 	} | ||||||
| 	if h.min != 0 { | 	if h.maxRead != 0 { | ||||||
| 		*h = huffmanDecoder{chunks: h.chunks, links: h.links} | 		*h = huffmanDecoder{chunks: h.chunks, links: h.links} | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// Count number of codes of each length,
 | 	// Count number of codes of each length,
 | ||||||
| 	// compute min and max length.
 | 	// compute maxRead and max length.
 | ||||||
| 	var count [maxCodeLen]int | 	var count [maxCodeLen]int | ||||||
| 	var min, max int | 	var min, max int | ||||||
| 	for _, n := range lengths { | 	for _, n := range lengths { | ||||||
|  | @ -178,7 +178,7 @@ func (h *huffmanDecoder) init(lengths []int) bool { | ||||||
| 		return false | 		return false | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	h.min = min | 	h.maxRead = min | ||||||
| 	chunks := h.chunks[:] | 	chunks := h.chunks[:] | ||||||
| 	for i := range chunks { | 	for i := range chunks { | ||||||
| 		chunks[i] = 0 | 		chunks[i] = 0 | ||||||
|  | @ -342,7 +342,7 @@ func (f *decompressor) nextBlock() { | ||||||
| 		// compressed, fixed Huffman tables
 | 		// compressed, fixed Huffman tables
 | ||||||
| 		f.hl = &fixedHuffmanDecoder | 		f.hl = &fixedHuffmanDecoder | ||||||
| 		f.hd = nil | 		f.hd = nil | ||||||
| 		f.huffmanBlock() | 		f.huffmanBlockDecoder()() | ||||||
| 	case 2: | 	case 2: | ||||||
| 		// compressed, dynamic Huffman tables
 | 		// compressed, dynamic Huffman tables
 | ||||||
| 		if f.err = f.readHuffman(); f.err != nil { | 		if f.err = f.readHuffman(); f.err != nil { | ||||||
|  | @ -350,7 +350,7 @@ func (f *decompressor) nextBlock() { | ||||||
| 		} | 		} | ||||||
| 		f.hl = &f.h1 | 		f.hl = &f.h1 | ||||||
| 		f.hd = &f.h2 | 		f.hd = &f.h2 | ||||||
| 		f.huffmanBlock() | 		f.huffmanBlockDecoder()() | ||||||
| 	default: | 	default: | ||||||
| 		// 3 is reserved.
 | 		// 3 is reserved.
 | ||||||
| 		if debugDecode { | 		if debugDecode { | ||||||
|  | @ -543,12 +543,18 @@ func (f *decompressor) readHuffman() error { | ||||||
| 		return CorruptInputError(f.roffset) | 		return CorruptInputError(f.roffset) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	// As an optimization, we can initialize the min bits to read at a time
 | 	// As an optimization, we can initialize the maxRead bits to read at a time
 | ||||||
| 	// for the HLIT tree to the length of the EOB marker since we know that
 | 	// for the HLIT tree to the length of the EOB marker since we know that
 | ||||||
| 	// every block must terminate with one. This preserves the property that
 | 	// every block must terminate with one. This preserves the property that
 | ||||||
| 	// we never read any extra bytes after the end of the DEFLATE stream.
 | 	// we never read any extra bytes after the end of the DEFLATE stream.
 | ||||||
| 	if f.h1.min < f.bits[endBlockMarker] { | 	if f.h1.maxRead < f.bits[endBlockMarker] { | ||||||
| 		f.h1.min = f.bits[endBlockMarker] | 		f.h1.maxRead = f.bits[endBlockMarker] | ||||||
|  | 	} | ||||||
|  | 	if !f.final { | ||||||
|  | 		// If not the final block, the smallest block possible is
 | ||||||
|  | 		// a predefined table, BTYPE=01, with a single EOB marker.
 | ||||||
|  | 		// This will take up 3 + 7 bits.
 | ||||||
|  | 		f.h1.maxRead += 10 | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	return nil | 	return nil | ||||||
|  | @ -558,7 +564,7 @@ func (f *decompressor) readHuffman() error { | ||||||
| // hl and hd are the Huffman states for the lit/length values
 | // hl and hd are the Huffman states for the lit/length values
 | ||||||
| // and the distance values, respectively. If hd == nil, using the
 | // and the distance values, respectively. If hd == nil, using the
 | ||||||
| // fixed distance encoding associated with fixed Huffman blocks.
 | // fixed distance encoding associated with fixed Huffman blocks.
 | ||||||
| func (f *decompressor) huffmanBlock() { | func (f *decompressor) huffmanBlockGeneric() { | ||||||
| 	const ( | 	const ( | ||||||
| 		stateInit = iota // Zero value must be stateInit
 | 		stateInit = iota // Zero value must be stateInit
 | ||||||
| 		stateDict | 		stateDict | ||||||
|  | @ -574,19 +580,64 @@ func (f *decompressor) huffmanBlock() { | ||||||
| readLiteral: | readLiteral: | ||||||
| 	// Read literal and/or (length, distance) according to RFC section 3.2.3.
 | 	// Read literal and/or (length, distance) according to RFC section 3.2.3.
 | ||||||
| 	{ | 	{ | ||||||
| 		v, err := f.huffSym(f.hl) | 		var v int | ||||||
|  | 		{ | ||||||
|  | 			// Inlined v, err := f.huffSym(f.hl)
 | ||||||
|  | 			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
 | ||||||
|  | 			// with single element, huffSym must error on these two edge cases. In both
 | ||||||
|  | 			// to satisfy the n == 0 check below.
 | ||||||
|  | 			// satisfy the n == 0 check below.
 | ||||||
|  | 			n := uint(f.hl.maxRead) | ||||||
|  | 			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 | ||||||
|  | 			// but is smart enough to keep local variables in registers, so use nb and b,
 | ||||||
|  | 			// inline call to moreBits and reassign b,nb back to f on return.
 | ||||||
|  | 			nb, b := f.nb, f.b | ||||||
|  | 			for { | ||||||
|  | 				for nb < n { | ||||||
|  | 					c, err := f.r.ReadByte() | ||||||
| 					if err != nil { | 					if err != nil { | ||||||
| 			f.err = err | 						f.b = b | ||||||
|  | 						f.nb = nb | ||||||
|  | 						f.err = noEOF(err) | ||||||
| 						return | 						return | ||||||
| 					} | 					} | ||||||
|  | 					f.roffset++ | ||||||
|  | 					b |= uint32(c) << (nb & 31) | ||||||
|  | 					nb += 8 | ||||||
|  | 				} | ||||||
|  | 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||||
|  | 				n = uint(chunk & huffmanCountMask) | ||||||
|  | 				if n > huffmanChunkBits { | ||||||
|  | 					chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] | ||||||
|  | 					n = uint(chunk & huffmanCountMask) | ||||||
|  | 				} | ||||||
|  | 				if n <= nb { | ||||||
|  | 					if n == 0 { | ||||||
|  | 						f.b = b | ||||||
|  | 						f.nb = nb | ||||||
|  | 						if debugDecode { | ||||||
|  | 							fmt.Println("huffsym: n==0") | ||||||
|  | 						} | ||||||
|  | 						f.err = CorruptInputError(f.roffset) | ||||||
|  | 						return | ||||||
|  | 					} | ||||||
|  | 					f.b = b >> (n & 31) | ||||||
|  | 					f.nb = nb - n | ||||||
|  | 					v = int(chunk >> huffmanValueShift) | ||||||
|  | 					break | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
| 		var n uint // number of bits extra
 | 		var n uint // number of bits extra
 | ||||||
| 		var length int | 		var length int | ||||||
|  | 		var err error | ||||||
| 		switch { | 		switch { | ||||||
| 		case v < 256: | 		case v < 256: | ||||||
| 			f.dict.writeByte(byte(v)) | 			f.dict.writeByte(byte(v)) | ||||||
| 			if f.dict.availWrite() == 0 { | 			if f.dict.availWrite() == 0 { | ||||||
| 				f.toRead = f.dict.readFlush() | 				f.toRead = f.dict.readFlush() | ||||||
| 				f.step = (*decompressor).huffmanBlock | 				f.step = (*decompressor).huffmanBlockGeneric | ||||||
| 				f.stepState = stateInit | 				f.stepState = stateInit | ||||||
| 				return | 				return | ||||||
| 			} | 			} | ||||||
|  | @ -714,7 +765,7 @@ copyHistory: | ||||||
| 
 | 
 | ||||||
| 		if f.dict.availWrite() == 0 || f.copyLen > 0 { | 		if f.dict.availWrite() == 0 || f.copyLen > 0 { | ||||||
| 			f.toRead = f.dict.readFlush() | 			f.toRead = f.dict.readFlush() | ||||||
| 			f.step = (*decompressor).huffmanBlock // We need to continue this work
 | 			f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work
 | ||||||
| 			f.stepState = stateDict | 			f.stepState = stateDict | ||||||
| 			return | 			return | ||||||
| 		} | 		} | ||||||
|  | @ -726,21 +777,33 @@ copyHistory: | ||||||
| func (f *decompressor) dataBlock() { | func (f *decompressor) dataBlock() { | ||||||
| 	// Uncompressed.
 | 	// Uncompressed.
 | ||||||
| 	// Discard current half-byte.
 | 	// Discard current half-byte.
 | ||||||
| 	f.nb = 0 | 	left := (f.nb) & 7 | ||||||
| 	f.b = 0 | 	f.nb -= left | ||||||
|  | 	f.b >>= left | ||||||
|  | 
 | ||||||
|  | 	offBytes := f.nb >> 3 | ||||||
|  | 	// Unfilled values will be overwritten.
 | ||||||
|  | 	f.buf[0] = uint8(f.b) | ||||||
|  | 	f.buf[1] = uint8(f.b >> 8) | ||||||
|  | 	f.buf[2] = uint8(f.b >> 16) | ||||||
|  | 	f.buf[3] = uint8(f.b >> 24) | ||||||
|  | 
 | ||||||
|  | 	f.roffset += int64(offBytes) | ||||||
|  | 	f.nb, f.b = 0, 0 | ||||||
| 
 | 
 | ||||||
| 	// Length then ones-complement of length.
 | 	// Length then ones-complement of length.
 | ||||||
| 	nr, err := io.ReadFull(f.r, f.buf[0:4]) | 	nr, err := io.ReadFull(f.r, f.buf[offBytes:4]) | ||||||
| 	f.roffset += int64(nr) | 	f.roffset += int64(nr) | ||||||
| 	if err != nil { | 	if err != nil { | ||||||
| 		f.err = noEOF(err) | 		f.err = noEOF(err) | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 	n := int(f.buf[0]) | int(f.buf[1])<<8 | 	n := uint16(f.buf[0]) | uint16(f.buf[1])<<8 | ||||||
| 	nn := int(f.buf[2]) | int(f.buf[3])<<8 | 	nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8 | ||||||
| 	if uint16(nn) != uint16(^n) { | 	if nn != ^n { | ||||||
| 		if debugDecode { | 		if debugDecode { | ||||||
| 			fmt.Println("uint16(nn) != uint16(^n)", nn, ^n) | 			ncomp := ^n | ||||||
|  | 			fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp) | ||||||
| 		} | 		} | ||||||
| 		f.err = CorruptInputError(f.roffset) | 		f.err = CorruptInputError(f.roffset) | ||||||
| 		return | 		return | ||||||
|  | @ -752,7 +815,7 @@ func (f *decompressor) dataBlock() { | ||||||
| 		return | 		return | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	f.copyLen = n | 	f.copyLen = int(n) | ||||||
| 	f.copyData() | 	f.copyData() | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
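Editor's note: the rewritten dataBlock above still ends with the stored-block framing check, LEN followed by NLEN, which must be the ones' complement of LEN, now compared as uint16 values. A tiny standalone illustration of that check (the example bytes are chosen here and do not come from the diff):

package main

import "fmt"

func main() {
	// A stored DEFLATE block starts with LEN and NLEN, where NLEN must be the
	// ones' complement of LEN. Example header for a 300-byte stored block:
	buf := []byte{0x2C, 0x01, 0xD3, 0xFE} // LEN=300 (0x012C), NLEN=^LEN

	n := uint16(buf[0]) | uint16(buf[1])<<8
	nn := uint16(buf[2]) | uint16(buf[3])<<8
	fmt.Printf("LEN=%d NLEN=%#04x ok=%v\n", n, nn, nn == ^n)
}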
|  | @ -816,7 +879,7 @@ func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { | ||||||
| 	// with single element, huffSym must error on these two edge cases. In both
 | 	// with single element, huffSym must error on these two edge cases. In both
 | ||||||
| 	// cases, the chunks slice will be 0 for the invalid sequence, leading it
 | 	// cases, the chunks slice will be 0 for the invalid sequence, leading it
 | ||||||
| 	// to satisfy the n == 0 check below.
 | 	// to satisfy the n == 0 check below.
 | ||||||
| 	n := uint(h.min) | 	n := uint(h.maxRead) | ||||||
| 	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 | 	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 | ||||||
| 	// but is smart enough to keep local variables in registers, so use nb and b,
 | 	// but is smart enough to keep local variables in registers, so use nb and b,
 | ||||||
| 	// inline call to moreBits and reassign b,nb back to f on return.
 | 	// inline call to moreBits and reassign b,nb back to f on return.
 | ||||||
|  |  | ||||||
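Editor's note: the inlined huffSym loop that now appears in huffmanBlockGeneric copies f.b and f.nb into locals for the hot loop and writes them back on every exit path, since, as the comment notes, the compiler keeps locals in registers more readily than struct fields. A stripped-down sketch of that pattern with a hypothetical bitReader type (illustration only, not the vendored implementation):

package main

import "fmt"

type bitReader struct {
	b   uint32 // bit buffer
	nb  uint   // number of valid bits in b
	src []byte
	off int
}

// readBits returns the next n bits, refilling the buffer byte by byte.
// The buffer fields are copied into locals for the loop and stored back on
// every return, mirroring the register-friendly pattern in the decompressor.
func (r *bitReader) readBits(n uint) (uint32, bool) {
	b, nb := r.b, r.nb
	for nb < n {
		if r.off >= len(r.src) {
			r.b, r.nb = b, nb
			return 0, false
		}
		b |= uint32(r.src[r.off]) << (nb & 31)
		r.off++
		nb += 8
	}
	v := b & (1<<n - 1)
	r.b, r.nb = b>>n, nb-n
	return v, true
}

func main() {
	r := &bitReader{src: []byte{0xB4, 0xFF}}
	v, ok := r.readBits(5)
	fmt.Println(v, ok) // low 5 bits of 0xB4, i.e. 20, and true
}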
|  | @ -0,0 +1,922 @@ | ||||||
|  | // Code generated by go generate gen_inflate.go. DO NOT EDIT.
 | ||||||
|  | 
 | ||||||
|  | package flate | ||||||
|  | 
 | ||||||
|  | import ( | ||||||
|  | 	"bufio" | ||||||
|  | 	"bytes" | ||||||
|  | 	"fmt" | ||||||
|  | 	"math/bits" | ||||||
|  | 	"strings" | ||||||
|  | ) | ||||||
|  | 
 | ||||||
|  | // Decode a single Huffman block from f.
 | ||||||
|  | // hl and hd are the Huffman states for the lit/length values
 | ||||||
|  | // and the distance values, respectively. If hd == nil, using the
 | ||||||
|  | // fixed distance encoding associated with fixed Huffman blocks.
 | ||||||
|  | func (f *decompressor) huffmanBytesBuffer() { | ||||||
|  | 	const ( | ||||||
|  | 		stateInit = iota // Zero value must be stateInit
 | ||||||
|  | 		stateDict | ||||||
|  | 	) | ||||||
|  | 	fr := f.r.(*bytes.Buffer) | ||||||
|  | 	moreBits := func() error { | ||||||
|  | 		c, err := fr.ReadByte() | ||||||
|  | 		if err != nil { | ||||||
|  | 			return noEOF(err) | ||||||
|  | 		} | ||||||
|  | 		f.roffset++ | ||||||
|  | 		f.b |= uint32(c) << f.nb | ||||||
|  | 		f.nb += 8 | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	switch f.stepState { | ||||||
|  | 	case stateInit: | ||||||
|  | 		goto readLiteral | ||||||
|  | 	case stateDict: | ||||||
|  | 		goto copyHistory | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | readLiteral: | ||||||
|  | 	// Read literal and/or (length, distance) according to RFC section 3.2.3.
 | ||||||
|  | 	{ | ||||||
|  | 		var v int | ||||||
|  | 		{ | ||||||
|  | 			// Inlined v, err := f.huffSym(f.hl)
 | ||||||
|  | 			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
 | ||||||
|  | 			// with single element, huffSym must error on these two edge cases. In both
 | ||||||
|  | 			// cases, the chunks slice will be 0 for the invalid sequence, leading it
 | ||||||
|  | 			// to satisfy the n == 0 check below.
 | ||||||
|  | 			n := uint(f.hl.maxRead) | ||||||
|  | 			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 | ||||||
|  | 			// but is smart enough to keep local variables in registers, so use nb and b,
 | ||||||
|  | 			// inline call to moreBits and reassign b,nb back to f on return.
 | ||||||
|  | 			nb, b := f.nb, f.b | ||||||
|  | 			for { | ||||||
|  | 				for nb < n { | ||||||
|  | 					c, err := fr.ReadByte() | ||||||
|  | 					if err != nil { | ||||||
|  | 						f.b = b | ||||||
|  | 						f.nb = nb | ||||||
|  | 						f.err = noEOF(err) | ||||||
|  | 						return | ||||||
|  | 					} | ||||||
|  | 					f.roffset++ | ||||||
|  | 					b |= uint32(c) << (nb & 31) | ||||||
|  | 					nb += 8 | ||||||
|  | 				} | ||||||
|  | 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||||
|  | 				n = uint(chunk & huffmanCountMask) | ||||||
|  | 				if n > huffmanChunkBits { | ||||||
|  | 					chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] | ||||||
|  | 					n = uint(chunk & huffmanCountMask) | ||||||
|  | 				} | ||||||
|  | 				if n <= nb { | ||||||
|  | 					if n == 0 { | ||||||
|  | 						f.b = b | ||||||
|  | 						f.nb = nb | ||||||
|  | 						if debugDecode { | ||||||
|  | 							fmt.Println("huffsym: n==0") | ||||||
|  | 						} | ||||||
|  | 						f.err = CorruptInputError(f.roffset) | ||||||
|  | 						return | ||||||
|  | 					} | ||||||
|  | 					f.b = b >> (n & 31) | ||||||
|  | 					f.nb = nb - n | ||||||
|  | 					v = int(chunk >> huffmanValueShift) | ||||||
|  | 					break | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		var n uint // number of bits extra
 | ||||||
|  | 		var length int | ||||||
|  | 		var err error | ||||||
|  | 		switch { | ||||||
|  | 		case v < 256: | ||||||
|  | 			f.dict.writeByte(byte(v)) | ||||||
|  | 			if f.dict.availWrite() == 0 { | ||||||
|  | 				f.toRead = f.dict.readFlush() | ||||||
|  | 				f.step = (*decompressor).huffmanBytesBuffer | ||||||
|  | 				f.stepState = stateInit | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 			goto readLiteral | ||||||
|  | 		case v == 256: | ||||||
|  | 			f.finishBlock() | ||||||
|  | 			return | ||||||
|  | 		// otherwise, reference to older data
 | ||||||
|  | 		case v < 265: | ||||||
|  | 			length = v - (257 - 3) | ||||||
|  | 			n = 0 | ||||||
|  | 		case v < 269: | ||||||
|  | 			length = v*2 - (265*2 - 11) | ||||||
|  | 			n = 1 | ||||||
|  | 		case v < 273: | ||||||
|  | 			length = v*4 - (269*4 - 19) | ||||||
|  | 			n = 2 | ||||||
|  | 		case v < 277: | ||||||
|  | 			length = v*8 - (273*8 - 35) | ||||||
|  | 			n = 3 | ||||||
|  | 		case v < 281: | ||||||
|  | 			length = v*16 - (277*16 - 67) | ||||||
|  | 			n = 4 | ||||||
|  | 		case v < 285: | ||||||
|  | 			length = v*32 - (281*32 - 131) | ||||||
|  | 			n = 5 | ||||||
|  | 		case v < maxNumLit: | ||||||
|  | 			length = 258 | ||||||
|  | 			n = 0 | ||||||
|  | 		default: | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println(v, ">= maxNumLit") | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		if n > 0 { | ||||||
|  | 			for f.nb < n { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits n>0:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			length += int(f.b & uint32(1<<n-1)) | ||||||
|  | 			f.b >>= n | ||||||
|  | 			f.nb -= n | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		var dist int | ||||||
|  | 		if f.hd == nil { | ||||||
|  | 			for f.nb < 5 { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits f.nb<5:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||||
|  | 			f.b >>= 5 | ||||||
|  | 			f.nb -= 5 | ||||||
|  | 		} else { | ||||||
|  | 			if dist, err = f.huffSym(f.hd); err != nil { | ||||||
|  | 				if debugDecode { | ||||||
|  | 					fmt.Println("huffsym:", err) | ||||||
|  | 				} | ||||||
|  | 				f.err = err | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		switch { | ||||||
|  | 		case dist < 4: | ||||||
|  | 			dist++ | ||||||
|  | 		case dist < maxNumDist: | ||||||
|  | 			nb := uint(dist-2) >> 1 | ||||||
|  | 			// have 1 bit in bottom of dist, need nb more.
 | ||||||
|  | 			extra := (dist & 1) << nb | ||||||
|  | 			for f.nb < nb { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits f.nb<nb:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			extra |= int(f.b & uint32(1<<nb-1)) | ||||||
|  | 			f.b >>= nb | ||||||
|  | 			f.nb -= nb | ||||||
|  | 			dist = 1<<(nb+1) + 1 + extra | ||||||
|  | 		default: | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println("dist too big:", dist, maxNumDist) | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// No check on length; encoding can be prescient.
 | ||||||
|  | 		if dist > f.dict.histSize() { | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		f.copyLen, f.copyDist = length, dist | ||||||
|  | 		goto copyHistory | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | copyHistory: | ||||||
|  | 	// Perform a backwards copy according to RFC section 3.2.3.
 | ||||||
|  | 	{ | ||||||
|  | 		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) | ||||||
|  | 		if cnt == 0 { | ||||||
|  | 			cnt = f.dict.writeCopy(f.copyDist, f.copyLen) | ||||||
|  | 		} | ||||||
|  | 		f.copyLen -= cnt | ||||||
|  | 
 | ||||||
|  | 		if f.dict.availWrite() == 0 || f.copyLen > 0 { | ||||||
|  | 			f.toRead = f.dict.readFlush() | ||||||
|  | 			f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
 | ||||||
|  | 			f.stepState = stateDict | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		goto readLiteral | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Decode a single Huffman block from f.
 | ||||||
|  | // hl and hd are the Huffman states for the lit/length values
 | ||||||
|  | // and the distance values, respectively. If hd == nil, using the
 | ||||||
|  | // fixed distance encoding associated with fixed Huffman blocks.
 | ||||||
|  | func (f *decompressor) huffmanBytesReader() { | ||||||
|  | 	const ( | ||||||
|  | 		stateInit = iota // Zero value must be stateInit
 | ||||||
|  | 		stateDict | ||||||
|  | 	) | ||||||
|  | 	fr := f.r.(*bytes.Reader) | ||||||
|  | 	moreBits := func() error { | ||||||
|  | 		c, err := fr.ReadByte() | ||||||
|  | 		if err != nil { | ||||||
|  | 			return noEOF(err) | ||||||
|  | 		} | ||||||
|  | 		f.roffset++ | ||||||
|  | 		f.b |= uint32(c) << f.nb | ||||||
|  | 		f.nb += 8 | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	switch f.stepState { | ||||||
|  | 	case stateInit: | ||||||
|  | 		goto readLiteral | ||||||
|  | 	case stateDict: | ||||||
|  | 		goto copyHistory | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | readLiteral: | ||||||
|  | 	// Read literal and/or (length, distance) according to RFC section 3.2.3.
 | ||||||
|  | 	{ | ||||||
|  | 		var v int | ||||||
|  | 		{ | ||||||
|  | 			// Inlined v, err := f.huffSym(f.hl)
 | ||||||
|  | 			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
 | ||||||
|  | 			// with single element, huffSym must error on these two edge cases. In both
 | ||||||
|  | 			// cases, the chunks slice will be 0 for the invalid sequence, leading it
 | ||||||
|  | 			// to satisfy the n == 0 check below.
 | ||||||
|  | 			n := uint(f.hl.maxRead) | ||||||
|  | 			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 | ||||||
|  | 			// but is smart enough to keep local variables in registers, so use nb and b,
 | ||||||
|  | 			// inline call to moreBits and reassign b,nb back to f on return.
 | ||||||
|  | 			nb, b := f.nb, f.b | ||||||
|  | 			for { | ||||||
|  | 				for nb < n { | ||||||
|  | 					c, err := fr.ReadByte() | ||||||
|  | 					if err != nil { | ||||||
|  | 						f.b = b | ||||||
|  | 						f.nb = nb | ||||||
|  | 						f.err = noEOF(err) | ||||||
|  | 						return | ||||||
|  | 					} | ||||||
|  | 					f.roffset++ | ||||||
|  | 					b |= uint32(c) << (nb & 31) | ||||||
|  | 					nb += 8 | ||||||
|  | 				} | ||||||
|  | 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||||
|  | 				n = uint(chunk & huffmanCountMask) | ||||||
|  | 				if n > huffmanChunkBits { | ||||||
|  | 					chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] | ||||||
|  | 					n = uint(chunk & huffmanCountMask) | ||||||
|  | 				} | ||||||
|  | 				if n <= nb { | ||||||
|  | 					if n == 0 { | ||||||
|  | 						f.b = b | ||||||
|  | 						f.nb = nb | ||||||
|  | 						if debugDecode { | ||||||
|  | 							fmt.Println("huffsym: n==0") | ||||||
|  | 						} | ||||||
|  | 						f.err = CorruptInputError(f.roffset) | ||||||
|  | 						return | ||||||
|  | 					} | ||||||
|  | 					f.b = b >> (n & 31) | ||||||
|  | 					f.nb = nb - n | ||||||
|  | 					v = int(chunk >> huffmanValueShift) | ||||||
|  | 					break | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		var n uint // number of bits extra
 | ||||||
|  | 		var length int | ||||||
|  | 		var err error | ||||||
|  | 		switch { | ||||||
|  | 		case v < 256: | ||||||
|  | 			f.dict.writeByte(byte(v)) | ||||||
|  | 			if f.dict.availWrite() == 0 { | ||||||
|  | 				f.toRead = f.dict.readFlush() | ||||||
|  | 				f.step = (*decompressor).huffmanBytesReader | ||||||
|  | 				f.stepState = stateInit | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 			goto readLiteral | ||||||
|  | 		case v == 256: | ||||||
|  | 			f.finishBlock() | ||||||
|  | 			return | ||||||
|  | 		// otherwise, reference to older data
 | ||||||
|  | 		case v < 265: | ||||||
|  | 			length = v - (257 - 3) | ||||||
|  | 			n = 0 | ||||||
|  | 		case v < 269: | ||||||
|  | 			length = v*2 - (265*2 - 11) | ||||||
|  | 			n = 1 | ||||||
|  | 		case v < 273: | ||||||
|  | 			length = v*4 - (269*4 - 19) | ||||||
|  | 			n = 2 | ||||||
|  | 		case v < 277: | ||||||
|  | 			length = v*8 - (273*8 - 35) | ||||||
|  | 			n = 3 | ||||||
|  | 		case v < 281: | ||||||
|  | 			length = v*16 - (277*16 - 67) | ||||||
|  | 			n = 4 | ||||||
|  | 		case v < 285: | ||||||
|  | 			length = v*32 - (281*32 - 131) | ||||||
|  | 			n = 5 | ||||||
|  | 		case v < maxNumLit: | ||||||
|  | 			length = 258 | ||||||
|  | 			n = 0 | ||||||
|  | 		default: | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println(v, ">= maxNumLit") | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		if n > 0 { | ||||||
|  | 			for f.nb < n { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits n>0:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			length += int(f.b & uint32(1<<n-1)) | ||||||
|  | 			f.b >>= n | ||||||
|  | 			f.nb -= n | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		var dist int | ||||||
|  | 		if f.hd == nil { | ||||||
|  | 			for f.nb < 5 { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits f.nb<5:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||||
|  | 			f.b >>= 5 | ||||||
|  | 			f.nb -= 5 | ||||||
|  | 		} else { | ||||||
|  | 			if dist, err = f.huffSym(f.hd); err != nil { | ||||||
|  | 				if debugDecode { | ||||||
|  | 					fmt.Println("huffsym:", err) | ||||||
|  | 				} | ||||||
|  | 				f.err = err | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		switch { | ||||||
|  | 		case dist < 4: | ||||||
|  | 			dist++ | ||||||
|  | 		case dist < maxNumDist: | ||||||
|  | 			nb := uint(dist-2) >> 1 | ||||||
|  | 			// have 1 bit in bottom of dist, need nb more.
 | ||||||
|  | 			extra := (dist & 1) << nb | ||||||
|  | 			for f.nb < nb { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits f.nb<nb:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			extra |= int(f.b & uint32(1<<nb-1)) | ||||||
|  | 			f.b >>= nb | ||||||
|  | 			f.nb -= nb | ||||||
|  | 			dist = 1<<(nb+1) + 1 + extra | ||||||
|  | 		default: | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println("dist too big:", dist, maxNumDist) | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// No check on length; encoding can be prescient.
 | ||||||
|  | 		if dist > f.dict.histSize() { | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		f.copyLen, f.copyDist = length, dist | ||||||
|  | 		goto copyHistory | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | copyHistory: | ||||||
|  | 	// Perform a backwards copy according to RFC section 3.2.3.
 | ||||||
|  | 	{ | ||||||
|  | 		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) | ||||||
|  | 		if cnt == 0 { | ||||||
|  | 			cnt = f.dict.writeCopy(f.copyDist, f.copyLen) | ||||||
|  | 		} | ||||||
|  | 		f.copyLen -= cnt | ||||||
|  | 
 | ||||||
|  | 		if f.dict.availWrite() == 0 || f.copyLen > 0 { | ||||||
|  | 			f.toRead = f.dict.readFlush() | ||||||
|  | 			f.step = (*decompressor).huffmanBytesReader // We need to continue this work
 | ||||||
|  | 			f.stepState = stateDict | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		goto readLiteral | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Decode a single Huffman block from f.
 | ||||||
|  | // hl and hd are the Huffman states for the lit/length values
 | ||||||
|  | // and the distance values, respectively. If hd == nil, using the
 | ||||||
|  | // fixed distance encoding associated with fixed Huffman blocks.
 | ||||||
|  | func (f *decompressor) huffmanBufioReader() { | ||||||
|  | 	const ( | ||||||
|  | 		stateInit = iota // Zero value must be stateInit
 | ||||||
|  | 		stateDict | ||||||
|  | 	) | ||||||
|  | 	fr := f.r.(*bufio.Reader) | ||||||
|  | 	moreBits := func() error { | ||||||
|  | 		c, err := fr.ReadByte() | ||||||
|  | 		if err != nil { | ||||||
|  | 			return noEOF(err) | ||||||
|  | 		} | ||||||
|  | 		f.roffset++ | ||||||
|  | 		f.b |= uint32(c) << f.nb | ||||||
|  | 		f.nb += 8 | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	switch f.stepState { | ||||||
|  | 	case stateInit: | ||||||
|  | 		goto readLiteral | ||||||
|  | 	case stateDict: | ||||||
|  | 		goto copyHistory | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | readLiteral: | ||||||
|  | 	// Read literal and/or (length, distance) according to RFC section 3.2.3.
 | ||||||
|  | 	{ | ||||||
|  | 		var v int | ||||||
|  | 		{ | ||||||
|  | 			// Inlined v, err := f.huffSym(f.hl)
 | ||||||
|  | 			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
 | ||||||
|  | 			// with single element, huffSym must error on these two edge cases. In both
 | ||||||
|  | 			// cases, the chunks slice will be 0 for the invalid sequence, leading it
 | ||||||
|  | 			// to satisfy the n == 0 check below.
 | ||||||
|  | 			n := uint(f.hl.maxRead) | ||||||
|  | 			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 | ||||||
|  | 			// but is smart enough to keep local variables in registers, so use nb and b,
 | ||||||
|  | 			// inline call to moreBits and reassign b,nb back to f on return.
 | ||||||
|  | 			nb, b := f.nb, f.b | ||||||
|  | 			for { | ||||||
|  | 				for nb < n { | ||||||
|  | 					c, err := fr.ReadByte() | ||||||
|  | 					if err != nil { | ||||||
|  | 						f.b = b | ||||||
|  | 						f.nb = nb | ||||||
|  | 						f.err = noEOF(err) | ||||||
|  | 						return | ||||||
|  | 					} | ||||||
|  | 					f.roffset++ | ||||||
|  | 					b |= uint32(c) << (nb & 31) | ||||||
|  | 					nb += 8 | ||||||
|  | 				} | ||||||
|  | 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||||
|  | 				n = uint(chunk & huffmanCountMask) | ||||||
|  | 				if n > huffmanChunkBits { | ||||||
|  | 					chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] | ||||||
|  | 					n = uint(chunk & huffmanCountMask) | ||||||
|  | 				} | ||||||
|  | 				if n <= nb { | ||||||
|  | 					if n == 0 { | ||||||
|  | 						f.b = b | ||||||
|  | 						f.nb = nb | ||||||
|  | 						if debugDecode { | ||||||
|  | 							fmt.Println("huffsym: n==0") | ||||||
|  | 						} | ||||||
|  | 						f.err = CorruptInputError(f.roffset) | ||||||
|  | 						return | ||||||
|  | 					} | ||||||
|  | 					f.b = b >> (n & 31) | ||||||
|  | 					f.nb = nb - n | ||||||
|  | 					v = int(chunk >> huffmanValueShift) | ||||||
|  | 					break | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		var n uint // number of bits extra
 | ||||||
|  | 		var length int | ||||||
|  | 		var err error | ||||||
|  | 		switch { | ||||||
|  | 		case v < 256: | ||||||
|  | 			f.dict.writeByte(byte(v)) | ||||||
|  | 			if f.dict.availWrite() == 0 { | ||||||
|  | 				f.toRead = f.dict.readFlush() | ||||||
|  | 				f.step = (*decompressor).huffmanBufioReader | ||||||
|  | 				f.stepState = stateInit | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 			goto readLiteral | ||||||
|  | 		case v == 256: | ||||||
|  | 			f.finishBlock() | ||||||
|  | 			return | ||||||
|  | 		// otherwise, reference to older data
 | ||||||
|  | 		case v < 265: | ||||||
|  | 			length = v - (257 - 3) | ||||||
|  | 			n = 0 | ||||||
|  | 		case v < 269: | ||||||
|  | 			length = v*2 - (265*2 - 11) | ||||||
|  | 			n = 1 | ||||||
|  | 		case v < 273: | ||||||
|  | 			length = v*4 - (269*4 - 19) | ||||||
|  | 			n = 2 | ||||||
|  | 		case v < 277: | ||||||
|  | 			length = v*8 - (273*8 - 35) | ||||||
|  | 			n = 3 | ||||||
|  | 		case v < 281: | ||||||
|  | 			length = v*16 - (277*16 - 67) | ||||||
|  | 			n = 4 | ||||||
|  | 		case v < 285: | ||||||
|  | 			length = v*32 - (281*32 - 131) | ||||||
|  | 			n = 5 | ||||||
|  | 		case v < maxNumLit: | ||||||
|  | 			length = 258 | ||||||
|  | 			n = 0 | ||||||
|  | 		default: | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println(v, ">= maxNumLit") | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		if n > 0 { | ||||||
|  | 			for f.nb < n { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits n>0:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			length += int(f.b & uint32(1<<n-1)) | ||||||
|  | 			f.b >>= n | ||||||
|  | 			f.nb -= n | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		var dist int | ||||||
|  | 		if f.hd == nil { | ||||||
|  | 			for f.nb < 5 { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits f.nb<5:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||||
|  | 			f.b >>= 5 | ||||||
|  | 			f.nb -= 5 | ||||||
|  | 		} else { | ||||||
|  | 			if dist, err = f.huffSym(f.hd); err != nil { | ||||||
|  | 				if debugDecode { | ||||||
|  | 					fmt.Println("huffsym:", err) | ||||||
|  | 				} | ||||||
|  | 				f.err = err | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		switch { | ||||||
|  | 		case dist < 4: | ||||||
|  | 			dist++ | ||||||
|  | 		case dist < maxNumDist: | ||||||
|  | 			nb := uint(dist-2) >> 1 | ||||||
|  | 			// have 1 bit in bottom of dist, need nb more.
 | ||||||
|  | 			extra := (dist & 1) << nb | ||||||
|  | 			for f.nb < nb { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits f.nb<nb:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			extra |= int(f.b & uint32(1<<nb-1)) | ||||||
|  | 			f.b >>= nb | ||||||
|  | 			f.nb -= nb | ||||||
|  | 			dist = 1<<(nb+1) + 1 + extra | ||||||
|  | 		default: | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println("dist too big:", dist, maxNumDist) | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// No check on length; encoding can be prescient.
 | ||||||
|  | 		if dist > f.dict.histSize() { | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		f.copyLen, f.copyDist = length, dist | ||||||
|  | 		goto copyHistory | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | copyHistory: | ||||||
|  | 	// Perform a backwards copy according to RFC section 3.2.3.
 | ||||||
|  | 	{ | ||||||
|  | 		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) | ||||||
|  | 		if cnt == 0 { | ||||||
|  | 			cnt = f.dict.writeCopy(f.copyDist, f.copyLen) | ||||||
|  | 		} | ||||||
|  | 		f.copyLen -= cnt | ||||||
|  | 
 | ||||||
|  | 		if f.dict.availWrite() == 0 || f.copyLen > 0 { | ||||||
|  | 			f.toRead = f.dict.readFlush() | ||||||
|  | 			f.step = (*decompressor).huffmanBufioReader // We need to continue this work
 | ||||||
|  | 			f.stepState = stateDict | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		goto readLiteral | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | // Decode a single Huffman block from f.
 | ||||||
|  | // hl and hd are the Huffman states for the lit/length values
 | ||||||
|  | // and the distance values, respectively. If hd == nil, using the
 | ||||||
|  | // fixed distance encoding associated with fixed Huffman blocks.
 | ||||||
|  | func (f *decompressor) huffmanStringsReader() { | ||||||
|  | 	const ( | ||||||
|  | 		stateInit = iota // Zero value must be stateInit
 | ||||||
|  | 		stateDict | ||||||
|  | 	) | ||||||
|  | 	fr := f.r.(*strings.Reader) | ||||||
|  | 	moreBits := func() error { | ||||||
|  | 		c, err := fr.ReadByte() | ||||||
|  | 		if err != nil { | ||||||
|  | 			return noEOF(err) | ||||||
|  | 		} | ||||||
|  | 		f.roffset++ | ||||||
|  | 		f.b |= uint32(c) << f.nb | ||||||
|  | 		f.nb += 8 | ||||||
|  | 		return nil | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | 	switch f.stepState { | ||||||
|  | 	case stateInit: | ||||||
|  | 		goto readLiteral | ||||||
|  | 	case stateDict: | ||||||
|  | 		goto copyHistory | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | readLiteral: | ||||||
|  | 	// Read literal and/or (length, distance) according to RFC section 3.2.3.
 | ||||||
|  | 	{ | ||||||
|  | 		var v int | ||||||
|  | 		{ | ||||||
|  | 			// Inlined v, err := f.huffSym(f.hl)
 | ||||||
|  | 			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
 | ||||||
|  | 			// with single element, huffSym must error on these two edge cases. In both
 | ||||||
|  | 			// cases, the chunks slice will be 0 for the invalid sequence, leading it
 | ||||||
|  | 			// to satisfy the n == 0 check below.
 | ||||||
|  | 			n := uint(f.hl.maxRead) | ||||||
|  | 			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
 | ||||||
|  | 			// but is smart enough to keep local variables in registers, so use nb and b,
 | ||||||
|  | 			// inline call to moreBits and reassign b,nb back to f on return.
 | ||||||
|  | 			nb, b := f.nb, f.b | ||||||
|  | 			for { | ||||||
|  | 				for nb < n { | ||||||
|  | 					c, err := fr.ReadByte() | ||||||
|  | 					if err != nil { | ||||||
|  | 						f.b = b | ||||||
|  | 						f.nb = nb | ||||||
|  | 						f.err = noEOF(err) | ||||||
|  | 						return | ||||||
|  | 					} | ||||||
|  | 					f.roffset++ | ||||||
|  | 					b |= uint32(c) << (nb & 31) | ||||||
|  | 					nb += 8 | ||||||
|  | 				} | ||||||
|  | 				chunk := f.hl.chunks[b&(huffmanNumChunks-1)] | ||||||
|  | 				n = uint(chunk & huffmanCountMask) | ||||||
|  | 				if n > huffmanChunkBits { | ||||||
|  | 					chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask] | ||||||
|  | 					n = uint(chunk & huffmanCountMask) | ||||||
|  | 				} | ||||||
|  | 				if n <= nb { | ||||||
|  | 					if n == 0 { | ||||||
|  | 						f.b = b | ||||||
|  | 						f.nb = nb | ||||||
|  | 						if debugDecode { | ||||||
|  | 							fmt.Println("huffsym: n==0") | ||||||
|  | 						} | ||||||
|  | 						f.err = CorruptInputError(f.roffset) | ||||||
|  | 						return | ||||||
|  | 					} | ||||||
|  | 					f.b = b >> (n & 31) | ||||||
|  | 					f.nb = nb - n | ||||||
|  | 					v = int(chunk >> huffmanValueShift) | ||||||
|  | 					break | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		var n uint // number of bits extra
 | ||||||
|  | 		var length int | ||||||
|  | 		var err error | ||||||
|  | 		switch { | ||||||
|  | 		case v < 256: | ||||||
|  | 			f.dict.writeByte(byte(v)) | ||||||
|  | 			if f.dict.availWrite() == 0 { | ||||||
|  | 				f.toRead = f.dict.readFlush() | ||||||
|  | 				f.step = (*decompressor).huffmanStringsReader | ||||||
|  | 				f.stepState = stateInit | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 			goto readLiteral | ||||||
|  | 		case v == 256: | ||||||
|  | 			f.finishBlock() | ||||||
|  | 			return | ||||||
|  | 		// otherwise, reference to older data
 | ||||||
|  | 		case v < 265: | ||||||
|  | 			length = v - (257 - 3) | ||||||
|  | 			n = 0 | ||||||
|  | 		case v < 269: | ||||||
|  | 			length = v*2 - (265*2 - 11) | ||||||
|  | 			n = 1 | ||||||
|  | 		case v < 273: | ||||||
|  | 			length = v*4 - (269*4 - 19) | ||||||
|  | 			n = 2 | ||||||
|  | 		case v < 277: | ||||||
|  | 			length = v*8 - (273*8 - 35) | ||||||
|  | 			n = 3 | ||||||
|  | 		case v < 281: | ||||||
|  | 			length = v*16 - (277*16 - 67) | ||||||
|  | 			n = 4 | ||||||
|  | 		case v < 285: | ||||||
|  | 			length = v*32 - (281*32 - 131) | ||||||
|  | 			n = 5 | ||||||
|  | 		case v < maxNumLit: | ||||||
|  | 			length = 258 | ||||||
|  | 			n = 0 | ||||||
|  | 		default: | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println(v, ">= maxNumLit") | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		if n > 0 { | ||||||
|  | 			for f.nb < n { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits n>0:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			length += int(f.b & uint32(1<<n-1)) | ||||||
|  | 			f.b >>= n | ||||||
|  | 			f.nb -= n | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		var dist int | ||||||
|  | 		if f.hd == nil { | ||||||
|  | 			for f.nb < 5 { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits f.nb<5:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			dist = int(bits.Reverse8(uint8(f.b & 0x1F << 3))) | ||||||
|  | 			f.b >>= 5 | ||||||
|  | 			f.nb -= 5 | ||||||
|  | 		} else { | ||||||
|  | 			if dist, err = f.huffSym(f.hd); err != nil { | ||||||
|  | 				if debugDecode { | ||||||
|  | 					fmt.Println("huffsym:", err) | ||||||
|  | 				} | ||||||
|  | 				f.err = err | ||||||
|  | 				return | ||||||
|  | 			} | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		switch { | ||||||
|  | 		case dist < 4: | ||||||
|  | 			dist++ | ||||||
|  | 		case dist < maxNumDist: | ||||||
|  | 			nb := uint(dist-2) >> 1 | ||||||
|  | 			// have 1 bit in bottom of dist, need nb more.
 | ||||||
|  | 			extra := (dist & 1) << nb | ||||||
|  | 			for f.nb < nb { | ||||||
|  | 				if err = moreBits(); err != nil { | ||||||
|  | 					if debugDecode { | ||||||
|  | 						fmt.Println("morebits f.nb<nb:", err) | ||||||
|  | 					} | ||||||
|  | 					f.err = err | ||||||
|  | 					return | ||||||
|  | 				} | ||||||
|  | 			} | ||||||
|  | 			extra |= int(f.b & uint32(1<<nb-1)) | ||||||
|  | 			f.b >>= nb | ||||||
|  | 			f.nb -= nb | ||||||
|  | 			dist = 1<<(nb+1) + 1 + extra | ||||||
|  | 		default: | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println("dist too big:", dist, maxNumDist) | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		// No check on length; encoding can be prescient.
 | ||||||
|  | 		if dist > f.dict.histSize() { | ||||||
|  | 			if debugDecode { | ||||||
|  | 				fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | ||||||
|  | 			} | ||||||
|  | 			f.err = CorruptInputError(f.roffset) | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 
 | ||||||
|  | 		f.copyLen, f.copyDist = length, dist | ||||||
|  | 		goto copyHistory | ||||||
|  | 	} | ||||||
|  | 
 | ||||||
|  | copyHistory: | ||||||
|  | 	// Perform a backwards copy according to RFC section 3.2.3.
 | ||||||
|  | 	{ | ||||||
|  | 		cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) | ||||||
|  | 		if cnt == 0 { | ||||||
|  | 			cnt = f.dict.writeCopy(f.copyDist, f.copyLen) | ||||||
|  | 		} | ||||||
|  | 		f.copyLen -= cnt | ||||||
|  | 
 | ||||||
|  | 		if f.dict.availWrite() == 0 || f.copyLen > 0 { | ||||||
|  | 			f.toRead = f.dict.readFlush() | ||||||
|  | 			f.step = (*decompressor).huffmanStringsReader // We need to continue this work
 | ||||||
|  | 			f.stepState = stateDict | ||||||
|  | 			return | ||||||
|  | 		} | ||||||
|  | 		goto readLiteral | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | func (f *decompressor) huffmanBlockDecoder() func() { | ||||||
|  | 	switch f.r.(type) { | ||||||
|  | 	case *bytes.Buffer: | ||||||
|  | 		return f.huffmanBytesBuffer | ||||||
|  | 	case *bytes.Reader: | ||||||
|  | 		return f.huffmanBytesReader | ||||||
|  | 	case *bufio.Reader: | ||||||
|  | 		return f.huffmanBufioReader | ||||||
|  | 	case *strings.Reader: | ||||||
|  | 		return f.huffmanStringsReader | ||||||
|  | 	default: | ||||||
|  | 		return f.huffmanBlockGeneric | ||||||
|  | 	} | ||||||
|  | } | ||||||
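Editor's note: the dispatcher above means a caller gets a specialized decode loop simply by handing the decompressor a concrete *bytes.Buffer, *bytes.Reader, *bufio.Reader, or *strings.Reader; any other reader falls back to huffmanBlockGeneric. A small round-trip sketch against the standard flate-style API, which this fork mirrors (stdlib compress/flate is imported here only so the example stands alone):

package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
	"log"
)

func main() {
	// Compress a small payload.
	var compressed bytes.Buffer
	zw, err := flate.NewWriter(&compressed, flate.DefaultCompression)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := zw.Write([]byte("hello, specialized inflate")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	// Decompress from a *bytes.Reader; the decoder's type switch would pick
	// the huffmanBytesReader specialization for this concrete type.
	zr := flate.NewReader(bytes.NewReader(compressed.Bytes()))
	defer zr.Close()

	var out bytes.Buffer
	if _, err := io.Copy(&out, zr); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.String())
}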
|  | @ -16,7 +16,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { | ||||||
| 		inputMargin            = 12 - 1 | 		inputMargin            = 12 - 1 | ||||||
| 		minNonLiteralBlockSize = 1 + 1 + inputMargin | 		minNonLiteralBlockSize = 1 + 1 + inputMargin | ||||||
| 	) | 	) | ||||||
| 	if debugDecode && e.cur < 0 { | 	if debugDeflate && e.cur < 0 { | ||||||
| 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -81,12 +81,12 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			now := load6432(src, nextS) | 			now := load6432(src, nextS) | ||||||
| 			e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} | 			e.table[nextHash] = tableEntry{offset: s + e.cur} | ||||||
| 			nextHash = hash(uint32(now)) | 			nextHash = hash(uint32(now)) | ||||||
| 
 | 
 | ||||||
| 			offset := s - (candidate.offset - e.cur) | 			offset := s - (candidate.offset - e.cur) | ||||||
| 			if offset < maxMatchOffset && cv == candidate.val { | 			if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { | ||||||
| 				e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} | 				e.table[nextHash] = tableEntry{offset: nextS + e.cur} | ||||||
| 				break | 				break | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
|  | @ -96,11 +96,11 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { | ||||||
| 			nextS++ | 			nextS++ | ||||||
| 			candidate = e.table[nextHash] | 			candidate = e.table[nextHash] | ||||||
| 			now >>= 8 | 			now >>= 8 | ||||||
| 			e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} | 			e.table[nextHash] = tableEntry{offset: s + e.cur} | ||||||
| 
 | 
 | ||||||
| 			offset = s - (candidate.offset - e.cur) | 			offset = s - (candidate.offset - e.cur) | ||||||
| 			if offset < maxMatchOffset && cv == candidate.val { | 			if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { | ||||||
| 				e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} | 				e.table[nextHash] = tableEntry{offset: nextS + e.cur} | ||||||
| 				break | 				break | ||||||
| 			} | 			} | ||||||
| 			cv = uint32(now) | 			cv = uint32(now) | ||||||
|  | @ -139,7 +139,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { | ||||||
| 				// Index first pair after match end.
 | 				// Index first pair after match end.
 | ||||||
| 				if int(s+l+4) < len(src) { | 				if int(s+l+4) < len(src) { | ||||||
| 					cv := load3232(src, s) | 					cv := load3232(src, s) | ||||||
| 					e.table[hash(cv)] = tableEntry{offset: s + e.cur, val: cv} | 					e.table[hash(cv)] = tableEntry{offset: s + e.cur} | ||||||
| 				} | 				} | ||||||
| 				goto emitRemainder | 				goto emitRemainder | ||||||
| 			} | 			} | ||||||
|  | @ -153,14 +153,14 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { | ||||||
| 			x := load6432(src, s-2) | 			x := load6432(src, s-2) | ||||||
| 			o := e.cur + s - 2 | 			o := e.cur + s - 2 | ||||||
| 			prevHash := hash(uint32(x)) | 			prevHash := hash(uint32(x)) | ||||||
| 			e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} | 			e.table[prevHash] = tableEntry{offset: o} | ||||||
| 			x >>= 16 | 			x >>= 16 | ||||||
| 			currHash := hash(uint32(x)) | 			currHash := hash(uint32(x)) | ||||||
| 			candidate = e.table[currHash] | 			candidate = e.table[currHash] | ||||||
| 			e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x)} | 			e.table[currHash] = tableEntry{offset: o + 2} | ||||||
| 
 | 
 | ||||||
| 			offset := s - (candidate.offset - e.cur) | 			offset := s - (candidate.offset - e.cur) | ||||||
| 			if offset > maxMatchOffset || uint32(x) != candidate.val { | 			if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { | ||||||
| 				cv = uint32(x >> 8) | 				cv = uint32(x >> 8) | ||||||
| 				s++ | 				s++ | ||||||
| 				break | 				break | ||||||
|  |  | ||||||
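
The flate hunks above and below share one change: the cached val field is removed from tableEntry, and a match candidate is now verified by re-reading four bytes from src at the candidate's position via load3232. The hash tables shrink to offset-only entries at the cost of one extra load per check. A minimal sketch of that pattern, assuming a plausible load3232 implementation (the real helper lives elsewhere in the library):

package flatesketch

// Sketch of the new candidate check used by the fast encoders above.
// Names mirror the diff; load3232 is an assumed equivalent of the
// library helper, not its actual implementation.

type tableEntry struct {
	offset int32 // the cached 32-bit "val" field was dropped
}

// load3232 reads four little-endian bytes starting at src[i].
func load3232(src []byte, i int32) uint32 {
	b := src[i : i+4]
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

// candidateMatches re-reads the candidate bytes from src instead of
// comparing against a value stored in the table entry.
func candidateMatches(src []byte, cv uint32, cand tableEntry, s, cur, maxMatchOffset int32) bool {
	offset := s - (cand.offset - cur)
	return offset < maxMatchOffset && cv == load3232(src, cand.offset-cur)
}
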
|  | @ -18,7 +18,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { | ||||||
| 		minNonLiteralBlockSize = 1 + 1 + inputMargin | 		minNonLiteralBlockSize = 1 + 1 + inputMargin | ||||||
| 	) | 	) | ||||||
| 
 | 
 | ||||||
| 	if debugDecode && e.cur < 0 { | 	if debugDeflate && e.cur < 0 { | ||||||
| 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -83,12 +83,12 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { | ||||||
| 			} | 			} | ||||||
| 			candidate = e.table[nextHash] | 			candidate = e.table[nextHash] | ||||||
| 			now := load6432(src, nextS) | 			now := load6432(src, nextS) | ||||||
| 			e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} | 			e.table[nextHash] = tableEntry{offset: s + e.cur} | ||||||
| 			nextHash = hash4u(uint32(now), bTableBits) | 			nextHash = hash4u(uint32(now), bTableBits) | ||||||
| 
 | 
 | ||||||
| 			offset := s - (candidate.offset - e.cur) | 			offset := s - (candidate.offset - e.cur) | ||||||
| 			if offset < maxMatchOffset && cv == candidate.val { | 			if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { | ||||||
| 				e.table[nextHash] = tableEntry{offset: nextS + e.cur, val: uint32(now)} | 				e.table[nextHash] = tableEntry{offset: nextS + e.cur} | ||||||
| 				break | 				break | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
|  | @ -98,10 +98,10 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { | ||||||
| 			nextS++ | 			nextS++ | ||||||
| 			candidate = e.table[nextHash] | 			candidate = e.table[nextHash] | ||||||
| 			now >>= 8 | 			now >>= 8 | ||||||
| 			e.table[nextHash] = tableEntry{offset: s + e.cur, val: cv} | 			e.table[nextHash] = tableEntry{offset: s + e.cur} | ||||||
| 
 | 
 | ||||||
| 			offset = s - (candidate.offset - e.cur) | 			offset = s - (candidate.offset - e.cur) | ||||||
| 			if offset < maxMatchOffset && cv == candidate.val { | 			if offset < maxMatchOffset && cv == load3232(src, candidate.offset-e.cur) { | ||||||
| 				break | 				break | ||||||
| 			} | 			} | ||||||
| 			cv = uint32(now) | 			cv = uint32(now) | ||||||
|  | @ -148,7 +148,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { | ||||||
| 				// Index first pair after match end.
 | 				// Index first pair after match end.
 | ||||||
| 				if int(s+l+4) < len(src) { | 				if int(s+l+4) < len(src) { | ||||||
| 					cv := load3232(src, s) | 					cv := load3232(src, s) | ||||||
| 					e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur, val: cv} | 					e.table[hash4u(cv, bTableBits)] = tableEntry{offset: s + e.cur} | ||||||
| 				} | 				} | ||||||
| 				goto emitRemainder | 				goto emitRemainder | ||||||
| 			} | 			} | ||||||
|  | @ -157,15 +157,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { | ||||||
| 			for i := s - l + 2; i < s-5; i += 7 { | 			for i := s - l + 2; i < s-5; i += 7 { | ||||||
| 				x := load6432(src, int32(i)) | 				x := load6432(src, int32(i)) | ||||||
| 				nextHash := hash4u(uint32(x), bTableBits) | 				nextHash := hash4u(uint32(x), bTableBits) | ||||||
| 				e.table[nextHash] = tableEntry{offset: e.cur + i, val: uint32(x)} | 				e.table[nextHash] = tableEntry{offset: e.cur + i} | ||||||
| 				// Skip one
 | 				// Skip one
 | ||||||
| 				x >>= 16 | 				x >>= 16 | ||||||
| 				nextHash = hash4u(uint32(x), bTableBits) | 				nextHash = hash4u(uint32(x), bTableBits) | ||||||
| 				e.table[nextHash] = tableEntry{offset: e.cur + i + 2, val: uint32(x)} | 				e.table[nextHash] = tableEntry{offset: e.cur + i + 2} | ||||||
| 				// Skip one
 | 				// Skip one
 | ||||||
| 				x >>= 16 | 				x >>= 16 | ||||||
| 				nextHash = hash4u(uint32(x), bTableBits) | 				nextHash = hash4u(uint32(x), bTableBits) | ||||||
| 				e.table[nextHash] = tableEntry{offset: e.cur + i + 4, val: uint32(x)} | 				e.table[nextHash] = tableEntry{offset: e.cur + i + 4} | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			// We could immediately start working at s now, but to improve
 | 			// We could immediately start working at s now, but to improve
 | ||||||
|  | @ -178,14 +178,14 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { | ||||||
| 			o := e.cur + s - 2 | 			o := e.cur + s - 2 | ||||||
| 			prevHash := hash4u(uint32(x), bTableBits) | 			prevHash := hash4u(uint32(x), bTableBits) | ||||||
| 			prevHash2 := hash4u(uint32(x>>8), bTableBits) | 			prevHash2 := hash4u(uint32(x>>8), bTableBits) | ||||||
| 			e.table[prevHash] = tableEntry{offset: o, val: uint32(x)} | 			e.table[prevHash] = tableEntry{offset: o} | ||||||
| 			e.table[prevHash2] = tableEntry{offset: o + 1, val: uint32(x >> 8)} | 			e.table[prevHash2] = tableEntry{offset: o + 1} | ||||||
| 			currHash := hash4u(uint32(x>>16), bTableBits) | 			currHash := hash4u(uint32(x>>16), bTableBits) | ||||||
| 			candidate = e.table[currHash] | 			candidate = e.table[currHash] | ||||||
| 			e.table[currHash] = tableEntry{offset: o + 2, val: uint32(x >> 16)} | 			e.table[currHash] = tableEntry{offset: o + 2} | ||||||
| 
 | 
 | ||||||
| 			offset := s - (candidate.offset - e.cur) | 			offset := s - (candidate.offset - e.cur) | ||||||
| 			if offset > maxMatchOffset || uint32(x>>16) != candidate.val { | 			if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { | ||||||
| 				cv = uint32(x >> 24) | 				cv = uint32(x >> 24) | ||||||
| 				s++ | 				s++ | ||||||
| 				break | 				break | ||||||
|  |  | ||||||
|  | @ -15,7 +15,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { | ||||||
| 		minNonLiteralBlockSize = 1 + 1 + inputMargin | 		minNonLiteralBlockSize = 1 + 1 + inputMargin | ||||||
| 	) | 	) | ||||||
| 
 | 
 | ||||||
| 	if debugDecode && e.cur < 0 { | 	if debugDeflate && e.cur < 0 { | ||||||
| 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -81,22 +81,26 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { | ||||||
| 			} | 			} | ||||||
| 			candidates := e.table[nextHash] | 			candidates := e.table[nextHash] | ||||||
| 			now := load3232(src, nextS) | 			now := load3232(src, nextS) | ||||||
| 			e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} | 
 | ||||||
|  | 			// Safe offset distance until s + 4...
 | ||||||
|  | 			minOffset := e.cur + s - (maxMatchOffset - 4) | ||||||
|  | 			e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} | ||||||
| 
 | 
 | ||||||
| 			// Check both candidates
 | 			// Check both candidates
 | ||||||
| 			candidate = candidates.Cur | 			candidate = candidates.Cur | ||||||
| 			offset := s - (candidate.offset - e.cur) | 			if candidate.offset < minOffset { | ||||||
| 			if cv == candidate.val { |  | ||||||
| 				if offset > maxMatchOffset { |  | ||||||
| 				cv = now | 				cv = now | ||||||
| 				// Previous will also be invalid, we have nothing.
 | 				// Previous will also be invalid, we have nothing.
 | ||||||
| 				continue | 				continue | ||||||
| 			} | 			} | ||||||
| 				o2 := s - (candidates.Prev.offset - e.cur) | 
 | ||||||
| 				if cv != candidates.Prev.val || o2 > maxMatchOffset { | 			if cv == load3232(src, candidate.offset-e.cur) { | ||||||
|  | 				if candidates.Prev.offset < minOffset || cv != load3232(src, candidates.Prev.offset-e.cur) { | ||||||
| 					break | 					break | ||||||
| 				} | 				} | ||||||
| 				// Both match and are valid, pick longest.
 | 				// Both match and are valid, pick longest.
 | ||||||
|  | 				offset := s - (candidate.offset - e.cur) | ||||||
|  | 				o2 := s - (candidates.Prev.offset - e.cur) | ||||||
| 				l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) | 				l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) | ||||||
| 				if l2 > l1 { | 				if l2 > l1 { | ||||||
| 					candidate = candidates.Prev | 					candidate = candidates.Prev | ||||||
|  | @ -106,13 +110,10 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { | ||||||
| 				// We only check if value mismatches.
 | 				// We only check if value mismatches.
 | ||||||
| 				// Offset will always be invalid in other cases.
 | 				// Offset will always be invalid in other cases.
 | ||||||
| 				candidate = candidates.Prev | 				candidate = candidates.Prev | ||||||
| 				if cv == candidate.val { | 				if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { | ||||||
| 					offset := s - (candidate.offset - e.cur) |  | ||||||
| 					if offset <= maxMatchOffset { |  | ||||||
| 					break | 					break | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 			} |  | ||||||
| 			cv = now | 			cv = now | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
|  | @ -158,7 +159,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { | ||||||
| 					nextHash := hash(cv) | 					nextHash := hash(cv) | ||||||
| 					e.table[nextHash] = tableEntryPrev{ | 					e.table[nextHash] = tableEntryPrev{ | ||||||
| 						Prev: e.table[nextHash].Cur, | 						Prev: e.table[nextHash].Cur, | ||||||
| 						Cur:  tableEntry{offset: e.cur + t, val: cv}, | 						Cur:  tableEntry{offset: e.cur + t}, | ||||||
| 					} | 					} | ||||||
| 				} | 				} | ||||||
| 				goto emitRemainder | 				goto emitRemainder | ||||||
|  | @ -170,21 +171,21 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { | ||||||
| 			prevHash := hash(uint32(x)) | 			prevHash := hash(uint32(x)) | ||||||
| 			e.table[prevHash] = tableEntryPrev{ | 			e.table[prevHash] = tableEntryPrev{ | ||||||
| 				Prev: e.table[prevHash].Cur, | 				Prev: e.table[prevHash].Cur, | ||||||
| 				Cur:  tableEntry{offset: e.cur + s - 3, val: uint32(x)}, | 				Cur:  tableEntry{offset: e.cur + s - 3}, | ||||||
| 			} | 			} | ||||||
| 			x >>= 8 | 			x >>= 8 | ||||||
| 			prevHash = hash(uint32(x)) | 			prevHash = hash(uint32(x)) | ||||||
| 
 | 
 | ||||||
| 			e.table[prevHash] = tableEntryPrev{ | 			e.table[prevHash] = tableEntryPrev{ | ||||||
| 				Prev: e.table[prevHash].Cur, | 				Prev: e.table[prevHash].Cur, | ||||||
| 				Cur:  tableEntry{offset: e.cur + s - 2, val: uint32(x)}, | 				Cur:  tableEntry{offset: e.cur + s - 2}, | ||||||
| 			} | 			} | ||||||
| 			x >>= 8 | 			x >>= 8 | ||||||
| 			prevHash = hash(uint32(x)) | 			prevHash = hash(uint32(x)) | ||||||
| 
 | 
 | ||||||
| 			e.table[prevHash] = tableEntryPrev{ | 			e.table[prevHash] = tableEntryPrev{ | ||||||
| 				Prev: e.table[prevHash].Cur, | 				Prev: e.table[prevHash].Cur, | ||||||
| 				Cur:  tableEntry{offset: e.cur + s - 1, val: uint32(x)}, | 				Cur:  tableEntry{offset: e.cur + s - 1}, | ||||||
| 			} | 			} | ||||||
| 			x >>= 8 | 			x >>= 8 | ||||||
| 			currHash := hash(uint32(x)) | 			currHash := hash(uint32(x)) | ||||||
|  | @ -192,21 +193,18 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { | ||||||
| 			cv = uint32(x) | 			cv = uint32(x) | ||||||
| 			e.table[currHash] = tableEntryPrev{ | 			e.table[currHash] = tableEntryPrev{ | ||||||
| 				Prev: candidates.Cur, | 				Prev: candidates.Cur, | ||||||
| 				Cur:  tableEntry{offset: s + e.cur, val: cv}, | 				Cur:  tableEntry{offset: s + e.cur}, | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			// Check both candidates
 | 			// Check both candidates
 | ||||||
| 			candidate = candidates.Cur | 			candidate = candidates.Cur | ||||||
| 			if cv == candidate.val { | 			minOffset := e.cur + s - (maxMatchOffset - 4) | ||||||
| 				offset := s - (candidate.offset - e.cur) | 
 | ||||||
| 				if offset <= maxMatchOffset { | 			if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) { | ||||||
| 					continue |  | ||||||
| 				} |  | ||||||
| 			} else { |  | ||||||
| 				// We only check if value mismatches.
 | 				// We only check if value mismatches.
 | ||||||
| 				// Offset will always be invalid in other cases.
 | 				// Offset will always be invalid in other cases.
 | ||||||
| 				candidate = candidates.Prev | 				candidate = candidates.Prev | ||||||
| 				if cv == candidate.val { | 				if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) { | ||||||
| 					offset := s - (candidate.offset - e.cur) | 					offset := s - (candidate.offset - e.cur) | ||||||
| 					if offset <= maxMatchOffset { | 					if offset <= maxMatchOffset { | ||||||
| 						continue | 						continue | ||||||
|  |  | ||||||
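
The level 3 encoder above additionally precomputes a single bound, minOffset := e.cur + s - (maxMatchOffset - 4), and compares stored table offsets against it directly; per the diff's own comment this keeps a candidate usable even when the comparison effectively happens at s + 4. A rough sketch of the check, written under that assumption and keeping the diff's strict comparison:

package flatesketch

// Sketch: table offsets are absolute (source position + e.cur). Per the
// diff, a candidate below minOffset is rejected outright; the bound is
// chosen so a surviving candidate still fits within maxMatchOffset when
// matching begins a few bytes past s.
func usableCandidate(candidateOffset, cur, s, maxMatchOffset int32) bool {
	minOffset := cur + s - (maxMatchOffset - 4)
	return candidateOffset > minOffset // mirrors the diff's strict comparison
}
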
|  | @ -13,7 +13,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { | ||||||
| 		inputMargin            = 12 - 1 | 		inputMargin            = 12 - 1 | ||||||
| 		minNonLiteralBlockSize = 1 + 1 + inputMargin | 		minNonLiteralBlockSize = 1 + 1 + inputMargin | ||||||
| 	) | 	) | ||||||
| 	if debugDecode && e.cur < 0 { | 	if debugDeflate && e.cur < 0 { | ||||||
| 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | ||||||
| 	} | 	} | ||||||
| 	// Protect against e.cur wraparound.
 | 	// Protect against e.cur wraparound.
 | ||||||
|  | @ -92,24 +92,24 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { | ||||||
| 			sCandidate := e.table[nextHashS] | 			sCandidate := e.table[nextHashS] | ||||||
| 			lCandidate := e.bTable[nextHashL] | 			lCandidate := e.bTable[nextHashL] | ||||||
| 			next := load6432(src, nextS) | 			next := load6432(src, nextS) | ||||||
| 			entry := tableEntry{offset: s + e.cur, val: uint32(cv)} | 			entry := tableEntry{offset: s + e.cur} | ||||||
| 			e.table[nextHashS] = entry | 			e.table[nextHashS] = entry | ||||||
| 			e.bTable[nextHashL] = entry | 			e.bTable[nextHashL] = entry | ||||||
| 
 | 
 | ||||||
| 			t = lCandidate.offset - e.cur | 			t = lCandidate.offset - e.cur | ||||||
| 			if s-t < maxMatchOffset && uint32(cv) == lCandidate.val { | 			if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { | ||||||
| 				// We got a long match. Use that.
 | 				// We got a long match. Use that.
 | ||||||
| 				break | 				break | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			t = sCandidate.offset - e.cur | 			t = sCandidate.offset - e.cur | ||||||
| 			if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { | 			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { | ||||||
| 				// Found a 4 match...
 | 				// Found a 4 match...
 | ||||||
| 				lCandidate = e.bTable[hash7(next, tableBits)] | 				lCandidate = e.bTable[hash7(next, tableBits)] | ||||||
| 
 | 
 | ||||||
| 				// If the next long is a candidate, check if we should use that instead...
 | 				// If the next long is a candidate, check if we should use that instead...
 | ||||||
| 				lOff := nextS - (lCandidate.offset - e.cur) | 				lOff := nextS - (lCandidate.offset - e.cur) | ||||||
| 				if lOff < maxMatchOffset && lCandidate.val == uint32(next) { | 				if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { | ||||||
| 					l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) | 					l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) | ||||||
| 					if l2 > l1 { | 					if l2 > l1 { | ||||||
| 						s = nextS | 						s = nextS | ||||||
|  | @ -137,7 +137,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { | ||||||
| 		if nextEmit < s { | 		if nextEmit < s { | ||||||
| 			emitLiteral(dst, src[nextEmit:s]) | 			emitLiteral(dst, src[nextEmit:s]) | ||||||
| 		} | 		} | ||||||
| 		if false { | 		if debugDeflate { | ||||||
| 			if t >= s { | 			if t >= s { | ||||||
| 				panic("s-t") | 				panic("s-t") | ||||||
| 			} | 			} | ||||||
|  | @ -160,8 +160,8 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { | ||||||
| 			// Index first pair after match end.
 | 			// Index first pair after match end.
 | ||||||
| 			if int(s+8) < len(src) { | 			if int(s+8) < len(src) { | ||||||
| 				cv := load6432(src, s) | 				cv := load6432(src, s) | ||||||
| 				e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} | 				e.table[hash4x64(cv, tableBits)] = tableEntry{offset: s + e.cur} | ||||||
| 				e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur, val: uint32(cv)} | 				e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} | ||||||
| 			} | 			} | ||||||
| 			goto emitRemainder | 			goto emitRemainder | ||||||
| 		} | 		} | ||||||
|  | @ -171,20 +171,20 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { | ||||||
| 			i := nextS | 			i := nextS | ||||||
| 			if i < s-1 { | 			if i < s-1 { | ||||||
| 				cv := load6432(src, i) | 				cv := load6432(src, i) | ||||||
| 				t := tableEntry{offset: i + e.cur, val: uint32(cv)} | 				t := tableEntry{offset: i + e.cur} | ||||||
| 				t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} | 				t2 := tableEntry{offset: t.offset + 1} | ||||||
| 				e.bTable[hash7(cv, tableBits)] = t | 				e.bTable[hash7(cv, tableBits)] = t | ||||||
| 				e.bTable[hash7(cv>>8, tableBits)] = t2 | 				e.bTable[hash7(cv>>8, tableBits)] = t2 | ||||||
| 				e.table[hash4u(t2.val, tableBits)] = t2 | 				e.table[hash4u(uint32(cv>>8), tableBits)] = t2 | ||||||
| 
 | 
 | ||||||
| 				i += 3 | 				i += 3 | ||||||
| 				for ; i < s-1; i += 3 { | 				for ; i < s-1; i += 3 { | ||||||
| 					cv := load6432(src, i) | 					cv := load6432(src, i) | ||||||
| 					t := tableEntry{offset: i + e.cur, val: uint32(cv)} | 					t := tableEntry{offset: i + e.cur} | ||||||
| 					t2 := tableEntry{val: uint32(cv >> 8), offset: t.offset + 1} | 					t2 := tableEntry{offset: t.offset + 1} | ||||||
| 					e.bTable[hash7(cv, tableBits)] = t | 					e.bTable[hash7(cv, tableBits)] = t | ||||||
| 					e.bTable[hash7(cv>>8, tableBits)] = t2 | 					e.bTable[hash7(cv>>8, tableBits)] = t2 | ||||||
| 					e.table[hash4u(t2.val, tableBits)] = t2 | 					e.table[hash4u(uint32(cv>>8), tableBits)] = t2 | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
|  | @ -195,8 +195,8 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { | ||||||
| 		o := e.cur + s - 1 | 		o := e.cur + s - 1 | ||||||
| 		prevHashS := hash4x64(x, tableBits) | 		prevHashS := hash4x64(x, tableBits) | ||||||
| 		prevHashL := hash7(x, tableBits) | 		prevHashL := hash7(x, tableBits) | ||||||
| 		e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} | 		e.table[prevHashS] = tableEntry{offset: o} | ||||||
| 		e.bTable[prevHashL] = tableEntry{offset: o, val: uint32(x)} | 		e.bTable[prevHashL] = tableEntry{offset: o} | ||||||
| 		cv = x >> 8 | 		cv = x >> 8 | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
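
Levels 1 through 6 above also switch their sanity checks from the decoder-oriented debugDecode flag (and, in two places, a literal if false) to a dedicated debugDeflate constant, so the encoder's invariant panics can be enabled without touching the decoder. A hedged sketch of the gating pattern; the constant's declaration site is an assumption:

package flatesketch

import "fmt"

// Assumed declaration; in the library the constant is defined alongside
// the deflate code and is normally false.
const debugDeflate = false

// With a constant-false guard the whole block is dead code and is
// eliminated at compile time; flipping the constant re-enables the
// checks for debugging builds.
func assertOrdering(s, t int32) {
	if debugDeflate {
		if t >= s {
			panic(fmt.Sprintln("s-t", s, t))
		}
	}
}
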
|  | @ -13,7 +13,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { | ||||||
| 		inputMargin            = 12 - 1 | 		inputMargin            = 12 - 1 | ||||||
| 		minNonLiteralBlockSize = 1 + 1 + inputMargin | 		minNonLiteralBlockSize = 1 + 1 + inputMargin | ||||||
| 	) | 	) | ||||||
| 	if debugDecode && e.cur < 0 { | 	if debugDeflate && e.cur < 0 { | ||||||
| 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -100,7 +100,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { | ||||||
| 			sCandidate := e.table[nextHashS] | 			sCandidate := e.table[nextHashS] | ||||||
| 			lCandidate := e.bTable[nextHashL] | 			lCandidate := e.bTable[nextHashL] | ||||||
| 			next := load6432(src, nextS) | 			next := load6432(src, nextS) | ||||||
| 			entry := tableEntry{offset: s + e.cur, val: uint32(cv)} | 			entry := tableEntry{offset: s + e.cur} | ||||||
| 			e.table[nextHashS] = entry | 			e.table[nextHashS] = entry | ||||||
| 			eLong := &e.bTable[nextHashL] | 			eLong := &e.bTable[nextHashL] | ||||||
| 			eLong.Cur, eLong.Prev = entry, eLong.Cur | 			eLong.Cur, eLong.Prev = entry, eLong.Cur | ||||||
|  | @ -110,14 +110,14 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { | ||||||
| 
 | 
 | ||||||
| 			t = lCandidate.Cur.offset - e.cur | 			t = lCandidate.Cur.offset - e.cur | ||||||
| 			if s-t < maxMatchOffset { | 			if s-t < maxMatchOffset { | ||||||
| 				if uint32(cv) == lCandidate.Cur.val { | 				if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { | ||||||
| 					// Store the next match
 | 					// Store the next match
 | ||||||
| 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} | 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur} | ||||||
| 					eLong := &e.bTable[nextHashL] | 					eLong := &e.bTable[nextHashL] | ||||||
| 					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur | 					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur | ||||||
| 
 | 
 | ||||||
| 					t2 := lCandidate.Prev.offset - e.cur | 					t2 := lCandidate.Prev.offset - e.cur | ||||||
| 					if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { | 					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { | ||||||
| 						l = e.matchlen(s+4, t+4, src) + 4 | 						l = e.matchlen(s+4, t+4, src) + 4 | ||||||
| 						ml1 := e.matchlen(s+4, t2+4, src) + 4 | 						ml1 := e.matchlen(s+4, t2+4, src) + 4 | ||||||
| 						if ml1 > l { | 						if ml1 > l { | ||||||
|  | @ -129,30 +129,30 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { | ||||||
| 					break | 					break | ||||||
| 				} | 				} | ||||||
| 				t = lCandidate.Prev.offset - e.cur | 				t = lCandidate.Prev.offset - e.cur | ||||||
| 				if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { | 				if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { | ||||||
| 					// Store the next match
 | 					// Store the next match
 | ||||||
| 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} | 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur} | ||||||
| 					eLong := &e.bTable[nextHashL] | 					eLong := &e.bTable[nextHashL] | ||||||
| 					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur | 					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur | ||||||
| 					break | 					break | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			t = sCandidate.offset - e.cur | 			t = sCandidate.offset - e.cur | ||||||
| 			if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { | 			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { | ||||||
| 				// Found a 4 match...
 | 				// Found a 4 match...
 | ||||||
| 				l = e.matchlen(s+4, t+4, src) + 4 | 				l = e.matchlen(s+4, t+4, src) + 4 | ||||||
| 				lCandidate = e.bTable[nextHashL] | 				lCandidate = e.bTable[nextHashL] | ||||||
| 				// Store the next match
 | 				// Store the next match
 | ||||||
| 
 | 
 | ||||||
| 				e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} | 				e.table[nextHashS] = tableEntry{offset: nextS + e.cur} | ||||||
| 				eLong := &e.bTable[nextHashL] | 				eLong := &e.bTable[nextHashL] | ||||||
| 				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur | 				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur | ||||||
| 
 | 
 | ||||||
| 				// If the next long is a candidate, use that...
 | 				// If the next long is a candidate, use that...
 | ||||||
| 				t2 := lCandidate.Cur.offset - e.cur | 				t2 := lCandidate.Cur.offset - e.cur | ||||||
| 				if nextS-t2 < maxMatchOffset { | 				if nextS-t2 < maxMatchOffset { | ||||||
| 					if lCandidate.Cur.val == uint32(next) { | 					if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { | ||||||
| 						ml := e.matchlen(nextS+4, t2+4, src) + 4 | 						ml := e.matchlen(nextS+4, t2+4, src) + 4 | ||||||
| 						if ml > l { | 						if ml > l { | ||||||
| 							t = t2 | 							t = t2 | ||||||
|  | @ -163,7 +163,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { | ||||||
| 					} | 					} | ||||||
| 					// If the previous long is a candidate, use that...
 | 					// If the previous long is a candidate, use that...
 | ||||||
| 					t2 = lCandidate.Prev.offset - e.cur | 					t2 = lCandidate.Prev.offset - e.cur | ||||||
| 					if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { | 					if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { | ||||||
| 						ml := e.matchlen(nextS+4, t2+4, src) + 4 | 						ml := e.matchlen(nextS+4, t2+4, src) + 4 | ||||||
| 						if ml > l { | 						if ml > l { | ||||||
| 							t = t2 | 							t = t2 | ||||||
|  | @ -197,7 +197,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { | ||||||
| 		if nextEmit < s { | 		if nextEmit < s { | ||||||
| 			emitLiteral(dst, src[nextEmit:s]) | 			emitLiteral(dst, src[nextEmit:s]) | ||||||
| 		} | 		} | ||||||
| 		if false { | 		if debugDeflate { | ||||||
| 			if t >= s { | 			if t >= s { | ||||||
| 				panic(fmt.Sprintln("s-t", s, t)) | 				panic(fmt.Sprintln("s-t", s, t)) | ||||||
| 			} | 			} | ||||||
|  | @ -226,31 +226,31 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { | ||||||
| 			i := s - l + 1 | 			i := s - l + 1 | ||||||
| 			if i < s-1 { | 			if i < s-1 { | ||||||
| 				cv := load6432(src, i) | 				cv := load6432(src, i) | ||||||
| 				t := tableEntry{offset: i + e.cur, val: uint32(cv)} | 				t := tableEntry{offset: i + e.cur} | ||||||
| 				e.table[hash4x64(cv, tableBits)] = t | 				e.table[hash4x64(cv, tableBits)] = t | ||||||
| 				eLong := &e.bTable[hash7(cv, tableBits)] | 				eLong := &e.bTable[hash7(cv, tableBits)] | ||||||
| 				eLong.Cur, eLong.Prev = t, eLong.Cur | 				eLong.Cur, eLong.Prev = t, eLong.Cur | ||||||
| 
 | 
 | ||||||
| 				// Do an long at i+1
 | 				// Do an long at i+1
 | ||||||
| 				cv >>= 8 | 				cv >>= 8 | ||||||
| 				t = tableEntry{offset: t.offset + 1, val: uint32(cv)} | 				t = tableEntry{offset: t.offset + 1} | ||||||
| 				eLong = &e.bTable[hash7(cv, tableBits)] | 				eLong = &e.bTable[hash7(cv, tableBits)] | ||||||
| 				eLong.Cur, eLong.Prev = t, eLong.Cur | 				eLong.Cur, eLong.Prev = t, eLong.Cur | ||||||
| 
 | 
 | ||||||
| 				// We only have enough bits for a short entry at i+2
 | 				// We only have enough bits for a short entry at i+2
 | ||||||
| 				cv >>= 8 | 				cv >>= 8 | ||||||
| 				t = tableEntry{offset: t.offset + 1, val: uint32(cv)} | 				t = tableEntry{offset: t.offset + 1} | ||||||
| 				e.table[hash4x64(cv, tableBits)] = t | 				e.table[hash4x64(cv, tableBits)] = t | ||||||
| 
 | 
 | ||||||
| 				// Skip one - otherwise we risk hitting 's'
 | 				// Skip one - otherwise we risk hitting 's'
 | ||||||
| 				i += 4 | 				i += 4 | ||||||
| 				for ; i < s-1; i += hashEvery { | 				for ; i < s-1; i += hashEvery { | ||||||
| 					cv := load6432(src, i) | 					cv := load6432(src, i) | ||||||
| 					t := tableEntry{offset: i + e.cur, val: uint32(cv)} | 					t := tableEntry{offset: i + e.cur} | ||||||
| 					t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)} | 					t2 := tableEntry{offset: t.offset + 1} | ||||||
| 					eLong := &e.bTable[hash7(cv, tableBits)] | 					eLong := &e.bTable[hash7(cv, tableBits)] | ||||||
| 					eLong.Cur, eLong.Prev = t, eLong.Cur | 					eLong.Cur, eLong.Prev = t, eLong.Cur | ||||||
| 					e.table[hash4u(t2.val, tableBits)] = t2 | 					e.table[hash4u(uint32(cv>>8), tableBits)] = t2 | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
|  | @ -261,9 +261,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { | ||||||
| 		o := e.cur + s - 1 | 		o := e.cur + s - 1 | ||||||
| 		prevHashS := hash4x64(x, tableBits) | 		prevHashS := hash4x64(x, tableBits) | ||||||
| 		prevHashL := hash7(x, tableBits) | 		prevHashL := hash7(x, tableBits) | ||||||
| 		e.table[prevHashS] = tableEntry{offset: o, val: uint32(x)} | 		e.table[prevHashS] = tableEntry{offset: o} | ||||||
| 		eLong := &e.bTable[prevHashL] | 		eLong := &e.bTable[prevHashL] | ||||||
| 		eLong.Cur, eLong.Prev = tableEntry{offset: o, val: uint32(x)}, eLong.Cur | 		eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur | ||||||
| 		cv = x >> 8 | 		cv = x >> 8 | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -13,7 +13,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { | ||||||
| 		inputMargin            = 12 - 1 | 		inputMargin            = 12 - 1 | ||||||
| 		minNonLiteralBlockSize = 1 + 1 + inputMargin | 		minNonLiteralBlockSize = 1 + 1 + inputMargin | ||||||
| 	) | 	) | ||||||
| 	if debugDecode && e.cur < 0 { | 	if debugDeflate && e.cur < 0 { | ||||||
| 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | 		panic(fmt.Sprint("e.cur < 0: ", e.cur)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  | @ -101,7 +101,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { | ||||||
| 			sCandidate := e.table[nextHashS] | 			sCandidate := e.table[nextHashS] | ||||||
| 			lCandidate := e.bTable[nextHashL] | 			lCandidate := e.bTable[nextHashL] | ||||||
| 			next := load6432(src, nextS) | 			next := load6432(src, nextS) | ||||||
| 			entry := tableEntry{offset: s + e.cur, val: uint32(cv)} | 			entry := tableEntry{offset: s + e.cur} | ||||||
| 			e.table[nextHashS] = entry | 			e.table[nextHashS] = entry | ||||||
| 			eLong := &e.bTable[nextHashL] | 			eLong := &e.bTable[nextHashL] | ||||||
| 			eLong.Cur, eLong.Prev = entry, eLong.Cur | 			eLong.Cur, eLong.Prev = entry, eLong.Cur | ||||||
|  | @ -112,17 +112,17 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { | ||||||
| 
 | 
 | ||||||
| 			t = lCandidate.Cur.offset - e.cur | 			t = lCandidate.Cur.offset - e.cur | ||||||
| 			if s-t < maxMatchOffset { | 			if s-t < maxMatchOffset { | ||||||
| 				if uint32(cv) == lCandidate.Cur.val { | 				if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { | ||||||
| 					// Long candidate matches at least 4 bytes.
 | 					// Long candidate matches at least 4 bytes.
 | ||||||
| 
 | 
 | ||||||
| 					// Store the next match
 | 					// Store the next match
 | ||||||
| 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} | 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur} | ||||||
| 					eLong := &e.bTable[nextHashL] | 					eLong := &e.bTable[nextHashL] | ||||||
| 					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur | 					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur | ||||||
| 
 | 
 | ||||||
| 					// Check the previous long candidate as well.
 | 					// Check the previous long candidate as well.
 | ||||||
| 					t2 := lCandidate.Prev.offset - e.cur | 					t2 := lCandidate.Prev.offset - e.cur | ||||||
| 					if s-t2 < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { | 					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { | ||||||
| 						l = e.matchlen(s+4, t+4, src) + 4 | 						l = e.matchlen(s+4, t+4, src) + 4 | ||||||
| 						ml1 := e.matchlen(s+4, t2+4, src) + 4 | 						ml1 := e.matchlen(s+4, t2+4, src) + 4 | ||||||
| 						if ml1 > l { | 						if ml1 > l { | ||||||
|  | @ -135,17 +135,17 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { | ||||||
| 				} | 				} | ||||||
| 				// Current value did not match, but check if previous long value does.
 | 				// Current value did not match, but check if previous long value does.
 | ||||||
| 				t = lCandidate.Prev.offset - e.cur | 				t = lCandidate.Prev.offset - e.cur | ||||||
| 				if s-t < maxMatchOffset && uint32(cv) == lCandidate.Prev.val { | 				if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { | ||||||
| 					// Store the next match
 | 					// Store the next match
 | ||||||
| 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} | 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur} | ||||||
| 					eLong := &e.bTable[nextHashL] | 					eLong := &e.bTable[nextHashL] | ||||||
| 					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur | 					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur | ||||||
| 					break | 					break | ||||||
| 				} | 				} | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
| 			t = sCandidate.offset - e.cur | 			t = sCandidate.offset - e.cur | ||||||
| 			if s-t < maxMatchOffset && uint32(cv) == sCandidate.val { | 			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { | ||||||
| 				// Found a 4 match...
 | 				// Found a 4 match...
 | ||||||
| 				l = e.matchlen(s+4, t+4, src) + 4 | 				l = e.matchlen(s+4, t+4, src) + 4 | ||||||
| 
 | 
 | ||||||
|  | @ -153,9 +153,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { | ||||||
| 				lCandidate = e.bTable[nextHashL] | 				lCandidate = e.bTable[nextHashL] | ||||||
| 
 | 
 | ||||||
| 				// Store the next match
 | 				// Store the next match
 | ||||||
| 				e.table[nextHashS] = tableEntry{offset: nextS + e.cur, val: uint32(next)} | 				e.table[nextHashS] = tableEntry{offset: nextS + e.cur} | ||||||
| 				eLong := &e.bTable[nextHashL] | 				eLong := &e.bTable[nextHashL] | ||||||
| 				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur, val: uint32(next)}, eLong.Cur | 				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur | ||||||
| 
 | 
 | ||||||
| 				// Check repeat at s + repOff
 | 				// Check repeat at s + repOff
 | ||||||
| 				const repOff = 1 | 				const repOff = 1 | ||||||
|  | @ -174,7 +174,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { | ||||||
| 				// If the next long is a candidate, use that...
 | 				// If the next long is a candidate, use that...
 | ||||||
| 				t2 = lCandidate.Cur.offset - e.cur | 				t2 = lCandidate.Cur.offset - e.cur | ||||||
| 				if nextS-t2 < maxMatchOffset { | 				if nextS-t2 < maxMatchOffset { | ||||||
| 					if lCandidate.Cur.val == uint32(next) { | 					if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { | ||||||
| 						ml := e.matchlen(nextS+4, t2+4, src) + 4 | 						ml := e.matchlen(nextS+4, t2+4, src) + 4 | ||||||
| 						if ml > l { | 						if ml > l { | ||||||
| 							t = t2 | 							t = t2 | ||||||
|  | @ -185,7 +185,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { | ||||||
| 					} | 					} | ||||||
| 					// If the previous long is a candidate, use that...
 | 					// If the previous long is a candidate, use that...
 | ||||||
| 					t2 = lCandidate.Prev.offset - e.cur | 					t2 = lCandidate.Prev.offset - e.cur | ||||||
| 					if nextS-t2 < maxMatchOffset && lCandidate.Prev.val == uint32(next) { | 					if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { | ||||||
| 						ml := e.matchlen(nextS+4, t2+4, src) + 4 | 						ml := e.matchlen(nextS+4, t2+4, src) + 4 | ||||||
| 						if ml > l { | 						if ml > l { | ||||||
| 							t = t2 | 							t = t2 | ||||||
|  | @ -244,9 +244,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { | ||||||
| 			// Index after match end.
 | 			// Index after match end.
 | ||||||
| 			for i := nextS + 1; i < int32(len(src))-8; i += 2 { | 			for i := nextS + 1; i < int32(len(src))-8; i += 2 { | ||||||
| 				cv := load6432(src, i) | 				cv := load6432(src, i) | ||||||
| 				e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur, val: uint32(cv)} | 				e.table[hash4x64(cv, tableBits)] = tableEntry{offset: i + e.cur} | ||||||
| 				eLong := &e.bTable[hash7(cv, tableBits)] | 				eLong := &e.bTable[hash7(cv, tableBits)] | ||||||
| 				eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur, val: uint32(cv)}, eLong.Cur | 				eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur | ||||||
| 			} | 			} | ||||||
| 			goto emitRemainder | 			goto emitRemainder | ||||||
| 		} | 		} | ||||||
|  | @ -255,8 +255,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { | ||||||
| 		if true { | 		if true { | ||||||
| 			for i := nextS + 1; i < s-1; i += 2 { | 			for i := nextS + 1; i < s-1; i += 2 { | ||||||
| 				cv := load6432(src, i) | 				cv := load6432(src, i) | ||||||
| 				t := tableEntry{offset: i + e.cur, val: uint32(cv)} | 				t := tableEntry{offset: i + e.cur} | ||||||
| 				t2 := tableEntry{offset: t.offset + 1, val: uint32(cv >> 8)} | 				t2 := tableEntry{offset: t.offset + 1} | ||||||
| 				eLong := &e.bTable[hash7(cv, tableBits)] | 				eLong := &e.bTable[hash7(cv, tableBits)] | ||||||
| 				eLong2 := &e.bTable[hash7(cv>>8, tableBits)] | 				eLong2 := &e.bTable[hash7(cv>>8, tableBits)] | ||||||
| 				e.table[hash4x64(cv, tableBits)] = t | 				e.table[hash4x64(cv, tableBits)] = t | ||||||
|  |  | ||||||
|  | @ -262,7 +262,7 @@ func (t *tokens) EstimatedBits() int { | ||||||
| // AddMatch adds a match to the tokens.
 | // AddMatch adds a match to the tokens.
 | ||||||
| // This function is very sensitive to inlining and right on the border.
 | // This function is very sensitive to inlining and right on the border.
 | ||||||
| func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { | func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { | ||||||
| 	if debugDecode { | 	if debugDeflate { | ||||||
| 		if xlength >= maxMatchLength+baseMatchLength { | 		if xlength >= maxMatchLength+baseMatchLength { | ||||||
| 			panic(fmt.Errorf("invalid length: %v", xlength)) | 			panic(fmt.Errorf("invalid length: %v", xlength)) | ||||||
| 		} | 		} | ||||||
|  | @ -281,7 +281,7 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) { | ||||||
| // AddMatchLong adds a match to the tokens, potentially longer than max match length.
 | // AddMatchLong adds a match to the tokens, potentially longer than max match length.
 | ||||||
| // Length should NOT have the base subtracted, only offset should.
 | // Length should NOT have the base subtracted, only offset should.
 | ||||||
| func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) { | func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) { | ||||||
| 	if debugDecode { | 	if debugDeflate { | ||||||
| 		if xoffset >= maxMatchOffset+baseMatchOffset { | 		if xoffset >= maxMatchOffset+baseMatchOffset { | ||||||
| 			panic(fmt.Errorf("invalid offset: %v", xoffset)) | 			panic(fmt.Errorf("invalid offset: %v", xoffset)) | ||||||
| 		} | 		} | ||||||
|  |  | ||||||
|  | @ -806,7 +806,7 @@ func (b *blockEnc) genCodes() { | ||||||
| 		mlH[v]++ | 		mlH[v]++ | ||||||
| 		if v > mlMax { | 		if v > mlMax { | ||||||
| 			mlMax = v | 			mlMax = v | ||||||
| 			if debug && mlMax > maxMatchLengthSymbol { | 			if debugAsserts && mlMax > maxMatchLengthSymbol { | ||||||
| 				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) | 				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen)) | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
|  | @ -821,13 +821,13 @@ func (b *blockEnc) genCodes() { | ||||||
| 		} | 		} | ||||||
| 		return int(max) | 		return int(max) | ||||||
| 	} | 	} | ||||||
| 	if mlMax > maxMatchLengthSymbol { | 	if debugAsserts && mlMax > maxMatchLengthSymbol { | ||||||
| 		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) | 		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) | ||||||
| 	} | 	} | ||||||
| 	if ofMax > maxOffsetBits { | 	if debugAsserts && ofMax > maxOffsetBits { | ||||||
| 		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) | 		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax)) | ||||||
| 	} | 	} | ||||||
| 	if llMax > maxLiteralLengthSymbol { | 	if debugAsserts && llMax > maxLiteralLengthSymbol { | ||||||
| 		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) | 		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
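
Starting with the zstd hunks, the same idea appears as a split between the package's verbose debug flag and a separate debugAsserts constant that guards only the panicking invariant checks (genCodes above; byteBuf, readerWrapper, and the encoders below). A small sketch; the constant declarations are assumptions consistent with how the diff uses them:

package zstdsketch

import "fmt"

// Assumed declarations; in the library these live in the package's
// central debug configuration.
const (
	debug        = false // verbose tracing output
	debugAsserts = false // cheap invariant checks that panic on violation
)

func checkMatchLenSymbol(mlMax, maxMatchLengthSymbol uint8) {
	if debugAsserts && mlMax > maxMatchLengthSymbol {
		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
	}
}
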
|  | @ -30,7 +30,7 @@ type byteBuffer interface { | ||||||
| type byteBuf []byte | type byteBuf []byte | ||||||
| 
 | 
 | ||||||
| func (b *byteBuf) readSmall(n int) []byte { | func (b *byteBuf) readSmall(n int) []byte { | ||||||
| 	if debug && n > 8 { | 	if debugAsserts && n > 8 { | ||||||
| 		panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) | 		panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) | ||||||
| 	} | 	} | ||||||
| 	bb := *b | 	bb := *b | ||||||
|  | @ -82,7 +82,7 @@ type readerWrapper struct { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (r *readerWrapper) readSmall(n int) []byte { | func (r *readerWrapper) readSmall(n int) []byte { | ||||||
| 	if debug && n > 8 { | 	if debugAsserts && n > 8 { | ||||||
| 		panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) | 		panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) | ||||||
| 	} | 	} | ||||||
| 	n2, err := io.ReadFull(r.r, r.tmp[:n]) | 	n2, err := io.ReadFull(r.r, r.tmp[:n]) | ||||||
|  |  | ||||||
|  | @ -315,7 +315,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { | ||||||
| 			if size > 1<<20 { | 			if size > 1<<20 { | ||||||
| 				size = 1 << 20 | 				size = 1 << 20 | ||||||
| 			} | 			} | ||||||
| 			dst = make([]byte, 0, frame.WindowSize) | 			dst = make([]byte, 0, size) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		dst, err = frame.runDecoder(dst, block) | 		dst, err = frame.runDecoder(dst, block) | ||||||
|  |  | ||||||
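
In DecodeAll above, the initial output buffer is now sized from the capped size estimate rather than the frame's declared WindowSize, so a tiny frame that merely advertises a huge window no longer forces a large up-front allocation. A rough sketch; how size is derived before the visible lines is not shown in the hunk, so treat it as an estimate of the decoded length:

package zstdsketch

// allocDst sketches the new pre-allocation: cap the speculative capacity
// at 1 MiB and ignore windowSize (the old code allocated windowSize).
func allocDst(size, windowSize int) []byte {
	if size > 1<<20 {
		size = 1 << 20
	}
	_ = windowSize // no longer used for the initial allocation; dst grows later if needed
	return make([]byte, 0, size)
}
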
|  | @ -4,6 +4,8 @@ | ||||||
| 
 | 
 | ||||||
| package zstd | package zstd | ||||||
| 
 | 
 | ||||||
|  | import "fmt" | ||||||
|  | 
 | ||||||
| const ( | const ( | ||||||
| 	dFastLongTableBits = 17                      // Bits used in the long match table
 | 	dFastLongTableBits = 17                      // Bits used in the long match table
 | ||||||
| 	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
 | 	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
 | ||||||
|  | @ -29,7 +31,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { | ||||||
| 	) | 	) | ||||||
| 
 | 
 | ||||||
| 	// Protect against e.cur wraparound.
 | 	// Protect against e.cur wraparound.
 | ||||||
| 	for e.cur > (1<<30)+e.maxMatchOff { | 	for e.cur >= bufferReset { | ||||||
| 		if len(e.hist) == 0 { | 		if len(e.hist) == 0 { | ||||||
| 			for i := range e.table[:] { | 			for i := range e.table[:] { | ||||||
| 				e.table[i] = tableEntry{} | 				e.table[i] = tableEntry{} | ||||||
|  | @ -61,6 +63,7 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { | ||||||
| 			e.longTable[i].offset = v | 			e.longTable[i].offset = v | ||||||
| 		} | 		} | ||||||
| 		e.cur = e.maxMatchOff | 		e.cur = e.maxMatchOff | ||||||
|  | 		break | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	s := e.addBlock(src) | 	s := e.addBlock(src) | ||||||
|  | @ -110,7 +113,7 @@ encodeLoop: | ||||||
| 		canRepeat := len(blk.sequences) > 2 | 		canRepeat := len(blk.sequences) > 2 | ||||||
| 
 | 
 | ||||||
| 		for { | 		for { | ||||||
| 			if debug && canRepeat && offset1 == 0 { | 			if debugAsserts && canRepeat && offset1 == 0 { | ||||||
| 				panic("offset0 was 0") | 				panic("offset0 was 0") | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
|  | @ -229,10 +232,10 @@ encodeLoop: | ||||||
| 				// Reference encoder checks all 8 bytes, we only check 4,
 | 				// Reference encoder checks all 8 bytes, we only check 4,
 | ||||||
| 				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
 | 				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
 | ||||||
| 				t = candidateL.offset - e.cur | 				t = candidateL.offset - e.cur | ||||||
| 				if debug && s <= t { | 				if debugAsserts && s <= t { | ||||||
| 					panic("s <= t") | 					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 				} | 				} | ||||||
| 				if debug && s-t > e.maxMatchOff { | 				if debugAsserts && s-t > e.maxMatchOff { | ||||||
| 					panic("s - t >e.maxMatchOff") | 					panic("s - t >e.maxMatchOff") | ||||||
| 				} | 				} | ||||||
| 				if debugMatches { | 				if debugMatches { | ||||||
|  | @ -266,13 +269,13 @@ encodeLoop: | ||||||
| 				} | 				} | ||||||
| 
 | 
 | ||||||
| 				t = candidateS.offset - e.cur | 				t = candidateS.offset - e.cur | ||||||
| 				if debug && s <= t { | 				if debugAsserts && s <= t { | ||||||
| 					panic("s <= t") | 					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 				} | 				} | ||||||
| 				if debug && s-t > e.maxMatchOff { | 				if debugAsserts && s-t > e.maxMatchOff { | ||||||
| 					panic("s - t >e.maxMatchOff") | 					panic("s - t >e.maxMatchOff") | ||||||
| 				} | 				} | ||||||
| 				if debug && t < 0 { | 				if debugAsserts && t < 0 { | ||||||
| 					panic("t<0") | 					panic("t<0") | ||||||
| 				} | 				} | ||||||
| 				if debugMatches { | 				if debugMatches { | ||||||
|  | @ -294,11 +297,11 @@ encodeLoop: | ||||||
| 		offset2 = offset1 | 		offset2 = offset1 | ||||||
| 		offset1 = s - t | 		offset1 = s - t | ||||||
| 
 | 
 | ||||||
| 		if debug && s <= t { | 		if debugAsserts && s <= t { | ||||||
| 			panic("s <= t") | 			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		if debug && canRepeat && int(offset1) > len(src) { | 		if debugAsserts && canRepeat && int(offset1) > len(src) { | ||||||
| 			panic("invalid offset") | 			panic("invalid offset") | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
|  | @ -424,7 +427,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { | ||||||
| 	) | 	) | ||||||
| 
 | 
 | ||||||
| 	// Protect against e.cur wraparound.
 | 	// Protect against e.cur wraparound.
 | ||||||
| 	if e.cur > (1<<30)+e.maxMatchOff { | 	if e.cur >= bufferReset { | ||||||
| 		for i := range e.table[:] { | 		for i := range e.table[:] { | ||||||
| 			e.table[i] = tableEntry{} | 			e.table[i] = tableEntry{} | ||||||
| 		} | 		} | ||||||
|  | @ -545,10 +548,10 @@ encodeLoop: | ||||||
| 				// Reference encoder checks all 8 bytes, we only check 4,
 | 				// Reference encoder checks all 8 bytes, we only check 4,
 | ||||||
| 				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
 | 				// but the likelihood of both the first 4 bytes and the hash matching should be enough.
 | ||||||
| 				t = candidateL.offset - e.cur | 				t = candidateL.offset - e.cur | ||||||
| 				if debug && s <= t { | 				if debugAsserts && s <= t { | ||||||
| 					panic("s <= t") | 					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 				} | 				} | ||||||
| 				if debug && s-t > e.maxMatchOff { | 				if debugAsserts && s-t > e.maxMatchOff { | ||||||
| 					panic("s - t >e.maxMatchOff") | 					panic("s - t >e.maxMatchOff") | ||||||
| 				} | 				} | ||||||
| 				if debugMatches { | 				if debugMatches { | ||||||
|  | @ -582,13 +585,13 @@ encodeLoop: | ||||||
| 				} | 				} | ||||||
| 
 | 
 | ||||||
| 				t = candidateS.offset - e.cur | 				t = candidateS.offset - e.cur | ||||||
| 				if debug && s <= t { | 				if debugAsserts && s <= t { | ||||||
| 					panic("s <= t") | 					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 				} | 				} | ||||||
| 				if debug && s-t > e.maxMatchOff { | 				if debugAsserts && s-t > e.maxMatchOff { | ||||||
| 					panic("s - t >e.maxMatchOff") | 					panic("s - t >e.maxMatchOff") | ||||||
| 				} | 				} | ||||||
| 				if debug && t < 0 { | 				if debugAsserts && t < 0 { | ||||||
| 					panic("t<0") | 					panic("t<0") | ||||||
| 				} | 				} | ||||||
| 				if debugMatches { | 				if debugMatches { | ||||||
|  | @ -610,8 +613,8 @@ encodeLoop: | ||||||
| 		offset2 = offset1 | 		offset2 = offset1 | ||||||
| 		offset1 = s - t | 		offset1 = s - t | ||||||
| 
 | 
 | ||||||
| 		if debug && s <= t { | 		if debugAsserts && s <= t { | ||||||
| 			panic("s <= t") | 			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		// Extend the 4-byte match as long as possible.
 | 		// Extend the 4-byte match as long as possible.
 | ||||||
|  |  | ||||||
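
The doubleFastEncoder above (and fastEncoder below) now guard against e.cur wraparound with a shared bufferReset threshold instead of the inline (1<<30)+e.maxMatchOff expression, and the history-preserving branch gains an explicit break so the offset-shifting pass runs at most once. A simplified sketch of the pattern; bufferReset's actual value and the shift arithmetic outside the visible lines are assumptions:

package zstdsketch

// Assumed value for illustration; the library defines bufferReset as a
// package constant well below the int32 limit.
const bufferReset = 1 << 30

type offEntry struct{ offset int32 }

// guardWraparound mirrors the structure of the diff: clear the table
// when there is no history, otherwise shift stored offsets down so they
// stay valid relative to the reduced current position, then stop.
func guardWraparound(cur, maxMatchOff int32, table []offEntry, histLen int32) int32 {
	for cur >= bufferReset {
		if histLen == 0 {
			for i := range table {
				table[i] = offEntry{}
			}
			cur = maxMatchOff
			break
		}
		minOff := cur + histLen - maxMatchOff
		for i := range table {
			v := table[i].offset
			if v < minOff {
				v = 0
			} else {
				v = v - cur + maxMatchOff
			}
			table[i].offset = v
		}
		cur = maxMatchOff
		break // added in the diff: the loop body must not repeat
	}
	return cur
}
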
|  | @ -5,6 +5,7 @@ | ||||||
| package zstd | package zstd | ||||||
| 
 | 
 | ||||||
| import ( | import ( | ||||||
|  | 	"fmt" | ||||||
| 	"math/bits" | 	"math/bits" | ||||||
| 
 | 
 | ||||||
| 	"github.com/klauspost/compress/zstd/internal/xxhash" | 	"github.com/klauspost/compress/zstd/internal/xxhash" | ||||||
|  | @ -74,7 +75,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { | ||||||
| 	) | 	) | ||||||
| 
 | 
 | ||||||
| 	// Protect against e.cur wraparound.
 | 	// Protect against e.cur wraparound.
 | ||||||
| 	for e.cur > (1<<30)+e.maxMatchOff { | 	for e.cur >= bufferReset { | ||||||
| 		if len(e.hist) == 0 { | 		if len(e.hist) == 0 { | ||||||
| 			for i := range e.table[:] { | 			for i := range e.table[:] { | ||||||
| 				e.table[i] = tableEntry{} | 				e.table[i] = tableEntry{} | ||||||
|  | @ -94,6 +95,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { | ||||||
| 			e.table[i].offset = v | 			e.table[i].offset = v | ||||||
| 		} | 		} | ||||||
| 		e.cur = e.maxMatchOff | 		e.cur = e.maxMatchOff | ||||||
|  | 		break | ||||||
| 	} | 	} | ||||||
| 
 | 
 | ||||||
| 	s := e.addBlock(src) | 	s := e.addBlock(src) | ||||||
|  | @ -151,7 +153,7 @@ encodeLoop: | ||||||
| 		canRepeat := len(blk.sequences) > 2 | 		canRepeat := len(blk.sequences) > 2 | ||||||
| 
 | 
 | ||||||
| 		for { | 		for { | ||||||
| 			if debug && canRepeat && offset1 == 0 { | 			if debugAsserts && canRepeat && offset1 == 0 { | ||||||
| 				panic("offset0 was 0") | 				panic("offset0 was 0") | ||||||
| 			} | 			} | ||||||
| 
 | 
 | ||||||
|  | @ -212,10 +214,10 @@ encodeLoop: | ||||||
| 			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { | 			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { | ||||||
| 				// found a regular match
 | 				// found a regular match
 | ||||||
| 				t = candidate.offset - e.cur | 				t = candidate.offset - e.cur | ||||||
| 				if debug && s <= t { | 				if debugAsserts && s <= t { | ||||||
| 					panic("s <= t") | 					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 				} | 				} | ||||||
| 				if debug && s-t > e.maxMatchOff { | 				if debugAsserts && s-t > e.maxMatchOff { | ||||||
| 					panic("s - t >e.maxMatchOff") | 					panic("s - t >e.maxMatchOff") | ||||||
| 				} | 				} | ||||||
| 				break | 				break | ||||||
|  | @ -225,13 +227,13 @@ encodeLoop: | ||||||
| 				// found a regular match
 | 				// found a regular match
 | ||||||
| 				t = candidate2.offset - e.cur | 				t = candidate2.offset - e.cur | ||||||
| 				s++ | 				s++ | ||||||
| 				if debug && s <= t { | 				if debugAsserts && s <= t { | ||||||
| 					panic("s <= t") | 					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 				} | 				} | ||||||
| 				if debug && s-t > e.maxMatchOff { | 				if debugAsserts && s-t > e.maxMatchOff { | ||||||
| 					panic("s - t >e.maxMatchOff") | 					panic("s - t >e.maxMatchOff") | ||||||
| 				} | 				} | ||||||
| 				if debug && t < 0 { | 				if debugAsserts && t < 0 { | ||||||
| 					panic("t<0") | 					panic("t<0") | ||||||
| 				} | 				} | ||||||
| 				break | 				break | ||||||
|  | @ -246,11 +248,11 @@ encodeLoop: | ||||||
| 		offset2 = offset1 | 		offset2 = offset1 | ||||||
| 		offset1 = s - t | 		offset1 = s - t | ||||||
| 
 | 
 | ||||||
| 		if debug && s <= t { | 		if debugAsserts && s <= t { | ||||||
| 			panic("s <= t") | 			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		if debug && canRepeat && int(offset1) > len(src) { | 		if debugAsserts && canRepeat && int(offset1) > len(src) { | ||||||
| 			panic("invalid offset") | 			panic("invalid offset") | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
|  | @ -343,7 +345,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	// Protect against e.cur wraparound.
 | 	// Protect against e.cur wraparound.
 | ||||||
| 	if e.cur > (1<<30)+e.maxMatchOff { | 	if e.cur >= bufferReset { | ||||||
| 		for i := range e.table[:] { | 		for i := range e.table[:] { | ||||||
| 			e.table[i] = tableEntry{} | 			e.table[i] = tableEntry{} | ||||||
| 		} | 		} | ||||||
|  | @ -456,10 +458,10 @@ encodeLoop: | ||||||
| 			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { | 			if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { | ||||||
| 				// found a regular match
 | 				// found a regular match
 | ||||||
| 				t = candidate.offset - e.cur | 				t = candidate.offset - e.cur | ||||||
| 				if debug && s <= t { | 				if debugAsserts && s <= t { | ||||||
| 					panic("s <= t") | 					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 				} | 				} | ||||||
| 				if debug && s-t > e.maxMatchOff { | 				if debugAsserts && s-t > e.maxMatchOff { | ||||||
| 					panic("s - t >e.maxMatchOff") | 					panic("s - t >e.maxMatchOff") | ||||||
| 				} | 				} | ||||||
| 				break | 				break | ||||||
|  | @ -469,13 +471,13 @@ encodeLoop: | ||||||
| 				// found a regular match
 | 				// found a regular match
 | ||||||
| 				t = candidate2.offset - e.cur | 				t = candidate2.offset - e.cur | ||||||
| 				s++ | 				s++ | ||||||
| 				if debug && s <= t { | 				if debugAsserts && s <= t { | ||||||
| 					panic("s <= t") | 					panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 				} | 				} | ||||||
| 				if debug && s-t > e.maxMatchOff { | 				if debugAsserts && s-t > e.maxMatchOff { | ||||||
| 					panic("s - t >e.maxMatchOff") | 					panic("s - t >e.maxMatchOff") | ||||||
| 				} | 				} | ||||||
| 				if debug && t < 0 { | 				if debugAsserts && t < 0 { | ||||||
| 					panic("t<0") | 					panic("t<0") | ||||||
| 				} | 				} | ||||||
| 				break | 				break | ||||||
|  | @ -490,8 +492,8 @@ encodeLoop: | ||||||
| 		offset2 = offset1 | 		offset2 = offset1 | ||||||
| 		offset1 = s - t | 		offset1 = s - t | ||||||
| 
 | 
 | ||||||
| 		if debug && s <= t { | 		if debugAsserts && s <= t { | ||||||
| 			panic("s <= t") | 			panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) | ||||||
| 		} | 		} | ||||||
| 
 | 
 | ||||||
| 		// Extend the 4-byte match as long as possible.
 | 		// Extend the 4-byte match as long as possible.
 | ||||||
|  | @ -570,6 +572,9 @@ encodeLoop: | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (e *fastEncoder) addBlock(src []byte) int32 { | func (e *fastEncoder) addBlock(src []byte) int32 { | ||||||
|  | 	if debugAsserts && e.cur > bufferReset { | ||||||
|  | 		panic(fmt.Sprintf("e.cur (%d) > buffer reset (%d)", e.cur, bufferReset)) | ||||||
|  | 	} | ||||||
| 	// check if we have space already
 | 	// check if we have space already
 | ||||||
| 	if len(e.hist)+len(src) > cap(e.hist) { | 	if len(e.hist)+len(src) > cap(e.hist) { | ||||||
| 		if cap(e.hist) == 0 { | 		if cap(e.hist) == 0 { | ||||||
|  | @ -608,15 +613,18 @@ func (e *fastEncoder) matchlenNoHist(s, t int32, src []byte) int32 { | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 { | func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 { | ||||||
| 	if debug { | 	if debugAsserts { | ||||||
| 		if s < 0 { | 		if s < 0 { | ||||||
| 			panic("s<0") | 			err := fmt.Sprintf("s (%d) < 0", s) | ||||||
|  | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 		if t < 0 { | 		if t < 0 { | ||||||
| 			panic("t<0") | 			err := fmt.Sprintf("t (%d) < 0", t) | ||||||
|  | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 		if s-t > e.maxMatchOff { | 		if s-t > e.maxMatchOff { | ||||||
| 			panic(s - t) | 			err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) | ||||||
|  | 			panic(err) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	s1 := int(s) + maxMatchLength - 4 | 	s1 := int(s) + maxMatchLength - 4 | ||||||
|  | @ -650,7 +658,10 @@ func (e *fastEncoder) Reset() { | ||||||
| 		} | 		} | ||||||
| 		e.hist = make([]byte, 0, l) | 		e.hist = make([]byte, 0, l) | ||||||
| 	} | 	} | ||||||
| 	// We offset current position so everything will be out of reach
 | 	// We offset current position so everything will be out of reach.
 | ||||||
|  | 	// If above reset line, history will be purged.
 | ||||||
|  | 	if e.cur < bufferReset { | ||||||
| 		e.cur += e.maxMatchOff + int32(len(e.hist)) | 		e.cur += e.maxMatchOff + int32(len(e.hist)) | ||||||
|  | 	} | ||||||
| 	e.hist = e.hist[:0] | 	e.hist = e.hist[:0] | ||||||
| } | } | ||||||
|  |  | ||||||
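
fastEncoder.Reset above now advances e.cur past the retained history only while the counter is still below bufferReset; once it crosses that line, the wraparound handling in Encode purges the history instead, and the new assertion in addBlock documents the invariant. A short sketch with the same field names (the struct is trimmed to what the hunk shows, and bufferReset is redeclared here with an assumed value):

package zstdsketch

const bufferReset = 1 << 30 // assumed value, as in the earlier sketch

type fastEncoderSketch struct {
	cur         int32
	maxMatchOff int32
	hist        []byte
}

// Reset pushes the current position forward so stale table entries fall
// out of reach, but only below the reset line; above it, Encode's
// wraparound path rebuilds the tables instead.
func (e *fastEncoderSketch) Reset() {
	if e.cur < bufferReset {
		e.cur += e.maxMatchOff + int32(len(e.hist))
	}
	e.hist = e.hist[:0]
}
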
@@ -156,7 +156,7 @@ func (e *Encoder) Write(p []byte) (n int, err error) {
        if err != nil {
            return n, err
        }
-       if debug && len(s.filling) > 0 {
+       if debugAsserts && len(s.filling) > 0 {
            panic(len(s.filling))
        }
    }

@@ -50,7 +50,7 @@ type frameDec struct {
 const (
    // The minimum Window_Size is 1 KB.
    MinWindowSize = 1 << 10
-   MaxWindowSize = 1 << 30
+   MaxWindowSize = 1 << 29
 )

 var (

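This hunk halves the decoder's maximum window, from 1 << 30 bytes (1 GiB) to 1 << 29 bytes (512 MiB). A quick check of the arithmetic:

package main

import "fmt"

func main() {
    const oldMax = 1 << 30 // previous MaxWindowSize
    const newMax = 1 << 29 // MaxWindowSize after this commit
    fmt.Println(oldMax/(1<<20), "MiB ->", newMax/(1<<20), "MiB") // 1024 MiB -> 512 MiB
}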
@@ -118,7 +118,7 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {

        if int32(bitStream)&(threshold-1) < max {
            count = int32(bitStream) & (threshold - 1)
-           if debug && nbBits < 1 {
+           if debugAsserts && nbBits < 1 {
                panic("nbBits underflow")
            }
            bitCount += nbBits - 1

@@ -327,7 +327,7 @@ func (s *fseEncoder) normalizeCount(length int) error {
        if err != nil {
            return err
        }
-       if debug {
+       if debugAsserts {
            err = s.validateNorm()
            if err != nil {
                return err
@@ -336,7 +336,7 @@ func (s *fseEncoder) normalizeCount(length int) error {
        return s.buildCTable()
    }
    s.norm[largest] += stillToDistribute
-   if debug {
+   if debugAsserts {
        err := s.validateNorm()
        if err != nil {
            return err
@@ -619,7 +619,7 @@ func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
 func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
    minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16
    threshold := (minNbBits + 1) << 16
-   if debug {
+   if debugAsserts {
        if !(s.actualTableLog < 16) {
            panic("!s.actualTableLog < 16")
        }
@@ -633,7 +633,7 @@ func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 {
    // linear interpolation (very approximate)
    normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog
    bitMultiplier := uint32(1) << accuracyLog
-   if debug {
+   if debugAsserts {
        if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold {
            panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold")
        }

@@ -179,13 +179,13 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
    MOVQ ·prime2v(SB), R14

    // Load slice.
-   MOVQ b_base+8(FP), CX
-   MOVQ b_len+16(FP), DX
+   MOVQ arg1_base+8(FP), CX
+   MOVQ arg1_len+16(FP), DX
    LEAQ (CX)(DX*1), BX
    SUBQ $32, BX

    // Load vN from d.
-   MOVQ d+0(FP), AX
+   MOVQ arg+0(FP), AX
    MOVQ 0(AX), R8   // v1
    MOVQ 8(AX), R9   // v2
    MOVQ 16(AX), R10 // v3
@@ -209,7 +209,7 @@ blockLoop:
    MOVQ R11, 24(AX)

    // The number of bytes written is CX minus the old base pointer.
-   SUBQ b_base+8(FP), CX
+   SUBQ arg1_base+8(FP), CX
    MOVQ CX, ret+32(FP)

    RET

@@ -6,11 +6,20 @@ package zstd
 import (
    "errors"
    "log"
+   "math"
    "math/bits"
 )

+// enable debug printing
 const debug = false
+
+// Enable extra assertions.
+const debugAsserts = debug || false
+
+// print sequence details
 const debugSequences = false
+
+// print detailed matching information
 const debugMatches = false

 // force encoder to use predefined tables.
@@ -19,6 +28,9 @@ const forcePreDef = false
 // zstdMinMatch is the minimum zstd match length.
 const zstdMinMatch = 3

+// Reset the buffer offset when reaching this.
+const bufferReset = math.MaxInt32 - MaxWindowSize
+
 var (
    // ErrReservedBlockType is returned when a reserved block type is found.
    // Typically this indicates wrong or corrupted input.

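The new debugAsserts constant splits the cheap runtime assertions off from the debug print statements, and bufferReset gives the encoders a ceiling for their running offsets. Because debugAsserts is a compile-time constant, a branch such as "if debugAsserts && cond { panic(...) }" is dead code and is dropped by the compiler in normal builds. A small sketch of that gating pattern; the helper below is illustrative, not part of the vendored package:

package main

import "fmt"

const debug = false
const debugAsserts = debug || false // flip to true to compile the checks in

// assertOrdered mirrors the assertion style used throughout this diff:
// the whole branch is eliminated unless debugAsserts is true.
func assertOrdered(s, t int32) {
    if debugAsserts && s <= t {
        panic(fmt.Sprintf("s (%d) <= t (%d)", s, t))
    }
}

func main() {
    assertOrdered(5, 2) // fine
    assertOrdered(1, 4) // would panic only in a debugAsserts build
    fmt.Println("assertions disabled in this build")
}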
@@ -62,7 +62,7 @@ github.com/containernetworking/plugins/pkg/ns
 github.com/containernetworking/plugins/pkg/utils/hwaddr
 github.com/containernetworking/plugins/plugins/ipam/host-local/backend
 github.com/containernetworking/plugins/plugins/ipam/host-local/backend/allocator
-# github.com/containers/buildah v1.14.1
+# github.com/containers/buildah v1.14.2
 github.com/containers/buildah
 github.com/containers/buildah/bind
 github.com/containers/buildah/chroot
@@ -142,7 +142,7 @@ github.com/containers/psgo/internal/dev
 github.com/containers/psgo/internal/host
 github.com/containers/psgo/internal/proc
 github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.16.0
+# github.com/containers/storage v1.16.1
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -314,7 +314,7 @@ github.com/inconshreveable/mousetrap
 github.com/ishidawataru/sctp
 # github.com/json-iterator/go v1.1.9
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.10.0
+# github.com/klauspost/compress v1.10.2
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0