Compare commits

...

9 Commits
main ... v1.2.2

Author SHA1 Message Date
Daniel J Walsh e72dd9c5c8
Merge pull request #1202 from containers/skopeo_v1.2.2_try2
Skopeo v1.2.2
2021-02-18 18:05:20 -05:00
TomSweeneyRedHat b94b7dc0f0 Bump to Skopeo v1.2.2
Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
2021-02-18 17:44:16 -05:00
Daniel J Walsh 3abb778b4d
Merge pull request #1197 from containers/dev/tsweeney/vendordance
Bump c/common c/image and c/storage
2021-02-17 19:24:05 -05:00
TomSweeneyRedHat f78bf42c12 Bump c/common c/image and c/storage to latest
Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
2021-02-17 16:13:37 -05:00
TomSweeneyRedHat b4210c0ba0 Fix gating test in release-1.2 port #1169
Cherry-pick of #1169 to get gating tests happy.
Addresses: https://bugzilla.redhat.com/show_bug.cgi?id=1914884

Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
2021-02-14 12:24:07 +01:00
Miloslav Trmač 6c0e35a50c
Merge pull request #1187 from containers/dev/tsweeney/protobuf1.2
Bump vendor/modules.txt in release-1.2
2021-02-05 12:15:26 +01:00
TomSweeneyRedHat af2d2b416f Bump vendor/modules.txt in release-1.2
Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
2021-02-04 20:18:37 -05:00
Daniel J Walsh a05ddb8d1d
Merge pull request #1179 from containers/crypto_bump_release-1.2
Bump golang.org/x/crypto to the latest
2021-02-01 08:50:24 -05:00
TomSweeneyRedHat 8237787b53 Bump golang.org/x/crypto to the latest
Signed-off-by: TomSweeneyRedHat <tsweeney@redhat.com>
2021-01-30 18:45:06 -05:00
178 changed files with 6389 additions and 2861 deletions

go.mod
View File

@ -3,13 +3,14 @@ module github.com/containers/skopeo
go 1.12
require (
github.com/containers/common v0.33.0
github.com/containers/image/v5 v5.9.0
github.com/containers/common v0.33.4
github.com/containers/image/v5 v5.10.2
github.com/containers/ocicrypt v1.0.3
github.com/containers/storage v1.24.5
github.com/containers/storage v1.24.6
github.com/docker/docker v17.12.0-ce-rc1.0.20201020191947-73dc6a680cdd+incompatible
github.com/dsnet/compress v0.0.1 // indirect
github.com/go-check/check v0.0.0-20180628173108-788fd7840127
github.com/gogo/protobuf v1.3.2 // indirect
github.com/moby/term v0.0.0-20201110203204-bea5bbe245bf // indirect
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6
@ -20,9 +21,10 @@ require (
github.com/sirupsen/logrus v1.7.0
github.com/spf13/cobra v1.1.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.6.1
github.com/stretchr/testify v1.7.0
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
go4.org v0.0.0-20190218023631-ce4c26f7be8e // indirect
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad // indirect
gopkg.in/yaml.v2 v2.4.0
gotest.tools/v3 v3.0.3 // indirect
)

go.sum
View File

@ -18,11 +18,8 @@ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331 h1:3YnB7Hpmh1lPecPE8doMOtYCrMdrpedZOvxfuNES/Vk=
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
github.com/Microsoft/hcsshim v0.8.14 h1:lbPVK25c1cu5xTLITwpUcxoA9vKrKErASPYygvouJns=
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@ -52,8 +49,6 @@ github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmE
github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59 h1:qWj4qVYZ95vLWwqyNJCQg7rDsG5wPdze0UaPolH7DUk=
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
@ -65,17 +60,19 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containers/common v0.33.0 h1:7Z6aAQ2s2iniEXd/IoGgc0ukmgmzAE8Oa929t6huVB8=
github.com/containers/common v0.33.0/go.mod h1:mjDo/NKeweL/onaspLhZ38WnHXaYmrELHclIdvSnYpY=
github.com/containers/image/v5 v5.9.0 h1:dRmUtcluQcmasNo3DpnRoZjfU0rOu1qZeL6wlDJr10Q=
github.com/containers/image/v5 v5.9.0/go.mod h1:blOEFd/iFdeyh891ByhCVUc+xAcaI3gBegXECwz9UbQ=
github.com/containers/common v0.33.4 h1:f1jowItfo6xw0bZGZq8oE5dw1pBIkldqB0FqW+HHzG8=
github.com/containers/common v0.33.4/go.mod h1:PhgL71XuC4jJ/1BIqeP7doke3aMFkCP90YBXwDeUr9g=
github.com/containers/image/v5 v5.10.1/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs=
github.com/containers/image/v5 v5.10.2 h1:STD9GYR9p/X0qTLmBYsyx8dEM7zQW+qZ8KHoL/64fkg=
github.com/containers/image/v5 v5.10.2/go.mod h1:JlRLJZv7elVbtHaaaR6Kz8i6G3k2ttj4t7fubwxD9Hs=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.3 h1:vYgl+RZ9Q3DPMuTfxmN+qp0X2Bj52uuY2vnt6GzVe1c=
github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
github.com/containers/storage v1.23.7/go.mod h1:cUT2zHjtx+WlVri30obWmM2gpqpi8jfPsmIzP1TVpEI=
github.com/containers/storage v1.24.5 h1:BusfdU0rCS2/Daa/DPw+0iLfGRlYA7UVF7D0el3N7Vk=
github.com/containers/storage v1.24.5/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
github.com/containers/storage v1.24.6 h1:9PBb9PoGuj5B/3MGfxx//RmUjMAklmx3rBbuCkuIc94=
github.com/containers/storage v1.24.6/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@ -125,12 +122,13 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -211,13 +209,13 @@ github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.11.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8h3jc=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.5 h1:xNCE0uE6yvTPRS+0wGNMHPo3NIpwnk6aluQZ6R6kRcc=
github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg=
github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@ -293,7 +291,6 @@ github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5X
github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8=
github.com/opencontainers/runc v1.0.0-rc92 h1:+IczUKCRzDzFDnw99O/PAqrcBBCoRp9xN3cB1SYSNS4=
github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.3-0.20200710190001-3e4195d92445/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@ -378,6 +375,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@ -385,15 +384,14 @@ github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmD
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/ulikunitz/xz v0.5.9 h1:RsKRIA2MO8x56wkkcd3LbtcE/uMszhb6DpRf+3uwa3I=
github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vbauerster/mpb/v5 v5.3.0 h1:vgrEJjUzHaSZKDRRxul5Oh4C72Yy/5VEMb0em+9M0mQ=
github.com/vbauerster/mpb/v5 v5.3.0/go.mod h1:4yTkvAb8Cm4eylAp6t0JRq6pXDkFJ4krUlDqWYkakAs=
github.com/vbauerster/mpb/v5 v5.4.0 h1:n8JPunifvQvh6P1D1HAl2Ur9YcmKT1tpoUuiea5mlmg=
github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
@ -408,6 +406,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@ -427,9 +427,12 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -448,6 +451,8 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -465,8 +470,10 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -477,8 +484,11 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -493,7 +503,6 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -512,13 +521,16 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ=
golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@ -548,9 +560,15 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=

View File

@ -65,59 +65,47 @@ END_EXPECT
}
@test "inspect: env" {
remote_image=docker://docker.io/fedora:latest
remote_image=docker://quay.io/libpod/fedora:31
run_skopeo inspect $remote_image
inspect_remote=$output
# Simple check on 'inspect' output with environment variables.
# 1) Get remote image values of environment variables (the value of 'Env')
# 2) Confirm substring in check_array and the value of 'Env' match.
check_array=(PATH=.* )
check_array=(FGC=f31 DISTTAG=f31container)
remote=$(jq '.Env[]' <<<"$inspect_remote")
for substr in ${check_array[@]}; do
expect_output --from="$remote" --substring "$substr"
done
}
# Tests https://github.com/containers/skopeo/pull/708
@test "inspect: image manifest list w/ diff platform" {
# When --raw is provided, can inspect show the raw manifest list, w/o
# requiring any particular platform to be present
# To test whether container image can be inspected successfully w/o
# platform dependency.
# 1) Get current platform arch
# 2) Inspect container image is different from current platform arch
# 3) Compare output w/ expected result
# This image's manifest is for an os + arch that is... um, unlikely
# to support skopeo in the foreseeable future. Or past. The image
# is created by the make-noarch-manifest script in this directory.
img=docker://quay.io/libpod/notmyarch:20210121
# Here we see a revolting workaround for a podman incompatibility
# change: in April 2020, podman info completely changed format
# of the keys. What worked until then now throws an error. We
# need to work with both old and new podman.
arch=$(podman info --format '{{.host.arch}}' || true)
if [[ -z "$arch" ]]; then
arch=$(podman info --format '{{.Host.Arch}}')
fi
# Get our host arch (what we're running on). This assumes that skopeo
# arch matches podman; it also assumes running podman >= April 2020
# (prior to that, the format keys were lower-case).
arch=$(podman info --format '{{.Host.Arch}}')
case $arch in
"amd64")
diff_arch_list="s390x ppc64le"
;;
"s390x")
diff_arch_list="amd64 ppc64le"
;;
"ppc64le")
diff_arch_list="amd64 s390x"
;;
*)
diff_arch_list="amd64 s390x ppc64le"
;;
esac
# By default, 'inspect' tries to match our host os+arch. This should fail.
run_skopeo 1 inspect $img
expect_output --substring "Error parsing manifest for image: Error choosing image instance: no image found in manifest list for architecture $arch, variant " \
"skopeo inspect, without --raw, fails"
for arch in $diff_arch_list; do
remote_image=docker://docker.io/$arch/golang
run_skopeo inspect --tls-verify=false --raw $remote_image
remote_arch=$(jq -r '.manifests[0]["platform"]["architecture"]' <<< "$output")
expect_output --from="$remote_arch" "$arch" "platform arch of $remote_image"
done
# With --raw, we can inspect
run_skopeo inspect --raw $img
expect_output --substring "manifests.*platform.*architecture" \
"skopeo inspect --raw returns reasonable output"
# ...and what we get should be consistent with what our script created.
archinfo=$(jq -r '.manifests[0].platform | {os,variant,architecture} | join("-")' <<<"$output")
expect_output --from="$archinfo" "amigaos-1000-mc68000" \
"os - variant - architecture of $img"
}
# vim: filetype=sh

View File

@ -14,7 +14,7 @@ function setup() {
# From remote, to dir1, to local, to dir2;
# compare dir1 and dir2, expect no changes
@test "copy: dir, round trip" {
local remote_image=docker://docker.io/library/busybox:latest
local remote_image=docker://quay.io/libpod/busybox:latest
local localimg=docker://localhost:5000/busybox:unsigned
local dir1=$TESTDIR/dir1
@ -30,7 +30,7 @@ function setup() {
# Same as above, but using 'oci:' instead of 'dir:' and with a :latest tag
@test "copy: oci, round trip" {
local remote_image=docker://docker.io/library/busybox:latest
local remote_image=docker://quay.io/libpod/busybox:latest
local localimg=docker://localhost:5000/busybox:unsigned
local dir1=$TESTDIR/oci1
@ -46,7 +46,7 @@ function setup() {
# Compression zstd
@test "copy: oci, round trip, zstd" {
local remote_image=docker://docker.io/library/busybox:latest
local remote_image=docker://quay.io/libpod/busybox:latest
local dir=$TESTDIR/dir
@ -61,7 +61,7 @@ function setup() {
# Same image, extracted once with :tag and once without
@test "copy: oci w/ and w/o tags" {
local remote_image=docker://docker.io/library/busybox:latest
local remote_image=docker://quay.io/libpod/busybox:latest
local dir1=$TESTDIR/dir1
local dir2=$TESTDIR/dir2
@ -78,7 +78,7 @@ function setup() {
# Registry -> storage -> oci-archive
@test "copy: registry -> storage -> oci-archive" {
local alpine=docker.io/library/alpine:latest
local alpine=quay.io/libpod/alpine:latest
local tmp=$TESTDIR/oci
run_skopeo copy docker://$alpine containers-storage:$alpine

View File

@ -14,7 +14,7 @@ function setup() {
@test "local registry, with cert" {
# Push to local registry...
run_skopeo copy --dest-cert-dir=$TESTDIR/client-auth \
docker://docker.io/library/busybox:latest \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:unsigned
# ...and pull it back out

View File

@ -43,7 +43,7 @@ function setup() {
# These should pass
run_skopeo copy --dest-tls-verify=false --dcreds=$testuser:$testpassword \
docker://docker.io/library/busybox:latest \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:mine
run_skopeo inspect --tls-verify=false --creds=$testuser:$testpassword \
docker://localhost:5000/busybox:mine
@ -55,7 +55,7 @@ function setup() {
podman login --tls-verify=false -u $testuser -p $testpassword localhost:5000
run_skopeo copy --dest-tls-verify=false \
docker://docker.io/library/busybox:latest \
docker://quay.io/libpod/busybox:latest \
docker://localhost:5000/busybox:mine
run_skopeo inspect --tls-verify=false docker://localhost:5000/busybox:mine
expect_output --substring "localhost:5000/busybox"

View File

@ -92,7 +92,7 @@ END_POLICY_JSON
fi
# Cache local copy
run_skopeo copy docker://docker.io/library/busybox:latest \
run_skopeo copy docker://quay.io/libpod/busybox:latest \
dir:$TESTDIR/busybox
# Push a bunch of images. Do so *without* --policy flag; this lets us

View File

@ -13,7 +13,7 @@ function setup() {
# delete image from registry
@test "delete: remove image from registry" {
local remote_image=docker://docker.io/library/busybox:latest
local remote_image=docker://quay.io/libpod/busybox:latest
local localimg=docker://localhost:5000/busybox:unsigned
local output=

View File

@ -6,7 +6,7 @@ SKOPEO_BINARY=${SKOPEO_BINARY:-$(dirname ${BASH_SOURCE})/../skopeo}
SKOPEO_TIMEOUT=${SKOPEO_TIMEOUT:-300}
# Default image to run as a local registry
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-docker.io/library/registry:2}
REGISTRY_FQIN=${SKOPEO_TEST_REGISTRY_FQIN:-quay.io/libpod/registry:2}
###############################################################################
# BEGIN setup/teardown

systemtest/make-noarch-manifest Executable file
View File

@ -0,0 +1,70 @@
#!/bin/sh
#
# Tool for creating an image whose OS and arch will (probably) never
# match a system on which skopeo will run. This image will be used
# in the 'inspect' test.
#
set -ex
# Name and tag of the image we create
imgname=notmyarch
imgtag=$(date +%Y%m%d)
# (In case older image exists from a prior run)
buildah rmi $imgname:$imgtag &>/dev/null || true
#
# Step 1: create an image containing only a README and a copy of this script
#
id=$(buildah from scratch)
now=$(date --rfc-3339=seconds)
readme=$(mktemp -t README.XXXXXXXX)
ME=$(basename $0)
cat >| $readme <<EOF
This is a dummy image intended solely for skopeo testing.
This image was created $now
The script used to create this image is available as $ME
EOF
buildah copy $id $readme /README
buildah copy $id $0 /$ME
buildah commit $id my_tmp_image
buildah rm $id
#
# Step 2: create a manifest list, then add the above image but with
# an os+arch override.
#
buildah manifest create $imgname:$imgtag
buildah manifest add \
--os amigaos \
--arch mc68000 \
--variant 1000 \
$imgname:$imgtag my_tmp_image
# Done. Show instructions.
cat <<EOF
DONE!
You can inspect the created image with:
skopeo inspect --raw containers-storage:localhost/$imgname:$imgtag | jq .
(FIXME: is there a way to, like, mount the image and verify the files?)
If you're happy with this image, you can now:
buildah manifest push --all $imgname:$imgtag docker://quay.io/libpod/$imgname:$imgtag
Once done, you urgently need to:
buildah rmi $imgname:$imgtag my_tmp_image
If you don't do this, 'podman images' will barf catastrophically!
EOF

View File

@ -38,7 +38,17 @@ Helpers:
... process JSON and output
}
and
Template Functions:
The following template functions are added to the template when parsed:
- join    strings.Join, {{join .Field separator}}
- lower   strings.ToLower, {{.Field | lower}}
- split   strings.Split, {{.Field | split}}
- title   strings.Title, {{.Field | title}}
- upper   strings.ToUpper, {{.Field | upper}}
report.Funcs() may be used to add additional template functions.
Adding an existing function will replace that function for the life of that template.
Note: Your code should not ignore errors
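
As a quick illustration of the functions listed above, here is a minimal sketch (editor's example, not part of this change) that parses and executes a template through the report API as it appears in this diff; the struct and field values are made up:

package main

import (
	"os"

	"github.com/containers/common/pkg/report"
)

func main() {
	// "upper" and "join" come from the DefaultFuncs table added in this PR.
	tmpl, err := report.NewTemplate("example").Parse(`{{.Name | upper}}: {{join .Tags ", "}}`)
	if err != nil {
		panic(err)
	}
	data := struct {
		Name string
		Tags []string
	}{Name: "skopeo", Tags: []string{"copy", "inspect"}}
	// Template embeds *text/template.Template, so Execute is available directly.
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
	// Prints: SKOPEO: copy, inspect
}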

View File

@ -1,6 +1,8 @@
package report
import (
"bytes"
"encoding/json"
"reflect"
"strings"
"text/template"
@ -21,16 +23,32 @@ type FuncMap template.FuncMap
var tableReplacer = strings.NewReplacer(
"table ", "",
`\t`, "\t",
`\n`, "\n",
" ", "\t",
)
// escapedReplacer will clean up escaped characters from CLI
var escapedReplacer = strings.NewReplacer(
`\t`, "\t",
`\n`, "\n",
)
var DefaultFuncs = FuncMap{
"join": strings.Join,
"json": func(v interface{}) string {
buf := &bytes.Buffer{}
enc := json.NewEncoder(buf)
enc.SetEscapeHTML(false)
enc.Encode(v)
// Remove the trailing new line added by the encoder
return strings.TrimSpace(buf.String())
},
"lower": strings.ToLower,
"pad": padWithSpace,
"split": strings.Split,
"title": strings.Title,
"truncate": truncateWithLength,
"upper": strings.ToUpper,
}
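// Editor's note (sketch, not part of the upstream file): the "json" helper
// above relies on two encoding/json behaviors. Encoder.Encode always appends
// a trailing '\n' (hence the TrimSpace), and SetEscapeHTML(false) keeps
// characters such as '<', '>' and '&' literal instead of emitting
// \u003c-style escapes. For example, {{json .}} on struct{A string}{A: "<hi>"}
// renders {"A":"<hi>"} rather than {"A":"\u003chi\u003e"}.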
// NormalizeFormat reads given go template format provided by CLI and munges it into what we need
func NormalizeFormat(format string) string {
var f string
@ -47,6 +65,22 @@ func NormalizeFormat(format string) string {
return f
}
// padWithSpace adds spaces*prefix and spaces*suffix to the input when it is non-empty
func padWithSpace(source string, prefix, suffix int) string {
if source == "" {
return source
}
return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix)
}
// truncateWithLength truncates the source string up to the length provided by the input
func truncateWithLength(source string, length int) string {
if len(source) < length {
return source
}
return source[:length]
}
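// Editor's illustration of the two helpers above (hypothetical values, not
// part of the upstream file):
//   padWithSpace("abc", 1, 2)        -> " abc  "
//   padWithSpace("", 1, 2)           -> ""
//   truncateWithLength("abcdef", 3)  -> "abc"
//   truncateWithLength("ab", 3)      -> "ab"  (shorter input returned unchanged)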
// Headers queries the interface for field names.
// Array of map is returned to support range templates
// Note: unexported fields can be supported by adding field to overrides
@ -88,7 +122,7 @@ func Headers(object interface{}, overrides map[string]string) []map[string]strin
// NewTemplate creates a new template object
func NewTemplate(name string) *Template {
return &Template{template.New(name), false}
return &Template{Template: template.New(name).Funcs(template.FuncMap(DefaultFuncs))}
}
// Parse parses text as a template body for t
@ -100,13 +134,21 @@ func (t *Template) Parse(text string) (*Template, error) {
text = NormalizeFormat(text)
}
tt, err := t.Template.Parse(text)
tt, err := t.Template.Funcs(template.FuncMap(DefaultFuncs)).Parse(text)
return &Template{tt, t.isTable}, err
}
// Funcs adds the elements of the argument map to the template's function map
// Funcs adds the elements of the argument map to the template's function map.
// A default template function will be replaced if there is a key collision.
func (t *Template) Funcs(funcMap FuncMap) *Template {
return &Template{t.Template.Funcs(template.FuncMap(funcMap)), t.isTable}
m := make(FuncMap)
for k, v := range DefaultFuncs {
m[k] = v
}
for k, v := range funcMap {
m[k] = v
}
return &Template{Template: t.Template.Funcs(template.FuncMap(m)), isTable: t.isTable}
}
// IsTable returns true if format string defines a "table"

View File

@ -69,7 +69,7 @@ func isRetryable(err error) bool {
}
return isRetryable(e.Err)
case syscall.Errno:
return shouldRestart(e)
return isErrnoRetryable(e)
case errcode.Errors:
// if this error is a group of errors, process them all in turn
for i := range e {
@ -94,10 +94,10 @@ func isRetryable(err error) bool {
return false
}
func shouldRestart(e error) bool {
func isErrnoRetryable(e error) bool {
switch e {
case syscall.ECONNREFUSED, syscall.EINTR, syscall.EAGAIN, syscall.EBUSY, syscall.ENETDOWN, syscall.ENETUNREACH, syscall.ENETRESET, syscall.ECONNABORTED, syscall.ECONNRESET, syscall.ETIMEDOUT, syscall.EHOSTDOWN, syscall.EHOSTUNREACH:
return true
}
return shouldRestartPlatform(e)
return isErrnoERESTART(e)
}
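
The rename above separates the generic errno classification (isErrnoRetryable) from the platform-specific ERESTART check. A self-contained sketch of how such a classifier is typically consumed — the helper names, errno subset, and retry policy here are illustrative, not the library's API:

package main

import (
	"errors"
	"fmt"
	"syscall"
	"time"
)

// isErrnoRetryable mirrors the classification above (a subset of the errnos).
func isErrnoRetryable(e syscall.Errno) bool {
	switch e {
	case syscall.ECONNREFUSED, syscall.EINTR, syscall.EAGAIN, syscall.ETIMEDOUT:
		return true
	}
	return false
}

// retryIfNecessary retries op up to attempts times while the error is retryable.
func retryIfNecessary(attempts int, delay time.Duration, op func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = op(); err == nil {
			return nil
		}
		var errno syscall.Errno
		if !errors.As(err, &errno) || !isErrnoRetryable(errno) {
			return err // permanent failure, do not retry
		}
		time.Sleep(delay)
	}
	return err
}

func main() {
	calls := 0
	err := retryIfNecessary(3, 10*time.Millisecond, func() error {
		calls++
		if calls < 3 {
			return syscall.ECONNREFUSED // transient on the first two calls
		}
		return nil
	})
	fmt.Println("calls:", calls, "err:", err) // calls: 3 err: <nil>
}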

View File

@ -4,6 +4,6 @@ import (
"syscall"
)
func shouldRestartPlatform(e error) bool {
func isErrnoERESTART(e error) bool {
return e == syscall.ERESTART
}

View File

@ -2,6 +2,6 @@
package retry
func shouldRestartPlatform(e error) bool {
func isErrnoERESTART(e error) bool {
return false
}

View File

@ -14,6 +14,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/image"
internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/blobinfocache"
@ -47,7 +48,7 @@ var (
// maxParallelDownloads is used to limit the maximum number of parallel
// downloads. Let's follow Firefox by limiting it to 6.
maxParallelDownloads = 6
maxParallelDownloads = uint(6)
)
// compressionBufferSize is the buffer size used to compress a blob
@ -107,18 +108,19 @@ func (d *digestingReader) Read(p []byte) (int, error) {
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
dest types.ImageDestination
rawSource types.ImageSource
reportWriter io.Writer
progressOutput io.Writer
progressInterval time.Duration
progress chan types.ProgressProperties
blobInfoCache types.BlobInfoCache
copyInParallel bool
compressionFormat compression.Algorithm
compressionLevel *int
ociDecryptConfig *encconfig.DecryptConfig
ociEncryptConfig *encconfig.EncryptConfig
dest types.ImageDestination
rawSource types.ImageSource
reportWriter io.Writer
progressOutput io.Writer
progressInterval time.Duration
progress chan types.ProgressProperties
blobInfoCache internalblobinfocache.BlobInfoCache2
copyInParallel bool
compressionFormat compression.Algorithm
compressionLevel *int
ociDecryptConfig *encconfig.DecryptConfig
ociEncryptConfig *encconfig.EncryptConfig
maxParallelDownloads uint
}
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@ -190,6 +192,8 @@ type Options struct {
// OciDecryptConfig contains the config that can be used to decrypt an image if it is
// encrypted if non-nil. If nil, it does not attempt to decrypt an image.
OciDecryptConfig *encconfig.DecryptConfig
// MaxParallelDownloads indicates the maximum number of layers to pull at the same time. A reasonable default is used if this is left as 0.
MaxParallelDownloads uint
}
// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
@ -265,9 +269,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
// we might want to add a separate CommonCtx — or would that be too confusing?
blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
ociDecryptConfig: options.OciDecryptConfig,
ociEncryptConfig: options.OciEncryptConfig,
blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)),
ociDecryptConfig: options.OciDecryptConfig,
ociEncryptConfig: options.OciEncryptConfig,
maxParallelDownloads: options.MaxParallelDownloads,
}
// Default to using gzip compression unless specified otherwise.
if options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil {
@ -648,13 +653,19 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
// So, try the preferred manifest MIME type. If the process succeeds, fine…
// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
// we're altering how they're compressed. If the process succeeds, fine…
manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
retManifestType = preferredManifestMIMEType
if err != nil {
logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
// … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 {
// … if it fails, and the failure is either because the manifest is rejected by the registry, or
// because we failed to create a manifest of the specified type because the specific manifest type
// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
// have other options available that could still succeed.
_, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError)
_, isCompressionIncompatible := errors.Cause(err).(manifest.ManifestLayerCompressionIncompatibilityError)
if (!isManifestRejected && !isCompressionIncompatible) || len(otherManifestMIMETypeCandidates) == 0 {
// We don't have other options.
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
// Don't bother the user with MIME types if we have no choice.
@ -809,7 +820,11 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
// avoid malicious images causing troubles and to be nice to servers.
var copySemaphore *semaphore.Weighted
if ic.c.copyInParallel {
copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
max := ic.c.maxParallelDownloads
if max == 0 {
max = maxParallelDownloads
}
copySemaphore = semaphore.NewWeighted(int64(max))
} else {
copySemaphore = semaphore.NewWeighted(int64(1))
}
@ -896,7 +911,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
return nil
}
// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields)
// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
func layerDigestsDiffer(a, b []types.BlobInfo) bool {
if len(a) != len(b) {
return true
@ -951,7 +966,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
instanceDigest = &manifestDigest
}
if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
return nil, "", errors.Wrap(err, "Error writing manifest")
return nil, "", errors.Wrapf(err, "Error writing manifest %q", string(man))
}
return man, manifestDigest, nil
}
@ -1049,7 +1064,7 @@ type diffIDResult struct {
err error
}
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
@ -1058,6 +1073,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
if !diffIDIsNeeded {
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
// the ImageDestination interface lets us pass in.
reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
if err != nil {
return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
@ -1115,7 +1136,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
// it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
// perhaps compressing the stream if canCompress,
// perhaps (de/re/)compressing the stream,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
@ -1191,11 +1212,15 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
// copyBlobFromStream copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
// perhaps compressing it if canCompress,
// perhaps (de/re/)compressing it if canModifyBlob,
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, error) {
if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
canModifyBlob = false
}
// The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream
@ -1253,16 +1278,23 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
originalLayerReader = destStream
}
desiredCompressionFormat := c.compressionFormat
// === Deal with layer compression/decompression if necessary
var inputInfo types.BlobInfo
var compressionOperation types.LayerCompression
uploadCompressionFormat := &c.compressionFormat
srcCompressorName := internalblobinfocache.Uncompressed
if isCompressed {
srcCompressorName = compressionFormat.Name()
}
var uploadCompressorName string
if canModifyBlob && isOciEncrypted(srcInfo.MediaType) {
// PreserveOriginal: compression cannot be applied to an encrypted blob unless it is decrypted first
logrus.Debugf("Using original blob without modification for encrypted blob")
compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
srcCompressorName = internalblobinfocache.UnknownCompression
uploadCompressorName = internalblobinfocache.UnknownCompression
uploadCompressionFormat = nil
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
compressionOperation = types.Compress
@ -1272,11 +1304,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
// we don't care.
go c.compressGoroutine(pipeWriter, destStream, desiredCompressionFormat) // Closes pipeWriter
go c.compressGoroutine(pipeWriter, destStream, *uploadCompressionFormat) // Closes pipeWriter
destStream = pipeReader
inputInfo.Digest = ""
inputInfo.Size = -1
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && desiredCompressionFormat.Name() != compressionFormat.Name() {
uploadCompressorName = uploadCompressionFormat.Name()
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && isCompressed && uploadCompressionFormat.Name() != compressionFormat.Name() {
// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
// re-compressed using the desired format.
logrus.Debugf("Blob will be converted")
@ -1291,11 +1324,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
go c.compressGoroutine(pipeWriter, s, desiredCompressionFormat) // Closes pipeWriter
go c.compressGoroutine(pipeWriter, s, *uploadCompressionFormat) // Closes pipeWriter
destStream = pipeReader
inputInfo.Digest = ""
inputInfo.Size = -1
uploadCompressorName = uploadCompressionFormat.Name()
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
logrus.Debugf("Blob will be decompressed")
compressionOperation = types.Decompress
@ -1307,11 +1341,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
destStream = s
inputInfo.Digest = ""
inputInfo.Size = -1
uploadCompressorName = internalblobinfocache.Uncompressed
uploadCompressionFormat = nil
} else {
// PreserveOriginal might also need to recompress the original blob if the desired compression format is different.
logrus.Debugf("Using original blob without modification")
compressionOperation = types.PreserveOriginal
inputInfo = srcInfo
uploadCompressorName = srcCompressorName
uploadCompressionFormat = nil
}
// Perform image encryption for valid mediatypes if ociEncryptConfig provided
@ -1371,9 +1409,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
uploadedInfo.CompressionOperation = compressionOperation
// If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
if canModifyBlob && !isConfig {
uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat
}
uploadedInfo.CompressionAlgorithm = uploadCompressionFormat
if decrypted {
uploadedInfo.CryptoOperation = types.Decrypt
} else if encrypted {
@ -1390,7 +1426,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
}
}
// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer
// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
// So, read everything from originalLayerReader, which will cause the rest to be
// sent there if we are not already at EOF.
@ -1423,6 +1459,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
default:
return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
}
if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)
}
if srcInfo.Digest != "" && srcCompressorName != "" && srcCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, srcCompressorName)
}
}
return uploadedInfo, nil
}
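
The copySemaphore hunk above caps concurrent layer copies at Options.MaxParallelDownloads, falling back to the package default of 6 when the option is left as 0. A self-contained sketch (editor's example) of that bounded-parallelism pattern with golang.org/x/sync/semaphore — the loop body is a stand-in for the real blob copy:

package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	const maxParallel = 6 // mirrors the maxParallelDownloads default above
	sem := semaphore.NewWeighted(maxParallel)
	ctx := context.Background()
	var wg sync.WaitGroup
	for layer := 0; layer < 20; layer++ {
		// Blocks once maxParallel copies are in flight.
		if err := sem.Acquire(ctx, 1); err != nil {
			break
		}
		wg.Add(1)
		go func(layer int) {
			defer sem.Release(1)
			defer wg.Done()
			fmt.Println("copying layer", layer) // stand-in for the real copy; order is nondeterministic
		}(layer)
	}
	wg.Wait()
}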

View File

@ -194,7 +194,9 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@ -210,7 +212,6 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
return false, types.BlobInfo{}, err
}
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes manifest to the destination.
@ -251,7 +252,7 @@ func pathExists(path string) (bool, error) {
if err == nil {
return true, nil
}
if err != nil && os.IsNotExist(err) {
if os.IsNotExist(err) {
return false, nil
}
return false, err

View File

@ -23,6 +23,7 @@ import (
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/pkg/tlsclientconfig"
"github.com/containers/image/v5/types"
"github.com/containers/image/v5/version"
"github.com/containers/storage/pkg/homedir"
clientLib "github.com/docker/distribution/registry/client"
"github.com/docker/go-connections/tlsconfig"
@ -65,6 +66,8 @@ var (
{path: "/etc/containers/certs.d", absolute: true},
{path: "/etc/docker/certs.d", absolute: true},
}
defaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)"
)
// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
@ -92,8 +95,9 @@ type bearerToken struct {
// dockerClient is configuration for dealing with a single Docker registry.
type dockerClient struct {
// The following members are set by newDockerClient and do not change afterwards.
sys *types.SystemContext
registry string
sys *types.SystemContext
registry string
userAgent string
// tlsClientConfig is setup by newDockerClient and will be used and updated
// by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
@ -200,9 +204,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
logrus.Debugf("error accessing certs directory due to permissions: %v", err)
continue
}
if err != nil {
return "", err
}
return "", err
}
return fullCertDirPath, nil
}
@ -277,9 +279,15 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
}
tlsClientConfig.InsecureSkipVerify = skipVerify
userAgent := defaultUserAgent
if sys != nil && sys.DockerRegistryUserAgent != "" {
userAgent = sys.DockerRegistryUserAgent
}
return &dockerClient{
sys: sys,
registry: registry,
userAgent: userAgent,
tlsClientConfig: tlsClientConfig,
}, nil
}
@ -529,9 +537,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method,
req.Header.Add(n, hh)
}
}
if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
}
req.Header.Add("User-Agent", c.userAgent)
if auth == v2Auth {
if err := c.setupRequestAuth(req, extraScope); err != nil {
return nil, err
@ -637,9 +643,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
params.Add("client_id", "containers/image")
authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
}
authReq.Header.Add("User-Agent", c.userAgent)
authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
res, err := c.client.Do(authReq)
@ -692,9 +696,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
if c.auth.Username != "" && c.auth.Password != "" {
authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
}
if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
authReq.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
}
authReq.Header.Add("User-Agent", c.userAgent)
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
res, err := c.client.Do(authReq)

View File

@ -15,6 +15,7 @@ import (
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/uploadreader"
"github.com/containers/image/v5/manifest"
@ -284,7 +285,9 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@ -299,17 +302,23 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
}
if exists {
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
}
// Then try reusing blobs from other locations.
for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
bic := blobinfocache.FromBlobInfoCache(cache)
candidates := bic.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute)
for _, candidate := range candidates {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
} else {
logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
}
// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
@ -351,8 +360,16 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
continue
}
}
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
bic.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
logrus.Debugf("... Failed: %v", err)
continue
}
return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil
}
return false, types.BlobInfo{}, nil

View File

@ -64,6 +64,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
}
attempts := []attempt{}
for _, pullSource := range pullSources {
if sys != nil && sys.DockerLogMirrorChoice {
logrus.Infof("Trying to access %q", pullSource.Reference)
} else {
logrus.Debugf("Trying to access %q", pullSource.Reference)
}
logrus.Debugf("Trying to access %q", pullSource.Reference)
s, err := newImageSourceAttempt(ctx, sys, ref, pullSource)
if err == nil {
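The logging choice is driven by a new SystemContext field (added to types.SystemContext later in this diff); a minimal sketch of opting in:

sys := &types.SystemContext{DockerLogMirrorChoice: true}
// newImageSource then logs "Trying to access ..." for each pull source
// (mirror or primary) at info level rather than debug level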

View File

@ -159,7 +159,9 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {

View File

@ -21,7 +21,7 @@ import (
// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
// You can override this at build time with
// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path'
// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path'
var systemRegistriesDirPath = builtinRegistriesDirPath
// builtinRegistriesDirPath is the path to registries.d.
@ -96,10 +96,16 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference,
// registriesDirPath returns a path to registries.d
func registriesDirPath(sys *types.SystemContext) string {
return registriesDirPathWithHomeDir(sys, homedir.Get())
}
// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath,
// it exists only to allow testing it with an artificial home directory.
func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
if sys != nil && sys.RegistriesDirPath != "" {
return sys.RegistriesDirPath
}
userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
if _, err := os.Stat(userRegistriesDirPath); err == nil {
return userRegistriesDirPath
}
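The WithHomeDir suffix marks a pattern this release applies in several packages: the public function delegates to a private variant that takes the home directory as a parameter, purely for testability. A minimal in-package test sketch, assuming the unshown fallthrough returns the system-wide path, and using ioutil to stay within the module's Go 1.12 floor:

func TestRegistriesDirPathWithHomeDir(t *testing.T) {
	home, err := ioutil.TempDir("", "fake-home") // artificial, empty home directory
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(home)
	// with no per-user registries.d under home, the system-wide path should win
	if got := registriesDirPathWithHomeDir(nil, home); got != systemRegistriesDirPath {
		t.Fatalf("unexpected registries.d path %q", got)
	}
}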

View File

@ -52,5 +52,26 @@ func DockerReferenceNamespaces(ref reference.Named) []string {
}
name = name[:lastSlash]
}
// Strip port number if any, before appending to res slice.
// Currently, the most compatible behavior is to return
// example.com:8443/ns, example.com:8443, *.com.
// If a port number is not specified, the expected behavior would be
// example.com/ns, example.com, *.com
portNumColon := strings.Index(name, ":")
if portNumColon != -1 {
name = name[:portNumColon]
}
// Append wildcarded domains to res slice
for {
firstDot := strings.Index(name, ".")
if firstDot == -1 {
break
}
name = name[firstDot+1:]
res = append(res, "*."+name)
}
return res
}
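A worked example of the port-stripping and wildcard behavior; the input is hypothetical and the expected output follows the comment above:

namespaces := DockerReferenceNamespaces(ref) // ref parsed from "example.com:8443/ns/repo"
// expected, per the comment above:
//   "example.com:8443/ns/repo", "example.com:8443/ns", "example.com:8443", "*.com"
_ = namespaces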

View File

@ -154,6 +154,9 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
// if the CompressionOperation and CompressionAlgorithm specified in one or more
// options.LayerInfos items are anything other than gzip.
func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
src: m.src,

View File

@ -134,6 +134,10 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
// if the combination of CompressionOperation and CompressionAlgorithm specified
// in one or more options.LayerInfos items indicates that a layer is compressed using
// an algorithm that is not allowed in OCI.
func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
src: m.src,

View File

@ -0,0 +1,63 @@
package blobinfocache
import (
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original
// object if it implements BlobInfoCache2, or a wrapper which discards compression information
// if it only implements BlobInfoCache.
func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 {
if bic2, ok := bic.(BlobInfoCache2); ok {
return bic2
}
return &v1OnlyBlobInfoCache{
BlobInfoCache: bic,
}
}
type v1OnlyBlobInfoCache struct {
types.BlobInfoCache
}
func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
}
func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2 {
return nil
}
// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of
// types.BICReplacementCandidate, dropping compression information.
func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate {
candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates))
for _, c := range v2candidates {
candidates = append(candidates, types.BICReplacementCandidate{
Digest: c.Digest,
Location: c.Location,
})
}
return candidates
}
// OperationAndAlgorithmForCompressor returns CompressionOperation and CompressionAlgorithm
// values suitable for inclusion in a types.BlobInfo structure, based on the name of the
// compression algorithm, or Uncompressed, or UnknownCompression. This is typically used by
// TryReusingBlob() implementations to set values in the BlobInfo structure that they return
// upon success.
func OperationAndAlgorithmForCompressor(compressorName string) (types.LayerCompression, *compression.Algorithm, error) {
switch compressorName {
case Uncompressed:
return types.Decompress, nil, nil
case UnknownCompression:
return types.PreserveOriginal, nil, nil
default:
algo, err := compression.AlgorithmByName(compressorName)
if err == nil {
return types.Compress, &algo, nil
}
return types.PreserveOriginal, nil, err
}
}
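A quick sketch of the mapping the helper implements, with gzip as a representative registered algorithm:

op, algo, _ := blobinfocache.OperationAndAlgorithmForCompressor("gzip")
// op == types.Compress, algo is the gzip Algorithm
op, algo, _ = blobinfocache.OperationAndAlgorithmForCompressor(blobinfocache.Uncompressed)
// op == types.Decompress, algo == nil
op, algo, _ = blobinfocache.OperationAndAlgorithmForCompressor(blobinfocache.UnknownCompression)
// op == types.PreserveOriginal, algo == nil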

View File

@ -0,0 +1,45 @@
package blobinfocache
import (
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
const (
// Uncompressed is the value we store in a blob info cache to indicate that we know that
// the blob in the corresponding location is not compressed.
Uncompressed = "uncompressed"
// UnknownCompression is the value we store in a blob info cache to indicate that we don't
// know if the blob in the corresponding location is compressed (and if so, how) or not.
UnknownCompression = "unknown"
)
// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind
// of compression was applied to the blobs it keeps information about.
type BlobInfoCache2 interface {
types.BlobInfoCache
// RecordDigestCompressorName records a compressor for the blob with the specified digest,
// or Uncompressed or UnknownCompression.
// WARNING: Only call this with LOCALLY VERIFIED data; don't record a compressor for a
// digest just because some remote author claims so (e.g. because a manifest says so);
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//
// The CompressorName fields in returned data must never be UnknownCompression.
CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate2
}
// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
Digest digest.Digest
CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
Location types.BICLocationReference
}
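A minimal sketch of the two-sided contract; note the package is internal, so only code inside containers/image can use it, and cache, transport, scope, and layerDigest are assumed to be in scope:

bic := blobinfocache.FromBlobInfoCache(cache) // cache is any types.BlobInfoCache
// record only locally verified data, per the warning above
bic.RecordDigestCompressorName(layerDigest, "zstd")
for _, c := range bic.CandidateLocations2(transport, scope, layerDigest, true) {
	_ = c.CompressorName // never UnknownCompression, per the contract above
}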

View File

@ -9,7 +9,7 @@ import (
// unixTempDirForBigFiles is the directory path to store big files on non Windows systems.
// You can override this at build time with
// -ldflags '-X github.com/containers/image/internal/tmpdir.unixTempDirForBigFiles=$your_path'
// -ldflags '-X github.com/containers/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path'
var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles
// builtinUnixTempDirForBigFiles is the directory path to store big files.

View File

@ -5,7 +5,6 @@ import (
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -54,6 +53,12 @@ const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that mean
// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil
// to mean "no compression"), based on variantTable.
// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
// that differ only in what type of compression is applied, but it can't be combined with this
// algorithm to produce an updated MIME type that complies with the standard that defines mimeType.
// If the compression algorithm is unrecognized, or mimeType is not known to have variants that
// differ from it only in what type of compression has been applied, the returned error will not be
// a ManifestLayerCompressionIncompatibilityError.
func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compression.Algorithm) (string, error) {
if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries
return "", fmt.Errorf("cannot update unknown MIME type")
@ -70,15 +75,15 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
return res, nil
}
if name != mtsUncompressed {
return "", fmt.Errorf("%s compression is not supported", name)
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mt)}
}
return "", errors.New("uncompressed variant is not supported")
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
}
if name != mtsUncompressed {
return "", fmt.Errorf("unknown compression algorithm %s", name)
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %s", name, mt)}
}
// We can't very well say “the idea of no compression is unknown”
return "", errors.New("uncompressed variant is not supported")
return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mt)}
}
}
}
@ -89,7 +94,11 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
}
// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to
// mimeType, based on variantTable. It may use updated.Digest for error messages.
// mimeType, based on variantTable. It may use updated.Digest for error messages.
// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants
// that differ only in what type of compression is applied, but applying updated.CompressionOperation
// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the
// standard that defines mimeType.
func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) {
// Note that manifests in containers-storage might be reporting the
// wrong media type since the original manifests are stored while layers
@ -99,6 +108,12 @@ func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, upd
// {de}compressed.
switch updated.CompressionOperation {
case types.PreserveOriginal:
// Force a change to the media type if we're being told to use a particular compressor,
// since it might be different from the one associated with the media type. Otherwise,
// try to keep the original media type.
if updated.CompressionAlgorithm != nil {
return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm)
}
// Keep the original media type.
return mimeType, nil
@ -116,3 +131,14 @@ func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, upd
return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation)
}
}
// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm
// could not be applied to a layer MIME type. A caller that receives this should either retry
// the call with a different compression algorithm, or attempt to use a different manifest type.
type ManifestLayerCompressionIncompatibilityError struct {
text string
}
func (m ManifestLayerCompressionIncompatibilityError) Error() string {
return m.text
}
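Since the comments elsewhere in this diff promise the error is returned unwrapped, callers can branch on the concrete type; a sketch with img, ctx, and options assumed to be in scope:

updated, err := img.UpdatedImage(ctx, options)
if err != nil {
	if _, ok := err.(manifest.ManifestLayerCompressionIncompatibilityError); ok {
		// retry with a different options.ManifestMIMEType, or different
		// CompressionOperation/CompressionAlgorithm values in options.LayerInfos
	}
	return err
}
_ = updated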

View File

@ -226,6 +226,8 @@ var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
}
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
// CompressionAlgorithm that would result in anything other than gzip compression.
func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.LayersDescriptors) != len(layerInfos) {
return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))

View File

@ -108,6 +108,8 @@ var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{
}
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
// CompressionAlgorithm that isn't supported by OCI.
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.Layers) != len(layerInfos) {
return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))

View File

@ -103,7 +103,9 @@ func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Read
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {

View File

@ -186,7 +186,9 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
@ -204,6 +206,7 @@ func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
if err != nil {
return false, types.BlobInfo{}, err
}
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}

View File

@ -410,7 +410,9 @@ func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reade
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {

View File

@ -339,7 +339,9 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {

View File

@ -7,6 +7,7 @@ import (
"sync"
"time"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
@ -22,6 +23,9 @@ var (
// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
uncompressedDigestBucket = []byte("uncompressedDigest")
// digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed
// It may not exist in caches created by older versions, even if uncompressedDigestBucket is present.
digestCompressorBucket = []byte("digestCompressor")
// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
// (as a set of key=digest, value="" pairs)
digestByUncompressedBucket = []byte("digestByUncompressed")
@ -95,6 +99,9 @@ type cache struct {
//
// Most users should call blobinfocache.DefaultCache instead.
func New(path string) types.BlobInfoCache {
return new2(path)
}
func new2(path string) *cache {
return &cache{path: path}
}
@ -220,6 +227,30 @@ func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified
// compressor, or is blobinfocache.Uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don't record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
_ = bdc.update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucketIfNotExists(digestCompressorBucket)
if err != nil {
return err
}
key := []byte(anyDigest.String())
if previousBytes := b.Get(key); previousBytes != nil {
if string(previousBytes) != compressorName {
logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName)
}
}
if compressorName == blobinfocache.UnknownCompression {
return b.Delete(key)
}
return b.Put(key, []byte(compressorName))
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
@ -251,21 +282,34 @@ func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []prioritize.CandidateWithTime {
b := scopeBucket.Bucket([]byte(digest.String()))
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates.
func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
digestKey := []byte(digest.String())
b := scopeBucket.Bucket(digestKey)
if b == nil {
return candidates
}
compressorName := blobinfocache.UnknownCompression
if compressionBucket != nil {
// the bucket won't exist if the cache was created by a v1 implementation and
// hasn't yet been updated by a v2 implementation
if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 {
compressorName = string(compressorNameValue)
}
}
if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo {
return candidates
}
_ = b.ForEach(func(k, v []byte) error {
t := time.Time{}
if err := t.UnmarshalBinary(v); err != nil {
return err
}
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: types.BICReplacementCandidate{
Digest: digest,
Location: types.BICLocationReference{Opaque: string(k)},
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: types.BICLocationReference{Opaque: string(k)},
},
LastSeen: t,
})
@ -274,13 +318,17 @@ func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
return candidates
}
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
}
func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
res := []prioritize.CandidateWithTime{}
var uncompressedDigestValue digest.Digest // = ""
if err := bdc.view(func(tx *bolt.Tx) error {
@ -296,8 +344,11 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
if scopeBucket == nil {
return nil
}
// compressionBucket won't have been created if previous writers never recorded info about compression,
// and we don't want to fail just because of that
compressionBucket := tx.Bucket(digestCompressorBucket)
res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo)
if canSubstitute {
if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
b := tx.Bucket(digestByUncompressedBucket)
@ -310,7 +361,7 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
return err
}
if d != primaryDigest && d != uncompressedDigestValue {
res = bdc.appendReplacementCandidates(res, scopeBucket, d)
res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo)
}
return nil
}); err != nil {
@ -319,14 +370,24 @@ func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types
}
}
if uncompressedDigestValue != primaryDigest {
res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo)
}
}
}
return nil
}); err != nil { // Including os.IsNotExist(err)
return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
}
return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
}
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
}

View File

@ -6,7 +6,7 @@ import (
"sort"
"time"
"github.com/containers/image/v5/types"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/opencontainers/go-digest"
)
@ -17,8 +17,8 @@ const replacementAttempts = 5
// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type CandidateWithTime struct {
Candidate types.BICReplacementCandidate // The replacement candidate
LastSeen time.Time // Time the candidate was last known to exist (either read or written)
Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
LastSeen time.Time // Time the candidate was last known to exist (either read or written)
}
// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
@ -79,7 +79,7 @@ func (css *candidateSortState) Swap(i, j int) {
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
// number of entries to limit, only to make testing simpler.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal.
sort.Sort(&candidateSortState{
@ -92,7 +92,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
if resLength > maxCandidates {
resLength = maxCandidates
}
res := make([]types.BICReplacementCandidate, resLength)
res := make([]blobinfocache.BICReplacementCandidate2, resLength)
for i := range res {
res[i] = cs[i].Candidate
}
@ -105,6 +105,6 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
}

View File

@ -5,6 +5,7 @@ import (
"sync"
"time"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
@ -25,6 +26,7 @@ type cache struct {
uncompressedDigests map[digest.Digest]digest.Digest
digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.UnknownCompression, for each digest
}
// New returns a BlobInfoCache implementation which is in-memory only.
@ -36,10 +38,15 @@ type cache struct {
// Manual users of types.{ImageSource,ImageDestination} might also use
// this instead of a persistent cache.
func New() types.BlobInfoCache {
return new2()
}
func new2() *cache {
return &cache{
uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
compressors: map[digest.Digest]string{},
}
}
@ -101,14 +108,34 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
locationScope[location] = time.Now() // Possibly overwriting an older entry.
}
// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified
// algorithm, or uncompressed, or that we no longer know.
func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
if compressorName == blobinfocache.UnknownCompression {
delete(mem.compressors, blobDigest)
return
}
mem.compressors[blobDigest] = compressorName
}
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []prioritize.CandidateWithTime {
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
for l, t := range locations {
compressorName, compressorKnown := mem.compressors[digest]
if !compressorKnown {
if requireCompressionInfo {
continue
}
compressorName = blobinfocache.UnknownCompression
}
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: types.BICReplacementCandidate{
Digest: digest,
Location: l,
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: l,
},
LastSeen: t,
})
@ -123,21 +150,35 @@ func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
}
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
}
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
mem.mutex.Lock()
defer mem.mutex.Unlock()
res := []prioritize.CandidateWithTime{}
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo)
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
for d := range otherDigests {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d)
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
}
}
if uncompressedDigest != primaryDigest {
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo)
}
}
}

View File

@ -2,6 +2,7 @@
package none
import (
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
@ -16,7 +17,7 @@ type noCache struct {
// Manifest.Inspect, because configs only have one representation.
// Any use of BlobInfoCache with blobs should usually use at least a
// short-lived cache, ideally blobinfocache.DefaultCache.
var NoCache types.BlobInfoCache = noCache{}
var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{})
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.

View File

@ -86,7 +86,7 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// Note: we need to read the auth files in the inverse order to prevent
// a priority inversion when writing to the map.
authConfigs := make(map[string]types.DockerAuthConfig)
paths := getAuthFilePaths(sys)
paths := getAuthFilePaths(sys, homedir.Get())
for i := len(paths) - 1; i >= 0; i-- {
path := paths[i]
// readJSONFile returns an empty map in case the path doesn't exist.
@ -126,7 +126,9 @@ func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthCon
// getAuthFilePaths returns a slice of authPaths based on the system context
// in the order they should be searched. Note that some paths may not exist.
func getAuthFilePaths(sys *types.SystemContext) []authPath {
// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden
// by tests.
func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
paths := []authPath{}
pathToAuth, lf, err := getPathToAuth(sys)
if err == nil {
@ -139,7 +141,7 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath {
}
xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
if xdgCfgHome == "" {
xdgCfgHome = filepath.Join(homedir.Get(), ".config")
xdgCfgHome = filepath.Join(homeDir, ".config")
}
paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false})
if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
@ -148,11 +150,11 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath {
)
} else {
paths = append(paths,
authPath{path: filepath.Join(homedir.Get(), dockerHomePath), legacyFormat: false},
authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false},
)
}
paths = append(paths,
authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true},
authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
)
return paths
}
@ -161,6 +163,12 @@ func getAuthFilePaths(sys *types.SystemContext) []authPath {
// file or .docker/config.json, including support for OAuth2 and IdentityToken.
// If an entry is not found, an empty struct is returned.
func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
return getCredentialsWithHomeDir(sys, registry, homedir.Get())
}
// getCredentialsWithHomeDir is an internal implementation detail of GetCredentials,
// it exists only to allow testing it with an artificial home directory.
func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) {
if sys != nil && sys.DockerAuthConfig != nil {
logrus.Debug("Returning credentials from DockerAuthConfig")
return *sys.DockerAuthConfig, nil
@ -177,7 +185,7 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth
}
}
for _, path := range getAuthFilePaths(sys) {
for _, path := range getAuthFilePaths(sys, homeDir) {
authConfig, err := findAuthentication(registry, path.path, path.legacyFormat)
if err != nil {
logrus.Debugf("Credentials not found")
@ -203,7 +211,13 @@ func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuth
// GetCredentials API. The new API should be used and this API is kept to
// maintain backward compatibility.
func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
auth, err := GetCredentials(sys, registry)
return getAuthenticationWithHomeDir(sys, registry, homedir.Get())
}
// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication,
// it exists only to allow testing it with an artificial home directory.
func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) {
auth, err := getCredentialsWithHomeDir(sys, registry, homeDir)
if err != nil {
return "", "", err
}
@ -262,6 +276,12 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
// getPathToAuth gets the path of the auth.json file used for reading and writing credentials
// returns the path, and a bool specifies whether the file is in legacy format
func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
return getPathToAuthWithOS(sys, runtime.GOOS)
}
// getPathToAuthWithOS is an internal implementation detail of getPathToAuth,
// it exists only to allow testing it with an artificial runtime.GOOS.
func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) {
if sys != nil {
if sys.AuthFilePath != "" {
return sys.AuthFilePath, false, nil
@ -273,7 +293,7 @@ func getPathToAuth(sys *types.SystemContext) (string, bool, error) {
return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil
}
}
if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
if goOS == "windows" || goOS == "darwin" {
return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil
}
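The goOS parameter serves the same test-injection purpose as the homeDir parameters elsewhere in this diff; a minimal in-package test sketch:

func TestGetPathToAuthWithOS(t *testing.T) {
	path, legacyFormat, err := getPathToAuthWithOS(nil, "windows")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if legacyFormat || path == "" {
		t.Fatalf("unexpected result %q (legacy=%v)", path, legacyFormat)
	}
	// path should end in nonLinuxAuthFilePath under the real home directory
}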

View File

@ -7,6 +7,7 @@ import (
"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/rootless"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/lockfile"
@ -27,12 +28,24 @@ func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) {
return ctx.UserShortNameAliasConfPath, nil
}
configHome, err := homedir.GetConfigHome()
if err != nil {
return "", err
if rootless.GetRootlessEUID() == 0 {
// Root user or in a non-conforming user NS
return filepath.Join("/var/cache", userShortNamesFile), nil
}
return filepath.Join(configHome, userShortNamesFile), nil
// Rootless user
var cacheRoot string
if xdgCache := os.Getenv("XDG_CACHE_HOME"); xdgCache != "" {
cacheRoot = xdgCache
} else {
configHome, err := homedir.GetConfigHome()
if err != nil {
return "", err
}
cacheRoot = filepath.Join(configHome, ".cache")
}
return filepath.Join(cacheRoot, userShortNamesFile), nil
}
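The net effect of the rewrite, sketched with hypothetical environments:

p, _ := shortNameAliasesConfPath(nil)
// root (or non-conforming user NS):          p == "/var/cache/" + userShortNamesFile
// rootless, XDG_CACHE_HOME=/run/user/1000:   p == "/run/user/1000/" + userShortNamesFile
// rootless otherwise (configHome ~/.config): p == "~/.config/.cache/" + userShortNamesFile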
// shortNameAliasConf is a subset of the `V2RegistriesConf` format. It's used in the

View File

@ -20,7 +20,7 @@ import (
// systemRegistriesConfPath is the path to the system-wide registry
// configuration file and is used to add/subtract potential registries for
// obtaining images. You can override this at build time with
// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path'
// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path'
var systemRegistriesConfPath = builtinRegistriesConfPath
// builtinRegistriesConfPath is the path to the registry configuration file.
@ -30,7 +30,7 @@ const builtinRegistriesConfPath = "/etc/containers/registries.conf"
// systemRegistriesConfDirPath is the path to the system-wide registry
// configuration directory and is used to add/subtract potential registries for
// obtaining images. You can override this at build time with
// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfDirecotyPath=$your_path'
// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirPath=$your_path'
var systemRegistriesConfDirPath = builtinRegistriesConfDirPath
// builtinRegistriesConfDirPath is the path to the registry configuration directory.
@ -405,9 +405,15 @@ type configWrapper struct {
// newConfigWrapper returns a configWrapper for the specified SystemContext.
func newConfigWrapper(ctx *types.SystemContext) configWrapper {
return newConfigWrapperWithHomeDir(ctx, homedir.Get())
}
// newConfigWrapperWithHomeDir is an internal implementation detail of newConfigWrapper,
// it exists only to allow testing it with an artificial home directory.
func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) configWrapper {
var wrapper configWrapper
userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
userRegistriesFilePath := filepath.Join(homeDir, userRegistriesFile)
userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
// decide configPath using per-user path or system file
if ctx != nil && ctx.SystemRegistriesConfPath != "" {

View File

@ -30,7 +30,7 @@ import (
// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
// You can override this at build time with
// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
// -ldflags '-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=$your_path'
var systemDefaultPolicyPath = builtinDefaultPolicyPath
// builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
@ -59,10 +59,16 @@ func DefaultPolicy(sys *types.SystemContext) (*Policy, error) {
// defaultPolicyPath returns a path to the default policy of the system.
func defaultPolicyPath(sys *types.SystemContext) string {
return defaultPolicyPathWithHomeDir(sys, homedir.Get())
}
// defaultPolicyPathWithHomeDir is an internal implementation detail of defaultPolicyPath,
// it exists only to allow testing it with an artificial home directory.
func defaultPolicyPathWithHomeDir(sys *types.SystemContext, homeDir string) string {
if sys != nil && sys.SignaturePolicyPath != "" {
return sys.SignaturePolicyPath
}
userPolicyFilePath := filepath.Join(homedir.Get(), userPolicyFile)
userPolicyFilePath := filepath.Join(homeDir, userPolicyFile)
if _, err := os.Stat(userPolicyFilePath); err == nil {
return userPolicyFilePath
}

View File

@ -463,7 +463,9 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {

View File

@ -5,11 +5,13 @@
// package main
//
// import (
// "fmt"
// "context"
//
// cp "github.com/containers/image/v5/copy"
// "github.com/containers/image/v5/signature"
// "github.com/containers/image/v5/tarball"
// "github.com/containers/image/v5/transports/alltransports"
// "github.com/containers/image/v5/types"
// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
// )
//
@ -39,7 +41,18 @@
// if err != nil {
// panic(err)
// }
// err = cp.Image(nil, dest, src, nil)
//
// policy, err := signature.DefaultPolicy(nil)
// if err != nil {
// panic(err)
// }
//
// pc, err := signature.NewPolicyContext(policy)
// if err != nil {
// panic(err)
// }
// defer pc.Destroy()
// _, err = cp.Image(context.TODO(), pc, dest, src, nil)
// if err != nil {
// panic(err)
// }

View File

@ -126,14 +126,18 @@ type BlobInfo struct {
Annotations map[string]string
MediaType string
// CompressionOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer should be preserved or (de)compressed. The
// field defaults to preserve the original layer.
// whether the original layer's "compressed or not" should be preserved,
// possibly while changing the compression algorithm from one to another,
// or if it should be compressed or decompressed. The field defaults to
// preserve the original layer's compressedness.
// TODO: To remove together with CryptoOperation in re-design to remove
// field out of BlobInfo.
CompressionOperation LayerCompression
// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
// set when `CompressionOperation == Compress`.
// set when `CompressionOperation == Compress` and MAY be set when
// `CompressionOperation == PreserveOriginal` and the compression type is
// being changed for an already-compressed layer.
CompressionAlgorithm *compression.Algorithm
// CryptoOperation is used in Image.UpdateLayerInfos to instruct
// whether the original layer was encrypted/decrypted
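Taken together, the two fields now describe recompression as well; a sketch of requesting a switch to zstd on an already-compressed layer, assuming pkg/compression exports a Zstd Algorithm:

layerInfo.CompressionOperation = types.PreserveOriginal // stay compressed...
layerInfo.CompressionAlgorithm = &compression.Zstd      // ...but with a different algorithm
// passed via ManifestUpdateOptions.LayerInfos, this triggers the media-type
// rewrite implemented by updatedMIMEType earlier in this diff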
@ -194,6 +198,9 @@ type BICReplacementCandidate struct {
//
// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
// users of the cache should just fall back to copying the blobs the usual way.
//
// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by
// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own.
type BlobInfoCache interface {
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
@ -306,7 +313,9 @@ type ImageDestination interface {
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
@ -397,6 +406,12 @@ type Image interface {
// UpdatedImage returns a types.Image modified according to options.
// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
// This does not change the state of the original Image object.
// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if
// manifests of type options.ManifestMIMEType can not include layers that are compressed
// in accordance with the CompressionOperation and CompressionAlgorithm specified in one
// or more options.LayerInfos items, though retrying with a different
// options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm
// values might succeed.
UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
// SupportsEncryption returns an indicator that the image supports encryption
//
@ -600,6 +615,8 @@ type SystemContext struct {
DockerDisableV1Ping bool
// If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
DockerDisableDestSchema1MIMETypes bool
// If true, the physical pull source of docker transport images is logged at the info level
DockerLogMirrorChoice bool
// Directory to use for OSTree temporary files
OSTreeTmpDirPath string
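The retry semantics described above suggest a small caller-side pattern. The sketch below is hypothetical: the helper name and the OCI fallback choice are assumptions, and it relies on the incompatibility error being returned by value:

```go
package demo

import (
	"context"
	"errors"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// updateWithFallback retries UpdatedImage with another manifest MIME type
// when the requested one cannot express the requested layer compression.
func updateWithFallback(ctx context.Context, img types.Image, options types.ManifestUpdateOptions) (types.Image, error) {
	updated, err := img.UpdatedImage(ctx, options)
	var incompat manifest.ManifestLayerCompressionIncompatibilityError
	if errors.As(err, &incompat) {
		// Assumption: an OCI manifest can express the requested compression.
		options.ManifestMIMEType = imgspecv1.MediaTypeImageManifest
		return img.UpdatedImage(ctx, options)
	}
	return updated, err
}
```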

View File

@ -6,9 +6,9 @@ const (
// VersionMajor is for API-incompatible changes
VersionMajor = 5
// VersionMinor is for functionality added in a backwards-compatible manner
VersionMinor = 9
VersionMinor = 10
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 2
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""

View File

@ -25,7 +25,7 @@ env:
# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
_BUILT_IMAGE_SUFFIX: "c6233039174893568"
_BUILT_IMAGE_SUFFIX: "c6524344056676352"
FEDORA_CACHE_IMAGE_NAME: "fedora-${_BUILT_IMAGE_SUFFIX}"
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${_BUILT_IMAGE_SUFFIX}"
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${_BUILT_IMAGE_SUFFIX}"

View File

@ -1 +1 @@
1.24.5
1.24.6

View File

@ -125,6 +125,15 @@ func init() {
graphdriver.Register("overlay2", Init)
}
func hasMetacopyOption(opts []string) bool {
for _, s := range opts {
if s == "metacopy=on" {
return true
}
}
return false
}
// Init returns a native diff driver for the overlay filesystem.
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
@ -863,7 +872,17 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
readWrite := true
for _, o := range options.Options {
optsList := options.Options
if len(optsList) == 0 {
optsList = strings.Split(d.options.mountOptions, ",")
} else {
// If metacopy=on is present in d.options.mountOptions, it must also be present in the mount
// options; otherwise the kernel refuses to follow the metacopy xattr.
if hasMetacopyOption(strings.Split(d.options.mountOptions, ",")) && !hasMetacopyOption(options.Options) {
optsList = append(optsList, "metacopy=on")
}
}
for _, o := range optsList {
if o == "ro" {
readWrite = false
break
@ -1001,10 +1020,8 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
} else {
opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(absLowers, ":"))
}
if len(options.Options) > 0 {
opts = fmt.Sprintf("%s,%s", strings.Join(options.Options, ","), opts)
} else if d.options.mountOptions != "" {
opts = fmt.Sprintf("%s,%s", d.options.mountOptions, opts)
if len(optsList) > 0 {
opts = fmt.Sprintf("%s,%s", strings.Join(optsList, ","), opts)
}
mountData := label.FormatMountLabel(opts, options.MountLabel)
mountFunc := unix.Mount
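As a self-contained toy version of the option-merging logic above (all option values are made up), the carried-over metacopy=on looks like this:

```go
package main

import (
	"fmt"
	"strings"
)

// hasMetacopyOption reports whether metacopy=on appears in the option list
// (same logic as the helper added in the diff above).
func hasMetacopyOption(opts []string) bool {
	for _, s := range opts {
		if s == "metacopy=on" {
			return true
		}
	}
	return false
}

func main() {
	driverDefaults := "nodev,metacopy=on" // hypothetical storage.conf mountopt
	mountOpts := []string{"ro"}           // hypothetical per-mount request

	// Explicit per-mount options replace the driver defaults, except that a
	// default metacopy=on is carried over so the kernel keeps honoring the
	// metacopy xattr on the lower layers.
	if hasMetacopyOption(strings.Split(driverDefaults, ",")) && !hasMetacopyOption(mountOpts) {
		mountOpts = append(mountOpts, "metacopy=on")
	}
	fmt.Println(mountOpts) // [ro metacopy=on]
}
```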

View File

@ -318,7 +318,7 @@ func unescape(s string) (ch string, tail string, err error) {
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(i), s, nil
return string(rune(i)), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
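For context on this one-line change: converting a non-rune integer to string yields the UTF-8 encoding of the code point, and go vet in Go 1.15+ flags the unconverted form. A tiny standalone illustration:

```go
package main

import "fmt"

func main() {
	i := 0x2318
	fmt.Println(string(rune(i))) // "⌘" — explicit and vet-clean
	fmt.Println(fmt.Sprint(i))   // "8984" — when the decimal digits are wanted
}
```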

View File

@ -1,294 +0,0 @@
// +build generate
//go:generate go run $GOFILE && gofmt -w inflate_gen.go
package main
import (
"os"
"strings"
)
func main() {
f, err := os.Create("inflate_gen.go")
if err != nil {
panic(err)
}
defer f.Close()
types := []string{"*bytes.Buffer", "*bytes.Reader", "*bufio.Reader", "*strings.Reader"}
names := []string{"BytesBuffer", "BytesReader", "BufioReader", "StringsReader"}
imports := []string{"bytes", "bufio", "io", "strings", "math/bits"}
f.WriteString(`// Code generated by go generate gen_inflate.go. DO NOT EDIT.
package flate
import (
`)
for _, imp := range imports {
f.WriteString("\t\"" + imp + "\"\n")
}
f.WriteString(")\n\n")
template := `
// Decode a single Huffman block from f.
// hl and hd are the Huffman states for the lit/length values
// and the distance values, respectively. If hd == nil, the fixed
// distance encoding associated with fixed Huffman blocks is used.
func (f *decompressor) $FUNCNAME$() {
const (
stateInit = iota // Zero value must be stateInit
stateDict
)
fr := f.r.($TYPE$)
switch f.stepState {
case stateInit:
goto readLiteral
case stateDict:
goto copyHistory
}
readLiteral:
// Read literal and/or (length, distance) according to RFC section 3.2.3.
{
var v int
{
// Inlined v, err := f.huffSym(f.hl)
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with a single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it to
// satisfy the n == 0 check below.
n := uint(f.hl.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
nb, b := f.nb, f.b
for {
for nb < n {
c, err := fr.ReadByte()
if err != nil {
f.b = b
f.nb = nb
f.err = noEOF(err)
return
}
f.roffset++
b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= nb {
if n == 0 {
f.b = b
f.nb = nb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
v = int(chunk >> huffmanValueShift)
break
}
}
}
var length int
switch {
case v < 256:
f.dict.writeByte(byte(v))
if f.dict.availWrite() == 0 {
f.toRead = f.dict.readFlush()
f.step = (*decompressor).$FUNCNAME$
f.stepState = stateInit
return
}
goto readLiteral
case v == 256:
f.finishBlock()
return
// otherwise, reference to older data
case v < 265:
length = v - (257 - 3)
case v < maxNumLit:
val := decCodeToLen[(v - 257)]
length = int(val.length) + 3
n := uint(val.extra)
for f.nb < n {
c, err := fr.ReadByte()
if err != nil {
if debugDecode {
fmt.Println("morebits n>0:", err)
}
f.err = err
return
}
f.roffset++
f.b |= uint32(c) << f.nb
f.nb += 8
}
length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
f.b >>= n & regSizeMaskUint32
f.nb -= n
default:
if debugDecode {
fmt.Println(v, ">= maxNumLit")
}
f.err = CorruptInputError(f.roffset)
return
}
var dist uint32
if f.hd == nil {
for f.nb < 5 {
c, err := fr.ReadByte()
if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<5:", err)
}
f.err = err
return
}
f.roffset++
f.b |= uint32(c) << f.nb
f.nb += 8
}
dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
f.b >>= 5
f.nb -= 5
} else {
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
// with a single element, huffSym must error on these two edge cases. In both
// cases, the chunks slice will be 0 for the invalid sequence, leading it to
// satisfy the n == 0 check below.
n := uint(f.hd.maxRead)
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
// but is smart enough to keep local variables in registers, so use nb and b,
// inline call to moreBits and reassign b,nb back to f on return.
nb, b := f.nb, f.b
for {
for nb < n {
c, err := fr.ReadByte()
if err != nil {
f.b = b
f.nb = nb
f.err = noEOF(err)
return
}
f.roffset++
b |= uint32(c) << (nb & regSizeMaskUint32)
nb += 8
}
chunk := f.hd.chunks[b&(huffmanNumChunks-1)]
n = uint(chunk & huffmanCountMask)
if n > huffmanChunkBits {
chunk = f.hd.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hd.linkMask]
n = uint(chunk & huffmanCountMask)
}
if n <= nb {
if n == 0 {
f.b = b
f.nb = nb
if debugDecode {
fmt.Println("huffsym: n==0")
}
f.err = CorruptInputError(f.roffset)
return
}
f.b = b >> (n & regSizeMaskUint32)
f.nb = nb - n
dist = uint32(chunk >> huffmanValueShift)
break
}
}
}
switch {
case dist < 4:
dist++
case dist < maxNumDist:
nb := uint(dist-2) >> 1
// have 1 bit in bottom of dist, need nb more.
extra := (dist & 1) << (nb & regSizeMaskUint32)
for f.nb < nb {
c, err := fr.ReadByte()
if err != nil {
if debugDecode {
fmt.Println("morebits f.nb<nb:", err)
}
f.err = err
return
}
f.roffset++
f.b |= uint32(c) << f.nb
f.nb += 8
}
extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
f.b >>= nb & regSizeMaskUint32
f.nb -= nb
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
default:
if debugDecode {
fmt.Println("dist too big:", dist, maxNumDist)
}
f.err = CorruptInputError(f.roffset)
return
}
// No check on length; encoding can be prescient.
if dist > uint32(f.dict.histSize()) {
if debugDecode {
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
}
f.err = CorruptInputError(f.roffset)
return
}
f.copyLen, f.copyDist = length, int(dist)
goto copyHistory
}
copyHistory:
// Perform a backwards copy according to RFC section 3.2.3.
{
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
if cnt == 0 {
cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
}
f.copyLen -= cnt
if f.dict.availWrite() == 0 || f.copyLen > 0 {
f.toRead = f.dict.readFlush()
f.step = (*decompressor).$FUNCNAME$ // We need to continue this work
f.stepState = stateDict
return
}
goto readLiteral
}
}
`
for i, t := range types {
s := strings.Replace(template, "$FUNCNAME$", "huffman"+names[i], -1)
s = strings.Replace(s, "$TYPE$", t, -1)
f.WriteString(s)
}
f.WriteString("func (f *decompressor) huffmanBlockDecoder() func() {\n")
f.WriteString("\tswitch f.r.(type) {\n")
for i, t := range types {
f.WriteString("\t\tcase " + t + ":\n")
f.WriteString("\t\t\treturn f.huffman" + names[i] + "\n")
}
f.WriteString("\t\tdefault:\n")
f.WriteString("\t\t\treturn f.huffmanBlockGeneric")
f.WriteString("\t}\n}\n")
}
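The deleted file relied on Go's build-tag plus go:generate idiom to write source code. A minimal, hypothetical version of that pattern (file names and output are invented):

```go
// +build generate

//go:generate go run $GOFILE && gofmt -w out_gen.go

// A build-tagged program, run via `go generate`, that writes Go source to be
// formatted and committed alongside the package.
package main

import "io/ioutil"

func main() {
	src := "// Code generated by gen.go. DO NOT EDIT.\n\npackage flate\n"
	if err := ioutil.WriteFile("out_gen.go", []byte(src), 0644); err != nil {
		panic(err)
	}
}
```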

View File

@ -14,7 +14,9 @@ but it can be used as a secondary step to compressors (like Snappy) that does no
## News
* Mar 2018: First implementation released. Consider this beta software for now.
This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
This ensures that most functionality is well tested.
# Usage

View File

@ -5,7 +5,6 @@
package zstd
import (
"bytes"
"errors"
"io"
"sync"
@ -179,11 +178,13 @@ func (d *Decoder) Reset(r io.Reader) error {
}
// If bytes buffer and < 1MB, do sync decoding anyway.
if bb, ok := r.(*bytes.Buffer); ok && bb.Len() < 1<<20 {
if bb, ok := r.(byter); ok && bb.Len() < 1<<20 {
var bb2 byter
bb2 = bb
if debug {
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
}
b := bb.Bytes()
b := bb2.Bytes()
var dst []byte
if cap(d.current.b) > 0 {
dst = d.current.b

View File

@ -4,6 +4,7 @@
package zstd
import (
"bytes"
"errors"
"log"
"math"
@ -146,3 +147,10 @@ func load64(b []byte, i int) uint64 {
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
type byter interface {
Bytes() []byte
Len() int
}
var _ byter = &bytes.Buffer{}
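As a standalone illustration of the interface introduced above (the byter definition is copied from this diff; the main function is made up): any reader exposing Bytes and Len can now take Decoder.Reset's synchronous small-input path, and the blank-identifier assignment is a zero-cost compile-time check:

```go
package main

import (
	"bytes"
	"fmt"
)

// byter mirrors the interface added above.
type byter interface {
	Bytes() []byte
	Len() int
}

// Compile-time proof that *bytes.Buffer satisfies byter; it costs nothing
// at runtime.
var _ byter = &bytes.Buffer{}

func main() {
	var b byter = bytes.NewBufferString("hello")
	fmt.Println(b.Len(), string(b.Bytes())) // 5 hello
}
```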

View File

@ -13,12 +13,42 @@ const (
compareGreater
)
var (
intType = reflect.TypeOf(int(1))
int8Type = reflect.TypeOf(int8(1))
int16Type = reflect.TypeOf(int16(1))
int32Type = reflect.TypeOf(int32(1))
int64Type = reflect.TypeOf(int64(1))
uintType = reflect.TypeOf(uint(1))
uint8Type = reflect.TypeOf(uint8(1))
uint16Type = reflect.TypeOf(uint16(1))
uint32Type = reflect.TypeOf(uint32(1))
uint64Type = reflect.TypeOf(uint64(1))
float32Type = reflect.TypeOf(float32(1))
float64Type = reflect.TypeOf(float64(1))
stringType = reflect.TypeOf("")
)
func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
obj1Value := reflect.ValueOf(obj1)
obj2Value := reflect.ValueOf(obj2)
// throughout this switch we try to avoid calling .Convert() if possible,
// as this has a pretty big performance impact
switch kind {
case reflect.Int:
{
intobj1 := obj1.(int)
intobj2 := obj2.(int)
intobj1, ok := obj1.(int)
if !ok {
intobj1 = obj1Value.Convert(intType).Interface().(int)
}
intobj2, ok := obj2.(int)
if !ok {
intobj2 = obj2Value.Convert(intType).Interface().(int)
}
if intobj1 > intobj2 {
return compareGreater, true
}
@ -31,8 +61,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Int8:
{
int8obj1 := obj1.(int8)
int8obj2 := obj2.(int8)
int8obj1, ok := obj1.(int8)
if !ok {
int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
}
int8obj2, ok := obj2.(int8)
if !ok {
int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
}
if int8obj1 > int8obj2 {
return compareGreater, true
}
@ -45,8 +81,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Int16:
{
int16obj1 := obj1.(int16)
int16obj2 := obj2.(int16)
int16obj1, ok := obj1.(int16)
if !ok {
int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
}
int16obj2, ok := obj2.(int16)
if !ok {
int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
}
if int16obj1 > int16obj2 {
return compareGreater, true
}
@ -59,8 +101,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Int32:
{
int32obj1 := obj1.(int32)
int32obj2 := obj2.(int32)
int32obj1, ok := obj1.(int32)
if !ok {
int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
}
int32obj2, ok := obj2.(int32)
if !ok {
int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
}
if int32obj1 > int32obj2 {
return compareGreater, true
}
@ -73,8 +121,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Int64:
{
int64obj1 := obj1.(int64)
int64obj2 := obj2.(int64)
int64obj1, ok := obj1.(int64)
if !ok {
int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
}
int64obj2, ok := obj2.(int64)
if !ok {
int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
}
if int64obj1 > int64obj2 {
return compareGreater, true
}
@ -87,8 +141,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Uint:
{
uintobj1 := obj1.(uint)
uintobj2 := obj2.(uint)
uintobj1, ok := obj1.(uint)
if !ok {
uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
}
uintobj2, ok := obj2.(uint)
if !ok {
uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
}
if uintobj1 > uintobj2 {
return compareGreater, true
}
@ -101,8 +161,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Uint8:
{
uint8obj1 := obj1.(uint8)
uint8obj2 := obj2.(uint8)
uint8obj1, ok := obj1.(uint8)
if !ok {
uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
}
uint8obj2, ok := obj2.(uint8)
if !ok {
uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
}
if uint8obj1 > uint8obj2 {
return compareGreater, true
}
@ -115,8 +181,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Uint16:
{
uint16obj1 := obj1.(uint16)
uint16obj2 := obj2.(uint16)
uint16obj1, ok := obj1.(uint16)
if !ok {
uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
}
uint16obj2, ok := obj2.(uint16)
if !ok {
uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
}
if uint16obj1 > uint16obj2 {
return compareGreater, true
}
@ -129,8 +201,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Uint32:
{
uint32obj1 := obj1.(uint32)
uint32obj2 := obj2.(uint32)
uint32obj1, ok := obj1.(uint32)
if !ok {
uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
}
uint32obj2, ok := obj2.(uint32)
if !ok {
uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
}
if uint32obj1 > uint32obj2 {
return compareGreater, true
}
@ -143,8 +221,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Uint64:
{
uint64obj1 := obj1.(uint64)
uint64obj2 := obj2.(uint64)
uint64obj1, ok := obj1.(uint64)
if !ok {
uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
}
uint64obj2, ok := obj2.(uint64)
if !ok {
uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
}
if uint64obj1 > uint64obj2 {
return compareGreater, true
}
@ -157,8 +241,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Float32:
{
float32obj1 := obj1.(float32)
float32obj2 := obj2.(float32)
float32obj1, ok := obj1.(float32)
if !ok {
float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
}
float32obj2, ok := obj2.(float32)
if !ok {
float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
}
if float32obj1 > float32obj2 {
return compareGreater, true
}
@ -171,8 +261,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.Float64:
{
float64obj1 := obj1.(float64)
float64obj2 := obj2.(float64)
float64obj1, ok := obj1.(float64)
if !ok {
float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
}
float64obj2, ok := obj2.(float64)
if !ok {
float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
}
if float64obj1 > float64obj2 {
return compareGreater, true
}
@ -185,8 +281,14 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
}
case reflect.String:
{
stringobj1 := obj1.(string)
stringobj2 := obj2.(string)
stringobj1, ok := obj1.(string)
if !ok {
stringobj1 = obj1Value.Convert(stringType).Interface().(string)
}
stringobj2, ok := obj2.(string)
if !ok {
stringobj2 = obj2Value.Convert(stringType).Interface().(string)
}
if stringobj1 > stringobj2 {
return compareGreater, true
}
@ -240,6 +342,24 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
}
// Positive asserts that the specified element is positive
//
// assert.Positive(t, 1)
// assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
}
// Negative asserts that the specified element is negative
//
// assert.Negative(t, -1)
// assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
}
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
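The Convert() fallback added here is what lets the ordering and sign asserts accept named types. A small, hypothetical test sketch:

```go
package demo // in a _test.go file

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// age is a hypothetical named type: its reflect.Kind is reflect.Int, but its
// dynamic type is not exactly int, so the old obj.(int) type assertion failed
// where the new Convert(intType) fallback succeeds.
type age int

func TestOrderingWithNamedType(t *testing.T) {
	assert.Greater(t, age(30), age(20))
	assert.Positive(t, age(1)) // Positive/Negative are also added in this release
}
```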

View File

@ -114,6 +114,24 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
return Error(t, err, append([]interface{}{msg}, args...)...)
}
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
}
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
}
// Eventuallyf asserts that the given condition will be met within waitFor time,
// periodically checking target function each tick.
//
@ -321,6 +339,54 @@ func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsil
return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
}
// IsDecreasingf asserts that the collection is decreasing
//
// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsIncreasingf asserts that the collection is increasing
//
// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsNonDecreasingf asserts that the collection is not decreasing
//
// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsNonIncreasingf asserts that the collection is not increasing
//
// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
}
// IsTypef asserts that the specified objects are of the same type.
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@ -375,6 +441,17 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args .
return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
}
// Negativef asserts that the specified element is negative
//
// assert.Negativef(t, -1, "error message %s", "formatted")
// assert.Negativef(t, -1.23, "error message %s", "formatted")
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Negative(t, e, append([]interface{}{msg}, args...)...)
}
// Neverf asserts that the given condition is never satisfied within waitFor time,
// periodically checking the target function each tick.
//
@ -476,6 +553,15 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
}
// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
@ -572,6 +658,17 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str
return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
}
// Positivef asserts that the specified element is positive
//
// assert.Positivef(t, 1, "error message %s", "formatted")
// assert.Positivef(t, 1.23, "error message %s", "formatted")
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return Positive(t, e, append([]interface{}{msg}, args...)...)
}
// Regexpf asserts that a specified regexp matches a string.
//
// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")

View File

@ -204,6 +204,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
return Error(a.t, err, msgAndArgs...)
}
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return ErrorAs(a.t, err, target, msgAndArgs...)
}
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return ErrorAsf(a.t, err, target, msg, args...)
}
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return ErrorIs(a.t, err, target, msgAndArgs...)
}
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return ErrorIsf(a.t, err, target, msg, args...)
}
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
@ -631,6 +667,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo
return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
}
// IsDecreasing asserts that the collection is decreasing
//
// a.IsDecreasing([]int{2, 1, 0})
// a.IsDecreasing([]float64{2, 1})
// a.IsDecreasing([]string{"b", "a"})
func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsDecreasing(a.t, object, msgAndArgs...)
}
// IsDecreasingf asserts that the collection is decreasing
//
// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
// a.IsDecreasingf([]float64{2, 1}, "error message %s", "formatted")
// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsDecreasingf(a.t, object, msg, args...)
}
// IsIncreasing asserts that the collection is increasing
//
// a.IsIncreasing([]int{1, 2, 3})
// a.IsIncreasing([]float64{1, 2})
// a.IsIncreasing([]string{"a", "b"})
func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsIncreasing(a.t, object, msgAndArgs...)
}
// IsIncreasingf asserts that the collection is increasing
//
// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
// a.IsIncreasingf([]float64{1, 2}, "error message %s", "formatted")
// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsIncreasingf(a.t, object, msg, args...)
}
// IsNonDecreasing asserts that the collection is not decreasing
//
// a.IsNonDecreasing([]int{1, 1, 2})
// a.IsNonDecreasing([]float64{1, 2})
// a.IsNonDecreasing([]string{"a", "b"})
func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNonDecreasing(a.t, object, msgAndArgs...)
}
// IsNonDecreasingf asserts that the collection is not decreasing
//
// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
// a.IsNonDecreasingf([]float64{1, 2}, "error message %s", "formatted")
// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNonDecreasingf(a.t, object, msg, args...)
}
// IsNonIncreasing asserts that the collection is not increasing
//
// a.IsNonIncreasing([]int{2, 1, 1})
// a.IsNonIncreasing([]float64{2, 1})
// a.IsNonIncreasing([]string{"b", "a"})
func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNonIncreasing(a.t, object, msgAndArgs...)
}
// IsNonIncreasingf asserts that the collection is not increasing
//
// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
// a.IsNonIncreasingf([]float64{2, 1}, "error message %s", "formatted")
// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNonIncreasingf(a.t, object, msg, args...)
}
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@ -739,6 +871,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
return Lessf(a.t, e1, e2, msg, args...)
}
// Negative asserts that the specified element is negative
//
// a.Negative(-1)
// a.Negative(-1.23)
func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Negative(a.t, e, msgAndArgs...)
}
// Negativef asserts that the specified element is negative
//
// a.Negativef(-1, "error message %s", "formatted")
// a.Negativef(-1.23, "error message %s", "formatted")
func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Negativef(a.t, e, msg, args...)
}
// Never asserts that the given condition is never satisfied within waitFor time,
// periodically checking the target function each tick.
//
@ -941,6 +1095,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
return NotEqualf(a.t, expected, actual, msg, args...)
}
// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotErrorIs(a.t, err, target, msgAndArgs...)
}
// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotErrorIsf(a.t, err, target, msg, args...)
}
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err)
@ -1133,6 +1305,28 @@ func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) b
return Panicsf(a.t, f, msg, args...)
}
// Positive asserts that the specified element is positive
//
// a.Positive(1)
// a.Positive(1.23)
func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Positive(a.t, e, msgAndArgs...)
}
// Positivef asserts that the specified element is positive
//
// a.Positivef(1, "error message %s", "formatted")
// a.Positivef(1.23, "error message %s", "formatted")
func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return Positivef(a.t, e, msg, args...)
}
// Regexp asserts that a specified regexp matches a string.
//
// a.Regexp(regexp.MustCompile("start"), "it's starting")

View File

@ -0,0 +1,81 @@
package assert
import (
"fmt"
"reflect"
)
// isOrdered checks that the collection's elements are ordered according to allowedComparesResults.
func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
objKind := reflect.TypeOf(object).Kind()
if objKind != reflect.Slice && objKind != reflect.Array {
return false
}
objValue := reflect.ValueOf(object)
objLen := objValue.Len()
if objLen <= 1 {
return true
}
value := objValue.Index(0)
valueInterface := value.Interface()
firstValueKind := value.Kind()
for i := 1; i < objLen; i++ {
prevValue := value
prevValueInterface := valueInterface
value = objValue.Index(i)
valueInterface = value.Interface()
compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
}
if !containsValue(allowedComparesResults, compareResult) {
return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
}
}
return true
}
// IsIncreasing asserts that the collection is increasing
//
// assert.IsIncreasing(t, []int{1, 2, 3})
// assert.IsIncreasing(t, []float64{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
}
// IsNonIncreasing asserts that the collection is not increasing
//
// assert.IsNonIncreasing(t, []int{2, 1, 1})
// assert.IsNonIncreasing(t, []float64{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
}
// IsDecreasing asserts that the collection is decreasing
//
// assert.IsDecreasing(t, []int{2, 1, 0})
// assert.IsDecreasing(t, []float64{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
}
// IsNonDecreasing asserts that the collection is not decreasing
//
// assert.IsNonDecreasing(t, []int{1, 1, 2})
// assert.IsNonDecreasing(t, []float64{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
}
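A quick, hypothetical tour of the four helpers defined above (the example slices are arbitrary; adjacent elements are compared pairwise, so all elements must share one supported kind):

```go
package demo // in a _test.go file

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOrderingAsserts(t *testing.T) {
	assert.IsIncreasing(t, []int{1, 2, 3})          // strictly increasing
	assert.IsNonDecreasing(t, []float64{1, 1, 2})   // allows equal neighbors
	assert.IsDecreasing(t, []string{"c", "b", "a"}) // strictly decreasing
	assert.IsNonIncreasing(t, []byte{2, 1, 1})      // allows equal neighbors
}
```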

View File

@ -172,8 +172,8 @@ func isTest(name, prefix string) bool {
if len(name) == len(prefix) { // "Test" is ok
return true
}
rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
return !unicode.IsLower(rune)
r, _ := utf8.DecodeRuneInString(name[len(prefix):])
return !unicode.IsLower(r)
}
func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
@ -1622,6 +1622,7 @@ var spewConfig = spew.ConfigState{
DisableCapacities: true,
SortKeys: true,
DisableMethods: true,
MaxDepth: 10,
}
type tHelper interface {
@ -1693,3 +1694,81 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
}
}
}
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if errors.Is(err, target) {
return true
}
var expectedText string
if target != nil {
expectedText = target.Error()
}
chain := buildErrorChainString(err)
return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
"expected: %q\n"+
"in chain: %s", expectedText, chain,
), msgAndArgs...)
}
// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if !errors.Is(err, target) {
return true
}
var expectedText string
if target != nil {
expectedText = target.Error()
}
chain := buildErrorChainString(err)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
"found: %q\n"+
"in chain: %s", expectedText, chain,
), msgAndArgs...)
}
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if errors.As(err, target) {
return true
}
chain := buildErrorChainString(err)
return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
"expected: %q\n"+
"in chain: %s", target, chain,
), msgAndArgs...)
}
func buildErrorChainString(err error) string {
if err == nil {
return ""
}
e := errors.Unwrap(err)
chain := fmt.Sprintf("%q", err.Error())
for e != nil {
chain += fmt.Sprintf("\n\t%q", e.Error())
e = errors.Unwrap(e)
}
return chain
}
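A short, hypothetical test showing the three new helpers together (the error types and messages are invented):

```go
package demo // in a _test.go file

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// parseError is a hypothetical wrapped error type for the example.
type parseError struct{ line int }

func (e *parseError) Error() string { return fmt.Sprintf("parse error on line %d", e.line) }

func TestErrorChainHelpers(t *testing.T) {
	sentinel := errors.New("bad manifest")
	wrapped := fmt.Errorf("loading image: %w", sentinel)

	assert.ErrorIs(t, wrapped, sentinel)               // walks the %w chain via errors.Is
	assert.NotErrorIs(t, wrapped, errors.New("other")) // a distinct error value never matches

	err := fmt.Errorf("parsing: %w", &parseError{line: 3})
	var pe *parseError
	if assert.ErrorAs(t, err, &pe) { // errors.As also sets pe to the wrapped value
		assert.Equal(t, 3, pe.line)
	}
}
```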

View File

@ -256,6 +256,54 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) {
t.FailNow()
}
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.ErrorAs(t, err, target, msgAndArgs...) {
return
}
t.FailNow()
}
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.ErrorAsf(t, err, target, msg, args...) {
return
}
t.FailNow()
}
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.ErrorIs(t, err, target, msgAndArgs...) {
return
}
t.FailNow()
}
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.ErrorIsf(t, err, target, msg, args...) {
return
}
t.FailNow()
}
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
@ -806,6 +854,126 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl
t.FailNow()
}
// IsDecreasing asserts that the collection is decreasing
//
// assert.IsDecreasing(t, []int{2, 1, 0})
// assert.IsDecreasing(t, []float64{2, 1})
// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsDecreasing(t, object, msgAndArgs...) {
return
}
t.FailNow()
}
// IsDecreasingf asserts that the collection is decreasing
//
// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsDecreasingf(t, object, msg, args...) {
return
}
t.FailNow()
}
// IsIncreasing asserts that the collection is increasing
//
// assert.IsIncreasing(t, []int{1, 2, 3})
// assert.IsIncreasing(t, []float64{1, 2})
// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsIncreasing(t, object, msgAndArgs...) {
return
}
t.FailNow()
}
// IsIncreasingf asserts that the collection is increasing
//
// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsIncreasingf(t, object, msg, args...) {
return
}
t.FailNow()
}
// IsNonDecreasing asserts that the collection is not decreasing
//
// assert.IsNonDecreasing(t, []int{1, 1, 2})
// assert.IsNonDecreasing(t, []float64{1, 2})
// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsNonDecreasing(t, object, msgAndArgs...) {
return
}
t.FailNow()
}
// IsNonDecreasingf asserts that the collection is not decreasing
//
// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsNonDecreasingf(t, object, msg, args...) {
return
}
t.FailNow()
}
// IsNonIncreasing asserts that the collection is not increasing
//
// assert.IsNonIncreasing(t, []int{2, 1, 1})
// assert.IsNonIncreasing(t, []float64{2, 1})
// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsNonIncreasing(t, object, msgAndArgs...) {
return
}
t.FailNow()
}
// IsNonIncreasingf asserts that the collection is not increasing
//
// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsNonIncreasingf(t, object, msg, args...) {
return
}
t.FailNow()
}
// IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
@ -944,6 +1112,34 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter
t.FailNow()
}
// Negative asserts that the specified element is negative
//
// assert.Negative(t, -1)
// assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.Negative(t, e, msgAndArgs...) {
return
}
t.FailNow()
}
// Negativef asserts that the specified element is negative
//
// assert.Negativef(t, -1, "error message %s", "formatted")
// assert.Negativef(t, -1.23, "error message %s", "formatted")
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.Negativef(t, e, msg, args...) {
return
}
t.FailNow()
}
// Never asserts that the given condition is never satisfied within waitFor time,
// periodically checking the target function each tick.
//
@ -1200,6 +1396,30 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string,
t.FailNow()
}
// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotErrorIs(t, err, target, msgAndArgs...) {
return
}
t.FailNow()
}
// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotErrorIsf(t, err, target, msg, args...) {
return
}
t.FailNow()
}
// NotNil asserts that the specified object is not nil.
//
// assert.NotNil(t, err)
@ -1446,6 +1666,34 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}
t.FailNow()
}
// Positive asserts that the specified element is positive
//
// assert.Positive(t, 1)
// assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.Positive(t, e, msgAndArgs...) {
return
}
t.FailNow()
}
// Positivef asserts that the specified element is positive
//
// assert.Positivef(t, 1, "error message %s", "formatted")
// assert.Positivef(t, 1.23, "error message %s", "formatted")
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.Positivef(t, e, msg, args...) {
return
}
t.FailNow()
}
// Regexp asserts that a specified regexp matches a string.
//
// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")

View File

@ -205,6 +205,42 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
Error(a.t, err, msgAndArgs...)
}
// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
ErrorAs(a.t, err, target, msgAndArgs...)
}
// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
// This is a wrapper for errors.As.
func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
ErrorAsf(a.t, err, target, msg, args...)
}
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
ErrorIs(a.t, err, target, msgAndArgs...)
}
// ErrorIsf asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
ErrorIsf(a.t, err, target, msg, args...)
}
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
@ -632,6 +668,102 @@ func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilo
InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
}
// IsDecreasing asserts that the collection is decreasing
//
// a.IsDecreasing([]int{2, 1, 0})
// a.IsDecreasing([]float64{2, 1})
// a.IsDecreasing([]string{"b", "a"})
func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsDecreasing(a.t, object, msgAndArgs...)
}
// IsDecreasingf asserts that the collection is decreasing
//
// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
// a.IsDecreasingf([]float64{2, 1}, "error message %s", "formatted")
// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsDecreasingf(a.t, object, msg, args...)
}
// IsIncreasing asserts that the collection is increasing
//
// a.IsIncreasing([]int{1, 2, 3})
// a.IsIncreasing([]float64{1, 2})
// a.IsIncreasing([]string{"a", "b"})
func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsIncreasing(a.t, object, msgAndArgs...)
}
// IsIncreasingf asserts that the collection is increasing
//
// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
// a.IsIncreasingf([]float64{1, 2}, "error message %s", "formatted")
// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsIncreasingf(a.t, object, msg, args...)
}
// IsNonDecreasing asserts that the collection is not decreasing
//
// a.IsNonDecreasing([]int{1, 1, 2})
// a.IsNonDecreasing([]float64{1, 2})
// a.IsNonDecreasing([]string{"a", "b"})
func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsNonDecreasing(a.t, object, msgAndArgs...)
}
// IsNonDecreasingf asserts that the collection is not decreasing
//
// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
// a.IsNonDecreasingf([]float64{1, 2}, "error message %s", "formatted")
// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsNonDecreasingf(a.t, object, msg, args...)
}
// IsNonIncreasing asserts that the collection is not increasing
//
// a.IsNonIncreasing([]int{2, 1, 1})
// a.IsNonIncreasing([]float64{2, 1})
// a.IsNonIncreasing([]string{"b", "a"})
func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsNonIncreasing(a.t, object, msgAndArgs...)
}
// IsNonIncreasingf asserts that the collection is not increasing
//
// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
// a.IsNonIncreasingf([]float64{2, 1}, "error message %s", "formatted")
// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsNonIncreasingf(a.t, object, msg, args...)
}
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
@ -740,6 +872,28 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i
Lessf(a.t, e1, e2, msg, args...)
}
// Negative asserts that the specified element is negative
//
// a.Negative(-1)
// a.Negative(-1.23)
func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Negative(a.t, e, msgAndArgs...)
}
// Negativef asserts that the specified element is negative
//
// a.Negativef(-1, "error message %s", "formatted")
// a.Negativef(-1.23, "error message %s", "formatted")
func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Negativef(a.t, e, msg, args...)
}
// Never asserts that the given condition is never satisfied within waitFor time,
// periodically checking the target function each tick.
//
@ -942,6 +1096,24 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str
NotEqualf(a.t, expected, actual, msg, args...)
}
// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotErrorIs(a.t, err, target, msgAndArgs...)
}
// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotErrorIsf(a.t, err, target, msg, args...)
}
// NotNil asserts that the specified object is not nil.
//
// a.NotNil(err)
@ -1134,6 +1306,28 @@ func (a *Assertions) Panicsf(f assert.PanicTestFunc, msg string, args ...interfa
Panicsf(a.t, f, msg, args...)
}
// Positive asserts that the specified element is positive
//
// a.Positive(1)
// a.Positive(1.23)
func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Positive(a.t, e, msgAndArgs...)
}
// Positivef asserts that the specified element is positive
//
// a.Positivef(1, "error message %s", "formatted")
// a.Positivef(1.23, "error message %s", "formatted")
func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
Positivef(a.t, e, msg, args...)
}
// Regexp asserts that a specified regexp matches a string.
//
// a.Regexp(regexp.MustCompile("start"), "it's starting")

10
vendor/github.com/ulikunitz/xz/SECURITY.md generated vendored Normal file
View File

@ -0,0 +1,10 @@
# Security Policy
## Supported Versions
Currently the last minor version v0.5.x is supported.
## Reporting a Vulnerability
Report a vulnerability by creating a GitHub issue at
<https://github.com/ulikunitz/xz/issues>. Expect a response in a week.

View File

@ -8,19 +8,17 @@
1. Review encoder and check for lzma improvements under xz.
2. Fix binary tree matcher.
3. Compare compression ratio with xz tool using comparable parameters
and optimize parameters
4. Do some optimizations
- rename operation action and make it a simple type of size 8
- make maxMatches, wordSize parameters
- stop searching after a certain length is found (parameter sweetLen)
3. Compare compression ratio with xz tool using comparable parameters and optimize parameters
4. rename operation action and make it a simple type of size 8
5. make maxMatches, wordSize parameters
6. stop searching after a certain length is found (parameter sweetLen)
## Release v0.7
1. Optimize code
2. Do statistical analysis to get linear presets.
3. Test sync.Pool compatibility for xz and lzma Writer and Reader
3. Fuzz optimized code.
4. Fuzz optimized code.
## Release v0.8
@ -44,53 +42,73 @@
## Package lzma
### Release v0.6
- Rewrite Encoder into a simple greedy one-op-at-a-time encoder
including
+ simple scan at the dictionary head for the same byte
+ use the killer byte (requiring matches to get longer, the first
test should be the byte that would make the match longer)
### v0.6
* Rewrite Encoder into a simple greedy one-op-at-a-time encoder including
* simple scan at the dictionary head for the same byte
* use the killer byte (requiring matches to get longer, the first test should be the byte that would make the match longer)
## Optimizations
- There may be a lot of false sharing in lzma.State; check whether this
can be improved by reorganizing the internal structure of it.
- Check whether batching encoding and decoding improves speed.
* There may be a lot of false sharing in lzma. State; check whether this can be improved by reorganizing the internal structure of it.
* Check whether batching encoding and decoding improves speed.
### DAG optimizations
- Use full buffer to create minimal bit-length above range encoder.
- Might be too slow (see v0.4)
* Use full buffer to create minimal bit-length above range encoder.
* Might be too slow (see v0.4)
### Different match finders
- hashes with 2, 3 characters additional to 4 characters
- binary trees with 2-7 characters (uint64 as key, use uint32 as
* hashes with 2, 3 characters additional to 4 characters
* binary trees with 2-7 characters (uint64 as key, use uint32 as
pointers into an array)
- rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
* rb-trees with 2-7 characters (uint64 as key, use uint32 as pointers
into an array with bit-stealing for the colors)
## Release Procedure
- execute goch -l for all packages; probably with lower param like 0.5.
- check orthography with gospell
- Write release notes in doc/relnotes.
- Update README.md
- xb copyright . in xz directory to ensure all new files have Copyright
header
- VERSION=<version> go generate github.com/ulikunitz/xz/... to update
version files
- Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
- Update TODO.md - write short log entry
- git checkout master && git merge dev
- git tag -a <version>
- git push
* execute goch -l for all packages; probably with lower param like 0.5.
* check orthography with gospell
* Write release notes in doc/relnotes.
* Update README.md
* xb copyright . in xz directory to ensure all new files have Copyright header
* `VERSION=<version> go generate github.com/ulikunitz/xz/...` to update version files
* Execute test for Linux/amd64, Linux/x86 and Windows/amd64.
* Update TODO.md - write short log entry
* `git checkout master && git merge dev`
* `git tag -a <version>`
* `git push`
## Log
## 2020-08-19
### 2020-12-17
Release v0.5.9 fixes warnings and a typo, and adds SECURITY.md.
One fix is interesting.
```go
const (
a byte = 0x1
b = 0x2
)
```
The constants a and b don't have the same type: b, given its own value but no explicit type, is an untyped constant whose default type is int, not byte. Correct is
```go
const (
a byte = 0x1
b byte = 0x2
)
```
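A standalone sketch that makes the difference observable; %T prints each constant's type:

```go
package main

import "fmt"

const (
	a byte = 0x1
	b      = 0x2 // untyped constant: defaults to int, not byte
)

func main() {
	fmt.Printf("%T %T\n", a, b) // uint8 int
}
```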
### 2020-08-19
Release v0.5.8 fixes issue
[issue #35](https://github.com/ulikunitz/xz/issues/35).
@ -208,8 +226,8 @@ MININT.
### 2015-06-04
It has been a productive day. I improved the interface of lzma.Reader
and lzma.Writer and fixed the error handling.
It has been a productive day. I improved the interface of lzma.Reader
and lzma.Writer and fixed the error handling.
### 2015-06-01
@ -260,7 +278,7 @@ needed anymore.
However I will implement a ReaderState and WriterState type to use
static typing to ensure the right State object is combined with the
right lzbase.Reader and lzbase.Writer.
right lzbase.Reader and lzbase.Writer.
As a start I have implemented ReaderState and WriterState to ensure
that the state for reading is only used by readers and WriterState only
@ -282,11 +300,11 @@ old lzma package has been completely removed.
### 2015-04-05
Implemented lzma.Reader and tested it.
Implemented lzma.Reader and tested it.
### 2015-04-04
Implemented baseReader by adapting code form lzma.Reader.
Implemented baseReader by adapting code from lzma.Reader.
### 2015-04-03
@ -302,7 +320,7 @@ However in Francesco Campoy's presentation "Go for Javaneros
(Javaïstes?)" is the the idea that using an embedded field E, all the
methods of E will be defined on T. If E is an interface T satisfies E.
https://talks.golang.org/2014/go4java.slide#51
<https://talks.golang.org/2014/go4java.slide#51>
I have never used this, but it seems to be a cool idea.
@ -327,11 +345,11 @@ and the opCodec.
1. Implemented simple lzmago tool
2. Tested tool against large 4.4G file
- compression worked correctly; tested decompression with lzma
- decompression hits a full buffer condition
* compression worked correctly; tested decompression with lzma
* decompression hits a full buffer condition
3. Fixed a bug in the compressor and wrote a test for it
4. Executed full cycle for 4.4 GB file; performance can be improved ;-)
### 2015-01-11
- Release v0.2 because of the working LZMA encoder and decoder
* Release v0.2 because of the working LZMA encoder and decoder


@ -47,9 +47,9 @@ const HeaderLen = 12
// Constants for the checksum methods supported by xz.
const (
None byte = 0x0
CRC32 = 0x1
CRC64 = 0x4
SHA256 = 0xa
CRC32 byte = 0x1
CRC64 byte = 0x4
SHA256 byte = 0xa
)
// errInvalidFlags indicates that flags are invalid.
@ -569,22 +569,6 @@ func readFilters(r io.Reader, count int) (filters []filter, err error) {
return []filter{f}, err
}
// writeFilters writes the filters.
func writeFilters(w io.Writer, filters []filter) (n int, err error) {
for _, f := range filters {
p, err := f.MarshalBinary()
if err != nil {
return n, err
}
k, err := w.Write(p)
n += k
if err != nil {
return n, err
}
}
return n, nil
}
/*** Index ***/
// record describes a block in the xz file index.


@ -5,10 +5,7 @@
package lzma
import (
"bufio"
"errors"
"fmt"
"io"
"unicode"
)
@ -349,6 +346,7 @@ func dumpX(x uint32) string {
return string(a)
}
/*
// dumpNode writes a representation of the node v into the io.Writer.
func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) {
if v == null {
@ -377,6 +375,7 @@ func (t *binTree) dump(w io.Writer) error {
t.dumpNode(bw, t.root, 0)
return bw.Flush()
}
*/
func (t *binTree) distance(v uint32) int {
dist := int(t.front) - int(v)


@ -18,6 +18,7 @@ var ntz32Table = [32]int8{
30, 17, 8, 14, 29, 13, 28, 27,
}
/*
// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer.
func ntz32(x uint32) int {
if x == 0 {
@ -26,6 +27,7 @@ func ntz32(x uint32) int {
x = (x & -x) * ntz32Const
return int(ntz32Table[x>>27])
}
*/
// nlz32 computes the number of leading zeros for an unsigned 32-bit integer.
func nlz32(x uint32) int {


@ -200,7 +200,7 @@ func (d *decoder) decompress() error {
op, err := d.readOp()
switch err {
case nil:
break
// break
case errEOS:
d.eos = true
if !d.rd.possiblyAtEnd() {


@ -126,10 +126,3 @@ func (d *decoderDict) Available() int { return d.buf.Available() }
// Read reads data from the buffer contained in the decoder dictionary.
func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) }
// Buffered returns the number of bytes currently buffered in the
// decoder dictionary.
func (d *decoderDict) buffered() int { return d.buf.Buffered() }
// Peek gets data from the buffer without advancing the rear index.
func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) }


@ -4,21 +4,10 @@
package lzma
import "fmt"
// directCodec allows the encoding and decoding of values with a fixed number
// of bits. The number of bits must be in the range [1,32].
type directCodec byte
// makeDirectCodec creates a directCodec. The function panics if the number of
// bits is not in the range [1,32].
func makeDirectCodec(bits int) directCodec {
if !(1 <= bits && bits <= 32) {
panic(fmt.Errorf("bits=%d out of range", bits))
}
return directCodec(bits)
}
// Bits returns the number of bits supported by this codec.
func (dc directCodec) Bits() int {
return int(dc)


@ -20,8 +20,6 @@ const (
posSlotBits = 6
// number of align bits
alignBits = 4
// maximum position slot
maxPosSlot = 63
)
// distCodec provides encoding and decoding of distance values.
@ -45,20 +43,6 @@ func (dc *distCodec) deepcopy(src *distCodec) {
dc.alignCodec.deepcopy(&src.alignCodec)
}
// distBits returns the number of bits required to encode dist.
func distBits(dist uint32) int {
if dist < startPosModel {
return 6
}
// slot s > 3, dist d
// s = 2(bits(d)-1) + bit(d, bits(d)-2)
// s>>1 = bits(d)-1
// bits(d) = 32-nlz32(d)
// s>>1=31-nlz32(d)
// n = 5 + (s>>1) = 36 - nlz32(d)
return 36 - nlz32(dist)
}
// newDistCodec creates a new distance codec.
func (dc *distCodec) init() {
for i := range dc.posSlotCodecs {


@ -19,7 +19,7 @@ type matcher interface {
}
// encoderDict provides the dictionary of the encoder. It includes an
// addtional buffer atop of the actual dictionary.
// additional buffer atop of the actual dictionary.
type encoderDict struct {
buf buffer
m matcher


@ -264,7 +264,7 @@ type chunkState byte
// state
const (
start chunkState = 'S'
stop = 'T'
stop chunkState = 'T'
)
// errors for the chunk state handling


@ -56,19 +56,6 @@ func (lc *lengthCodec) init() {
lc.high = makeTreeCodec(8)
}
// lBits gives the number of bits used for the encoding of the l value
// provided to the range encoder.
func lBits(l uint32) int {
switch {
case l < 8:
return 4
case l < 16:
return 5
default:
return 10
}
}
// Encode encodes the length offset. The length offset l can be computed by
// subtracting minMatchLen (2) from the actual length.
//


@ -123,10 +123,3 @@ const (
minLP = 0
maxLP = 4
)
// minState and maxState define a range for the state values stored in
// the State values.
const (
minState = 0
maxState = 11
)


@ -5,7 +5,6 @@
package lzma
import (
"errors"
"fmt"
"unicode"
)
@ -24,30 +23,6 @@ type match struct {
n int
}
// verify checks whether the match is valid. If that is not the case an
// error is returned.
func (m match) verify() error {
if !(minDistance <= m.distance && m.distance <= maxDistance) {
return errors.New("distance out of range")
}
if !(1 <= m.n && m.n <= maxMatchLen) {
return errors.New("length out of range")
}
return nil
}
// l return the l-value for the match, which is the difference of length
// n and 2.
func (m match) l() uint32 {
return uint32(m.n - minMatchLen)
}
// dist returns the dist value for the match, which is one less of the
// distance stored in the match.
func (m match) dist() uint32 {
return uint32(m.distance - minDistance)
}
// Len returns the number of bytes matched.
func (m match) Len() int {
return m.n


@ -131,32 +131,6 @@ type rangeDecoder struct {
code uint32
}
// init initializes the range decoder, by reading from the byte reader.
func (d *rangeDecoder) init() error {
d.nrange = 0xffffffff
d.code = 0
b, err := d.br.ReadByte()
if err != nil {
return err
}
if b != 0 {
return errors.New("newRangeDecoder: first byte not zero")
}
for i := 0; i < 4; i++ {
if err = d.updateCode(); err != nil {
return err
}
}
if d.code >= d.nrange {
return errors.New("newRangeDecoder: d.code >= d.nrange")
}
return nil
}
// newRangeDecoder initializes a range decoder. It reads five bytes from the
// reader and therefore may return an error.
func newRangeDecoder(br io.ByteReader) (d *rangeDecoder, err error) {


@ -48,7 +48,6 @@ type Reader2 struct {
chunkReader io.Reader
cstate chunkState
ctype chunkType
}
// NewReader2 creates a reader for an LZMA2 chunk sequence.


@ -53,12 +53,6 @@ func (s *state) Reset() {
s.distCodec.init()
}
// initState initializes the state.
func initState(s *state, p Properties) {
*s = state{Properties: p}
s.Reset()
}
// newState creates a new state from the give Properties.
func newState(p Properties) *state {
s := &state{Properties: p}


@ -26,13 +26,6 @@ type ReaderConfig struct {
SingleStream bool
}
// fill replaces all zero values with their default values.
func (c *ReaderConfig) fill() {
if c.DictCap == 0 {
c.DictCap = 8 * 1024 * 1024
}
}
// Verify checks the reader parameters for validity. Zero values will be
// replaced by default values.
func (c *ReaderConfig) Verify() error {
@ -165,9 +158,6 @@ func (c ReaderConfig) newStreamReader(xz io.Reader) (r *streamReader, err error)
return r, nil
}
// errIndex indicates an error with the xz file index.
var errIndex = errors.New("xz: error in xz file index")
// readTail reads the index body and the xz footer.
func (r *streamReader) readTail() error {
index, n, err := readIndexBody(r.xz)
@ -265,7 +255,6 @@ type blockReader struct {
n int64
hash hash.Hash
r io.Reader
err error
}
// newBlockReader creates a new block reader.
@ -315,10 +304,6 @@ func (br *blockReader) record() record {
return record{br.unpaddedSize(), br.uncompressedSize()}
}
// errBlockSize indicates that the size of the block in the block header
// is wrong.
var errBlockSize = errors.New("xz: wrong uncompressed size for block")
// Read reads data from the block.
func (br *blockReader) Read(p []byte) (n int, err error) {
n, err = br.r.Read(p)


@ -6,6 +6,7 @@ package xz
import (
"errors"
"fmt"
"hash"
"io"
@ -190,6 +191,9 @@ func (c WriterConfig) NewWriter(xz io.Writer) (w *Writer, err error) {
return nil, err
}
data, err := w.h.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("w.h.MarshalBinary(): error %w", err)
}
if _, err = xz.Write(data); err != nil {
return nil, err
}
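The %w verb used above keeps the original error in the chain so callers can still match it with errors.Is; a minimal sketch (errMarshal is an invented sentinel):

```go
package main

import (
	"errors"
	"fmt"
)

var errMarshal = errors.New("marshal failed")

func main() {
	wrapped := fmt.Errorf("w.h.MarshalBinary(): error %w", errMarshal)
	fmt.Println(errors.Is(wrapped, errMarshal)) // true: %w preserves the chain
}
```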


@ -1,4 +1,7 @@
language: go
arch:
- amd64
- ppc64le
go:
- 1.14.x


@ -123,13 +123,20 @@ func makeExtFunc(filler BarFiller) extFunc {
}
}
// TrimSpace trims bar's edge spaces.
func TrimSpace() BarOption {
// BarFillerTrim removes the leading and trailing space around the bar
// filler, which is rendered like ' [===] ' by default.
func BarFillerTrim() BarOption {
return func(s *bState) {
s.trimSpace = true
}
}
// TrimSpace is an alias to BarFillerTrim.
func TrimSpace() BarOption {
return BarFillerTrim()
}
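Hypothetical usage of the renamed option (assuming the mpb/v5 import path vendored here; the total and sleep are invented):

```go
package main

import (
	"time"

	"github.com/vbauerster/mpb/v5"
)

func main() {
	p := mpb.New()
	// Render the bar without the default leading/trailing space.
	bar := p.AddBar(100, mpb.BarFillerTrim())
	for i := 0; i < 100; i++ {
		bar.Increment()
		time.Sleep(5 * time.Millisecond)
	}
	p.Wait()
}
```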
// BarStyle overrides mpb.DefaultBarStyle which is "[=>-]<+".
// It's ok to pass string containing just 5 runes, for example "╢▌▌░╟",
// if you don't need to override '<' (reverse tip) and '+' (refill rune).


@ -4,7 +4,7 @@ require (
github.com/VividCortex/ewma v1.1.1
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
github.com/mattn/go-runewidth v0.0.9
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed
golang.org/x/sys v0.0.0-20201218084310-7d0127a74742
)
go 1.14


@ -4,5 +4,5 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpH
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed h1:WBkVNH1zd9jg/dK4HCM4lNANnmd12EHC9z+LmcCG4ns=
golang.org/x/sys v0.0.0-20200810151505-1b9f1253b3ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 h1:+CBz4km/0KPU3RGTwARGh/noP3bEwtHcq+0YcBQM2JQ=
golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=


@ -8,6 +8,7 @@ import (
"io"
"io/ioutil"
"log"
"math"
"os"
"sync"
"time"
@ -40,7 +41,6 @@ type pState struct {
pMatrix map[int][]chan int
aMatrix map[int][]chan int
barShutdownQueue []*Bar
barPopQueue []*Bar
// following are provided/overridden by user
idCount int
@ -179,7 +179,7 @@ func (p *Progress) BarCount() int {
}
}
// Wait waits far all bars to complete and finally shutdowns container.
// Wait waits for all bars to complete and finally shutdowns container.
// After this method has been called, there is no way to reuse *Progress
// instance.
func (p *Progress) Wait() {
@ -301,27 +301,18 @@ func (s *pState) flush(cw *cwriter.Writer) error {
delete(s.parkedBars, b)
b.toDrop = true
}
if s.popCompleted && !b.noPop {
lineCount -= b.extendedLines + 1
b.toDrop = true
}
if b.toDrop {
delete(bm, b)
s.heapUpdated = true
} else if s.popCompleted {
if b := b; !b.noPop {
defer func() {
s.barPopQueue = append(s.barPopQueue, b)
}()
}
}
b.cancel()
}
s.barShutdownQueue = s.barShutdownQueue[0:0]
for _, b := range s.barPopQueue {
delete(bm, b)
s.heapUpdated = true
lineCount -= b.extendedLines + 1
}
s.barPopQueue = s.barPopQueue[0:0]
for b := range bm {
heap.Push(&s.bHeap, b)
}
@ -370,7 +361,7 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio
}
if s.popCompleted && !bs.noPop {
bs.priority = -1
bs.priority = -(math.MaxInt32 - s.idCount)
}
bs.bufP = bytes.NewBuffer(make([]byte, 0, 128))
@ -382,17 +373,18 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio
func syncWidth(matrix map[int][]chan int) {
for _, column := range matrix {
column := column
go func() {
var maxWidth int
for _, ch := range column {
if w := <-ch; w > maxWidth {
maxWidth = w
}
}
for _, ch := range column {
ch <- maxWidth
}
}()
go maxWidthDistributor(column)
}
}
var maxWidthDistributor = func(column []chan int) {
var maxWidth int
for _, ch := range column {
if w := <-ch; w > maxWidth {
maxWidth = w
}
}
for _, ch := range column {
ch <- maxWidth
}
}
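The refactor above extracts the width synchronization into maxWidthDistributor: every bar in a column reports its width, then receives back the column maximum. A self-contained sketch of the same protocol (the widths and channel plumbing are invented for illustration):

```go
package main

import "fmt"

// maxWidthDistributor reads one width from every channel in the column,
// then sends the maximum back on each of them.
func maxWidthDistributor(column []chan int) {
	var maxWidth int
	for _, ch := range column {
		if w := <-ch; w > maxWidth {
			maxWidth = w
		}
	}
	for _, ch := range column {
		ch <- maxWidth
	}
}

func main() {
	widths := []int{3, 7, 5}
	column := make([]chan int, len(widths))
	results := make(chan int, len(widths))
	for i, w := range widths {
		ch := make(chan int)
		column[i] = ch
		go func(ch chan int, w int) {
			ch <- w         // report own width
			results <- <-ch // receive the column maximum
		}(ch, w)
	}
	maxWidthDistributor(column)
	for range widths {
		fmt.Println(<-results) // prints 7 three times
	}
}
```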

File diff suppressed because it is too large

8
vendor/golang.org/x/sys/plan9/asm.s generated vendored Normal file

@ -0,0 +1,8 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
TEXT ·use(SB),NOSPLIT,$0
RET

30
vendor/golang.org/x/sys/plan9/asm_plan9_386.s generated vendored Normal file

@ -0,0 +1,30 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
//
// System call support for 386, Plan 9
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-32
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-44
JMP syscall·Syscall6(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
TEXT ·seek(SB),NOSPLIT,$0-36
JMP syscall·seek(SB)
TEXT ·exit(SB),NOSPLIT,$4-4
JMP syscall·exit(SB)

30
vendor/golang.org/x/sys/plan9/asm_plan9_amd64.s generated vendored Normal file

@ -0,0 +1,30 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
//
// System call support for amd64, Plan 9
//
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-64
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-88
JMP syscall·Syscall6(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
TEXT ·seek(SB),NOSPLIT,$0-56
JMP syscall·seek(SB)
TEXT ·exit(SB),NOSPLIT,$8-8
JMP syscall·exit(SB)

25
vendor/golang.org/x/sys/plan9/asm_plan9_arm.s generated vendored Normal file

@ -0,0 +1,25 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// System call support for plan9 on arm
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-32
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-44
JMP syscall·Syscall6(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
TEXT ·seek(SB),NOSPLIT,$0-36
JMP syscall·seek(SB)
TEXT ·exit(SB),NOSPLIT,$4-4
JMP syscall·exit(SB)

70
vendor/golang.org/x/sys/plan9/const_plan9.go generated vendored Normal file

@ -0,0 +1,70 @@
package plan9
// Plan 9 Constants
// Open modes
const (
O_RDONLY = 0
O_WRONLY = 1
O_RDWR = 2
O_TRUNC = 16
O_CLOEXEC = 32
O_EXCL = 0x1000
)
// Rfork flags
const (
RFNAMEG = 1 << 0
RFENVG = 1 << 1
RFFDG = 1 << 2
RFNOTEG = 1 << 3
RFPROC = 1 << 4
RFMEM = 1 << 5
RFNOWAIT = 1 << 6
RFCNAMEG = 1 << 10
RFCENVG = 1 << 11
RFCFDG = 1 << 12
RFREND = 1 << 13
RFNOMNT = 1 << 14
)
// Qid.Type bits
const (
QTDIR = 0x80
QTAPPEND = 0x40
QTEXCL = 0x20
QTMOUNT = 0x10
QTAUTH = 0x08
QTTMP = 0x04
QTFILE = 0x00
)
// Dir.Mode bits
const (
DMDIR = 0x80000000
DMAPPEND = 0x40000000
DMEXCL = 0x20000000
DMMOUNT = 0x10000000
DMAUTH = 0x08000000
DMTMP = 0x04000000
DMREAD = 0x4
DMWRITE = 0x2
DMEXEC = 0x1
)
const (
STATMAX = 65535
ERRMAX = 128
STATFIXLEN = 49
)
// Mount and bind flags
const (
MREPL = 0x0000
MBEFORE = 0x0001
MAFTER = 0x0002
MORDER = 0x0003
MCREATE = 0x0004
MCACHE = 0x0010
MMASK = 0x0017
)

212
vendor/golang.org/x/sys/plan9/dir_plan9.go generated vendored Normal file

@ -0,0 +1,212 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Plan 9 directory marshalling. See intro(5).
package plan9
import "errors"
var (
ErrShortStat = errors.New("stat buffer too short")
ErrBadStat = errors.New("malformed stat buffer")
ErrBadName = errors.New("bad character in file name")
)
// A Qid represents a 9P server's unique identification for a file.
type Qid struct {
Path uint64 // the file server's unique identification for the file
Vers uint32 // version number for given Path
Type uint8 // the type of the file (plan9.QTDIR for example)
}
// A Dir contains the metadata for a file.
type Dir struct {
// system-modified data
Type uint16 // server type
Dev uint32 // server subtype
// file data
Qid Qid // unique id from server
Mode uint32 // permissions
Atime uint32 // last read time
Mtime uint32 // last write time
Length int64 // file length
Name string // last element of path
Uid string // owner name
Gid string // group name
Muid string // last modifier name
}
var nullDir = Dir{
Type: ^uint16(0),
Dev: ^uint32(0),
Qid: Qid{
Path: ^uint64(0),
Vers: ^uint32(0),
Type: ^uint8(0),
},
Mode: ^uint32(0),
Atime: ^uint32(0),
Mtime: ^uint32(0),
Length: ^int64(0),
}
// Null assigns special "don't touch" values to members of d to
// avoid modifying them during plan9.Wstat.
func (d *Dir) Null() { *d = nullDir }
// Marshal encodes a 9P stat message corresponding to d into b
//
// If there isn't enough space in b for a stat message, ErrShortStat is returned.
func (d *Dir) Marshal(b []byte) (n int, err error) {
n = STATFIXLEN + len(d.Name) + len(d.Uid) + len(d.Gid) + len(d.Muid)
if n > len(b) {
return n, ErrShortStat
}
for _, c := range d.Name {
if c == '/' {
return n, ErrBadName
}
}
b = pbit16(b, uint16(n)-2)
b = pbit16(b, d.Type)
b = pbit32(b, d.Dev)
b = pbit8(b, d.Qid.Type)
b = pbit32(b, d.Qid.Vers)
b = pbit64(b, d.Qid.Path)
b = pbit32(b, d.Mode)
b = pbit32(b, d.Atime)
b = pbit32(b, d.Mtime)
b = pbit64(b, uint64(d.Length))
b = pstring(b, d.Name)
b = pstring(b, d.Uid)
b = pstring(b, d.Gid)
b = pstring(b, d.Muid)
return n, nil
}
// UnmarshalDir decodes a single 9P stat message from b and returns the resulting Dir.
//
// If b is too small to hold a valid stat message, ErrShortStat is returned.
//
// If the stat message itself is invalid, ErrBadStat is returned.
func UnmarshalDir(b []byte) (*Dir, error) {
if len(b) < STATFIXLEN {
return nil, ErrShortStat
}
size, buf := gbit16(b)
if len(b) != int(size)+2 {
return nil, ErrBadStat
}
b = buf
var d Dir
d.Type, b = gbit16(b)
d.Dev, b = gbit32(b)
d.Qid.Type, b = gbit8(b)
d.Qid.Vers, b = gbit32(b)
d.Qid.Path, b = gbit64(b)
d.Mode, b = gbit32(b)
d.Atime, b = gbit32(b)
d.Mtime, b = gbit32(b)
n, b := gbit64(b)
d.Length = int64(n)
var ok bool
if d.Name, b, ok = gstring(b); !ok {
return nil, ErrBadStat
}
if d.Uid, b, ok = gstring(b); !ok {
return nil, ErrBadStat
}
if d.Gid, b, ok = gstring(b); !ok {
return nil, ErrBadStat
}
if d.Muid, b, ok = gstring(b); !ok {
return nil, ErrBadStat
}
return &d, nil
}
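A hedged round-trip sketch using the exported API (field values invented; this builds only with GOOS=plan9, hence the build tag):

```go
// +build plan9

package main

import (
	"fmt"

	"golang.org/x/sys/plan9"
)

func main() {
	d := plan9.Dir{Name: "notes.txt", Uid: "glenda", Gid: "glenda", Mode: 0644, Length: 42}
	// Marshal needs exactly STATFIXLEN plus the four variable-length strings.
	buf := make([]byte, plan9.STATFIXLEN+len(d.Name)+len(d.Uid)+len(d.Gid)+len(d.Muid))
	if _, err := d.Marshal(buf); err != nil {
		panic(err)
	}
	out, err := plan9.UnmarshalDir(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Name, out.Length) // notes.txt 42
}
```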
// pbit8 copies the 8-bit number v to b and returns the remaining slice of b.
func pbit8(b []byte, v uint8) []byte {
b[0] = byte(v)
return b[1:]
}
// pbit16 copies the 16-bit number v to b in little-endian order and returns the remaining slice of b.
func pbit16(b []byte, v uint16) []byte {
b[0] = byte(v)
b[1] = byte(v >> 8)
return b[2:]
}
// pbit32 copies the 32-bit number v to b in little-endian order and returns the remaining slice of b.
func pbit32(b []byte, v uint32) []byte {
b[0] = byte(v)
b[1] = byte(v >> 8)
b[2] = byte(v >> 16)
b[3] = byte(v >> 24)
return b[4:]
}
// pbit64 copies the 64-bit number v to b in little-endian order and returns the remaining slice of b.
func pbit64(b []byte, v uint64) []byte {
b[0] = byte(v)
b[1] = byte(v >> 8)
b[2] = byte(v >> 16)
b[3] = byte(v >> 24)
b[4] = byte(v >> 32)
b[5] = byte(v >> 40)
b[6] = byte(v >> 48)
b[7] = byte(v >> 56)
return b[8:]
}
// pstring copies the string s to b, prepending it with a 16-bit length in little-endian order, and
// returns the remaining slice of b.
func pstring(b []byte, s string) []byte {
b = pbit16(b, uint16(len(s)))
n := copy(b, s)
return b[n:]
}
// gbit8 reads an 8-bit number from b and returns it with the remaining slice of b.
func gbit8(b []byte) (uint8, []byte) {
return uint8(b[0]), b[1:]
}
// gbit16 reads a 16-bit number in little-endian order from b and returns it with the remaining slice of b.
func gbit16(b []byte) (uint16, []byte) {
return uint16(b[0]) | uint16(b[1])<<8, b[2:]
}
// gbit32 reads a 32-bit number in little-endian order from b and returns it with the remaining slice of b.
func gbit32(b []byte) (uint32, []byte) {
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24, b[4:]
}
// gbit64 reads a 64-bit number in little-endian order from b and returns it with the remaining slice of b.
func gbit64(b []byte) (uint64, []byte) {
lo := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
hi := uint32(b[4]) | uint32(b[5])<<8 | uint32(b[6])<<16 | uint32(b[7])<<24
return uint64(lo) | uint64(hi)<<32, b[8:]
}
// gstring reads a string from b, prefixed with a 16-bit length in little-endian order.
// It returns the string with the remaining slice of b and a boolean. If the length is
// greater than the number of bytes in b, the boolean will be false.
func gstring(b []byte) (string, []byte, bool) {
n, b := gbit16(b)
if int(n) > len(b) {
return "", b, false
}
return string(b[:n]), b[n:], true
}
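Since these helpers are unexported, here is a self-contained sketch that copies pbit16 and gbit16 verbatim to show the little-endian round trip:

```go
package main

import "fmt"

// pbit16 and gbit16 reproduced from the file above for illustration.
func pbit16(b []byte, v uint16) []byte {
	b[0] = byte(v)
	b[1] = byte(v >> 8)
	return b[2:]
}

func gbit16(b []byte) (uint16, []byte) {
	return uint16(b[0]) | uint16(b[1])<<8, b[2:]
}

func main() {
	buf := make([]byte, 2)
	pbit16(buf, 0x1234)
	fmt.Printf("% x\n", buf) // 34 12: low byte first
	v, _ := gbit16(buf)
	fmt.Printf("%#x\n", v) // 0x1234
}
```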

31
vendor/golang.org/x/sys/plan9/env_plan9.go generated vendored Normal file

@ -0,0 +1,31 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Plan 9 environment variables.
package plan9
import (
"syscall"
)
func Getenv(key string) (value string, found bool) {
return syscall.Getenv(key)
}
func Setenv(key, value string) error {
return syscall.Setenv(key, value)
}
func Clearenv() {
syscall.Clearenv()
}
func Environ() []string {
return syscall.Environ()
}
func Unsetenv(key string) error {
return syscall.Unsetenv(key)
}

50
vendor/golang.org/x/sys/plan9/errors_plan9.go generated vendored Normal file

@ -0,0 +1,50 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package plan9
import "syscall"
// Constants
const (
// Invented values to support what package os expects.
O_CREAT = 0x02000
O_APPEND = 0x00400
O_NOCTTY = 0x00000
O_NONBLOCK = 0x00000
O_SYNC = 0x00000
O_ASYNC = 0x00000
S_IFMT = 0x1f000
S_IFIFO = 0x1000
S_IFCHR = 0x2000
S_IFDIR = 0x4000
S_IFBLK = 0x6000
S_IFREG = 0x8000
S_IFLNK = 0xa000
S_IFSOCK = 0xc000
)
// Errors
var (
EINVAL = syscall.NewError("bad arg in system call")
ENOTDIR = syscall.NewError("not a directory")
EISDIR = syscall.NewError("file is a directory")
ENOENT = syscall.NewError("file does not exist")
EEXIST = syscall.NewError("file already exists")
EMFILE = syscall.NewError("no free file descriptors")
EIO = syscall.NewError("i/o error")
ENAMETOOLONG = syscall.NewError("file name too long")
EINTR = syscall.NewError("interrupted")
EPERM = syscall.NewError("permission denied")
EBUSY = syscall.NewError("no free devices")
ETIMEDOUT = syscall.NewError("connection timed out")
EPLAN9 = syscall.NewError("not supported by plan 9")
// The following errors do not correspond to any
// Plan 9 system messages. Invented to support
// what package os and others expect.
EACCES = syscall.NewError("access permission denied")
EAFNOSUPPORT = syscall.NewError("address family not supported by protocol")
)

150
vendor/golang.org/x/sys/plan9/mkall.sh generated vendored Normal file

@ -0,0 +1,150 @@
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# The plan9 package provides access to the raw system call
# interface of the underlying operating system. Porting Go to
# a new architecture/operating system combination requires
# some manual effort, though there are tools that automate
# much of the process. The auto-generated files have names
# beginning with z.
#
# This script runs or (given -n) prints suggested commands to generate z files
# for the current system. Running those commands is not automatic.
# This script is documentation more than anything else.
#
# * asm_${GOOS}_${GOARCH}.s
#
# This hand-written assembly file implements system call dispatch.
# There are three entry points:
#
# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr);
# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
#
# The first and second are the standard ones; they differ only in
# how many arguments can be passed to the kernel.
# The third is for low-level use by the ForkExec wrapper;
# unlike the first two, it does not call into the scheduler to
# let it know that a system call is running.
#
# * syscall_${GOOS}.go
#
# This hand-written Go file implements system calls that need
# special handling and lists "//sys" comments giving prototypes
# for ones that can be auto-generated. Mksyscall reads those
# comments to generate the stubs.
#
# * syscall_${GOOS}_${GOARCH}.go
#
# Same as syscall_${GOOS}.go except that it contains code specific
# to ${GOOS} on one particular architecture.
#
# * types_${GOOS}.c
#
# This hand-written C file includes standard C headers and then
# creates typedef or enum names beginning with a dollar sign
# (use of $ in variable names is a gcc extension). The hardest
# part about preparing this file is figuring out which headers to
# include and which symbols need to be #defined to get the
# actual data structures that pass through to the kernel system calls.
# Some C libraries present alternate versions for binary compatibility
# and translate them on the way in and out of system calls, but
# there is almost always a #define that can get the real ones.
# See types_darwin.c and types_linux.c for examples.
#
# * zerror_${GOOS}_${GOARCH}.go
#
# This machine-generated file defines the system's error numbers,
# error strings, and signal numbers. The generator is "mkerrors.sh".
# Usually no arguments are needed, but mkerrors.sh will pass its
# arguments on to godefs.
#
# * zsyscall_${GOOS}_${GOARCH}.go
#
# Generated by mksyscall.pl; see syscall_${GOOS}.go above.
#
# * zsysnum_${GOOS}_${GOARCH}.go
#
# Generated by mksysnum_${GOOS}.
#
# * ztypes_${GOOS}_${GOARCH}.go
#
# Generated by godefs; see types_${GOOS}.c above.
GOOSARCH="${GOOS}_${GOARCH}"
# defaults
mksyscall="go run mksyscall.go"
mkerrors="./mkerrors.sh"
zerrors="zerrors_$GOOSARCH.go"
mksysctl=""
zsysctl="zsysctl_$GOOSARCH.go"
mksysnum=
mktypes=
run="sh"
case "$1" in
-syscalls)
for i in zsyscall*go
do
sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
rm _$i
done
exit 0
;;
-n)
run="cat"
shift
esac
case "$#" in
0)
;;
*)
echo 'usage: mkall.sh [-n]' 1>&2
exit 2
esac
case "$GOOSARCH" in
_* | *_ | _)
echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
exit 1
;;
plan9_386)
mkerrors=
mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,386"
mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
mktypes="XXX"
;;
plan9_amd64)
mkerrors=
mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,amd64"
mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
mktypes="XXX"
;;
plan9_arm)
mkerrors=
mksyscall="go run mksyscall.go -l32 -plan9 -tags plan9,arm"
mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
mktypes="XXX"
;;
*)
echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
exit 1
;;
esac
(
if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
case "$GOOS" in
plan9)
syscall_goos="syscall_$GOOS.go"
if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos |gofmt >zsyscall_$GOOSARCH.go"; fi
;;
esac
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |gofmt >ztypes_$GOOSARCH.go"; fi
) | $run

246
vendor/golang.org/x/sys/plan9/mkerrors.sh generated vendored Normal file

@ -0,0 +1,246 @@
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Generate Go code listing errors and other #defined constant
# values (ENAMETOOLONG etc.), by asking the preprocessor
# about the definitions.
unset LANG
export LC_ALL=C
export LC_CTYPE=C
CC=${CC:-gcc}
uname=$(uname)
includes='
#include <sys/types.h>
#include <sys/file.h>
#include <fcntl.h>
#include <dirent.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <errno.h>
#include <sys/signal.h>
#include <signal.h>
#include <sys/resource.h>
'
ccflags="$@"
# Write go tool cgo -godefs input.
(
echo package plan9
echo
echo '/*'
indirect="includes_$(uname)"
echo "${!indirect} $includes"
echo '*/'
echo 'import "C"'
echo
echo 'const ('
# The gcc command line prints all the #defines
# it encounters while processing the input
echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags |
awk '
$1 != "#define" || $2 ~ /\(/ || $3 == "" {next}
$2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers
$2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next}
$2 ~ /^(SCM_SRCRT)$/ {next}
$2 ~ /^(MAP_FAILED)$/ {next}
$2 !~ /^ETH_/ &&
$2 !~ /^EPROC_/ &&
$2 !~ /^EQUIV_/ &&
$2 !~ /^EXPR_/ &&
$2 ~ /^E[A-Z0-9_]+$/ ||
$2 ~ /^B[0-9_]+$/ ||
$2 ~ /^V[A-Z0-9]+$/ ||
$2 ~ /^CS[A-Z0-9]/ ||
$2 ~ /^I(SIG|CANON|CRNL|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
$2 ~ /^IGN/ ||
$2 ~ /^IX(ON|ANY|OFF)$/ ||
$2 ~ /^IN(LCR|PCK)$/ ||
$2 ~ /(^FLU?SH)|(FLU?SH$)/ ||
$2 ~ /^C(LOCAL|READ)$/ ||
$2 == "BRKINT" ||
$2 == "HUPCL" ||
$2 == "PENDIN" ||
$2 == "TOSTOP" ||
$2 ~ /^PAR/ ||
$2 ~ /^SIG[^_]/ ||
$2 ~ /^O[CNPFP][A-Z]+[^_][A-Z]+$/ ||
$2 ~ /^IN_/ ||
$2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
$2 == "ICMPV6_FILTER" ||
$2 == "SOMAXCONN" ||
$2 == "NAME_MAX" ||
$2 == "IFNAMSIZ" ||
$2 ~ /^CTL_(MAXNAME|NET|QUERY)$/ ||
$2 ~ /^SYSCTL_VERS/ ||
$2 ~ /^(MS|MNT)_/ ||
$2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
$2 ~ /^(O|F|FD|NAME|S|PTRACE|PT)_/ ||
$2 ~ /^LINUX_REBOOT_CMD_/ ||
$2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
$2 !~ "NLA_TYPE_MASK" &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P)_/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
$2 !~ "RTF_BITS" &&
$2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
$2 ~ /^BIOC/ ||
$2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ ||
$2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|NOFILE|STACK)|RLIM_INFINITY/ ||
$2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
$2 ~ /^CLONE_[A-Z_]+/ ||
$2 !~ /^(BPF_TIMEVAL)$/ &&
$2 ~ /^(BPF|DLT)_/ ||
$2 !~ "WMESGLEN" &&
$2 ~ /^W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", $2, $2)}
$2 ~ /^__WCOREFLAG$/ {next}
$2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
{next}
' | sort
echo ')'
) >_const.go
# Pull out the error names for later.
errors=$(
echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' |
sort
)
# Pull out the signal names for later.
signals=$(
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
sort
)
# Again, writing regexps to a file.
echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' |
sort >_error.grep
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT)' |
sort >_signal.grep
echo '// mkerrors.sh' "$@"
echo '// Code generated by the command above; DO NOT EDIT.'
echo
go tool cgo -godefs -- "$@" _const.go >_error.out
cat _error.out | grep -vf _error.grep | grep -vf _signal.grep
echo
echo '// Errors'
echo 'const ('
cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= Errno(\1)/'
echo ')'
echo
echo '// Signals'
echo 'const ('
cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= Signal(\1)/'
echo ')'
# Run C program to print error and syscall strings.
(
echo -E "
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <ctype.h>
#include <string.h>
#include <signal.h>
#define nelem(x) (sizeof(x)/sizeof((x)[0]))
enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below
int errors[] = {
"
for i in $errors
do
echo -E ' '$i,
done
echo -E "
};
int signals[] = {
"
for i in $signals
do
echo -E ' '$i,
done
# Use -E because on some systems bash builtin interprets \n itself.
echo -E '
};
static int
intcmp(const void *a, const void *b)
{
return *(int*)a - *(int*)b;
}
int
main(void)
{
int i, j, e;
char buf[1024], *p;
printf("\n\n// Error table\n");
printf("var errors = [...]string {\n");
qsort(errors, nelem(errors), sizeof errors[0], intcmp);
for(i=0; i<nelem(errors); i++) {
e = errors[i];
if(i > 0 && errors[i-1] == e)
continue;
strcpy(buf, strerror(e));
// lowercase first letter: Bad -> bad, but STREAM -> STREAM.
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
buf[0] += a - A;
printf("\t%d: \"%s\",\n", e, buf);
}
printf("}\n\n");
printf("\n\n// Signal table\n");
printf("var signals = [...]string {\n");
qsort(signals, nelem(signals), sizeof signals[0], intcmp);
for(i=0; i<nelem(signals); i++) {
e = signals[i];
if(i > 0 && signals[i-1] == e)
continue;
strcpy(buf, strsignal(e));
// lowercase first letter: Bad -> bad, but STREAM -> STREAM.
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
buf[0] += a - A;
// cut trailing : number.
p = strrchr(buf, ":"[0]);
if(p)
*p = '\0';
printf("\t%d: \"%s\",\n", e, buf);
}
printf("}\n\n");
return 0;
}
'
) >_errors.c
$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out

23
vendor/golang.org/x/sys/plan9/mksysnum_plan9.sh generated vendored Normal file

@ -0,0 +1,23 @@
#!/bin/sh
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
COMMAND="mksysnum_plan9.sh $@"
cat <<EOF
// $COMMAND
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
package plan9
const(
EOF
SP='[ ]' # space or tab
sed "s/^#define${SP}\\([A-Z0-9_][A-Z0-9_]*\\)${SP}${SP}*\\([0-9][0-9]*\\)/SYS_\\1=\\2/g" \
< $1 | grep -v SYS__
cat <<EOF
)
EOF

21
vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go generated vendored Normal file

@ -0,0 +1,21 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.5
package plan9
import "syscall"
func fixwd() {
syscall.Fixwd()
}
func Getwd() (wd string, err error) {
return syscall.Getwd()
}
func Chdir(path string) error {
return syscall.Chdir(path)
}

23
vendor/golang.org/x/sys/plan9/pwd_plan9.go generated vendored Normal file

@ -0,0 +1,23 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.5
package plan9
func fixwd() {
}
func Getwd() (wd string, err error) {
fd, err := open(".", O_RDONLY)
if err != nil {
return "", err
}
defer Close(fd)
return Fd2path(fd)
}
func Chdir(path string) error {
return chdir(path)
}

Some files were not shown because too many files have changed in this diff.