mirror of https://github.com/containers/podman.git
				
				
				
			Merge pull request #18496 from containers/renovate/common-image-and-storage-deps
fix(deps): update github.com/containers/common digest to 3e93a76
This commit is contained in:
		
						commit
						fb03443274
					
				
							
								
								
									
										44
									
								
								go.mod
								
								
								
								
							
							
						
						
									
										44
									
								
								go.mod
								
								
								
								
							|  | @ -13,9 +13,9 @@ require ( | |||
| 	github.com/containernetworking/cni v1.1.2 | ||||
| 	github.com/containernetworking/plugins v1.2.0 | ||||
| 	github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07 | ||||
| 	github.com/containers/common v0.53.1-0.20230505014331-bc15b042e305 | ||||
| 	github.com/containers/common v0.53.1-0.20230506101404-3e93a76d461c | ||||
| 	github.com/containers/conmon v2.0.20+incompatible | ||||
| 	github.com/containers/image/v5 v5.25.0 | ||||
| 	github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e | ||||
| 	github.com/containers/libhvee v0.0.5 | ||||
| 	github.com/containers/ocicrypt v1.1.7 | ||||
| 	github.com/containers/psgo v1.8.0 | ||||
|  | @ -46,7 +46,7 @@ require ( | |||
| 	github.com/onsi/ginkgo/v2 v2.9.4 | ||||
| 	github.com/onsi/gomega v1.27.6 | ||||
| 	github.com/opencontainers/go-digest v1.0.0 | ||||
| 	github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b | ||||
| 	github.com/opencontainers/image-spec v1.1.0-rc3 | ||||
| 	github.com/opencontainers/runc v1.1.7 | ||||
| 	github.com/opencontainers/runtime-spec v1.1.0-rc.2 | ||||
| 	github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 | ||||
|  | @ -94,6 +94,7 @@ require ( | |||
| 	github.com/docker/docker-credential-helpers v0.7.0 // indirect | ||||
| 	github.com/felixge/httpsnoop v1.0.3 // indirect | ||||
| 	github.com/fsouza/go-dockerclient v1.9.7 // indirect | ||||
| 	github.com/go-chi/chi v4.1.2+incompatible // indirect | ||||
| 	github.com/go-jose/go-jose/v3 v3.0.0 // indirect | ||||
| 	github.com/go-logr/logr v1.2.4 // indirect | ||||
| 	github.com/go-logr/stdr v1.2.2 // indirect | ||||
|  | @ -103,20 +104,20 @@ require ( | |||
| 	github.com/go-openapi/jsonpointer v0.19.5 // indirect | ||||
| 	github.com/go-openapi/jsonreference v0.20.0 // indirect | ||||
| 	github.com/go-openapi/loads v0.21.2 // indirect | ||||
| 	github.com/go-openapi/runtime v0.25.0 // indirect | ||||
| 	github.com/go-openapi/spec v0.20.8 // indirect | ||||
| 	github.com/go-openapi/runtime v0.26.0 // indirect | ||||
| 	github.com/go-openapi/spec v0.20.9 // indirect | ||||
| 	github.com/go-openapi/strfmt v0.21.7 // indirect | ||||
| 	github.com/go-openapi/swag v0.22.3 // indirect | ||||
| 	github.com/go-openapi/validate v0.22.1 // indirect | ||||
| 	github.com/go-playground/locales v0.14.1 // indirect | ||||
| 	github.com/go-playground/universal-translator v0.18.1 // indirect | ||||
| 	github.com/go-playground/validator/v10 v10.12.0 // indirect | ||||
| 	github.com/go-playground/validator/v10 v10.13.0 // indirect | ||||
| 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect | ||||
| 	github.com/gogo/protobuf v1.3.2 // indirect | ||||
| 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect | ||||
| 	github.com/golang/protobuf v1.5.3 // indirect | ||||
| 	github.com/google/go-cmp v0.5.9 // indirect | ||||
| 	github.com/google/go-containerregistry v0.13.0 // indirect | ||||
| 	github.com/google/go-containerregistry v0.14.0 // indirect | ||||
| 	github.com/google/go-intervals v0.0.2 // indirect | ||||
| 	github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect | ||||
| 	github.com/google/trillian v1.5.1 // indirect | ||||
|  | @ -127,10 +128,10 @@ require ( | |||
| 	github.com/inconshreveable/mousetrap v1.1.0 // indirect | ||||
| 	github.com/jinzhu/copier v0.3.5 // indirect | ||||
| 	github.com/josharian/intern v1.0.0 // indirect | ||||
| 	github.com/klauspost/compress v1.16.4 // indirect | ||||
| 	github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect | ||||
| 	github.com/klauspost/compress v1.16.5 // indirect | ||||
| 	github.com/klauspost/pgzip v1.2.6 // indirect | ||||
| 	github.com/kr/fs v0.1.0 // indirect | ||||
| 	github.com/leodido/go-urn v1.2.2 // indirect | ||||
| 	github.com/leodido/go-urn v1.2.3 // indirect | ||||
| 	github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect | ||||
| 	github.com/mailru/easyjson v0.7.7 // indirect | ||||
| 	github.com/manifoldco/promptui v0.9.0 // indirect | ||||
|  | @ -154,34 +155,39 @@ require ( | |||
| 	github.com/rivo/uniseg v0.4.4 // indirect | ||||
| 	github.com/seccomp/libseccomp-golang v0.10.0 // indirect | ||||
| 	github.com/segmentio/ksuid v1.0.4 // indirect | ||||
| 	github.com/sigstore/fulcio v1.2.0 // indirect | ||||
| 	github.com/sigstore/rekor v1.1.0 // indirect | ||||
| 	github.com/sigstore/sigstore v1.6.0 // indirect | ||||
| 	github.com/sigstore/fulcio v1.3.1 // indirect | ||||
| 	github.com/sigstore/rekor v1.1.1 // indirect | ||||
| 	github.com/sigstore/sigstore v1.6.4 // indirect | ||||
| 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect | ||||
| 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect | ||||
| 	github.com/sylabs/sif/v2 v2.11.1 // indirect | ||||
| 	github.com/sylabs/sif/v2 v2.11.3 // indirect | ||||
| 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect | ||||
| 	github.com/theupdateframework/go-tuf v0.5.2 // indirect | ||||
| 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect | ||||
| 	github.com/transparency-dev/merkle v0.0.1 // indirect | ||||
| 	github.com/vbatts/tar-split v0.11.3 // indirect | ||||
| 	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect | ||||
| 	go.mongodb.org/mongo-driver v1.11.3 // indirect | ||||
| 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect | ||||
| 	go.opencensus.io v0.24.0 // indirect | ||||
| 	go.opentelemetry.io/otel v1.14.0 // indirect | ||||
| 	go.opentelemetry.io/otel/trace v1.14.0 // indirect | ||||
| 	go.opentelemetry.io/otel v1.15.0 // indirect | ||||
| 	go.opentelemetry.io/otel/trace v1.15.0 // indirect | ||||
| 	go.uber.org/atomic v1.10.0 // indirect | ||||
| 	go.uber.org/multierr v1.11.0 // indirect | ||||
| 	go.uber.org/zap v1.24.0 // indirect | ||||
| 	golang.org/x/crypto v0.8.0 // indirect | ||||
| 	golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect | ||||
| 	golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 // indirect | ||||
| 	golang.org/x/mod v0.10.0 // indirect | ||||
| 	golang.org/x/oauth2 v0.6.0 // indirect | ||||
| 	golang.org/x/oauth2 v0.7.0 // indirect | ||||
| 	golang.org/x/tools v0.8.0 // indirect | ||||
| 	google.golang.org/appengine v1.6.7 // indirect | ||||
| 	google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect | ||||
| 	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect | ||||
| 	google.golang.org/grpc v1.54.0 // indirect | ||||
| 	gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect | ||||
| 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect | ||||
| 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect | ||||
| 	gopkg.in/yaml.v2 v2.4.0 // indirect | ||||
| 	k8s.io/klog/v2 v2.90.1 // indirect | ||||
| ) | ||||
| 
 | ||||
| replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc | ||||
|  |  | |||
							
								
								
									
										103
									
								
								go.sum
								
								
								
								
							
							
						
						
									
										103
									
								
								go.sum
								
								
								
								
							|  | @ -91,6 +91,7 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W | |||
| github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= | ||||
| github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= | ||||
| github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= | ||||
| github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= | ||||
| github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= | ||||
| github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= | ||||
| github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= | ||||
|  | @ -239,12 +240,12 @@ github.com/containernetworking/plugins v1.2.0 h1:SWgg3dQG1yzUo4d9iD8cwSVh1VqI+bP | |||
| github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= | ||||
| github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07 h1:Bs2sNFh/fSYr4J6JJLFqzyn3dp6HhlA6ewFwRYUpeIE= | ||||
| github.com/containers/buildah v1.30.1-0.20230504052500-e925b5852e07/go.mod h1:6A/BK0YJLXL8+AqlbceKJrhUT+NtEgsvAc51F7TAllc= | ||||
| github.com/containers/common v0.53.1-0.20230505014331-bc15b042e305 h1:Ijv3qYyKISDij51qAByqFqbyGC+lMsrKKBiJ8a462F0= | ||||
| github.com/containers/common v0.53.1-0.20230505014331-bc15b042e305/go.mod h1:kckloq11mFgiftVFqKVmxxucp2PUlZ694wWFavmQe4I= | ||||
| github.com/containers/common v0.53.1-0.20230506101404-3e93a76d461c h1:NPf//8NAa6xjlj62eBbEBabu8LWVqxPRuweNAFCAYxs= | ||||
| github.com/containers/common v0.53.1-0.20230506101404-3e93a76d461c/go.mod h1:vAG2WNLK9d4umy56l413SS9xiJVe5m7LwOUSoi1x10k= | ||||
| github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg= | ||||
| github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I= | ||||
| github.com/containers/image/v5 v5.25.0 h1:TJ0unmalbU+scd0i3Txap2wjGsAnv06MSCwgn6bsizk= | ||||
| github.com/containers/image/v5 v5.25.0/go.mod h1:EKvys0WVlRFkDw26R8y52TuhV9Tfn0yq2luLX6W52Ls= | ||||
| github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e h1:9rH8hFLJjmwMkNAdFfXP3O6cAODKujsTn8JurumYz6I= | ||||
| github.com/containers/image/v5 v5.25.1-0.20230505072505-dc4a4be9cc1e/go.mod h1:fGnQk2T+xmEk1/yL9Ky6djJ2F86vBIeo6X14zZQ33iM= | ||||
| github.com/containers/libhvee v0.0.5 h1:5tUiF2eVe8XbVSPD/Os4dIU1gJWoQgtkQHIjQ5X7wpE= | ||||
| github.com/containers/libhvee v0.0.5/go.mod h1:AYsyMe44w9ylWWEZNW+IOzA7oZ2i/P9TChNljavhYMI= | ||||
| github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA= | ||||
|  | @ -372,6 +373,8 @@ github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXt | |||
| github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= | ||||
| github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= | ||||
| github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= | ||||
| github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= | ||||
| github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= | ||||
| github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= | ||||
| github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= | ||||
| github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= | ||||
|  | @ -384,6 +387,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 | |||
| github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= | ||||
| github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= | ||||
| github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= | ||||
| github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= | ||||
| github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= | ||||
| github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= | ||||
| github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= | ||||
|  | @ -413,14 +417,14 @@ github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXym | |||
| github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= | ||||
| github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= | ||||
| github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= | ||||
| github.com/go-openapi/runtime v0.25.0 h1:7yQTCdRbWhX8vnIjdzU8S00tBYf7Sg71EBeorlPHvhc= | ||||
| github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0DXgVFuMTneOs= | ||||
| github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= | ||||
| github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= | ||||
| github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= | ||||
| github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= | ||||
| github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= | ||||
| github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= | ||||
| github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= | ||||
| github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= | ||||
| github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= | ||||
| github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= | ||||
| github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= | ||||
| github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= | ||||
| github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= | ||||
|  | @ -440,9 +444,10 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o | |||
| github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= | ||||
| github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= | ||||
| github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= | ||||
| github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI= | ||||
| github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA= | ||||
| github.com/go-rod/rod v0.112.6 h1:zMirUmhsBeshMWyf285BD0UGtGq54HfThLDGSjcP3lU= | ||||
| github.com/go-playground/validator/v10 v10.13.0 h1:cFRQdfaSMCOSfGCCLB20MHvuoHb/s5G8L5pu2ppK5AQ= | ||||
| github.com/go-playground/validator/v10 v10.13.0/go.mod h1:dwu7+CG8/CtBiJFZDz4e+5Upb6OLw04gtBYw0mcG/z4= | ||||
| github.com/go-rod/rod v0.112.9 h1:uA/yLbB+t0UlqJcLJtK2pZrCNPzd15dOKRUEOnmnt9k= | ||||
| github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= | ||||
| github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= | ||||
| github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= | ||||
| github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= | ||||
|  | @ -503,6 +508,7 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU | |||
| github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= | ||||
| github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= | ||||
| github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= | ||||
| github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= | ||||
| github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= | ||||
| github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | ||||
| github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | ||||
|  | @ -540,8 +546,8 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN | |||
| github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= | ||||
| github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= | ||||
| github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= | ||||
| github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k= | ||||
| github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo= | ||||
| github.com/google/go-containerregistry v0.14.0 h1:z58vMqHxuwvAsVwvKEkmVBz2TlgBgH5k6koEXBtlYkw= | ||||
| github.com/google/go-containerregistry v0.14.0/go.mod h1:aiJ2fp/SXvkWgmYHioXnbMdlgB8eXiiYOY55gfN91Wk= | ||||
| github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM= | ||||
| github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= | ||||
| github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||
|  | @ -654,11 +660,11 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY | |||
| github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= | ||||
| github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= | ||||
| github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= | ||||
| github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU= | ||||
| github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= | ||||
| github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI= | ||||
| github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= | ||||
| github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= | ||||
| github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0= | ||||
| github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= | ||||
| github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= | ||||
| github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= | ||||
| github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= | ||||
| github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= | ||||
| github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= | ||||
|  | @ -675,10 +681,11 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= | |||
| github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= | ||||
| github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= | ||||
| github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= | ||||
| github.com/leodido/go-urn v1.2.2 h1:7z68G0FCGvDk646jz1AelTYNYWrTNm0bEcFAo147wt4= | ||||
| github.com/leodido/go-urn v1.2.2/go.mod h1:kUaIbLZWttglzwNuG0pgsh5vuV6u2YcGBYz1hIPjtOQ= | ||||
| github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= | ||||
| github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= | ||||
| github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I= | ||||
| github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0= | ||||
| github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= | ||||
| github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= | ||||
| github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= | ||||
| github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= | ||||
|  | @ -793,8 +800,8 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I | |||
| github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= | ||||
| github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= | ||||
| github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= | ||||
| github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= | ||||
| github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= | ||||
| github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8= | ||||
| github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= | ||||
| github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc h1:qjkUzmFsOFbQyjObybk40mRida83j5IHRaKzLGdBbEU= | ||||
| github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc/go.mod h1:wUOQGsiKae6VzA/UvlCK3cO+pHk8F2VQHlIoITEfMM8= | ||||
| github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= | ||||
|  | @ -842,7 +849,7 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf | |||
| github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= | ||||
| github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= | ||||
| github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= | ||||
| github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= | ||||
| github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM= | ||||
| github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= | ||||
| github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= | ||||
| github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= | ||||
|  | @ -883,7 +890,6 @@ github.com/rootless-containers/rootlesskit v1.1.0 h1:cRaRIYxY8oce4eE/zeAUZhgKu/4 | |||
| github.com/rootless-containers/rootlesskit v1.1.0/go.mod h1:H+o9ndNe7tS91WqU0/+vpvc+VaCd7TCIWaJjnV0ujUo= | ||||
| github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= | ||||
| github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= | ||||
| github.com/rwtodd/Go.Sed v0.0.0-20210816025313-55464686f9ef/go.mod h1:8AEUvGVi2uQ5b24BIhcr0GCcpd/RNAFWaN2CJFrWIIQ= | ||||
| github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= | ||||
| github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= | ||||
| github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= | ||||
|  | @ -894,12 +900,12 @@ github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c | |||
| github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= | ||||
| github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= | ||||
| github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= | ||||
| github.com/sigstore/fulcio v1.2.0 h1:I4H764cDbryKXkPtasUvo8bcix/7xLvkxWYWNp+JtWI= | ||||
| github.com/sigstore/fulcio v1.2.0/go.mod h1:FS7qpBvOEqs0uEh1+hJxzxtJistWN29ybLtAzFNUi0c= | ||||
| github.com/sigstore/rekor v1.1.0 h1:9fjPvW0WERE7VPtSSVSTbDLLOsrNx3RtiIeZ4/1tmDI= | ||||
| github.com/sigstore/rekor v1.1.0/go.mod h1:jEOGDGPMURBt9WR50N0rO7X8GZzLE3UQT+ln6BKJ/m0= | ||||
| github.com/sigstore/sigstore v1.6.0 h1:0fYHVoUlPU3WM8o3U1jT9SI2lqQE68XbG+qWncXaZC8= | ||||
| github.com/sigstore/sigstore v1.6.0/go.mod h1:+55pf6HZ15kf60c08W+GH95JQbAcnVyUBquQGSVdsto= | ||||
| github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y= | ||||
| github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU= | ||||
| github.com/sigstore/rekor v1.1.1 h1:JCeSss+qUHnCATmwAZh4zT9k0Frdyq0BjmRwewSfEy4= | ||||
| github.com/sigstore/rekor v1.1.1/go.mod h1:x/xK+HK08MiuJv+v4OxY/Oo3bhuz1DtJXNJrV7hrzvs= | ||||
| github.com/sigstore/sigstore v1.6.4 h1:jH4AzR7qlEH/EWzm+opSpxCfuUcjHL+LJPuQE7h40WE= | ||||
| github.com/sigstore/sigstore v1.6.4/go.mod h1:pjR64lBxnjoSrAr+Ydye/FV73IfrgtoYlAI11a8xMfA= | ||||
| github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= | ||||
| github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= | ||||
| github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= | ||||
|  | @ -954,8 +960,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO | |||
| github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= | ||||
| github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= | ||||
| github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= | ||||
| github.com/sylabs/sif/v2 v2.11.1 h1:d09yPukVa8b74wuy+QTA4Is3w8MH0UjO/xlWQUuFzpY= | ||||
| github.com/sylabs/sif/v2 v2.11.1/go.mod h1:i4GcKLOaT4ertznbsuf11d/G9zLEfUZa7YhrFc5L6YQ= | ||||
| github.com/sylabs/sif/v2 v2.11.3 h1:EQxi5zl6i5DsbVal9HHpk/zuSx7aNLeZBy8vmvFz838= | ||||
| github.com/sylabs/sif/v2 v2.11.3/go.mod h1:0ryivqvvsncJOJjU5QQIEc77a5zKK46F+urBXMdA07w= | ||||
| github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= | ||||
| github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= | ||||
| github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= | ||||
|  | @ -972,6 +978,8 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C | |||
| github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= | ||||
| github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= | ||||
| github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= | ||||
| github.com/transparency-dev/merkle v0.0.1 h1:T9/9gYB8uZl7VOJIhdwjALeRWlxUxSfDEysjfmx+L9E= | ||||
| github.com/transparency-dev/merkle v0.0.1/go.mod h1:B8FIw5LTq6DaULoHsVFRzYIUDkl8yuSwCdZnOZGKL/A= | ||||
| github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= | ||||
| github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= | ||||
| github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= | ||||
|  | @ -1016,6 +1024,7 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 | |||
| github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= | ||||
| github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= | ||||
| github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= | ||||
| github.com/ysmood/fetchup v0.2.2 h1:Qn8/q5uDW7szclt4sVXCFJ1TXup3hogz94OaLf6kloo= | ||||
| github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= | ||||
| github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= | ||||
| github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak= | ||||
|  | @ -1047,17 +1056,23 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= | |||
| go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= | ||||
| go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= | ||||
| go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= | ||||
| go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= | ||||
| go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= | ||||
| go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= | ||||
| go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= | ||||
| go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= | ||||
| go.opentelemetry.io/otel v1.15.0 h1:NIl24d4eiLJPM0vKn4HjLYM+UZf6gSfi9Z+NmCxkWbk= | ||||
| go.opentelemetry.io/otel v1.15.0/go.mod h1:qfwLEbWhLPk5gyWrne4XnF0lC8wtywbuJbgfAE3zbek= | ||||
| go.opentelemetry.io/otel/sdk v1.15.0 h1:jZTCkRRd08nxD6w7rIaZeDNGZGGQstH3SfLQ3ZsKICk= | ||||
| go.opentelemetry.io/otel/trace v1.15.0 h1:5Fwje4O2ooOxkfyqI/kJwxWotggDLix4BSAvpE1wlpo= | ||||
| go.opentelemetry.io/otel/trace v1.15.0/go.mod h1:CUsmE2Ht1CRkvE8OsMESvraoZrrcgD1J2W8GV1ev0Y4= | ||||
| go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= | ||||
| go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= | ||||
| go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= | ||||
| go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= | ||||
| go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= | ||||
| go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= | ||||
| go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= | ||||
| go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= | ||||
| go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= | ||||
| go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= | ||||
| go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= | ||||
| go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= | ||||
| golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= | ||||
| golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= | ||||
| golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= | ||||
|  | @ -1089,8 +1104,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 | |||
| golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= | ||||
| golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= | ||||
| golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= | ||||
| golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= | ||||
| golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= | ||||
| golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o= | ||||
| golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= | ||||
| golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= | ||||
| golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= | ||||
| golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= | ||||
|  | @ -1169,8 +1184,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr | |||
| golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= | ||||
| golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= | ||||
| golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= | ||||
| golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= | ||||
| golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= | ||||
| golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= | ||||
| golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= | ||||
| golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
|  | @ -1297,7 +1312,7 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb | |||
| golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= | ||||
| golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= | ||||
| golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= | ||||
| golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE= | ||||
| golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= | ||||
| golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
| golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
| golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
|  | @ -1400,8 +1415,8 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG | |||
| google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= | ||||
| google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= | ||||
| google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= | ||||
| google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= | ||||
| google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= | ||||
| google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= | ||||
| google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= | ||||
| google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= | ||||
| google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= | ||||
| google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= | ||||
|  | @ -1516,11 +1531,11 @@ k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= | |||
| k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= | ||||
| k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= | ||||
| k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= | ||||
| k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= | ||||
| k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= | ||||
| k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= | ||||
| k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= | ||||
| k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= | ||||
| k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= | ||||
| k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= | ||||
| k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= | ||||
| k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= | ||||
|  |  | |||
|  | @ -54,11 +54,13 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption | |||
| 	} | ||||
| 
 | ||||
| 	config := v1.Image{ | ||||
| 		Config:       ic, | ||||
| 		History:      history, | ||||
| 		OS:           options.OS, | ||||
| 		Architecture: options.Arch, | ||||
| 		Variant:      options.Variant, | ||||
| 		Config:  ic, | ||||
| 		History: history, | ||||
| 		Platform: v1.Platform{ | ||||
| 			OS:           options.OS, | ||||
| 			Architecture: options.Arch, | ||||
| 			Variant:      options.Variant, | ||||
| 		}, | ||||
| 	} | ||||
| 
 | ||||
| 	u, err := url.ParseRequestURI(path) | ||||
|  |  | |||
|  | @ -43,7 +43,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read | |||
| 	stream.reader = bar.ProxyReader(stream.reader) | ||||
| 
 | ||||
| 	// === Decrypt the stream, if required.
 | ||||
| 	decryptionStep, err := ic.c.blobPipelineDecryptionStep(&stream, srcInfo) | ||||
| 	decryptionStep, err := ic.blobPipelineDecryptionStep(&stream, srcInfo) | ||||
| 	if err != nil { | ||||
| 		return types.BlobInfo{}, err | ||||
| 	} | ||||
|  | @ -78,7 +78,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read | |||
| 		// Before relaxing this, see the original pull request’s review if there are other reasons to reject this.
 | ||||
| 		return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy") | ||||
| 	} | ||||
| 	encryptionStep, err := ic.c.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) | ||||
| 	encryptionStep, err := ic.blobPipelineEncryptionStep(&stream, toEncrypt, srcInfo, decryptionStep) | ||||
| 	if err != nil { | ||||
| 		return types.BlobInfo{}, err | ||||
| 	} | ||||
|  |  | |||
|  | @ -33,28 +33,33 @@ type bpDecryptionStepData struct { | |||
| // blobPipelineDecryptionStep updates *stream to decrypt if, it necessary.
 | ||||
| // srcInfo is only used for error messages.
 | ||||
| // Returns data for other steps; the caller should eventually use updateCryptoOperation.
 | ||||
| func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) { | ||||
| 	if isOciEncrypted(stream.info.MediaType) && c.ociDecryptConfig != nil { | ||||
| 		desc := imgspecv1.Descriptor{ | ||||
| 			Annotations: stream.info.Annotations, | ||||
| 		} | ||||
| 		reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err) | ||||
| 		} | ||||
| 
 | ||||
| 		stream.reader = reader | ||||
| 		stream.info.Digest = decryptedDigest | ||||
| 		stream.info.Size = -1 | ||||
| 		maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool { | ||||
| 			return strings.HasPrefix(k, "org.opencontainers.image.enc") | ||||
| 		}) | ||||
| func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.BlobInfo) (*bpDecryptionStepData, error) { | ||||
| 	if !isOciEncrypted(stream.info.MediaType) || ic.c.ociDecryptConfig == nil { | ||||
| 		return &bpDecryptionStepData{ | ||||
| 			decrypting: true, | ||||
| 			decrypting: false, | ||||
| 		}, nil | ||||
| 	} | ||||
| 
 | ||||
| 	if ic.cannotModifyManifestReason != "" { | ||||
| 		return nil, fmt.Errorf("layer %s should be decrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason) | ||||
| 	} | ||||
| 
 | ||||
| 	desc := imgspecv1.Descriptor{ | ||||
| 		Annotations: stream.info.Annotations, | ||||
| 	} | ||||
| 	reader, decryptedDigest, err := ocicrypt.DecryptLayer(ic.c.ociDecryptConfig, stream.reader, desc, false) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err) | ||||
| 	} | ||||
| 
 | ||||
| 	stream.reader = reader | ||||
| 	stream.info.Digest = decryptedDigest | ||||
| 	stream.info.Size = -1 | ||||
| 	maps.DeleteFunc(stream.info.Annotations, func(k string, _ string) bool { | ||||
| 		return strings.HasPrefix(k, "org.opencontainers.image.enc") | ||||
| 	}) | ||||
| 	return &bpDecryptionStepData{ | ||||
| 		decrypting: false, | ||||
| 		decrypting: true, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
|  | @ -74,34 +79,39 @@ type bpEncryptionStepData struct { | |||
| // blobPipelineEncryptionStep updates *stream to encrypt if, it required by toEncrypt.
 | ||||
| // srcInfo is primarily used for error messages.
 | ||||
| // Returns data for other steps; the caller should eventually call updateCryptoOperationAndAnnotations.
 | ||||
| func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo, | ||||
| func (ic *imageCopier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool, srcInfo types.BlobInfo, | ||||
| 	decryptionStep *bpDecryptionStepData) (*bpEncryptionStepData, error) { | ||||
| 	if toEncrypt && !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil { | ||||
| 		var annotations map[string]string | ||||
| 		if !decryptionStep.decrypting { | ||||
| 			annotations = srcInfo.Annotations | ||||
| 		} | ||||
| 		desc := imgspecv1.Descriptor{ | ||||
| 			MediaType:   srcInfo.MediaType, | ||||
| 			Digest:      srcInfo.Digest, | ||||
| 			Size:        srcInfo.Size, | ||||
| 			Annotations: annotations, | ||||
| 		} | ||||
| 		reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err) | ||||
| 		} | ||||
| 
 | ||||
| 		stream.reader = reader | ||||
| 		stream.info.Digest = "" | ||||
| 		stream.info.Size = -1 | ||||
| 	if !toEncrypt || isOciEncrypted(srcInfo.MediaType) || ic.c.ociEncryptConfig == nil { | ||||
| 		return &bpEncryptionStepData{ | ||||
| 			encrypting: true, | ||||
| 			finalizer:  finalizer, | ||||
| 			encrypting: false, | ||||
| 		}, nil | ||||
| 	} | ||||
| 
 | ||||
| 	if ic.cannotModifyManifestReason != "" { | ||||
| 		return nil, fmt.Errorf("layer %s should be encrypted, but we can’t modify the manifest: %s", srcInfo.Digest, ic.cannotModifyManifestReason) | ||||
| 	} | ||||
| 
 | ||||
| 	var annotations map[string]string | ||||
| 	if !decryptionStep.decrypting { | ||||
| 		annotations = srcInfo.Annotations | ||||
| 	} | ||||
| 	desc := imgspecv1.Descriptor{ | ||||
| 		MediaType:   srcInfo.MediaType, | ||||
| 		Digest:      srcInfo.Digest, | ||||
| 		Size:        srcInfo.Size, | ||||
| 		Annotations: annotations, | ||||
| 	} | ||||
| 	reader, finalizer, err := ocicrypt.EncryptLayer(ic.c.ociEncryptConfig, stream.reader, desc) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err) | ||||
| 	} | ||||
| 
 | ||||
| 	stream.reader = reader | ||||
| 	stream.info.Digest = "" | ||||
| 	stream.info.Size = -1 | ||||
| 	return &bpEncryptionStepData{ | ||||
| 		encrypting: false, | ||||
| 		encrypting: true, | ||||
| 		finalizer:  finalizer, | ||||
| 	}, nil | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -9,6 +9,7 @@ import ( | |||
| 	"github.com/containers/image/v5/internal/set" | ||||
| 	"github.com/containers/image/v5/manifest" | ||||
| 	"github.com/containers/image/v5/types" | ||||
| 	v1 "github.com/opencontainers/image-spec/specs-go/v1" | ||||
| 	"github.com/sirupsen/logrus" | ||||
| 	"golang.org/x/exp/slices" | ||||
| ) | ||||
|  | @ -18,6 +19,9 @@ import ( | |||
| // Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
 | ||||
| var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} | ||||
| 
 | ||||
| // ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption.
 | ||||
| var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest} | ||||
| 
 | ||||
| // orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
 | ||||
| type orderedSet struct { | ||||
| 	list     []string | ||||
|  | @ -76,11 +80,14 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest | |||
| 		destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType} | ||||
| 	} | ||||
| 
 | ||||
| 	if len(destSupportedManifestMIMETypes) == 0 && (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) { | ||||
| 		return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
 | ||||
| 			preferredMIMEType:       srcType, | ||||
| 			otherMIMETypeCandidates: []string{}, | ||||
| 		}, nil | ||||
| 	if len(destSupportedManifestMIMETypes) == 0 { | ||||
| 		if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) { | ||||
| 			return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
 | ||||
| 				preferredMIMEType:       srcType, | ||||
| 				otherMIMETypeCandidates: []string{}, | ||||
| 			}, nil | ||||
| 		} | ||||
| 		destSupportedManifestMIMETypes = ociEncryptionMIMETypes | ||||
| 	} | ||||
| 	supportedByDest := set.New[string]() | ||||
| 	for _, t := range destSupportedManifestMIMETypes { | ||||
|  | @ -88,6 +95,27 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest | |||
| 			supportedByDest.Add(t) | ||||
| 		} | ||||
| 	} | ||||
| 	if supportedByDest.Empty() { | ||||
| 		if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes
 | ||||
| 			return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty") | ||||
| 		} | ||||
| 		// We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved.
 | ||||
| 		if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption.
 | ||||
| 			return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting") | ||||
| 		} | ||||
| 		// destSupportedManifestMIMETypes has three possible origins:
 | ||||
| 		if in.forceManifestMIMEType != "" { // 1. forceManifestType specified
 | ||||
| 			return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption", | ||||
| 				in.forceManifestMIMEType) | ||||
| 		} | ||||
| 		if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes
 | ||||
| 			// Coverage: This should never happen, ociEncryptionMIMETypes all support encryption
 | ||||
| 			return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well") | ||||
| 		} | ||||
| 		// 3. destination does not support encryption.
 | ||||
| 		return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption", | ||||
| 			strings.Join(destSupportedManifestMIMETypes, ", ")) | ||||
| 	} | ||||
| 
 | ||||
| 	// destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
 | ||||
| 	// So, build a list of types to try in order of decreasing preference.
 | ||||
|  | @ -122,11 +150,13 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest | |||
| 
 | ||||
| 	// Finally, try anything else the destination supports.
 | ||||
| 	for _, t := range destSupportedManifestMIMETypes { | ||||
| 		prioritizedTypes.append(t) | ||||
| 		if supportedByDest.Contains(t) { | ||||
| 			prioritizedTypes.append(t) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) | ||||
| 	if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
 | ||||
| 	if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited  above), so this should never happen.
 | ||||
| 		return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types") | ||||
| 	} | ||||
| 	res := manifestConversionPlan{ | ||||
|  |  | |||
|  | @ -21,33 +21,49 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) { | |||
| 		host = sys.DockerDaemonHost | ||||
| 	} | ||||
| 
 | ||||
| 	// Sadly, unix:// sockets don't work transparently with dockerclient.NewClient.
 | ||||
| 	// They work fine with a nil httpClient; with a non-nil httpClient, the transport’s
 | ||||
| 	// TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket
 | ||||
| 	// regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport.
 | ||||
| 	opts := []dockerclient.Opt{ | ||||
| 		dockerclient.WithHost(host), | ||||
| 		dockerclient.WithVersion(defaultAPIVersion), | ||||
| 	} | ||||
| 
 | ||||
| 	// We conditionalize building the TLS configuration only to TLS sockets:
 | ||||
| 	//
 | ||||
| 	// We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client.
 | ||||
| 	// The dockerclient.Client implementation differentiates between
 | ||||
| 	// - Client.proto, which is ~how the connection is establishe (IP / AF_UNIX/Windows)
 | ||||
| 	// - Client.scheme, which is what is sent over the connection (HTTP with/without TLS).
 | ||||
| 	//
 | ||||
| 	// Similarly, if we want to communicate over plain HTTP on a TCP socket, we also need to set
 | ||||
| 	// TLSClientConfig to nil. This can be achieved by using the form `http://`
 | ||||
| 	// Only Client.proto is set from the URL in dockerclient.WithHost(),
 | ||||
| 	// Client.scheme is detected based on a http.Client.TLSClientConfig presence;
 | ||||
| 	// dockerclient.WithHTTPClient with a client that has TLSClientConfig set
 | ||||
| 	// will, by default, trigger an attempt to use TLS.
 | ||||
| 	//
 | ||||
| 	// So, don’t use WithHTTPClient for unix:// sockets at all.
 | ||||
| 	//
 | ||||
| 	// Similarly, if we want to communicate over plain HTTP on a TCP socket (http://),
 | ||||
| 	// we also should not set TLSClientConfig.  We continue to use WithHTTPClient
 | ||||
| 	// with our slightly non-default settings to avoid a behavior change on updates of c/image.
 | ||||
| 	//
 | ||||
| 	// Alternatively we could use dockerclient.WithScheme to drive the TLS/non-TLS logic
 | ||||
| 	// explicitly, but we would still want to set WithHTTPClient (differently) for https:// and http:// ;
 | ||||
| 	// so that would not be any simpler.
 | ||||
| 	serverURL, err := dockerclient.ParseHostURL(host) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var httpClient *http.Client | ||||
| 	if serverURL.Scheme != "unix" { | ||||
| 		if serverURL.Scheme == "http" { | ||||
| 			httpClient = httpConfig() | ||||
| 		} else { | ||||
| 			hc, err := tlsConfig(sys) | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			httpClient = hc | ||||
| 	switch serverURL.Scheme { | ||||
| 	case "unix": // Nothing
 | ||||
| 	case "http": | ||||
| 		hc := httpConfig() | ||||
| 		opts = append(opts, dockerclient.WithHTTPClient(hc)) | ||||
| 	default: | ||||
| 		hc, err := tlsConfig(sys) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		opts = append(opts, dockerclient.WithHTTPClient(hc)) | ||||
| 	} | ||||
| 
 | ||||
| 	return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) | ||||
| 	return dockerclient.NewClientWithOpts(opts...) | ||||
| } | ||||
| 
 | ||||
| func tlsConfig(sys *types.SystemContext) (*http.Client, error) { | ||||
|  |  | |||
|  | @ -226,9 +226,9 @@ func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.Ma | |||
| 		layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) | ||||
| 		switch m.m.LayersDescriptors[idx].MediaType { | ||||
| 		case manifest.DockerV2Schema2ForeignLayerMediaType: | ||||
| 			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable | ||||
| 			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
 | ||||
| 		case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: | ||||
| 			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip | ||||
| 			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
 | ||||
| 		case manifest.DockerV2SchemaLayerMediaTypeUncompressed: | ||||
| 			layers[idx].MediaType = imgspecv1.MediaTypeImageLayer | ||||
| 		case manifest.DockerV2Schema2LayerMediaType: | ||||
|  |  | |||
|  | @ -215,11 +215,11 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani | |||
| 	for idx := range layers { | ||||
| 		layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) | ||||
| 		switch layers[idx].MediaType { | ||||
| 		case imgspecv1.MediaTypeImageLayerNonDistributable: | ||||
| 		case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
 | ||||
| 			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType | ||||
| 		case imgspecv1.MediaTypeImageLayerNonDistributableGzip: | ||||
| 		case imgspecv1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
 | ||||
| 			layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip | ||||
| 		case imgspecv1.MediaTypeImageLayerNonDistributableZstd: | ||||
| 		case imgspecv1.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
 | ||||
| 			return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) | ||||
| 		case imgspecv1.MediaTypeImageLayer: | ||||
| 			layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed | ||||
|  |  | |||
|  | @ -42,7 +42,12 @@ type OCI1 struct { | |||
| // useful for validation anyway.
 | ||||
| func SupportedOCI1MediaType(m string) error { | ||||
| 	switch m { | ||||
| 	case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: | ||||
| 	case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, | ||||
| 		imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd, | ||||
| 		imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
 | ||||
| 		imgspecv1.MediaTypeImageManifest, | ||||
| 		imgspecv1.MediaTypeLayoutHeader, | ||||
| 		ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: | ||||
| 		return nil | ||||
| 	default: | ||||
| 		return fmt.Errorf("unsupported OCIv1 media type: %q", m) | ||||
|  | @ -102,9 +107,9 @@ func (m *OCI1) LayerInfos() []LayerInfo { | |||
| 
 | ||||
| var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{ | ||||
| 	{ | ||||
| 		mtsUncompressed:                    imgspecv1.MediaTypeImageLayerNonDistributable, | ||||
| 		compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, | ||||
| 		compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, | ||||
| 		mtsUncompressed:                    imgspecv1.MediaTypeImageLayerNonDistributable,     //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
 | ||||
| 		compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
 | ||||
| 		compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
 | ||||
| 	}, | ||||
| 	{ | ||||
| 		mtsUncompressed:                    imgspecv1.MediaTypeImageLayer, | ||||
|  | @ -166,7 +171,8 @@ func getEncryptedMediaType(mediatype string) (string, error) { | |||
| 	} | ||||
| 	unsuffixedMediatype := strings.Split(mediatype, "+")[0] | ||||
| 	switch unsuffixedMediatype { | ||||
| 	case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable: | ||||
| 	case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, | ||||
| 		imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
 | ||||
| 		return mediatype + "+encrypted", nil | ||||
| 	} | ||||
| 
 | ||||
|  |  | |||
|  | @ -188,14 +188,18 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { | |||
| 		return index.Manifests[0], nil | ||||
| 	} else { | ||||
| 		// if image specified, look through all manifests for a match
 | ||||
| 		var unsupportedMIMETypes []string | ||||
| 		for _, md := range index.Manifests { | ||||
| 			if md.MediaType != imgspecv1.MediaTypeImageManifest && md.MediaType != imgspecv1.MediaTypeImageIndex { | ||||
| 				continue | ||||
| 			} | ||||
| 			if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image { | ||||
| 				return md, nil | ||||
| 				if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex { | ||||
| 					return md, nil | ||||
| 				} | ||||
| 				unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType) | ||||
| 			} | ||||
| 		} | ||||
| 		if len(unsupportedMIMETypes) != 0 { | ||||
| 			return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes) | ||||
| 		} | ||||
| 	} | ||||
| 	return imgspecv1.Descriptor{}, ImageNotFoundError{ref} | ||||
| } | ||||
|  |  | |||
|  | @ -96,9 +96,11 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere | |||
| 
 | ||||
| 	created := sifImg.ModifiedAt() | ||||
| 	config := imgspecv1.Image{ | ||||
| 		Created:      &created, | ||||
| 		Architecture: sifImg.PrimaryArch(), | ||||
| 		OS:           "linux", | ||||
| 		Created: &created, | ||||
| 		Platform: imgspecv1.Platform{ | ||||
| 			Architecture: sifImg.PrimaryArch(), | ||||
| 			OS:           "linux", | ||||
| 		}, | ||||
| 		Config: imgspecv1.ImageConfig{ | ||||
| 			Cmd: commandLine, | ||||
| 		}, | ||||
|  | @ -180,7 +182,7 @@ func (s *sifImageSource) Close() error { | |||
| func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { | ||||
| 	switch info.Digest { | ||||
| 	case s.configDigest: | ||||
| 		return io.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil | ||||
| 		return io.NopCloser(bytes.NewReader(s.config)), int64(len(s.config)), nil | ||||
| 	case s.layerDigest: | ||||
| 		reader, err := os.Open(s.layerFile) | ||||
| 		if err != nil { | ||||
|  |  | |||
|  | @ -19,7 +19,6 @@ import ( | |||
| 	imgspecs "github.com/opencontainers/image-spec/specs-go" | ||||
| 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" | ||||
| 	"golang.org/x/exp/maps" | ||||
| 	"golang.org/x/exp/slices" | ||||
| ) | ||||
| 
 | ||||
| type tarballImageSource struct { | ||||
|  | @ -29,42 +28,46 @@ type tarballImageSource struct { | |||
| 	impl.DoesNotAffectLayerInfosForCopy | ||||
| 	stubs.NoGetBlobAtInitialize | ||||
| 
 | ||||
| 	reference  tarballReference | ||||
| 	filenames  []string | ||||
| 	diffIDs    []digest.Digest | ||||
| 	diffSizes  []int64 | ||||
| 	blobIDs    []digest.Digest | ||||
| 	blobSizes  []int64 | ||||
| 	blobTypes  []string | ||||
| 	config     []byte | ||||
| 	configID   digest.Digest | ||||
| 	configSize int64 | ||||
| 	manifest   []byte | ||||
| 	reference tarballReference | ||||
| 	blobs     map[digest.Digest]tarballBlob | ||||
| 	manifest  []byte | ||||
| } | ||||
| 
 | ||||
| // tarballBlob is a blob that tarballImagSource can return by GetBlob.
 | ||||
| type tarballBlob struct { | ||||
| 	contents []byte // or nil to read from filename below
 | ||||
| 	filename string // valid if contents == nil
 | ||||
| 	size     int64 | ||||
| } | ||||
| 
 | ||||
| func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { | ||||
| 	// Gather up the digests, sizes, and date information for all of the files.
 | ||||
| 	filenames := []string{} | ||||
| 	// Pick up the layer comment from the configuration's history list, if one is set.
 | ||||
| 	comment := "imported from tarball" | ||||
| 	if len(r.config.History) > 0 && r.config.History[0].Comment != "" { | ||||
| 		comment = r.config.History[0].Comment | ||||
| 	} | ||||
| 
 | ||||
| 	// Gather up the digests, sizes, and history information for all of the files.
 | ||||
| 	blobs := map[digest.Digest]tarballBlob{} | ||||
| 	diffIDs := []digest.Digest{} | ||||
| 	diffSizes := []int64{} | ||||
| 	blobIDs := []digest.Digest{} | ||||
| 	blobSizes := []int64{} | ||||
| 	blobTimes := []time.Time{} | ||||
| 	blobTypes := []string{} | ||||
| 	created := time.Time{} | ||||
| 	history := []imgspecv1.History{} | ||||
| 	layerDescriptors := []imgspecv1.Descriptor{} | ||||
| 	for _, filename := range r.filenames { | ||||
| 		var file *os.File | ||||
| 		var err error | ||||
| 		var blobSize int64 | ||||
| 		var blobTime time.Time | ||||
| 		var reader io.Reader | ||||
| 		var blobTime time.Time | ||||
| 		var blob tarballBlob | ||||
| 		if filename == "-" { | ||||
| 			blobSize = int64(len(r.stdin)) | ||||
| 			blobTime = time.Now() | ||||
| 			reader = bytes.NewReader(r.stdin) | ||||
| 			blobTime = time.Now() | ||||
| 			blob = tarballBlob{ | ||||
| 				contents: r.stdin, | ||||
| 				size:     int64(len(r.stdin)), | ||||
| 			} | ||||
| 		} else { | ||||
| 			file, err = os.Open(filename) | ||||
| 			file, err := os.Open(filename) | ||||
| 			if err != nil { | ||||
| 				return nil, fmt.Errorf("error opening %q for reading: %w", filename, err) | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			defer file.Close() | ||||
| 			reader = file | ||||
|  | @ -72,8 +75,11 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System | |||
| 			if err != nil { | ||||
| 				return nil, fmt.Errorf("error reading size of %q: %w", filename, err) | ||||
| 			} | ||||
| 			blobSize = fileinfo.Size() | ||||
| 			blobTime = fileinfo.ModTime() | ||||
| 			blob = tarballBlob{ | ||||
| 				filename: filename, | ||||
| 				size:     fileinfo.Size(), | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		// Default to assuming the layer is compressed.
 | ||||
|  | @ -96,8 +102,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System | |||
| 			uncompressed = nil | ||||
| 		} | ||||
| 		// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
 | ||||
| 		n, err := io.Copy(io.Discard, reader) | ||||
| 		if err != nil { | ||||
| 		if _, err := io.Copy(io.Discard, reader); err != nil { | ||||
| 			return nil, fmt.Errorf("error reading %q: %v", filename, err) | ||||
| 		} | ||||
| 		if uncompressed != nil { | ||||
|  | @ -105,38 +110,26 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System | |||
| 		} | ||||
| 
 | ||||
| 		// Grab our uncompressed and possibly-compressed digests and sizes.
 | ||||
| 		filenames = append(filenames, filename) | ||||
| 		diffIDs = append(diffIDs, diffIDdigester.Digest()) | ||||
| 		diffSizes = append(diffSizes, n) | ||||
| 		blobIDs = append(blobIDs, blobIDdigester.Digest()) | ||||
| 		blobSizes = append(blobSizes, blobSize) | ||||
| 		blobTimes = append(blobTimes, blobTime) | ||||
| 		blobTypes = append(blobTypes, layerType) | ||||
| 	} | ||||
| 		diffID := diffIDdigester.Digest() | ||||
| 		blobID := blobIDdigester.Digest() | ||||
| 		diffIDs = append(diffIDs, diffID) | ||||
| 		blobs[blobID] = blob | ||||
| 
 | ||||
| 	// Build the rootfs and history for the configuration blob.
 | ||||
| 	rootfs := imgspecv1.RootFS{ | ||||
| 		Type:    "layers", | ||||
| 		DiffIDs: diffIDs, | ||||
| 	} | ||||
| 	created := time.Time{} | ||||
| 	history := []imgspecv1.History{} | ||||
| 	// Pick up the layer comment from the configuration's history list, if one is set.
 | ||||
| 	comment := "imported from tarball" | ||||
| 	if len(r.config.History) > 0 && r.config.History[0].Comment != "" { | ||||
| 		comment = r.config.History[0].Comment | ||||
| 	} | ||||
| 	for i := range diffIDs { | ||||
| 		createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) | ||||
| 		history = append(history, imgspecv1.History{ | ||||
| 			Created:   &blobTimes[i], | ||||
| 			CreatedBy: createdBy, | ||||
| 			Created:   &blobTime, | ||||
| 			CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffID.Hex(), os.PathSeparator), | ||||
| 			Comment:   comment, | ||||
| 		}) | ||||
| 		// Use the mtime of the most recently modified file as the image's creation time.
 | ||||
| 		if created.Before(blobTimes[i]) { | ||||
| 			created = blobTimes[i] | ||||
| 		if created.Before(blobTime) { | ||||
| 			created = blobTime | ||||
| 		} | ||||
| 
 | ||||
| 		layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ | ||||
| 			Digest:    blobID, | ||||
| 			Size:      blob.size, | ||||
| 			MediaType: layerType, | ||||
| 		}) | ||||
| 	} | ||||
| 
 | ||||
| 	// Pick up other defaults from the config in the reference.
 | ||||
|  | @ -150,7 +143,10 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System | |||
| 	if config.OS == "" { | ||||
| 		config.OS = runtime.GOOS | ||||
| 	} | ||||
| 	config.RootFS = rootfs | ||||
| 	config.RootFS = imgspecv1.RootFS{ | ||||
| 		Type:    "layers", | ||||
| 		DiffIDs: diffIDs, | ||||
| 	} | ||||
| 	config.History = history | ||||
| 
 | ||||
| 	// Encode and digest the image configuration blob.
 | ||||
|  | @ -159,24 +155,19 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System | |||
| 		return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) | ||||
| 	} | ||||
| 	configID := digest.Canonical.FromBytes(configBytes) | ||||
| 	configSize := int64(len(configBytes)) | ||||
| 
 | ||||
| 	// Populate a manifest with the configuration blob and the file as the single layer.
 | ||||
| 	layerDescriptors := []imgspecv1.Descriptor{} | ||||
| 	for i := range blobIDs { | ||||
| 		layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ | ||||
| 			Digest:    blobIDs[i], | ||||
| 			Size:      blobSizes[i], | ||||
| 			MediaType: blobTypes[i], | ||||
| 		}) | ||||
| 	blobs[configID] = tarballBlob{ | ||||
| 		contents: configBytes, | ||||
| 		size:     int64(len(configBytes)), | ||||
| 	} | ||||
| 
 | ||||
| 	// Populate a manifest with the configuration blob and the layers.
 | ||||
| 	manifest := imgspecv1.Manifest{ | ||||
| 		Versioned: imgspecs.Versioned{ | ||||
| 			SchemaVersion: 2, | ||||
| 		}, | ||||
| 		Config: imgspecv1.Descriptor{ | ||||
| 			Digest:    configID, | ||||
| 			Size:      configSize, | ||||
| 			Size:      int64(len(configBytes)), | ||||
| 			MediaType: imgspecv1.MediaTypeImageConfig, | ||||
| 		}, | ||||
| 		Layers:      layerDescriptors, | ||||
|  | @ -196,17 +187,9 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System | |||
| 		}), | ||||
| 		NoGetBlobAtInitialize: stubs.NoGetBlobAt(r), | ||||
| 
 | ||||
| 		reference:  *r, | ||||
| 		filenames:  filenames, | ||||
| 		diffIDs:    diffIDs, | ||||
| 		diffSizes:  diffSizes, | ||||
| 		blobIDs:    blobIDs, | ||||
| 		blobSizes:  blobSizes, | ||||
| 		blobTypes:  blobTypes, | ||||
| 		config:     configBytes, | ||||
| 		configID:   configID, | ||||
| 		configSize: configSize, | ||||
| 		manifest:   manifestBytes, | ||||
| 		reference: *r, | ||||
| 		blobs:     blobs, | ||||
| 		manifest:  manifestBytes, | ||||
| 	} | ||||
| 	src.Compat = impl.AddCompat(src) | ||||
| 
 | ||||
|  | @ -221,24 +204,18 @@ func (is *tarballImageSource) Close() error { | |||
| // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
 | ||||
| // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
 | ||||
| func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { | ||||
| 	// We should only be asked about things in the manifest.  Maybe the configuration blob.
 | ||||
| 	if blobinfo.Digest == is.configID { | ||||
| 		return io.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil | ||||
| 	} | ||||
| 	// Maybe one of the layer blobs.
 | ||||
| 	i := slices.Index(is.blobIDs, blobinfo.Digest) | ||||
| 	if i == -1 { | ||||
| 	blob, ok := is.blobs[blobinfo.Digest] | ||||
| 	if !ok { | ||||
| 		return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) | ||||
| 	} | ||||
| 	// We want to read that layer: open the file or memory block and hand it back.
 | ||||
| 	if is.filenames[i] == "-" { | ||||
| 		return io.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil | ||||
| 	if blob.contents != nil { | ||||
| 		return io.NopCloser(bytes.NewReader(blob.contents)), int64(len(blob.contents)), nil | ||||
| 	} | ||||
| 	reader, err := os.Open(is.filenames[i]) | ||||
| 	reader, err := os.Open(blob.filename) | ||||
| 	if err != nil { | ||||
| 		return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) | ||||
| 		return nil, -1, err | ||||
| 	} | ||||
| 	return reader, is.blobSizes[i], nil | ||||
| 	return reader, blob.size, nil | ||||
| } | ||||
| 
 | ||||
| // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
 | ||||
|  |  | |||
|  | @ -8,10 +8,10 @@ const ( | |||
| 	// VersionMinor is for functionality in a backwards-compatible manner
 | ||||
| 	VersionMinor = 25 | ||||
| 	// VersionPatch is for backwards-compatible bug fixes
 | ||||
| 	VersionPatch = 0 | ||||
| 	VersionPatch = 1 | ||||
| 
 | ||||
| 	// VersionDev indicates development branch. Releases will be empty string.
 | ||||
| 	VersionDev = "" | ||||
| 	VersionDev = "-dev" | ||||
| ) | ||||
| 
 | ||||
| // Version is the specification version that the package types support.
 | ||||
|  |  | |||
|  | @ -0,0 +1,3 @@ | |||
| .idea | ||||
| *.sw? | ||||
| .vscode | ||||
|  | @ -0,0 +1,20 @@ | |||
| language: go | ||||
| 
 | ||||
| go: | ||||
|   - 1.10.x | ||||
|   - 1.11.x | ||||
|   - 1.12.x | ||||
|   - 1.13.x | ||||
|   - 1.14.x | ||||
| 
 | ||||
| script: | ||||
|   - go get -d -t ./... | ||||
|   - go vet ./... | ||||
|   - go test ./... | ||||
|   - > | ||||
|     go_version=$(go version); | ||||
|     if [ ${go_version:13:4} = "1.12" ]; then | ||||
|       go get -u golang.org/x/tools/cmd/goimports; | ||||
|       goimports -d -e ./ | grep '.*' && { echo; echo "Aborting due to non-empty goimports output."; exit 1; } || :; | ||||
|     fi | ||||
| 
 | ||||
|  | @ -0,0 +1,190 @@ | |||
| # Changelog | ||||
| 
 | ||||
| ## v4.1.2 (2020-06-02) | ||||
| 
 | ||||
| - fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution | ||||
| - fix to replace nested wildcards correctly in RoutePattern, thank you @unmultimedio for your contribution | ||||
| - History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2 | ||||
| 
 | ||||
| 
 | ||||
| ## v4.1.1 (2020-04-16) | ||||
| 
 | ||||
| - fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp | ||||
|   route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix! | ||||
| - new middleware.RouteHeaders as a simple router for request headers with wildcard support | ||||
| - History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1 | ||||
| 
 | ||||
| 
 | ||||
| ## v4.1.0 (2020-04-01) | ||||
| 
 | ||||
| - middleware.LogEntry: Write method on interface now passes the response header | ||||
|   and an extra interface type useful for custom logger implementations. | ||||
| - middleware.WrapResponseWriter: minor fix | ||||
| - middleware.Recoverer: a bit prettier | ||||
| - History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0 | ||||
| 
 | ||||
| 
 | ||||
| ## v4.0.4 (2020-03-24) | ||||
| 
 | ||||
| - middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496) | ||||
| - a few minor improvements and fixes | ||||
| - History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4 | ||||
| 
 | ||||
| 
 | ||||
| ## v4.0.3 (2020-01-09) | ||||
| 
 | ||||
| - core: fix regexp routing to include default value when param is not matched | ||||
| - middleware: rewrite of middleware.Compress | ||||
| - middleware: suppress http.ErrAbortHandler in middleware.Recoverer | ||||
| - History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3 | ||||
| 
 | ||||
| 
 | ||||
| ## v4.0.2 (2019-02-26) | ||||
| 
 | ||||
| - Minor fixes | ||||
| - History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2 | ||||
| 
 | ||||
| 
 | ||||
| ## v4.0.1 (2019-01-21) | ||||
| 
 | ||||
| - Fixes issue with compress middleware: #382 #385 | ||||
| - History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1 | ||||
| 
 | ||||
| 
 | ||||
| ## v4.0.0 (2019-01-10) | ||||
| 
 | ||||
| - chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8 | ||||
| - router: respond with 404 on router with no routes (#362) | ||||
| - router: additional check to ensure wildcard is at the end of a url pattern (#333) | ||||
| - middleware: deprecate use of http.CloseNotifier (#347) | ||||
| - middleware: fix RedirectSlashes to include query params on redirect (#334) | ||||
| - History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0 | ||||
| 
 | ||||
| 
 | ||||
| ## v3.3.4 (2019-01-07) | ||||
| 
 | ||||
| - Minor middleware improvements. No changes to core library/router. Moving v3 into its | ||||
| - own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11 | ||||
| - History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4 | ||||
| 
 | ||||
| 
 | ||||
| ## v3.3.3 (2018-08-27) | ||||
| 
 | ||||
| - Minor release | ||||
| - See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3 | ||||
| 
 | ||||
| 
 | ||||
| ## v3.3.2 (2017-12-22) | ||||
| 
 | ||||
| - Support to route trailing slashes on mounted sub-routers (#281) | ||||
| - middleware: new `ContentCharset` to check matching charsets. Thank you | ||||
|   @csucu for your community contribution! | ||||
| 
 | ||||
| 
 | ||||
| ## v3.3.1 (2017-11-20) | ||||
| 
 | ||||
| - middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types | ||||
| - middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value | ||||
| - Minor bug fixes | ||||
| 
 | ||||
| 
 | ||||
| ## v3.3.0 (2017-10-10) | ||||
| 
 | ||||
| - New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage | ||||
| - Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function | ||||
| 
 | ||||
| 
 | ||||
| ## v3.2.1 (2017-08-31) | ||||
| 
 | ||||
| - Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface | ||||
|   and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path | ||||
| - Add new `RouteMethod` to `*Context` | ||||
| - Add new `Routes` pointer to `*Context` | ||||
| - Add new `middleware.GetHead` to route missing HEAD requests to GET handler | ||||
| - Updated benchmarks (see README) | ||||
| 
 | ||||
| 
 | ||||
| ## v3.1.5 (2017-08-02) | ||||
| 
 | ||||
| - Setup golint and go vet for the project | ||||
| - As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler` | ||||
|   to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler` | ||||
| 
 | ||||
| 
 | ||||
| ## v3.1.0 (2017-07-10) | ||||
| 
 | ||||
| - Fix a few minor issues after v3 release | ||||
| - Move `docgen` sub-pkg to https://github.com/go-chi/docgen | ||||
| - Move `render` sub-pkg to https://github.com/go-chi/render | ||||
| - Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime  | ||||
|   suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in | ||||
|   https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage. | ||||
| 
 | ||||
| 
 | ||||
| ## v3.0.0 (2017-06-21) | ||||
| 
 | ||||
| - Major update to chi library with many exciting updates, but also some *breaking changes* | ||||
| - URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as | ||||
|   `/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the | ||||
|   same router | ||||
| - Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example: | ||||
|   `r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")` | ||||
| - Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as | ||||
|   `r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like | ||||
|   in `_examples/custom-handler` | ||||
| - Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their | ||||
|   own using file handler with the stdlib, see `_examples/fileserver` for an example | ||||
| - Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()` | ||||
| - Moved the chi project to its own organization, to allow chi-related community packages to | ||||
|   be easily discovered and supported, at: https://github.com/go-chi | ||||
| - *NOTE:* please update your import paths to `"github.com/go-chi/chi"` | ||||
| - *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2 | ||||
| 
 | ||||
| 
 | ||||
| ## v2.1.0 (2017-03-30) | ||||
| 
 | ||||
| - Minor improvements and update to the chi core library | ||||
| - Introduced a brand new `chi/render` sub-package to complete the story of building | ||||
|   APIs to offer a pattern for managing well-defined request / response payloads. Please | ||||
|   check out the updated `_examples/rest` example for how it works. | ||||
| - Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface | ||||
| 
 | ||||
| 
 | ||||
| ## v2.0.0 (2017-01-06) | ||||
| 
 | ||||
| - After many months of v2 being in an RC state with many companies and users running it in | ||||
|   production, the inclusion of some improvements to the middlewares, we are very pleased to | ||||
|   announce v2.0.0 of chi. | ||||
| 
 | ||||
| 
 | ||||
| ## v2.0.0-rc1 (2016-07-26) | ||||
| 
 | ||||
| - Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular | ||||
|   community `"net/context"` package has been included in the standard library as `"context"` and | ||||
|   utilized by `"net/http"` and `http.Request` to manage deadlines, cancelation signals and other | ||||
|   request-scoped values. We're very excited about the new context addition and are proud to | ||||
|   introduce chi v2, a minimal and powerful routing package for building large HTTP services, | ||||
|   with zero external dependencies. Chi focuses on idiomatic design and encourages the use of  | ||||
|   stdlib HTTP handlers and middlwares. | ||||
| - chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc` | ||||
| - chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()` | ||||
| - chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`, | ||||
|   which provides direct access to URL routing parameters, the routing path and the matching | ||||
|   routing patterns. | ||||
| - Users upgrading from chi v1 to v2, need to: | ||||
|   1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to | ||||
|      the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)` | ||||
|   2. Use `chi.URLParam(r *http.Request, paramKey string) string` | ||||
|      or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value | ||||
| 
 | ||||
| 
 | ||||
| ## v1.0.0 (2016-07-01) | ||||
| 
 | ||||
| - Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older. | ||||
| 
 | ||||
| 
 | ||||
| ## v0.9.0 (2016-03-31) | ||||
| 
 | ||||
| - Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33) | ||||
| - BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters | ||||
|   has changed to: `chi.URLParam(ctx, "id")` | ||||
|  | @ -0,0 +1,31 @@ | |||
| # Contributing | ||||
| 
 | ||||
| ## Prerequisites | ||||
| 
 | ||||
| 1. [Install Go][go-install]. | ||||
| 2. Download the sources and switch the working directory: | ||||
| 
 | ||||
|     ```bash | ||||
|     go get -u -d github.com/go-chi/chi | ||||
|     cd $GOPATH/src/github.com/go-chi/chi | ||||
|     ``` | ||||
| 
 | ||||
| ## Submitting a Pull Request | ||||
| 
 | ||||
| A typical workflow is: | ||||
| 
 | ||||
| 1. [Fork the repository.][fork] [This tip maybe also helpful.][go-fork-tip] | ||||
| 2. [Create a topic branch.][branch] | ||||
| 3. Add tests for your change. | ||||
| 4. Run `go test`. If your tests pass, return to the step 3. | ||||
| 5. Implement the change and ensure the steps from the previous step pass. | ||||
| 6. Run `goimports -w .`, to ensure the new code conforms to Go formatting guideline. | ||||
| 7. [Add, commit and push your changes.][git-help] | ||||
| 8. [Submit a pull request.][pull-req] | ||||
| 
 | ||||
| [go-install]: https://golang.org/doc/install | ||||
| [go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html | ||||
| [fork]: https://help.github.com/articles/fork-a-repo | ||||
| [branch]: http://learn.github.com/p/branching.html | ||||
| [git-help]: https://guides.github.com | ||||
| [pull-req]: https://help.github.com/articles/using-pull-requests | ||||
|  | @ -0,0 +1,20 @@ | |||
| Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc. | ||||
| 
 | ||||
| MIT License | ||||
| 
 | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy of | ||||
| this software and associated documentation files (the "Software"), to deal in | ||||
| the Software without restriction, including without limitation the rights to | ||||
| use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of | ||||
| the Software, and to permit persons to whom the Software is furnished to do so, | ||||
| subject to the following conditions: | ||||
| 
 | ||||
| The above copyright notice and this permission notice shall be included in all | ||||
| copies or substantial portions of the Software. | ||||
| 
 | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS | ||||
| FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR | ||||
| COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER | ||||
| IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||||
| CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||||
|  | @ -0,0 +1,441 @@ | |||
| # <img alt="chi" src="https://cdn.rawgit.com/go-chi/chi/master/_examples/chi.svg" width="220" /> | ||||
| 
 | ||||
| 
 | ||||
| [![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis] | ||||
| 
 | ||||
| `chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's | ||||
| especially good at helping you write large REST API services that are kept maintainable as your | ||||
| project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to | ||||
| handle signaling, cancelation and request-scoped values across a handler chain. | ||||
| 
 | ||||
| The focus of the project has been to seek out an elegant and comfortable design for writing | ||||
| REST API servers, written during the development of the Pressly API service that powers our | ||||
| public API service, which in turn powers all of our client-side applications. | ||||
| 
 | ||||
| The key considerations of chi's design are: project structure, maintainability, standard http | ||||
| handlers (stdlib-only), developer productivity, and deconstructing a large system into many small | ||||
| parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also | ||||
| included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too! | ||||
| 
 | ||||
| ## Install | ||||
| 
 | ||||
| `go get -u github.com/go-chi/chi` | ||||
| 
 | ||||
| 
 | ||||
| ## Features | ||||
| 
 | ||||
| * **Lightweight** - cloc'd in ~1000 LOC for the chi router | ||||
| * **Fast** - yes, see [benchmarks](#benchmarks) | ||||
| * **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http` | ||||
| * **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and subrouter mounting | ||||
| * **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts | ||||
| * **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91)) | ||||
| * **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown | ||||
| * **No external dependencies** - plain ol' Go stdlib + net/http | ||||
| 
 | ||||
| 
 | ||||
| ## Examples | ||||
| 
 | ||||
| See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples. | ||||
| 
 | ||||
| 
 | ||||
| **As easy as:** | ||||
| 
 | ||||
| ```go | ||||
| package main | ||||
| 
 | ||||
| import ( | ||||
| 	"net/http" | ||||
| 
 | ||||
| 	"github.com/go-chi/chi" | ||||
| 	"github.com/go-chi/chi/middleware" | ||||
| ) | ||||
| 
 | ||||
| func main() { | ||||
| 	r := chi.NewRouter() | ||||
| 	r.Use(middleware.Logger) | ||||
| 	r.Get("/", func(w http.ResponseWriter, r *http.Request) { | ||||
| 		w.Write([]byte("welcome")) | ||||
| 	}) | ||||
| 	http.ListenAndServe(":3000", r) | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| **REST Preview:** | ||||
| 
 | ||||
| Here is a little preview of how routing looks like with chi. Also take a look at the generated routing docs | ||||
| in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in | ||||
| Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)). | ||||
| 
 | ||||
| I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed | ||||
| above, they will show you all the features of chi and serve as a good form of documentation. | ||||
| 
 | ||||
| ```go | ||||
| import ( | ||||
|   //... | ||||
|   "context" | ||||
|   "github.com/go-chi/chi" | ||||
|   "github.com/go-chi/chi/middleware" | ||||
| ) | ||||
| 
 | ||||
| func main() { | ||||
|   r := chi.NewRouter() | ||||
| 
 | ||||
|   // A good base middleware stack | ||||
|   r.Use(middleware.RequestID) | ||||
|   r.Use(middleware.RealIP) | ||||
|   r.Use(middleware.Logger) | ||||
|   r.Use(middleware.Recoverer) | ||||
| 
 | ||||
|   // Set a timeout value on the request context (ctx), that will signal | ||||
|   // through ctx.Done() that the request has timed out and further | ||||
|   // processing should be stopped. | ||||
|   r.Use(middleware.Timeout(60 * time.Second)) | ||||
| 
 | ||||
|   r.Get("/", func(w http.ResponseWriter, r *http.Request) { | ||||
|     w.Write([]byte("hi")) | ||||
|   }) | ||||
| 
 | ||||
|   // RESTy routes for "articles" resource | ||||
|   r.Route("/articles", func(r chi.Router) { | ||||
|     r.With(paginate).Get("/", listArticles)                           // GET /articles | ||||
|     r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017 | ||||
| 
 | ||||
|     r.Post("/", createArticle)                                        // POST /articles | ||||
|     r.Get("/search", searchArticles)                                  // GET /articles/search | ||||
| 
 | ||||
|     // Regexp url parameters: | ||||
|     r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug)                // GET /articles/home-is-toronto | ||||
| 
 | ||||
|     // Subrouters: | ||||
|     r.Route("/{articleID}", func(r chi.Router) { | ||||
|       r.Use(ArticleCtx) | ||||
|       r.Get("/", getArticle)                                          // GET /articles/123 | ||||
|       r.Put("/", updateArticle)                                       // PUT /articles/123 | ||||
|       r.Delete("/", deleteArticle)                                    // DELETE /articles/123 | ||||
|     }) | ||||
|   }) | ||||
| 
 | ||||
|   // Mount the admin sub-router | ||||
|   r.Mount("/admin", adminRouter()) | ||||
| 
 | ||||
|   http.ListenAndServe(":3333", r) | ||||
| } | ||||
| 
 | ||||
| func ArticleCtx(next http.Handler) http.Handler { | ||||
|   return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
|     articleID := chi.URLParam(r, "articleID") | ||||
|     article, err := dbGetArticle(articleID) | ||||
|     if err != nil { | ||||
|       http.Error(w, http.StatusText(404), 404) | ||||
|       return | ||||
|     } | ||||
|     ctx := context.WithValue(r.Context(), "article", article) | ||||
|     next.ServeHTTP(w, r.WithContext(ctx)) | ||||
|   }) | ||||
| } | ||||
| 
 | ||||
| func getArticle(w http.ResponseWriter, r *http.Request) { | ||||
|   ctx := r.Context() | ||||
|   article, ok := ctx.Value("article").(*Article) | ||||
|   if !ok { | ||||
|     http.Error(w, http.StatusText(422), 422) | ||||
|     return | ||||
|   } | ||||
|   w.Write([]byte(fmt.Sprintf("title:%s", article.Title))) | ||||
| } | ||||
| 
 | ||||
| // A completely separate router for administrator routes | ||||
| func adminRouter() http.Handler { | ||||
|   r := chi.NewRouter() | ||||
|   r.Use(AdminOnly) | ||||
|   r.Get("/", adminIndex) | ||||
|   r.Get("/accounts", adminListAccounts) | ||||
|   return r | ||||
| } | ||||
| 
 | ||||
| func AdminOnly(next http.Handler) http.Handler { | ||||
|   return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
|     ctx := r.Context() | ||||
|     perm, ok := ctx.Value("acl.permission").(YourPermissionType) | ||||
|     if !ok || !perm.IsAdmin() { | ||||
|       http.Error(w, http.StatusText(403), 403) | ||||
|       return | ||||
|     } | ||||
|     next.ServeHTTP(w, r) | ||||
|   }) | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| ## Router design | ||||
| 
 | ||||
| chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree). | ||||
| The router is fully compatible with `net/http`. | ||||
| 
 | ||||
| Built on top of the tree is the `Router` interface: | ||||
| 
 | ||||
| ```go | ||||
| // Router consisting of the core routing methods used by chi's Mux, | ||||
| // using only the standard net/http. | ||||
| type Router interface { | ||||
| 	http.Handler | ||||
| 	Routes | ||||
| 
 | ||||
| 	// Use appends one or more middlewares onto the Router stack. | ||||
| 	Use(middlewares ...func(http.Handler) http.Handler) | ||||
| 
 | ||||
| 	// With adds inline middlewares for an endpoint handler. | ||||
| 	With(middlewares ...func(http.Handler) http.Handler) Router | ||||
| 
 | ||||
| 	// Group adds a new inline-Router along the current routing | ||||
| 	// path, with a fresh middleware stack for the inline-Router. | ||||
| 	Group(fn func(r Router)) Router | ||||
| 
 | ||||
| 	// Route mounts a sub-Router along a `pattern` string. | ||||
| 	Route(pattern string, fn func(r Router)) Router | ||||
| 
 | ||||
| 	// Mount attaches another http.Handler along ./pattern/* | ||||
| 	Mount(pattern string, h http.Handler) | ||||
| 
 | ||||
| 	// Handle and HandleFunc adds routes for `pattern` that matches | ||||
| 	// all HTTP methods. | ||||
| 	Handle(pattern string, h http.Handler) | ||||
| 	HandleFunc(pattern string, h http.HandlerFunc) | ||||
| 
 | ||||
| 	// Method and MethodFunc adds routes for `pattern` that matches | ||||
| 	// the `method` HTTP method. | ||||
| 	Method(method, pattern string, h http.Handler) | ||||
| 	MethodFunc(method, pattern string, h http.HandlerFunc) | ||||
| 
 | ||||
| 	// HTTP-method routing along `pattern` | ||||
| 	Connect(pattern string, h http.HandlerFunc) | ||||
| 	Delete(pattern string, h http.HandlerFunc) | ||||
| 	Get(pattern string, h http.HandlerFunc) | ||||
| 	Head(pattern string, h http.HandlerFunc) | ||||
| 	Options(pattern string, h http.HandlerFunc) | ||||
| 	Patch(pattern string, h http.HandlerFunc) | ||||
| 	Post(pattern string, h http.HandlerFunc) | ||||
| 	Put(pattern string, h http.HandlerFunc) | ||||
| 	Trace(pattern string, h http.HandlerFunc) | ||||
| 
 | ||||
| 	// NotFound defines a handler to respond whenever a route could | ||||
| 	// not be found. | ||||
| 	NotFound(h http.HandlerFunc) | ||||
| 
 | ||||
| 	// MethodNotAllowed defines a handler to respond whenever a method is | ||||
| 	// not allowed. | ||||
| 	MethodNotAllowed(h http.HandlerFunc) | ||||
| } | ||||
| 
 | ||||
| // Routes interface adds two methods for router traversal, which is also | ||||
| // used by the github.com/go-chi/docgen package to generate documentation for Routers. | ||||
| type Routes interface { | ||||
| 	// Routes returns the routing tree in an easily traversable structure. | ||||
| 	Routes() []Route | ||||
| 
 | ||||
| 	// Middlewares returns the list of middlewares in use by the router. | ||||
| 	Middlewares() Middlewares | ||||
| 
 | ||||
| 	// Match searches the routing tree for a handler that matches | ||||
| 	// the method/path - similar to routing a http request, but without | ||||
| 	// executing the handler thereafter. | ||||
| 	Match(rctx *Context, method, path string) bool | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern | ||||
| supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters | ||||
| can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters | ||||
| and `chi.URLParam(r, "*")` for a wildcard parameter. | ||||
| 
 | ||||
| 
 | ||||
| ### Middleware handlers | ||||
| 
 | ||||
| chi's middlewares are just stdlib net/http middleware handlers. There is nothing special | ||||
| about them, which means the router and all the tooling is designed to be compatible and | ||||
| friendly with any middleware in the community. This offers much better extensibility and reuse | ||||
| of packages and is at the heart of chi's purpose. | ||||
| 
 | ||||
| Here is an example of a standard net/http middleware handler using the new request context | ||||
| available in Go. This middleware sets a hypothetical user identifier on the request | ||||
| context and calls the next handler in the chain. | ||||
| 
 | ||||
| ```go | ||||
| // HTTP middleware setting a value on the request context | ||||
| func MyMiddleware(next http.Handler) http.Handler { | ||||
|   return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
|     ctx := context.WithValue(r.Context(), "user", "123") | ||||
|     next.ServeHTTP(w, r.WithContext(ctx)) | ||||
|   }) | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| ### Request handlers | ||||
| 
 | ||||
| chi uses standard net/http request handlers. This little snippet is an example of a http.Handler | ||||
| func that reads a user identifier from the request context - hypothetically, identifying | ||||
| the user sending an authenticated request, validated+set by a previous middleware handler. | ||||
| 
 | ||||
| ```go | ||||
| // HTTP handler accessing data from the request context. | ||||
| func MyRequestHandler(w http.ResponseWriter, r *http.Request) { | ||||
|   user := r.Context().Value("user").(string) | ||||
|   w.Write([]byte(fmt.Sprintf("hi %s", user))) | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| ### URL parameters | ||||
| 
 | ||||
| chi's router parses and stores URL parameters right onto the request context. Here is | ||||
| an example of how to access URL params in your net/http handlers. And of course, middlewares | ||||
| are able to access the same information. | ||||
| 
 | ||||
| ```go | ||||
| // HTTP handler accessing the url routing parameters. | ||||
| func MyRequestHandler(w http.ResponseWriter, r *http.Request) { | ||||
|   userID := chi.URLParam(r, "userID") // from a route like /users/{userID} | ||||
| 
 | ||||
|   ctx := r.Context() | ||||
|   key := ctx.Value("key").(string) | ||||
| 
 | ||||
|   w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key))) | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| 
 | ||||
| ## Middlewares | ||||
| 
 | ||||
| chi comes equipped with an optional `middleware` package, providing a suite of standard | ||||
| `net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible | ||||
| with `net/http` can be used with chi's mux. | ||||
| 
 | ||||
| ### Core middlewares | ||||
| 
 | ||||
| ----------------------------------------------------------------------------------------------------------- | ||||
| | chi/middleware Handler | description                                                                    | | ||||
| |:----------------------|:--------------------------------------------------------------------------------- | ||||
| | AllowContentType      | Explicit whitelist of accepted request Content-Types                            | | ||||
| | BasicAuth             | Basic HTTP authentication                                                       | | ||||
| | Compress              | Gzip compression for clients that accept compressed responses                   | | ||||
| | GetHead               | Automatically route undefined HEAD requests to GET handlers                     | | ||||
| Heartbeat             | Monitoring endpoint to check the server's pulse                                 | | ||||
| | Logger                | Logs the start and end of each request with the elapsed processing time         | | ||||
| | NoCache               | Sets response headers to prevent clients from caching                           | | ||||
| | Profiler              | Easily attach net/http/pprof to your routers                                    | | ||||
| | RealIP                | Sets a http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP         | | ||||
| | Recoverer             | Gracefully absorb panics and prints the stack trace                             | | ||||
| | RequestID             | Injects a request ID into the context of each request                           | | ||||
| | RedirectSlashes       | Redirect slashes on routing paths                                               | | ||||
| | SetHeader             | Short-hand middleware to set a response header key/value                        | | ||||
| | StripSlashes          | Strip slashes on routing paths                                                  | | ||||
| | Throttle              | Puts a ceiling on the number of concurrent requests                             | | ||||
| | Timeout               | Signals to the request context when the timeout deadline is reached             | | ||||
| | URLFormat             | Parse extension from url and put it on request context                          | | ||||
| | WithValue             | Short-hand middleware to set a key/value on the request context                 | | ||||
| ----------------------------------------------------------------------------------------------------------- | ||||
| 
 | ||||
| ### Extra middlewares & packages | ||||
| 
 | ||||
| Please see https://github.com/go-chi for additional packages. | ||||
| 
 | ||||
| -------------------------------------------------------------------------------------------------------------------- | ||||
| | package                                            | description                                                 | | ||||
| |:---------------------------------------------------|:------------------------------------------------------------- | ||||
| | [cors](https://github.com/go-chi/cors)             | Cross-origin resource sharing (CORS)                        | | ||||
| | [docgen](https://github.com/go-chi/docgen)         | Print chi.Router routes at runtime                          | | ||||
| | [jwtauth](https://github.com/go-chi/jwtauth)       | JWT authentication                                          | | ||||
| | [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing                           | | ||||
| | [httplog](https://github.com/go-chi/httplog)       | Small but powerful structured HTTP request logging          | | ||||
| | [httprate](https://github.com/go-chi/httprate)     | HTTP request rate limiter                                   | | ||||
| | [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library                    | | ||||
| | [httpvcr](https://github.com/go-chi/httpvcr)       | Write deterministic tests for external sources              | | ||||
| | [stampede](https://github.com/go-chi/stampede)     | HTTP request coalescer                                      | | ||||
| -------------------------------------------------------------------------------------------------------------------- | ||||
| 
 | ||||
| please [submit a PR](./CONTRIBUTING.md) if you'd like to include a link to a chi-compatible middleware | ||||
| 
 | ||||
| 
 | ||||
| ## context? | ||||
| 
 | ||||
| `context` is a tiny pkg that provides simple interface to signal context across call stacks | ||||
| and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani) | ||||
| and is available in stdlib since go1.7. | ||||
| 
 | ||||
| Learn more at https://blog.golang.org/context | ||||
| 
 | ||||
| and.. | ||||
| * Docs: https://golang.org/pkg/context | ||||
| * Source: https://github.com/golang/go/tree/master/src/context | ||||
| 
 | ||||
| 
 | ||||
| ## Benchmarks | ||||
| 
 | ||||
| The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark | ||||
| 
 | ||||
| Results as of Jan 9, 2019 with Go 1.11.4 on Linux X1 Carbon laptop | ||||
| 
 | ||||
| ```shell | ||||
| BenchmarkChi_Param            3000000         475 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_Param5           2000000         696 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_Param20          1000000        1275 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_ParamWrite       3000000         505 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_GithubStatic     3000000         508 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_GithubParam      2000000         669 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_GithubAll          10000      134627 ns/op     87699 B/op    609 allocs/op | ||||
| BenchmarkChi_GPlusStatic      3000000         402 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_GPlusParam       3000000         500 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_GPlus2Params     3000000         586 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_GPlusAll          200000        7237 ns/op      5616 B/op     39 allocs/op | ||||
| BenchmarkChi_ParseStatic      3000000         408 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_ParseParam       3000000         488 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_Parse2Params     3000000         551 ns/op       432 B/op      3 allocs/op | ||||
| BenchmarkChi_ParseAll          100000       13508 ns/op     11232 B/op     78 allocs/op | ||||
| BenchmarkChi_StaticAll          20000       81933 ns/op     67826 B/op    471 allocs/op | ||||
| ``` | ||||
| 
 | ||||
| Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc | ||||
| 
 | ||||
NOTE: the allocs in the benchmark above are from the calls to http.Request's | ||||
`WithContext(context.Context)` method that clones the http.Request, sets the `Context()` | ||||
on the duplicated (alloc'd) request, and returns the new request object. This is just | ||||
how setting context on a request in Go works. | ||||
| 
 | ||||
| 
 | ||||
| ## Credits | ||||
| 
 | ||||
| * Carl Jackson for https://github.com/zenazn/goji | ||||
|   * Parts of chi's thinking comes from goji, and chi's middleware package | ||||
|     sources from goji. | ||||
| * Armon Dadgar for https://github.com/armon/go-radix | ||||
| * Contributions: [@VojtechVitek](https://github.com/VojtechVitek) | ||||
| 
 | ||||
| We'll be more than happy to see [your contributions](./CONTRIBUTING.md)! | ||||
| 
 | ||||
| 
 | ||||
| ## Beyond REST | ||||
| 
 | ||||
| chi is just a http router that lets you decompose request handling into many smaller layers. | ||||
| Many companies use chi to write REST services for their public APIs. But, REST is just a convention | ||||
| for managing state via HTTP, and there's a lot of other pieces required to write a complete client-server | ||||
| system or network of microservices. | ||||
| 
 | ||||
| Looking beyond REST, I also recommend some newer works in the field: | ||||
| * [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen | ||||
| * [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs | ||||
| * [graphql](https://github.com/99designs/gqlgen) - Declarative query language | ||||
| * [NATS](https://nats.io) - lightweight pub-sub | ||||
| 
 | ||||
| 
 | ||||
| ## License | ||||
| 
 | ||||
| Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka) | ||||
| 
 | ||||
| Licensed under [MIT License](./LICENSE) | ||||
| 
 | ||||
| [GoDoc]: https://godoc.org/github.com/go-chi/chi | ||||
| [GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg | ||||
| [Travis]: https://travis-ci.org/go-chi/chi | ||||
| [Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master | ||||
|  | @ -0,0 +1,49 @@ | |||
| package chi | ||||
| 
 | ||||
| import "net/http" | ||||
| 
 | ||||
| // Chain returns a Middlewares type from a slice of middleware handlers.
 | ||||
| func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares { | ||||
| 	return Middlewares(middlewares) | ||||
| } | ||||
| 
 | ||||
| // Handler builds and returns a http.Handler from the chain of middlewares,
 | ||||
| // with `h http.Handler` as the final handler.
 | ||||
| func (mws Middlewares) Handler(h http.Handler) http.Handler { | ||||
| 	return &ChainHandler{mws, h, chain(mws, h)} | ||||
| } | ||||
| 
 | ||||
| // HandlerFunc builds and returns a http.Handler from the chain of middlewares,
 | ||||
| // with `h http.Handler` as the final handler.
 | ||||
| func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler { | ||||
| 	return &ChainHandler{mws, h, chain(mws, h)} | ||||
| } | ||||
| 
 | ||||
// ChainHandler is a http.Handler with support for handler composition and
// execution.
type ChainHandler struct {
	Middlewares Middlewares  // the middleware stack used to build the chain
	Endpoint    http.Handler // the final handler at the end of the chain
	chain       http.Handler // precomputed composition of Middlewares around Endpoint
}

// ServeHTTP dispatches the request into the precomputed middleware chain.
func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	c.chain.ServeHTTP(w, r)
}
| 
 | ||||
| // chain builds a http.Handler composed of an inline middleware stack and endpoint
 | ||||
| // handler in the order they are passed.
 | ||||
| func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler { | ||||
| 	// Return ahead of time if there aren't any middlewares for the chain
 | ||||
| 	if len(middlewares) == 0 { | ||||
| 		return endpoint | ||||
| 	} | ||||
| 
 | ||||
| 	// Wrap the end handler with the middleware chain
 | ||||
| 	h := middlewares[len(middlewares)-1](endpoint) | ||||
| 	for i := len(middlewares) - 2; i >= 0; i-- { | ||||
| 		h = middlewares[i](h) | ||||
| 	} | ||||
| 
 | ||||
| 	return h | ||||
| } | ||||
|  | @ -0,0 +1,134 @@ | |||
| //
 | ||||
| // Package chi is a small, idiomatic and composable router for building HTTP services.
 | ||||
| //
 | ||||
| // chi requires Go 1.10 or newer.
 | ||||
| //
 | ||||
| // Example:
 | ||||
| //  package main
 | ||||
| //
 | ||||
| //  import (
 | ||||
| //  	"net/http"
 | ||||
| //
 | ||||
| //  	"github.com/go-chi/chi"
 | ||||
| //  	"github.com/go-chi/chi/middleware"
 | ||||
| //  )
 | ||||
| //
 | ||||
| //  func main() {
 | ||||
| //  	r := chi.NewRouter()
 | ||||
| //  	r.Use(middleware.Logger)
 | ||||
| //  	r.Use(middleware.Recoverer)
 | ||||
| //
 | ||||
| //  	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
 | ||||
| //  		w.Write([]byte("root."))
 | ||||
| //  	})
 | ||||
| //
 | ||||
| //  	http.ListenAndServe(":3333", r)
 | ||||
| //  }
 | ||||
| //
 | ||||
| // See github.com/go-chi/chi/_examples/ for more in-depth examples.
 | ||||
| //
 | ||||
| // URL patterns allow for easy matching of path components in HTTP
 | ||||
| // requests. The matching components can then be accessed using
 | ||||
| // chi.URLParam(). All patterns must begin with a slash.
 | ||||
| //
 | ||||
| // A simple named placeholder {name} matches any sequence of characters
 | ||||
| // up to the next / or the end of the URL. Trailing slashes on paths must
 | ||||
| // be handled explicitly.
 | ||||
| //
 | ||||
| // A placeholder with a name followed by a colon allows a regular
 | ||||
| // expression match, for example {number:\\d+}. The regular expression
 | ||||
| // syntax is Go's normal regexp RE2 syntax, except that regular expressions
 | ||||
| // including { or } are not supported, and / will never be
 | ||||
| // matched. An anonymous regexp pattern is allowed, using an empty string
 | ||||
| // before the colon in the placeholder, such as {:\\d+}
 | ||||
| //
 | ||||
| // The special placeholder of asterisk matches the rest of the requested
 | ||||
| // URL. Any trailing characters in the pattern are ignored. This is the only
 | ||||
| // placeholder which will match / characters.
 | ||||
| //
 | ||||
| // Examples:
 | ||||
| //  "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/"
 | ||||
| //  "/user/{name}/info" matches "/user/jsmith/info"
 | ||||
| //  "/page/*" matches "/page/intro/latest"
 | ||||
| //  "/page/*/index" also matches "/page/intro/latest"
 | ||||
| //  "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01"
 | ||||
| //
 | ||||
| package chi | ||||
| 
 | ||||
| import "net/http" | ||||
| 
 | ||||
// NewRouter returns a new Mux object that implements the Router interface.
func NewRouter() *Mux {
	return NewMux()
}
| 
 | ||||
// Router consisting of the core routing methods used by chi's Mux,
// using only the standard net/http.
type Router interface {
	http.Handler
	Routes

	// Use appends one or more middlewares onto the Router stack.
	Use(middlewares ...func(http.Handler) http.Handler)

	// With adds inline middlewares for an endpoint handler.
	With(middlewares ...func(http.Handler) http.Handler) Router

	// Group adds a new inline-Router along the current routing
	// path, with a fresh middleware stack for the inline-Router.
	Group(fn func(r Router)) Router

	// Route mounts a sub-Router along a `pattern` string.
	Route(pattern string, fn func(r Router)) Router

	// Mount attaches another http.Handler along ./pattern/*
	Mount(pattern string, h http.Handler)

	// Handle and HandleFunc adds routes for `pattern` that matches
	// all HTTP methods.
	Handle(pattern string, h http.Handler)
	HandleFunc(pattern string, h http.HandlerFunc)

	// Method and MethodFunc adds routes for `pattern` that matches
	// the `method` HTTP method.
	Method(method, pattern string, h http.Handler)
	MethodFunc(method, pattern string, h http.HandlerFunc)

	// HTTP-method routing along `pattern`
	Connect(pattern string, h http.HandlerFunc)
	Delete(pattern string, h http.HandlerFunc)
	Get(pattern string, h http.HandlerFunc)
	Head(pattern string, h http.HandlerFunc)
	Options(pattern string, h http.HandlerFunc)
	Patch(pattern string, h http.HandlerFunc)
	Post(pattern string, h http.HandlerFunc)
	Put(pattern string, h http.HandlerFunc)
	Trace(pattern string, h http.HandlerFunc)

	// NotFound defines a handler to respond whenever a route could
	// not be found.
	NotFound(h http.HandlerFunc)

	// MethodNotAllowed defines a handler to respond whenever a method is
	// not allowed.
	MethodNotAllowed(h http.HandlerFunc)
}
| 
 | ||||
// Routes interface adds two methods for router traversal, which is also
// used by the `docgen` subpackage to generate documentation for Routers.
type Routes interface {
	// Routes returns the routing tree in an easily traversable structure.
	Routes() []Route

	// Middlewares returns the list of middlewares in use by the router.
	Middlewares() Middlewares

	// Match searches the routing tree for a handler that matches
	// the method/path - similar to routing a http request, but without
	// executing the handler thereafter.
	Match(rctx *Context, method, path string) bool
}
| 
 | ||||
// Middlewares type is a slice of standard middleware handlers with methods
// to compose middleware chains and http.Handler's.
type Middlewares []func(http.Handler) http.Handler
|  | @ -0,0 +1,172 @@ | |||
| package chi | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"net" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| // URLParam returns the url parameter from a http.Request object.
 | ||||
| func URLParam(r *http.Request, key string) string { | ||||
| 	if rctx := RouteContext(r.Context()); rctx != nil { | ||||
| 		return rctx.URLParam(key) | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
| 
 | ||||
| // URLParamFromCtx returns the url parameter from a http.Request Context.
 | ||||
| func URLParamFromCtx(ctx context.Context, key string) string { | ||||
| 	if rctx := RouteContext(ctx); rctx != nil { | ||||
| 		return rctx.URLParam(key) | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
| 
 | ||||
// RouteContext returns chi's routing Context object from a
// http.Request Context. It returns nil when no routing context
// is stored under RouteCtxKey.
func RouteContext(ctx context.Context) *Context {
	// A failed type assertion leaves val as the nil *Context zero value.
	val, _ := ctx.Value(RouteCtxKey).(*Context)
	return val
}
| 
 | ||||
| // ServerBaseContext wraps an http.Handler to set the request context to the
 | ||||
| // `baseCtx`.
 | ||||
| func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler { | ||||
| 	fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		ctx := r.Context() | ||||
| 		baseCtx := baseCtx | ||||
| 
 | ||||
| 		// Copy over default net/http server context keys
 | ||||
| 		if v, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok { | ||||
| 			baseCtx = context.WithValue(baseCtx, http.ServerContextKey, v) | ||||
| 		} | ||||
| 		if v, ok := ctx.Value(http.LocalAddrContextKey).(net.Addr); ok { | ||||
| 			baseCtx = context.WithValue(baseCtx, http.LocalAddrContextKey, v) | ||||
| 		} | ||||
| 
 | ||||
| 		h.ServeHTTP(w, r.WithContext(baseCtx)) | ||||
| 	}) | ||||
| 	return fn | ||||
| } | ||||
| 
 | ||||
// NewRouteContext returns a new routing Context object with zero-valued
// fields.
func NewRouteContext() *Context {
	return &Context{}
}
| 
 | ||||
var (
	// RouteCtxKey is the context.Context key under which chi stores the
	// request's routing Context (see RouteContext).
	RouteCtxKey = &contextKey{"RouteContext"}
)
| 
 | ||||
// Context is the default routing context set on the root node of a
// request context to track route patterns, URL parameters and
// an optional routing path.
type Context struct {
	Routes Routes

	// Routing path/method override used during the route search.
	// See Mux#routeHTTP method.
	RoutePath   string
	RouteMethod string

	// Routing pattern stack throughout the lifecycle of the request,
	// across all connected routers. It is a record of all matching
	// patterns across a stack of sub-routers.
	RoutePatterns []string

	// URLParams are the stack of routeParams captured during the
	// routing lifecycle across a stack of sub-routers.
	URLParams RouteParams

	// The endpoint routing pattern that matched the request URI path
	// or `RoutePath` of the current sub-router. This value will update
	// during the lifecycle of a request passing through a stack of
	// sub-routers.
	routePattern string

	// Route parameters matched for the current sub-router. It is
	// intentionally unexported so it can't be tampered with.
	routeParams RouteParams

	// methodNotAllowed hint
	methodNotAllowed bool
}
| 
 | ||||
// Reset a routing context to its initial state. Slice fields are
// truncated to length 0 (capacity kept) so their backing arrays can be
// reused on subsequent requests.
func (x *Context) Reset() {
	x.Routes = nil
	x.RoutePath = ""
	x.RouteMethod = ""
	x.RoutePatterns = x.RoutePatterns[:0]
	x.URLParams.Keys = x.URLParams.Keys[:0]
	x.URLParams.Values = x.URLParams.Values[:0]

	x.routePattern = ""
	x.routeParams.Keys = x.routeParams.Keys[:0]
	x.routeParams.Values = x.routeParams.Values[:0]
	x.methodNotAllowed = false
}
| 
 | ||||
| // URLParam returns the corresponding URL parameter value from the request
 | ||||
| // routing context.
 | ||||
| func (x *Context) URLParam(key string) string { | ||||
| 	for k := len(x.URLParams.Keys) - 1; k >= 0; k-- { | ||||
| 		if x.URLParams.Keys[k] == key { | ||||
| 			return x.URLParams.Values[k] | ||||
| 		} | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
| 
 | ||||
// RoutePattern builds the routing pattern string for the particular
// request, at the particular point during routing. This means, the value
// will change throughout the execution of a request in a router. That is
// why it's advised to only use this value after calling the next handler.
//
// For example,
//
//   func Instrument(next http.Handler) http.Handler {
//     return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//       next.ServeHTTP(w, r)
//       routePattern := chi.RouteContext(r.Context()).RoutePattern()
//       measure(w, r, routePattern)
//   	 })
//   }
func (x *Context) RoutePattern() string {
	// Concatenate the per-router pattern stack, then collapse any
	// intermediate "/*/" produced by mounted sub-routers.
	routePattern := strings.Join(x.RoutePatterns, "")
	return replaceWildcards(routePattern)
}
| 
 | ||||
// replaceWildcards takes a route pattern and repeatedly replaces all
// occurrences of "/*/" with "/" until none remain.
func replaceWildcards(p string) string {
	for strings.Contains(p, "/*/") {
		p = strings.Replace(p, "/*/", "/", -1)
	}
	return p
}
| 
 | ||||
// RouteParams is a structure to track URL routing parameters efficiently,
// holding keys and values in two parallel slices.
type RouteParams struct {
	Keys, Values []string
}

// Add appends a URL parameter (key/value pair) to the end of the route params.
func (rp *RouteParams) Add(key, value string) {
	rp.Keys = append(rp.Keys, key)
	rp.Values = append(rp.Values, value)
}
| 
 | ||||
// contextKey is a value for use with context.WithValue. It's used as
// a pointer so it fits in an interface{} without allocation. This technique
// for defining context keys was copied from Go 1.7's new use of context in net/http.
type contextKey struct {
	name string
}

// String labels the key as a chi context value, useful in debug output.
func (ck *contextKey) String() string {
	return "chi context value " + ck.name
}
|  | @ -0,0 +1,32 @@ | |||
| package middleware | ||||
| 
 | ||||
import (
	"crypto/subtle"
	"fmt"
	"net/http"
)
| 
 | ||||
| // BasicAuth implements a simple middleware handler for adding basic http auth to a route.
 | ||||
| func BasicAuth(realm string, creds map[string]string) func(next http.Handler) http.Handler { | ||||
| 	return func(next http.Handler) http.Handler { | ||||
| 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 			user, pass, ok := r.BasicAuth() | ||||
| 			if !ok { | ||||
| 				basicAuthFailed(w, realm) | ||||
| 				return | ||||
| 			} | ||||
| 
 | ||||
| 			credPass, credUserOk := creds[user] | ||||
| 			if !credUserOk || pass != credPass { | ||||
| 				basicAuthFailed(w, realm) | ||||
| 				return | ||||
| 			} | ||||
| 
 | ||||
| 			next.ServeHTTP(w, r) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func basicAuthFailed(w http.ResponseWriter, realm string) { | ||||
| 	w.Header().Add("WWW-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm)) | ||||
| 	w.WriteHeader(http.StatusUnauthorized) | ||||
| } | ||||
|  | @ -0,0 +1,399 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"compress/flate" | ||||
| 	"compress/gzip" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"net" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| ) | ||||
| 
 | ||||
// defaultCompressibleContentTypes is the fallback set of response
// Content-Types eligible for compression when no explicit types are
// passed to Compress/NewCompressor.
var defaultCompressibleContentTypes = []string{
	"text/html",
	"text/css",
	"text/plain",
	"text/javascript",
	"application/javascript",
	"application/x-javascript",
	"application/json",
	"application/atom+xml",
	"application/rss+xml",
	"image/svg+xml",
}
| 
 | ||||
// Compress is a middleware that compresses response
// body of a given content types to a data format based
// on Accept-Encoding request header. It uses a given
// compression level.
//
// NOTE: make sure to set the Content-Type header on your response
// otherwise this middleware will not compress the response body. For ex, in
// your handler you should set w.Header().Set("Content-Type", http.DetectContentType(yourBody))
// or set it manually.
//
// Passing a compression level of 5 is a sensible value.
func Compress(level int, types ...string) func(next http.Handler) http.Handler {
	compressor := NewCompressor(level, types...)
	return compressor.Handler
}
| 
 | ||||
// Compressor represents a set of encoding configurations.
type Compressor struct {
	level int // The compression level.
	// The mapping of encoder names to encoder functions.
	encoders map[string]EncoderFunc
	// The mapping of pooled encoders to pools.
	pooledEncoders map[string]*sync.Pool
	// The set of content types allowed to be compressed.
	allowedTypes     map[string]struct{}
	allowedWildcards map[string]struct{}
	// The list of encoders in order of decreasing precedence.
	encodingPrecedence []string
}
| 
 | ||||
// NewCompressor creates a new Compressor that will handle encoding responses.
//
// The level should be one of the ones defined in the flate package.
// The types are the content types that are allowed to be compressed.
//
// Panics when a type uses a wildcard anywhere other than as a trailing
// "/*" subtype (e.g. "text/*" is accepted; "*/plain" is not).
func NewCompressor(level int, types ...string) *Compressor {
	// If types are provided, set those as the allowed types. If none are
	// provided, use the default list.
	allowedTypes := make(map[string]struct{})
	allowedWildcards := make(map[string]struct{})
	if len(types) > 0 {
		for _, t := range types {
			if strings.Contains(strings.TrimSuffix(t, "/*"), "*") {
				panic(fmt.Sprintf("middleware/compress: Unsupported content-type wildcard pattern '%s'. Only '/*' supported", t))
			}
			if strings.HasSuffix(t, "/*") {
				// "text/*" registers the "text" prefix as a wildcard.
				allowedWildcards[strings.TrimSuffix(t, "/*")] = struct{}{}
			} else {
				allowedTypes[t] = struct{}{}
			}
		}
	} else {
		for _, t := range defaultCompressibleContentTypes {
			allowedTypes[t] = struct{}{}
		}
	}

	c := &Compressor{
		level:            level,
		encoders:         make(map[string]EncoderFunc),
		pooledEncoders:   make(map[string]*sync.Pool),
		allowedTypes:     allowedTypes,
		allowedWildcards: allowedWildcards,
	}

	// Set the default encoders.  The precedence order uses the reverse
	// ordering that the encoders were added. This means adding new encoders
	// will move them to the front of the order.
	//
	// TODO:
	// lzma: Opera.
	// sdch: Chrome, Android. Gzip output + dictionary header.
	// br:   Brotli, see https://github.com/go-chi/chi/pull/326

	// HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951)
	// wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32
	// checksum compared to CRC-32 used in "gzip" and thus is faster.
	//
	// But.. some old browsers (MSIE, Safari 5.1) incorrectly expect
	// raw DEFLATE data only, without the mentioned zlib wrapper.
	// Because of this major confusion, most modern browsers try it
	// both ways, first looking for zlib headers.
	// Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548
	//
	// The list of browsers having problems is quite big, see:
	// http://zoompf.com/blog/2012/02/lose-the-wait-http-compression
	// https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results
	//
	// That's why we prefer gzip over deflate. It's just more reliable
	// and not significantly slower than gzip.
	c.SetEncoder("deflate", encoderDeflate)

	// TODO: Exception for old MSIE browsers that can't handle non-HTML?
	// https://zoompf.com/blog/2012/02/lose-the-wait-http-compression
	c.SetEncoder("gzip", encoderGzip)

	// NOTE: Not implemented, intentionally:
	// case "compress": // LZW. Deprecated.
	// case "bzip2":    // Too slow on-the-fly.
	// case "zopfli":   // Too slow on-the-fly.
	// case "xz":       // Too slow on-the-fly.
	return c
}
| 
 | ||||
// SetEncoder can be used to set the implementation of a compression algorithm.
//
// The encoding should be a standardised identifier. See:
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
//
// For example, add the Brotli algortithm:
//
//  import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc"
//
//  compressor := middleware.NewCompressor(5, "text/html")
//  compressor.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer {
//    params := brotli_enc.NewBrotliParams()
//    params.SetQuality(level)
//    return brotli_enc.NewBrotliWriter(params, w)
//  })
//
// Panics when encoding is empty or fn is nil. The newly set encoding is
// moved to the front of the precedence order consulted by selectEncoder.
func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) {
	encoding = strings.ToLower(encoding)
	if encoding == "" {
		panic("the encoding can not be empty")
	}
	if fn == nil {
		panic("attempted to set a nil encoder function")
	}

	// If we are adding a new encoder that is already registered, we have to
	// clear that one out first.
	if _, ok := c.pooledEncoders[encoding]; ok {
		delete(c.pooledEncoders, encoding)
	}
	if _, ok := c.encoders[encoding]; ok {
		delete(c.encoders, encoding)
	}

	// If the encoder supports Resetting (IoReseterWriter), then it can be pooled.
	// Probe with a throwaway instance writing to ioutil.Discard.
	encoder := fn(ioutil.Discard, c.level)
	if encoder != nil {
		if _, ok := encoder.(ioResetterWriter); ok {
			pool := &sync.Pool{
				New: func() interface{} {
					return fn(ioutil.Discard, c.level)
				},
			}
			c.pooledEncoders[encoding] = pool
		}
	}
	// If the encoder is not in the pooledEncoders, add it to the normal encoders.
	if _, ok := c.pooledEncoders[encoding]; !ok {
		c.encoders[encoding] = fn
	}

	// Remove any previous occurrence so the encoding appears exactly once,
	// at the front of the precedence list.
	for i, v := range c.encodingPrecedence {
		if v == encoding {
			c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...)
		}
	}

	c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...)
}
| 
 | ||||
// Handler returns a new middleware that will compress the response based on the
// current Compressor.
func (c *Compressor) Handler(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Pick the best encoder for this request's Accept-Encoding;
		// cleanup returns a pooled encoder to its pool (no-op otherwise).
		encoder, encoding, cleanup := c.selectEncoder(r.Header, w)

		cw := &compressResponseWriter{
			ResponseWriter:   w,
			w:                w,
			contentTypes:     c.allowedTypes,
			contentWildcards: c.allowedWildcards,
			encoding:         encoding,
			compressable:     false, // determined in post-handler
		}
		if encoder != nil {
			cw.w = encoder
		}
		// Re-add the encoder to the pool if applicable.
		// Deferred LIFO order: cw.Close finalizes the encoder first, then
		// cleanup returns it to its pool.
		defer cleanup()
		defer cw.Close()

		next.ServeHTTP(cw, r)
	})
}
| 
 | ||||
// selectEncoder returns the encoder, the name of the encoder, and a closer function.
// Encoders are tried in precedence order (most recently registered first).
// A pooled encoder is checked out of its sync.Pool and Reset onto w; the
// returned closer puts it back. When nothing matches the Accept-Encoding
// header, a nil writer, empty name, and no-op closer are returned.
func (c *Compressor) selectEncoder(h http.Header, w io.Writer) (io.Writer, string, func()) {
	header := h.Get("Accept-Encoding")

	// Parse the names of all accepted algorithms from the header.
	accepted := strings.Split(strings.ToLower(header), ",")

	// Find supported encoder by accepted list by precedence
	for _, name := range c.encodingPrecedence {
		if matchAcceptEncoding(accepted, name) {
			if pool, ok := c.pooledEncoders[name]; ok {
				encoder := pool.Get().(ioResetterWriter)
				cleanup := func() {
					pool.Put(encoder)
				}
				encoder.Reset(w)
				return encoder, name, cleanup

			}
			if fn, ok := c.encoders[name]; ok {
				return fn(w, c.level), name, func() {}
			}
		}

	}

	// No encoder found to match the accepted encoding
	return nil, "", func() {}
}
| 
 | ||||
// matchAcceptEncoding reports whether encoding is one of the content
// codings offered by the client. accepted holds the Accept-Encoding
// header, already lower-cased and split on commas.
//
// Each entry is trimmed and stripped of parameters (such as ";q=0.8")
// before an exact comparison, so "gzip" no longer matches unrelated
// codings like "supergzip" (the previous substring check did). Per
// RFC 7230 the legacy "x-" alias (e.g. "x-gzip") is treated as
// equivalent. Quality values are not interpreted: "gzip;q=0" still
// counts as accepted, matching the previous behavior.
func matchAcceptEncoding(accepted []string, encoding string) bool {
	for _, v := range accepted {
		name := v
		// Drop any ";q=..." (or other) parameters.
		if i := strings.Index(name, ";"); i >= 0 {
			name = name[:i]
		}
		name = strings.TrimSpace(name)
		if name == encoding || name == "x-"+encoding {
			return true
		}
	}
	return false
}
| 
 | ||||
// An EncoderFunc is a function that wraps the provided io.Writer with a
// streaming compression algorithm and returns it.
//
// In case of failure, the function should return nil.
type EncoderFunc func(w io.Writer, level int) io.Writer

// Interface for types that allow resetting io.Writers.
// Encoders implementing it are reused through a sync.Pool by SetEncoder
// instead of being constructed per request.
type ioResetterWriter interface {
	io.Writer
	Reset(w io.Writer)
}
| 
 | ||||
// compressResponseWriter wraps an http.ResponseWriter, deciding on the
// first WriteHeader/Write whether the response qualifies for compression
// and routing the body through the encoder when it does.
type compressResponseWriter struct {
	http.ResponseWriter

	// The streaming encoder writer to be used if there is one. Otherwise,
	// this is just the normal writer.
	w                io.Writer
	encoding         string // negotiated Content-Encoding name ("" when none)
	contentTypes     map[string]struct{}
	contentWildcards map[string]struct{}
	wroteHeader      bool // set once WriteHeader has run; compressable is then valid
	compressable     bool // decided lazily in WriteHeader from the Content-Type
}
| 
 | ||||
| func (cw *compressResponseWriter) isCompressable() bool { | ||||
| 	// Parse the first part of the Content-Type response header.
 | ||||
| 	contentType := cw.Header().Get("Content-Type") | ||||
| 	if idx := strings.Index(contentType, ";"); idx >= 0 { | ||||
| 		contentType = contentType[0:idx] | ||||
| 	} | ||||
| 
 | ||||
| 	// Is the content type compressable?
 | ||||
| 	if _, ok := cw.contentTypes[contentType]; ok { | ||||
| 		return true | ||||
| 	} | ||||
| 	if idx := strings.Index(contentType, "/"); idx > 0 { | ||||
| 		contentType = contentType[0:idx] | ||||
| 		_, ok := cw.contentWildcards[contentType] | ||||
| 		return ok | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
// WriteHeader decides, exactly once, whether this response will be
// compressed, adjusts the negotiated headers accordingly, and forwards
// the status code to the wrapped ResponseWriter.
func (cw *compressResponseWriter) WriteHeader(code int) {
	if cw.wroteHeader {
		cw.ResponseWriter.WriteHeader(code) // Allow multiple calls to propagate.
		return
	}
	cw.wroteHeader = true
	// Deferred so the header mutations below land before the status line
	// is actually written.
	defer cw.ResponseWriter.WriteHeader(code)

	// Already compressed data?
	if cw.Header().Get("Content-Encoding") != "" {
		return
	}

	if !cw.isCompressable() {
		cw.compressable = false
		return
	}

	if cw.encoding != "" {
		cw.compressable = true
		cw.Header().Set("Content-Encoding", cw.encoding)
		cw.Header().Set("Vary", "Accept-Encoding")

		// The content-length after compression is unknown
		cw.Header().Del("Content-Length")
	}
}
| 
 | ||||
| func (cw *compressResponseWriter) Write(p []byte) (int, error) { | ||||
| 	if !cw.wroteHeader { | ||||
| 		cw.WriteHeader(http.StatusOK) | ||||
| 	} | ||||
| 
 | ||||
| 	return cw.writer().Write(p) | ||||
| } | ||||
| 
 | ||||
| func (cw *compressResponseWriter) writer() io.Writer { | ||||
| 	if cw.compressable { | ||||
| 		return cw.w | ||||
| 	} else { | ||||
| 		return cw.ResponseWriter | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
// compressFlusher matches encoders whose Flush returns an error (such as
// the gzip and flate writers), which therefore do not satisfy
// http.Flusher.
type compressFlusher interface {
	Flush() error
}

// Flush pushes buffered data out through the active writer, and — when
// that writer is a compressing encoder — through the underlying
// ResponseWriter as well, so partial output reaches the client.
func (cw *compressResponseWriter) Flush() {
	if f, ok := cw.writer().(http.Flusher); ok {
		f.Flush()
	}
	// If the underlying writer has a compression flush signature,
	// call this Flush() method instead
	if f, ok := cw.writer().(compressFlusher); ok {
		f.Flush()

		// Also flush the underlying response writer
		if f, ok := cw.ResponseWriter.(http.Flusher); ok {
			f.Flush()
		}
	}
}
| 
 | ||||
| func (cw *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { | ||||
| 	if hj, ok := cw.writer().(http.Hijacker); ok { | ||||
| 		return hj.Hijack() | ||||
| 	} | ||||
| 	return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer") | ||||
| } | ||||
| 
 | ||||
| func (cw *compressResponseWriter) Push(target string, opts *http.PushOptions) error { | ||||
| 	if ps, ok := cw.writer().(http.Pusher); ok { | ||||
| 		return ps.Push(target, opts) | ||||
| 	} | ||||
| 	return errors.New("chi/middleware: http.Pusher is unavailable on the writer") | ||||
| } | ||||
| 
 | ||||
| func (cw *compressResponseWriter) Close() error { | ||||
| 	if c, ok := cw.writer().(io.WriteCloser); ok { | ||||
| 		return c.Close() | ||||
| 	} | ||||
| 	return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer") | ||||
| } | ||||
| 
 | ||||
| func encoderGzip(w io.Writer, level int) io.Writer { | ||||
| 	gw, err := gzip.NewWriterLevel(w, level) | ||||
| 	if err != nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return gw | ||||
| } | ||||
| 
 | ||||
| func encoderDeflate(w io.Writer, level int) io.Writer { | ||||
| 	dw, err := flate.NewWriter(w, level) | ||||
| 	if err != nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return dw | ||||
| } | ||||
|  | @ -0,0 +1,51 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| // ContentCharset generates a handler that writes a 415 Unsupported Media Type response if none of the charsets match.
 | ||||
| // An empty charset will allow requests with no Content-Type header or no specified charset.
 | ||||
| func ContentCharset(charsets ...string) func(next http.Handler) http.Handler { | ||||
| 	for i, c := range charsets { | ||||
| 		charsets[i] = strings.ToLower(c) | ||||
| 	} | ||||
| 
 | ||||
| 	return func(next http.Handler) http.Handler { | ||||
| 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 			if !contentEncoding(r.Header.Get("Content-Type"), charsets...) { | ||||
| 				w.WriteHeader(http.StatusUnsupportedMediaType) | ||||
| 				return | ||||
| 			} | ||||
| 
 | ||||
| 			next.ServeHTTP(w, r) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
// contentEncoding reports whether the charset parameter of the given
// Content-Type header value matches one of the acceptable charsets. A
// value without a charset parameter reduces to "", which matches an
// empty entry in charsets.
func contentEncoding(ce string, charsets ...string) bool {
	// Reduce e.g. "text/html; charset=UTF-8" down to "utf-8".
	_, ce = split(strings.ToLower(ce), ";")
	_, ce = split(ce, "charset=")
	ce, _ = split(ce, ";")

	for _, want := range charsets {
		if want == ce {
			return true
		}
	}
	return false
}

// split cuts str around the first occurrence of sep, trimming whitespace
// from both halves; the second half is empty when sep is absent.
func split(str, sep string) (string, string) {
	before, after, _ := strings.Cut(str, sep)
	return strings.TrimSpace(before), strings.TrimSpace(after)
}
|  | @ -0,0 +1,34 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| // AllowContentEncoding enforces a whitelist of request Content-Encoding otherwise responds
 | ||||
| // with a 415 Unsupported Media Type status.
 | ||||
| func AllowContentEncoding(contentEncoding ...string) func(next http.Handler) http.Handler { | ||||
| 	allowedEncodings := make(map[string]struct{}, len(contentEncoding)) | ||||
| 	for _, encoding := range contentEncoding { | ||||
| 		allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))] = struct{}{} | ||||
| 	} | ||||
| 	return func(next http.Handler) http.Handler { | ||||
| 		fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 			requestEncodings := r.Header["Content-Encoding"] | ||||
| 			// skip check for empty content body or no Content-Encoding
 | ||||
| 			if r.ContentLength == 0 { | ||||
| 				next.ServeHTTP(w, r) | ||||
| 				return | ||||
| 			} | ||||
| 			// All encodings in the request must be allowed
 | ||||
| 			for _, encoding := range requestEncodings { | ||||
| 				if _, ok := allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))]; !ok { | ||||
| 					w.WriteHeader(http.StatusUnsupportedMediaType) | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 			next.ServeHTTP(w, r) | ||||
| 		} | ||||
| 		return http.HandlerFunc(fn) | ||||
| 	} | ||||
| } | ||||
|  | @ -0,0 +1,51 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| // SetHeader is a convenience handler to set a response header key/value
 | ||||
| func SetHeader(key, value string) func(next http.Handler) http.Handler { | ||||
| 	return func(next http.Handler) http.Handler { | ||||
| 		fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 			w.Header().Set(key, value) | ||||
| 			next.ServeHTTP(w, r) | ||||
| 		} | ||||
| 		return http.HandlerFunc(fn) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // AllowContentType enforces a whitelist of request Content-Types otherwise responds
 | ||||
| // with a 415 Unsupported Media Type status.
 | ||||
| func AllowContentType(contentTypes ...string) func(next http.Handler) http.Handler { | ||||
| 	cT := []string{} | ||||
| 	for _, t := range contentTypes { | ||||
| 		cT = append(cT, strings.ToLower(t)) | ||||
| 	} | ||||
| 
 | ||||
| 	return func(next http.Handler) http.Handler { | ||||
| 		fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 			if r.ContentLength == 0 { | ||||
| 				// skip check for empty content body
 | ||||
| 				next.ServeHTTP(w, r) | ||||
| 				return | ||||
| 			} | ||||
| 
 | ||||
| 			s := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Type"))) | ||||
| 			if i := strings.Index(s, ";"); i > -1 { | ||||
| 				s = s[0:i] | ||||
| 			} | ||||
| 
 | ||||
| 			for _, t := range cT { | ||||
| 				if t == s { | ||||
| 					next.ServeHTTP(w, r) | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 
 | ||||
| 			w.WriteHeader(http.StatusUnsupportedMediaType) | ||||
| 		} | ||||
| 		return http.HandlerFunc(fn) | ||||
| 	} | ||||
| } | ||||
|  | @ -0,0 +1,39 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"net/http" | ||||
| 
 | ||||
| 	"github.com/go-chi/chi" | ||||
| ) | ||||
| 
 | ||||
// GetHead automatically route undefined HEAD requests to GET handlers.
func GetHead(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "HEAD" {
			rctx := chi.RouteContext(r.Context())
			routePath := rctx.RoutePath
			if routePath == "" {
				// Not routed yet: derive the path from the URL,
				// preferring the raw (escaped) form when available.
				if r.URL.RawPath != "" {
					routePath = r.URL.RawPath
				} else {
					routePath = r.URL.Path
				}
			}

			// Temporary routing context to look-ahead before routing the request
			tctx := chi.NewRouteContext()

			// Attempt to find a HEAD handler for the routing path, if not found, traverse
			// the router as through its a GET route, but proceed with the request
			// with the HEAD method.
			if !rctx.Routes.Match(tctx, "HEAD", routePath) {
				rctx.RouteMethod = "GET"
				rctx.RoutePath = routePath
				next.ServeHTTP(w, r)
				return
			}
		}

		// Either not a HEAD request, or an explicit HEAD route exists.
		next.ServeHTTP(w, r)
	})
}
|  | @ -0,0 +1,26 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| // Heartbeat endpoint middleware useful to setting up a path like
 | ||||
| // `/ping` that load balancers or uptime testing external services
 | ||||
| // can make a request before hitting any routes. It's also convenient
 | ||||
| // to place this above ACL middlewares as well.
 | ||||
| func Heartbeat(endpoint string) func(http.Handler) http.Handler { | ||||
| 	f := func(h http.Handler) http.Handler { | ||||
| 		fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 			if r.Method == "GET" && strings.EqualFold(r.URL.Path, endpoint) { | ||||
| 				w.Header().Set("Content-Type", "text/plain") | ||||
| 				w.WriteHeader(http.StatusOK) | ||||
| 				w.Write([]byte(".")) | ||||
| 				return | ||||
| 			} | ||||
| 			h.ServeHTTP(w, r) | ||||
| 		} | ||||
| 		return http.HandlerFunc(fn) | ||||
| 	} | ||||
| 	return f | ||||
| } | ||||
|  | @ -0,0 +1,155 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"log" | ||||
| 	"net/http" | ||||
| 	"os" | ||||
| 	"time" | ||||
| ) | ||||
| 
 | ||||
var (
	// LogEntryCtxKey is the context.Context key to store the request log entry.
	LogEntryCtxKey = &contextKey{"LogEntry"}

	// DefaultLogger is called by the Logger middleware handler to log each request.
	// Its made a package-level variable so that it can be reconfigured for custom
	// logging configurations. Out of the box it writes to stdout with
	// standard log flags and colors enabled.
	DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags), NoColor: false})
)
| 
 | ||||
// Logger is a middleware that logs the start and end of each request, along
// with some useful data about what was requested, what the response status was,
// and how long it took to return. When standard output is a TTY, Logger will
// print in color, otherwise it will print in black and white. Logger prints a
// request ID if one is provided.
//
// Alternatively, look at https://github.com/goware/httplog for a more in-depth
// http logger with structured logging support.
//
// Logger delegates to the package-level DefaultLogger; replace that
// variable to customize output globally.
func Logger(next http.Handler) http.Handler {
	return DefaultLogger(next)
}
| 
 | ||||
// RequestLogger returns a logger handler using a custom LogFormatter.
// For each request it creates a LogEntry, stores it on the request
// context (retrievable via GetLogEntry), and writes the final line once
// the downstream handler returns.
func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			entry := f.NewLogEntry(r)
			// Wrap the writer so the status code and byte count can be
			// observed after the handler runs.
			ww := NewWrapResponseWriter(w, r.ProtoMajor)

			t1 := time.Now()
			// Deferred so the entry is written even if the handler panics
			// while unwinding.
			defer func() {
				entry.Write(ww.Status(), ww.BytesWritten(), ww.Header(), time.Since(t1), nil)
			}()

			next.ServeHTTP(ww, WithLogEntry(r, entry))
		}
		return http.HandlerFunc(fn)
	}
}
| 
 | ||||
// LogFormatter initiates the beginning of a new LogEntry per request.
// See DefaultLogFormatter for an example implementation.
type LogFormatter interface {
	NewLogEntry(r *http.Request) LogEntry
}

// LogEntry records the final log when a request completes.
// See defaultLogEntry for an example implementation.
type LogEntry interface {
	// Write is invoked once by RequestLogger after the handler returns,
	// with the response status, size, headers, and elapsed time.
	Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{})
	// Panic reports a recovered panic value and stack trace
	// (presumably invoked by a recovery middleware — not shown here).
	Panic(v interface{}, stack []byte)
}
| 
 | ||||
| // GetLogEntry returns the in-context LogEntry for a request.
 | ||||
| func GetLogEntry(r *http.Request) LogEntry { | ||||
| 	entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry) | ||||
| 	return entry | ||||
| } | ||||
| 
 | ||||
| // WithLogEntry sets the in-context LogEntry for a request.
 | ||||
| func WithLogEntry(r *http.Request, entry LogEntry) *http.Request { | ||||
| 	r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry)) | ||||
| 	return r | ||||
| } | ||||
| 
 | ||||
// LoggerInterface accepts printing to stdlib logger or compatible logger.
type LoggerInterface interface {
	Print(v ...interface{})
}

// DefaultLogFormatter is a simple logger that implements a LogFormatter.
type DefaultLogFormatter struct {
	// Logger receives the fully rendered log line for each request.
	Logger LoggerInterface
	// NoColor disables ANSI color codes in the rendered output.
	NoColor bool
}
| 
 | ||||
// NewLogEntry creates a new LogEntry for the request.
// It pre-renders the request prefix — `[reqID] "METHOD scheme://host/uri
// proto" from remoteAddr - ` — into the entry's buffer; the entry's Write
// later appends the response details and emits the line.
func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry {
	useColor := !l.NoColor
	entry := &defaultLogEntry{
		DefaultLogFormatter: l,
		request:             r,
		buf:                 &bytes.Buffer{},
		useColor:            useColor,
	}

	// Include the request ID, when the RequestID middleware provided one.
	reqID := GetReqID(r.Context())
	if reqID != "" {
		cW(entry.buf, useColor, nYellow, "[%s] ", reqID)
	}
	cW(entry.buf, useColor, nCyan, "\"")
	cW(entry.buf, useColor, bMagenta, "%s ", r.Method)

	// r.TLS is non-nil only for connections served over TLS.
	scheme := "http"
	if r.TLS != nil {
		scheme = "https"
	}
	cW(entry.buf, useColor, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto)

	entry.buf.WriteString("from ")
	entry.buf.WriteString(r.RemoteAddr)
	entry.buf.WriteString(" - ")

	return entry
}
| 
 | ||||
// defaultLogEntry accumulates one request's log line in buf: the request
// prefix is written by NewLogEntry, and the response suffix by Write.
type defaultLogEntry struct {
	*DefaultLogFormatter
	request  *http.Request
	buf      *bytes.Buffer
	useColor bool
}
| 
 | ||||
// Write completes the log line with the colorized status code (bucketed
// by status class), the response size, and the elapsed time (green under
// 500ms, yellow under 5s, red otherwise), then emits it via the
// configured Logger.
func (l *defaultLogEntry) Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{}) {
	switch {
	case status < 200:
		cW(l.buf, l.useColor, bBlue, "%03d", status)
	case status < 300:
		cW(l.buf, l.useColor, bGreen, "%03d", status)
	case status < 400:
		cW(l.buf, l.useColor, bCyan, "%03d", status)
	case status < 500:
		cW(l.buf, l.useColor, bYellow, "%03d", status)
	default:
		cW(l.buf, l.useColor, bRed, "%03d", status)
	}

	cW(l.buf, l.useColor, bBlue, " %dB", bytes)

	l.buf.WriteString(" in ")
	if elapsed < 500*time.Millisecond {
		cW(l.buf, l.useColor, nGreen, "%s", elapsed)
	} else if elapsed < 5*time.Second {
		cW(l.buf, l.useColor, nYellow, "%s", elapsed)
	} else {
		cW(l.buf, l.useColor, nRed, "%s", elapsed)
	}

	l.Logger.Print(l.buf.String())
}
| 
 | ||||
// Panic renders the recovered panic value via PrintPrettyStack.
// Note the stack argument is unused by this implementation.
func (l *defaultLogEntry) Panic(v interface{}, stack []byte) {
	PrintPrettyStack(v)
}
|  | @ -0,0 +1,23 @@ | |||
| package middleware | ||||
| 
 | ||||
| import "net/http" | ||||
| 
 | ||||
| // New will create a new middleware handler from a http.Handler.
 | ||||
| func New(h http.Handler) func(next http.Handler) http.Handler { | ||||
| 	return func(next http.Handler) http.Handler { | ||||
| 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 			h.ServeHTTP(w, r) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
// contextKey is a value for use with context.WithValue. It's used as
// a pointer so it fits in an interface{} without allocation. This technique
// for defining context keys was copied from Go 1.7's new use of context in net/http.
type contextKey struct {
	name string
}

// String identifies the key in debug output and error messages.
func (k *contextKey) String() string {
	return "chi/middleware context value " + k.name
}
|  | @ -0,0 +1,58 @@ | |||
| package middleware | ||||
| 
 | ||||
| // Ported from Goji's middleware, source:
 | ||||
| // https://github.com/zenazn/goji/tree/master/web/middleware
 | ||||
| 
 | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"time" | ||||
| ) | ||||
| 
 | ||||
| // Unix epoch time
 | ||||
| var epoch = time.Unix(0, 0).Format(time.RFC1123) | ||||
| 
 | ||||
| // Taken from https://github.com/mytrile/nocache
 | ||||
| var noCacheHeaders = map[string]string{ | ||||
| 	"Expires":         epoch, | ||||
| 	"Cache-Control":   "no-cache, no-store, no-transform, must-revalidate, private, max-age=0", | ||||
| 	"Pragma":          "no-cache", | ||||
| 	"X-Accel-Expires": "0", | ||||
| } | ||||
| 
 | ||||
| var etagHeaders = []string{ | ||||
| 	"ETag", | ||||
| 	"If-Modified-Since", | ||||
| 	"If-Match", | ||||
| 	"If-None-Match", | ||||
| 	"If-Range", | ||||
| 	"If-Unmodified-Since", | ||||
| } | ||||
| 
 | ||||
| // NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent
 | ||||
| // a router (or subrouter) from being cached by an upstream proxy and/or client.
 | ||||
| //
 | ||||
| // As per http://wiki.nginx.org/HttpProxyModule - NoCache sets:
 | ||||
| //      Expires: Thu, 01 Jan 1970 00:00:00 UTC
 | ||||
| //      Cache-Control: no-cache, private, max-age=0
 | ||||
| //      X-Accel-Expires: 0
 | ||||
| //      Pragma: no-cache (for HTTP/1.0 proxies/clients)
 | ||||
| func NoCache(h http.Handler) http.Handler { | ||||
| 	fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 
 | ||||
| 		// Delete any ETag headers that may have been set
 | ||||
| 		for _, v := range etagHeaders { | ||||
| 			if r.Header.Get(v) != "" { | ||||
| 				r.Header.Del(v) | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		// Set our NoCache headers
 | ||||
| 		for k, v := range noCacheHeaders { | ||||
| 			w.Header().Set(k, v) | ||||
| 		} | ||||
| 
 | ||||
| 		h.ServeHTTP(w, r) | ||||
| 	} | ||||
| 
 | ||||
| 	return http.HandlerFunc(fn) | ||||
| } | ||||
|  | @ -0,0 +1,55 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"expvar" | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"net/http/pprof" | ||||
| 
 | ||||
| 	"github.com/go-chi/chi" | ||||
| ) | ||||
| 
 | ||||
// Profiler is a convenient subrouter used for mounting net/http/pprof. ie.
//
//  func MyService() http.Handler {
//    r := chi.NewRouter()
//    // ..middlewares
//    r.Mount("/debug", middleware.Profiler())
//    // ..routes
//    return r
//  }
//
// Every endpoint is wrapped with NoCache so profiling data is never
// served stale by intermediaries.
func Profiler() http.Handler {
	r := chi.NewRouter()
	r.Use(NoCache)

	// Redirect the bare mount points to the pprof index page.
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, r.RequestURI+"/pprof/", 301)
	})
	r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, r.RequestURI+"/", 301)
	})

	r.HandleFunc("/pprof/*", pprof.Index)
	r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
	r.HandleFunc("/pprof/profile", pprof.Profile)
	r.HandleFunc("/pprof/symbol", pprof.Symbol)
	r.HandleFunc("/pprof/trace", pprof.Trace)
	// Published expvar counters, mirroring the default /debug/vars.
	r.HandleFunc("/vars", expVars)

	return r
}
| 
 | ||||
| // Replicated from expvar.go as not public.
 | ||||
| func expVars(w http.ResponseWriter, r *http.Request) { | ||||
| 	first := true | ||||
| 	w.Header().Set("Content-Type", "application/json") | ||||
| 	fmt.Fprintf(w, "{\n") | ||||
| 	expvar.Do(func(kv expvar.KeyValue) { | ||||
| 		if !first { | ||||
| 			fmt.Fprintf(w, ",\n") | ||||
| 		} | ||||
| 		first = false | ||||
| 		fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) | ||||
| 	}) | ||||
| 	fmt.Fprintf(w, "\n}\n") | ||||
| } | ||||
|  | @ -0,0 +1,54 @@ | |||
| package middleware | ||||
| 
 | ||||
| // Ported from Goji's middleware, source:
 | ||||
| // https://github.com/zenazn/goji/tree/master/web/middleware
 | ||||
| 
 | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") | ||||
| var xRealIP = http.CanonicalHeaderKey("X-Real-IP") | ||||
| 
 | ||||
| // RealIP is a middleware that sets a http.Request's RemoteAddr to the results
 | ||||
| // of parsing either the X-Forwarded-For header or the X-Real-IP header (in that
 | ||||
| // order).
 | ||||
| //
 | ||||
| // This middleware should be inserted fairly early in the middleware stack to
 | ||||
| // ensure that subsequent layers (e.g., request loggers) which examine the
 | ||||
| // RemoteAddr will see the intended value.
 | ||||
| //
 | ||||
| // You should only use this middleware if you can trust the headers passed to
 | ||||
| // you (in particular, the two headers this middleware uses), for example
 | ||||
| // because you have placed a reverse proxy like HAProxy or nginx in front of
 | ||||
| // chi. If your reverse proxies are configured to pass along arbitrary header
 | ||||
| // values from the client, or if you use this middleware without a reverse
 | ||||
| // proxy, malicious clients will be able to make you very sad (or, depending on
 | ||||
| // how you're using RemoteAddr, vulnerable to an attack of some sort).
 | ||||
| func RealIP(h http.Handler) http.Handler { | ||||
| 	fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 		if rip := realIP(r); rip != "" { | ||||
| 			r.RemoteAddr = rip | ||||
| 		} | ||||
| 		h.ServeHTTP(w, r) | ||||
| 	} | ||||
| 
 | ||||
| 	return http.HandlerFunc(fn) | ||||
| } | ||||
| 
 | ||||
| func realIP(r *http.Request) string { | ||||
| 	var ip string | ||||
| 
 | ||||
| 	if xrip := r.Header.Get(xRealIP); xrip != "" { | ||||
| 		ip = xrip | ||||
| 	} else if xff := r.Header.Get(xForwardedFor); xff != "" { | ||||
| 		i := strings.Index(xff, ", ") | ||||
| 		if i == -1 { | ||||
| 			i = len(xff) | ||||
| 		} | ||||
| 		ip = xff[:i] | ||||
| 	} | ||||
| 
 | ||||
| 	return ip | ||||
| } | ||||
|  | @ -0,0 +1,192 @@ | |||
| package middleware | ||||
| 
 | ||||
| // The original work was derived from Goji's middleware, source:
 | ||||
| // https://github.com/zenazn/goji/tree/master/web/middleware
 | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"os" | ||||
| 	"runtime/debug" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| // Recoverer is a middleware that recovers from panics, logs the panic (and a
 | ||||
| // backtrace), and returns a HTTP 500 (Internal Server Error) status if
 | ||||
| // possible. Recoverer prints a request ID if one is provided.
 | ||||
| //
 | ||||
| // Alternatively, look at https://github.com/pressly/lg middleware pkgs.
 | ||||
| func Recoverer(next http.Handler) http.Handler { | ||||
| 	fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 		defer func() { | ||||
| 			if rvr := recover(); rvr != nil && rvr != http.ErrAbortHandler { | ||||
| 
 | ||||
| 				logEntry := GetLogEntry(r) | ||||
| 				if logEntry != nil { | ||||
| 					logEntry.Panic(rvr, debug.Stack()) | ||||
| 				} else { | ||||
| 					PrintPrettyStack(rvr) | ||||
| 				} | ||||
| 
 | ||||
| 				w.WriteHeader(http.StatusInternalServerError) | ||||
| 			} | ||||
| 		}() | ||||
| 
 | ||||
| 		next.ServeHTTP(w, r) | ||||
| 	} | ||||
| 
 | ||||
| 	return http.HandlerFunc(fn) | ||||
| } | ||||
| 
 | ||||
| func PrintPrettyStack(rvr interface{}) { | ||||
| 	debugStack := debug.Stack() | ||||
| 	s := prettyStack{} | ||||
| 	out, err := s.parse(debugStack, rvr) | ||||
| 	if err == nil { | ||||
| 		os.Stderr.Write(out) | ||||
| 	} else { | ||||
| 		// print stdlib output as a fallback
 | ||||
| 		os.Stderr.Write(debugStack) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
// prettyStack renders a recovered panic value and its debug.Stack() output as
// a colorized, de-noised report. It is stateless; the type exists only to
// group the parse/decorate helpers.
type prettyStack struct {
}

// parse turns the raw debug.Stack() bytes plus the panic value rvr into the
// formatted report. It walks the stack bottom-up to find the innermost
// "panic(0x..." line (there may be nested panics), strips two boilerplate
// lines, restores top-down order, and decorates each remaining line.
func (s prettyStack) parse(debugStack []byte, rvr interface{}) ([]byte, error) {
	var err error
	useColor := true
	buf := &bytes.Buffer{}

	// Header: " panic: <value>".
	cW(buf, false, bRed, "\n")
	cW(buf, useColor, bCyan, " panic: ")
	cW(buf, useColor, bBlue, "%v", rvr)
	cW(buf, false, bWhite, "\n \n")

	// process debug stack info
	stack := strings.Split(string(debugStack), "\n")
	lines := []string{}

	// locate panic line, as we may have nested panics; collect bottom-up
	// until the innermost panic call is reached
	for i := len(stack) - 1; i > 0; i-- {
		lines = append(lines, stack[i])
		if strings.HasPrefix(stack[i], "panic(0x") {
			lines = lines[0 : len(lines)-2] // remove boilerplate
			break
		}
	}

	// reverse, so the collected frames read top-down again
	for i := len(lines)/2 - 1; i >= 0; i-- {
		opp := len(lines) - 1 - i
		lines[i], lines[opp] = lines[opp], lines[i]
	}

	// decorate each line (source line, func-call line, or plain)
	for i, line := range lines {
		lines[i], err = s.decorateLine(line, useColor, i)
		if err != nil {
			return nil, err
		}
	}

	for _, l := range lines {
		fmt.Fprintf(buf, "%s", l)
	}
	return buf.Bytes(), nil
}
| 
 | ||||
| func (s prettyStack) decorateLine(line string, useColor bool, num int) (string, error) { | ||||
| 	line = strings.TrimSpace(line) | ||||
| 	if strings.HasPrefix(line, "\t") || strings.Contains(line, ".go:") { | ||||
| 		return s.decorateSourceLine(line, useColor, num) | ||||
| 	} else if strings.HasSuffix(line, ")") { | ||||
| 		return s.decorateFuncCallLine(line, useColor, num) | ||||
| 	} else { | ||||
| 		if strings.HasPrefix(line, "\t") { | ||||
| 			return strings.Replace(line, "\t", "      ", 1), nil | ||||
| 		} else { | ||||
| 			return fmt.Sprintf("    %s\n", line), nil | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
// decorateFuncCallLine formats a "path/to/pkg.Func(args)" stack line: the
// package part and the method part are colorized separately, and the top-most
// frame (num == 0) is highlighted with an " -> " marker.
//
// NOTE(review): the strings.Index lookups below assume the text before "("
// always contains a "." separator; a malformed line without one would yield
// idx == -1 and panic on the slice expression — confirm inputs are always
// runtime stack lines.
func (s prettyStack) decorateFuncCallLine(line string, useColor bool, num int) (string, error) {
	idx := strings.LastIndex(line, "(")
	if idx < 0 {
		return "", errors.New("not a func call line")
	}

	buf := &bytes.Buffer{}
	pkg := line[0:idx]
	// addr := line[idx:]
	method := ""

	// Split pkg into package and method: with no path separator, the first
	// "." is the boundary; otherwise the "." after the last separator is.
	idx = strings.LastIndex(pkg, string(os.PathSeparator))
	if idx < 0 {
		idx = strings.Index(pkg, ".")
		method = pkg[idx:]
		pkg = pkg[0:idx]
	} else {
		method = pkg[idx+1:]
		pkg = pkg[0 : idx+1]
		idx = strings.Index(method, ".")
		pkg += method[0:idx]
		method = method[idx:]
	}
	pkgColor := nYellow
	methodColor := bGreen

	// Highlight the top frame; indent the rest.
	if num == 0 {
		cW(buf, useColor, bRed, " -> ")
		pkgColor = bMagenta
		methodColor = bRed
	} else {
		cW(buf, useColor, bWhite, "    ")
	}
	cW(buf, useColor, pkgColor, "%s", pkg)
	cW(buf, useColor, methodColor, "%s\n", method)
	// cW(buf, useColor, nBlack, "%s", addr)
	return buf.String(), nil
}
| 
 | ||||
// decorateSourceLine formats a "/path/file.go:123 +0x..." stack line:
// directory, file name, and line-number suffix are colorized separately, and
// the frame directly below the panic marker (num == 1) is highlighted with
// an " -> " marker.
func (s prettyStack) decorateSourceLine(line string, useColor bool, num int) (string, error) {
	idx := strings.LastIndex(line, ".go:")
	if idx < 0 {
		return "", errors.New("not a source line")
	}

	buf := &bytes.Buffer{}
	path := line[0 : idx+3]  // up to and including ".go"
	lineno := line[idx+3:]   // ":<line> +0x..." remainder

	// Split the path into directory (with trailing separator) and file name.
	idx = strings.LastIndex(path, string(os.PathSeparator))
	dir := path[0 : idx+1]
	file := path[idx+1:]

	// Drop the trailing " +0x..." offset from the line number, if present.
	idx = strings.Index(lineno, " ")
	if idx > 0 {
		lineno = lineno[0:idx]
	}
	fileColor := bCyan
	lineColor := bGreen

	if num == 1 {
		cW(buf, useColor, bRed, " ->   ")
		fileColor = bRed
		lineColor = bMagenta
	} else {
		cW(buf, false, bWhite, "      ")
	}
	cW(buf, useColor, bWhite, "%s", dir)
	cW(buf, useColor, fileColor, "%s", file)
	cW(buf, useColor, lineColor, "%s", lineno)
	// Extra blank line after the highlighted frame.
	if num == 1 {
		cW(buf, false, bWhite, "\n")
	}
	cW(buf, false, bWhite, "\n")

	return buf.String(), nil
}
|  | @ -0,0 +1,96 @@ | |||
| package middleware | ||||
| 
 | ||||
| // Ported from Goji's middleware, source:
 | ||||
| // https://github.com/zenazn/goji/tree/master/web/middleware
 | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"crypto/rand" | ||||
| 	"encoding/base64" | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"os" | ||||
| 	"strings" | ||||
| 	"sync/atomic" | ||||
| ) | ||||
| 
 | ||||
| // Key to use when setting the request ID.
 | ||||
| type ctxKeyRequestID int | ||||
| 
 | ||||
| // RequestIDKey is the key that holds the unique request ID in a request context.
 | ||||
| const RequestIDKey ctxKeyRequestID = 0 | ||||
| 
 | ||||
| // RequestIDHeader is the name of the HTTP Header which contains the request id.
 | ||||
| // Exported so that it can be changed by developers
 | ||||
| var RequestIDHeader = "X-Request-Id" | ||||
| 
 | ||||
| var prefix string | ||||
| var reqid uint64 | ||||
| 
 | ||||
| // A quick note on the statistics here: we're trying to calculate the chance that
 | ||||
| // two randomly generated base62 prefixes will collide. We use the formula from
 | ||||
| // http://en.wikipedia.org/wiki/Birthday_problem
 | ||||
| //
 | ||||
| // P[m, n] \approx 1 - e^{-m^2/2n}
 | ||||
| //
 | ||||
| // We ballpark an upper bound for $m$ by imagining (for whatever reason) a server
 | ||||
| // that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$
 | ||||
| //
 | ||||
| // For a $k$ character base-62 identifier, we have $n(k) = 62^k$
 | ||||
| //
 | ||||
| // Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for
 | ||||
| // our purposes, and is surely more than anyone would ever need in practice -- a
 | ||||
| // process that is rebooted a handful of times a day for a hundred years has less
 | ||||
| // than a millionth of a percent chance of generating two colliding IDs.
 | ||||
| 
 | ||||
| func init() { | ||||
| 	hostname, err := os.Hostname() | ||||
| 	if hostname == "" || err != nil { | ||||
| 		hostname = "localhost" | ||||
| 	} | ||||
| 	var buf [12]byte | ||||
| 	var b64 string | ||||
| 	for len(b64) < 10 { | ||||
| 		rand.Read(buf[:]) | ||||
| 		b64 = base64.StdEncoding.EncodeToString(buf[:]) | ||||
| 		b64 = strings.NewReplacer("+", "", "/", "").Replace(b64) | ||||
| 	} | ||||
| 
 | ||||
| 	prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10]) | ||||
| } | ||||
| 
 | ||||
| // RequestID is a middleware that injects a request ID into the context of each
 | ||||
| // request. A request ID is a string of the form "host.example.com/random-0001",
 | ||||
| // where "random" is a base62 random string that uniquely identifies this go
 | ||||
| // process, and where the last number is an atomically incremented request
 | ||||
| // counter.
 | ||||
| func RequestID(next http.Handler) http.Handler { | ||||
| 	fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 		ctx := r.Context() | ||||
| 		requestID := r.Header.Get(RequestIDHeader) | ||||
| 		if requestID == "" { | ||||
| 			myid := atomic.AddUint64(&reqid, 1) | ||||
| 			requestID = fmt.Sprintf("%s-%06d", prefix, myid) | ||||
| 		} | ||||
| 		ctx = context.WithValue(ctx, RequestIDKey, requestID) | ||||
| 		next.ServeHTTP(w, r.WithContext(ctx)) | ||||
| 	} | ||||
| 	return http.HandlerFunc(fn) | ||||
| } | ||||
| 
 | ||||
| // GetReqID returns a request ID from the given context if one is present.
 | ||||
| // Returns the empty string if a request ID cannot be found.
 | ||||
| func GetReqID(ctx context.Context) string { | ||||
| 	if ctx == nil { | ||||
| 		return "" | ||||
| 	} | ||||
| 	if reqID, ok := ctx.Value(RequestIDKey).(string); ok { | ||||
| 		return reqID | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
| 
 | ||||
| // NextRequestID generates the next request ID in the sequence.
 | ||||
| func NextRequestID() uint64 { | ||||
| 	return atomic.AddUint64(&reqid, 1) | ||||
| } | ||||
|  | @ -0,0 +1,160 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
| // RouteHeaders is a neat little header-based router that allows you to direct
 | ||||
| // the flow of a request through a middleware stack based on a request header.
 | ||||
| //
 | ||||
| // For example, lets say you'd like to setup multiple routers depending on the
 | ||||
| // request Host header, you could then do something as so:
 | ||||
| //
 | ||||
| // r := chi.NewRouter()
 | ||||
| // rSubdomain := chi.NewRouter()
 | ||||
| //
 | ||||
| // r.Use(middleware.RouteHeaders().
 | ||||
| //   Route("Host", "example.com", middleware.New(r)).
 | ||||
| //   Route("Host", "*.example.com", middleware.New(rSubdomain)).
 | ||||
| //   Handler)
 | ||||
| //
 | ||||
| // r.Get("/", h)
 | ||||
| // rSubdomain.Get("/", h2)
 | ||||
| //
 | ||||
| //
 | ||||
| // Another example, imagine you want to setup multiple CORS handlers, where for
 | ||||
| // your origin servers you allow authorized requests, but for third-party public
 | ||||
| // requests, authorization is disabled.
 | ||||
| //
 | ||||
| // r := chi.NewRouter()
 | ||||
| //
 | ||||
| // r.Use(middleware.RouteHeaders().
 | ||||
| //   Route("Origin", "https://app.skyweaver.net", cors.Handler(cors.Options{
 | ||||
| // 	   AllowedOrigins:   []string{"https://api.skyweaver.net"},
 | ||||
| // 	   AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
 | ||||
| // 	   AllowedHeaders:   []string{"Accept", "Authorization", "Content-Type"},
 | ||||
| // 	   AllowCredentials: true, // <----------<<< allow credentials
 | ||||
| //   })).
 | ||||
| //   Route("Origin", "*", cors.Handler(cors.Options{
 | ||||
| // 	   AllowedOrigins:   []string{"*"},
 | ||||
| // 	   AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
 | ||||
| // 	   AllowedHeaders:   []string{"Accept", "Content-Type"},
 | ||||
| // 	   AllowCredentials: false, // <----------<<< do not allow credentials
 | ||||
| //   })).
 | ||||
| //   Handler)
 | ||||
| //
 | ||||
| func RouteHeaders() HeaderRouter { | ||||
| 	return HeaderRouter{} | ||||
| } | ||||
| 
 | ||||
| type HeaderRouter map[string][]HeaderRoute | ||||
| 
 | ||||
| func (hr HeaderRouter) Route(header string, match string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter { | ||||
| 	header = strings.ToLower(header) | ||||
| 	k := hr[header] | ||||
| 	if k == nil { | ||||
| 		hr[header] = []HeaderRoute{} | ||||
| 	} | ||||
| 	hr[header] = append(hr[header], HeaderRoute{MatchOne: NewPattern(match), Middleware: middlewareHandler}) | ||||
| 	return hr | ||||
| } | ||||
| 
 | ||||
| func (hr HeaderRouter) RouteAny(header string, match []string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter { | ||||
| 	header = strings.ToLower(header) | ||||
| 	k := hr[header] | ||||
| 	if k == nil { | ||||
| 		hr[header] = []HeaderRoute{} | ||||
| 	} | ||||
| 	patterns := []Pattern{} | ||||
| 	for _, m := range match { | ||||
| 		patterns = append(patterns, NewPattern(m)) | ||||
| 	} | ||||
| 	hr[header] = append(hr[header], HeaderRoute{MatchAny: patterns, Middleware: middlewareHandler}) | ||||
| 	return hr | ||||
| } | ||||
| 
 | ||||
| func (hr HeaderRouter) RouteDefault(handler func(next http.Handler) http.Handler) HeaderRouter { | ||||
| 	hr["*"] = []HeaderRoute{{Middleware: handler}} | ||||
| 	return hr | ||||
| } | ||||
| 
 | ||||
| func (hr HeaderRouter) Handler(next http.Handler) http.Handler { | ||||
| 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||||
| 		if len(hr) == 0 { | ||||
| 			// skip if no routes set
 | ||||
| 			next.ServeHTTP(w, r) | ||||
| 		} | ||||
| 
 | ||||
| 		// find first matching header route, and continue
 | ||||
| 		for header, matchers := range hr { | ||||
| 			headerValue := r.Header.Get(header) | ||||
| 			if headerValue == "" { | ||||
| 				continue | ||||
| 			} | ||||
| 			headerValue = strings.ToLower(headerValue) | ||||
| 			for _, matcher := range matchers { | ||||
| 				if matcher.IsMatch(headerValue) { | ||||
| 					matcher.Middleware(next).ServeHTTP(w, r) | ||||
| 					return | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		// if no match, check for "*" default route
 | ||||
| 		matcher, ok := hr["*"] | ||||
| 		if !ok || matcher[0].Middleware == nil { | ||||
| 			next.ServeHTTP(w, r) | ||||
| 			return | ||||
| 		} | ||||
| 		matcher[0].Middleware(next).ServeHTTP(w, r) | ||||
| 	}) | ||||
| } | ||||
| 
 | ||||
| type HeaderRoute struct { | ||||
| 	MatchAny   []Pattern | ||||
| 	MatchOne   Pattern | ||||
| 	Middleware func(next http.Handler) http.Handler | ||||
| } | ||||
| 
 | ||||
| func (r HeaderRoute) IsMatch(value string) bool { | ||||
| 	if len(r.MatchAny) > 0 { | ||||
| 		for _, m := range r.MatchAny { | ||||
| 			if m.Match(value) { | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 	} else if r.MatchOne.Match(value) { | ||||
| 		return true | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
| type Pattern struct { | ||||
| 	prefix   string | ||||
| 	suffix   string | ||||
| 	wildcard bool | ||||
| } | ||||
| 
 | ||||
| func NewPattern(value string) Pattern { | ||||
| 	p := Pattern{} | ||||
| 	if i := strings.IndexByte(value, '*'); i >= 0 { | ||||
| 		p.wildcard = true | ||||
| 		p.prefix = value[0:i] | ||||
| 		p.suffix = value[i+1:] | ||||
| 	} else { | ||||
| 		p.prefix = value | ||||
| 	} | ||||
| 	return p | ||||
| } | ||||
| 
 | ||||
| func (p Pattern) Match(v string) bool { | ||||
| 	if !p.wildcard { | ||||
| 		if p.prefix == v { | ||||
| 			return true | ||||
| 		} else { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return len(v) >= len(p.prefix+p.suffix) && strings.HasPrefix(v, p.prefix) && strings.HasSuffix(v, p.suffix) | ||||
| } | ||||
|  | @ -0,0 +1,56 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 
 | ||||
| 	"github.com/go-chi/chi" | ||||
| ) | ||||
| 
 | ||||
| // StripSlashes is a middleware that will match request paths with a trailing
 | ||||
| // slash, strip it from the path and continue routing through the mux, if a route
 | ||||
| // matches, then it will serve the handler.
 | ||||
| func StripSlashes(next http.Handler) http.Handler { | ||||
| 	fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 		var path string | ||||
| 		rctx := chi.RouteContext(r.Context()) | ||||
| 		if rctx.RoutePath != "" { | ||||
| 			path = rctx.RoutePath | ||||
| 		} else { | ||||
| 			path = r.URL.Path | ||||
| 		} | ||||
| 		if len(path) > 1 && path[len(path)-1] == '/' { | ||||
| 			rctx.RoutePath = path[:len(path)-1] | ||||
| 		} | ||||
| 		next.ServeHTTP(w, r) | ||||
| 	} | ||||
| 	return http.HandlerFunc(fn) | ||||
| } | ||||
| 
 | ||||
| // RedirectSlashes is a middleware that will match request paths with a trailing
 | ||||
| // slash and redirect to the same path, less the trailing slash.
 | ||||
| //
 | ||||
| // NOTE: RedirectSlashes middleware is *incompatible* with http.FileServer,
 | ||||
| // see https://github.com/go-chi/chi/issues/343
 | ||||
| func RedirectSlashes(next http.Handler) http.Handler { | ||||
| 	fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 		var path string | ||||
| 		rctx := chi.RouteContext(r.Context()) | ||||
| 		if rctx.RoutePath != "" { | ||||
| 			path = rctx.RoutePath | ||||
| 		} else { | ||||
| 			path = r.URL.Path | ||||
| 		} | ||||
| 		if len(path) > 1 && path[len(path)-1] == '/' { | ||||
| 			if r.URL.RawQuery != "" { | ||||
| 				path = fmt.Sprintf("%s?%s", path[:len(path)-1], r.URL.RawQuery) | ||||
| 			} else { | ||||
| 				path = path[:len(path)-1] | ||||
| 			} | ||||
| 			http.Redirect(w, r, path, 301) | ||||
| 			return | ||||
| 		} | ||||
| 		next.ServeHTTP(w, r) | ||||
| 	} | ||||
| 	return http.HandlerFunc(fn) | ||||
| } | ||||
|  | @ -0,0 +1,63 @@ | |||
| package middleware | ||||
| 
 | ||||
| // Ported from Goji's middleware, source:
 | ||||
| // https://github.com/zenazn/goji/tree/master/web/middleware
 | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"os" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	// Normal colors
 | ||||
| 	nBlack   = []byte{'\033', '[', '3', '0', 'm'} | ||||
| 	nRed     = []byte{'\033', '[', '3', '1', 'm'} | ||||
| 	nGreen   = []byte{'\033', '[', '3', '2', 'm'} | ||||
| 	nYellow  = []byte{'\033', '[', '3', '3', 'm'} | ||||
| 	nBlue    = []byte{'\033', '[', '3', '4', 'm'} | ||||
| 	nMagenta = []byte{'\033', '[', '3', '5', 'm'} | ||||
| 	nCyan    = []byte{'\033', '[', '3', '6', 'm'} | ||||
| 	nWhite   = []byte{'\033', '[', '3', '7', 'm'} | ||||
| 	// Bright colors
 | ||||
| 	bBlack   = []byte{'\033', '[', '3', '0', ';', '1', 'm'} | ||||
| 	bRed     = []byte{'\033', '[', '3', '1', ';', '1', 'm'} | ||||
| 	bGreen   = []byte{'\033', '[', '3', '2', ';', '1', 'm'} | ||||
| 	bYellow  = []byte{'\033', '[', '3', '3', ';', '1', 'm'} | ||||
| 	bBlue    = []byte{'\033', '[', '3', '4', ';', '1', 'm'} | ||||
| 	bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'} | ||||
| 	bCyan    = []byte{'\033', '[', '3', '6', ';', '1', 'm'} | ||||
| 	bWhite   = []byte{'\033', '[', '3', '7', ';', '1', 'm'} | ||||
| 
 | ||||
| 	reset = []byte{'\033', '[', '0', 'm'} | ||||
| ) | ||||
| 
 | ||||
| var IsTTY bool | ||||
| 
 | ||||
| func init() { | ||||
| 	// This is sort of cheating: if stdout is a character device, we assume
 | ||||
| 	// that means it's a TTY. Unfortunately, there are many non-TTY
 | ||||
| 	// character devices, but fortunately stdout is rarely set to any of
 | ||||
| 	// them.
 | ||||
| 	//
 | ||||
| 	// We could solve this properly by pulling in a dependency on
 | ||||
| 	// code.google.com/p/go.crypto/ssh/terminal, for instance, but as a
 | ||||
| 	// heuristic for whether to print in color or in black-and-white, I'd
 | ||||
| 	// really rather not.
 | ||||
| 	fi, err := os.Stdout.Stat() | ||||
| 	if err == nil { | ||||
| 		m := os.ModeDevice | os.ModeCharDevice | ||||
| 		IsTTY = fi.Mode()&m == m | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // colorWrite
 | ||||
| func cW(w io.Writer, useColor bool, color []byte, s string, args ...interface{}) { | ||||
| 	if IsTTY && useColor { | ||||
| 		w.Write(color) | ||||
| 	} | ||||
| 	fmt.Fprintf(w, s, args...) | ||||
| 	if IsTTY && useColor { | ||||
| 		w.Write(reset) | ||||
| 	} | ||||
| } | ||||
|  | @ -0,0 +1,132 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"net/http" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
| ) | ||||
| 
 | ||||
const (
	// Bodies of the 503 responses emitted by the throttling middleware.
	errCapacityExceeded = "Server capacity exceeded."
	errTimedOut         = "Timed out while waiting for a pending request to complete."
	errContextCanceled  = "Context was canceled."
)

var (
	// defaultBacklogTimeout bounds how long Throttle lets a request wait in
	// the backlog before rejecting it.
	defaultBacklogTimeout = time.Second * 60
)

// ThrottleOpts represents a set of throttling options.
type ThrottleOpts struct {
	Limit          int                              // max concurrently processed requests; must be > 0
	BacklogLimit   int                              // max additional requests allowed to wait; must be >= 0
	BacklogTimeout time.Duration                    // how long a backlogged request may wait before a 503
	RetryAfterFn   func(ctxDone bool) time.Duration // optional: Retry-After header value for rejections
}
| 
 | ||||
// Throttle is a middleware that limits number of currently processed requests
// at a time across all users. Note: Throttle is not a rate-limiter per user,
// instead it just puts a ceiling on the number of currently in-flight requests
// being processed from the point from where the Throttle middleware is mounted.
// Excess requests wait in an (empty-by-default) backlog for up to the default
// 60s timeout before being rejected with a 503.
func Throttle(limit int) func(http.Handler) http.Handler {
	return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogTimeout: defaultBacklogTimeout})
}

// ThrottleBacklog is a middleware that limits number of currently processed
// requests at a time and provides a backlog for holding a finite number of
// pending requests. Requests beyond limit wait in the backlog for up to
// backlogTimeout; requests beyond limit+backlogLimit are rejected immediately
// with a 503.
func ThrottleBacklog(limit int, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler {
	return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogLimit: backlogLimit, BacklogTimeout: backlogTimeout})
}
| 
 | ||||
// ThrottleWithOpts is a middleware that limits number of currently processed requests using passed ThrottleOpts.
func ThrottleWithOpts(opts ThrottleOpts) func(http.Handler) http.Handler {
	if opts.Limit < 1 {
		panic("chi/middleware: Throttle expects limit > 0")
	}

	if opts.BacklogLimit < 0 {
		panic("chi/middleware: Throttle expects backlogLimit to be positive")
	}

	// Two token buckets: "tokens" gates concurrent execution (capacity
	// Limit), while "backlogTokens" additionally admits waiting requests
	// (capacity Limit+BacklogLimit). A request needs a backlog token to wait
	// and an execution token to run.
	t := throttler{
		tokens:         make(chan token, opts.Limit),
		backlogTokens:  make(chan token, opts.Limit+opts.BacklogLimit),
		backlogTimeout: opts.BacklogTimeout,
		retryAfterFn:   opts.RetryAfterFn,
	}

	// Filling tokens.
	for i := 0; i < opts.Limit+opts.BacklogLimit; i++ {
		if i < opts.Limit {
			t.tokens <- token{}
		}
		t.backlogTokens <- token{}
	}

	return func(next http.Handler) http.Handler {
		fn := func(w http.ResponseWriter, r *http.Request) {
			ctx := r.Context()

			select {

			// Request already canceled before admission: reject.
			case <-ctx.Done():
				t.setRetryAfterHeaderIfNeeded(w, true)
				http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
				return

			// Admitted to the backlog; wait (bounded) for an execution token.
			case btok := <-t.backlogTokens:
				timer := time.NewTimer(t.backlogTimeout)

				// Always return the backlog token when this request is done.
				defer func() {
					t.backlogTokens <- btok
				}()

				select {
				// Waited too long in the backlog.
				case <-timer.C:
					t.setRetryAfterHeaderIfNeeded(w, false)
					http.Error(w, errTimedOut, http.StatusServiceUnavailable)
					return
				// Client gave up while waiting.
				case <-ctx.Done():
					timer.Stop()
					t.setRetryAfterHeaderIfNeeded(w, true)
					http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
					return
				// Got an execution token: run, then return the token.
				case tok := <-t.tokens:
					defer func() {
						timer.Stop()
						t.tokens <- tok
					}()
					next.ServeHTTP(w, r)
				}
				return

			// Backlog full: reject immediately.
			default:
				t.setRetryAfterHeaderIfNeeded(w, false)
				http.Error(w, errCapacityExceeded, http.StatusServiceUnavailable)
				return
			}
		}

		return http.HandlerFunc(fn)
	}
}
| 
 | ||||
| // token represents a request that is being processed.
 | ||||
| type token struct{} | ||||
| 
 | ||||
| // throttler limits number of currently processed requests at a time.
 | ||||
| type throttler struct { | ||||
| 	tokens         chan token | ||||
| 	backlogTokens  chan token | ||||
| 	backlogTimeout time.Duration | ||||
| 	retryAfterFn   func(ctxDone bool) time.Duration | ||||
| } | ||||
| 
 | ||||
| // setRetryAfterHeaderIfNeeded sets Retry-After HTTP header if corresponding retryAfterFn option of throttler is initialized.
 | ||||
| func (t throttler) setRetryAfterHeaderIfNeeded(w http.ResponseWriter, ctxDone bool) { | ||||
| 	if t.retryAfterFn == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	w.Header().Set("Retry-After", strconv.Itoa(int(t.retryAfterFn(ctxDone).Seconds()))) | ||||
| } | ||||
|  | @ -0,0 +1,49 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"net/http" | ||||
| 	"time" | ||||
| ) | ||||
| 
 | ||||
| // Timeout is a middleware that cancels ctx after a given timeout and return
 | ||||
| // a 504 Gateway Timeout error to the client.
 | ||||
| //
 | ||||
| // It's required that you select the ctx.Done() channel to check for the signal
 | ||||
| // if the context has reached its deadline and return, otherwise the timeout
 | ||||
| // signal will be just ignored.
 | ||||
| //
 | ||||
| // ie. a route/handler may look like:
 | ||||
| //
 | ||||
| //  r.Get("/long", func(w http.ResponseWriter, r *http.Request) {
 | ||||
| // 	 ctx := r.Context()
 | ||||
| // 	 processTime := time.Duration(rand.Intn(4)+1) * time.Second
 | ||||
| //
 | ||||
| // 	 select {
 | ||||
| // 	 case <-ctx.Done():
 | ||||
| // 	 	return
 | ||||
| //
 | ||||
| // 	 case <-time.After(processTime):
 | ||||
| // 	 	 // The above channel simulates some hard work.
 | ||||
| // 	 }
 | ||||
| //
 | ||||
| // 	 w.Write([]byte("done"))
 | ||||
| //  })
 | ||||
| //
 | ||||
| func Timeout(timeout time.Duration) func(next http.Handler) http.Handler { | ||||
| 	return func(next http.Handler) http.Handler { | ||||
| 		fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 			ctx, cancel := context.WithTimeout(r.Context(), timeout) | ||||
| 			defer func() { | ||||
| 				cancel() | ||||
| 				if ctx.Err() == context.DeadlineExceeded { | ||||
| 					w.WriteHeader(http.StatusGatewayTimeout) | ||||
| 				} | ||||
| 			}() | ||||
| 
 | ||||
| 			r = r.WithContext(ctx) | ||||
| 			next.ServeHTTP(w, r) | ||||
| 		} | ||||
| 		return http.HandlerFunc(fn) | ||||
| 	} | ||||
| } | ||||
|  | @ -0,0 +1,72 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/go-chi/chi" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	// URLFormatCtxKey is the context.Context key to store the URL format data
 | ||||
| 	// for a request.
 | ||||
| 	URLFormatCtxKey = &contextKey{"URLFormat"} | ||||
| ) | ||||
| 
 | ||||
| // URLFormat is a middleware that parses the url extension from a request path and stores it
 | ||||
| // on the context as a string under the key `middleware.URLFormatCtxKey`. The middleware will
 | ||||
| // trim the suffix from the routing path and continue routing.
 | ||||
| //
 | ||||
| // Routers should not include a url parameter for the suffix when using this middleware.
 | ||||
| //
 | ||||
| // Sample usage.. for url paths: `/articles/1`, `/articles/1.json` and `/articles/1.xml`
 | ||||
| //
 | ||||
| //  func routes() http.Handler {
 | ||||
| //    r := chi.NewRouter()
 | ||||
| //    r.Use(middleware.URLFormat)
 | ||||
| //
 | ||||
| //    r.Get("/articles/{id}", ListArticles)
 | ||||
| //
 | ||||
| //    return r
 | ||||
| //  }
 | ||||
| //
 | ||||
| //  func ListArticles(w http.ResponseWriter, r *http.Request) {
 | ||||
| // 	  urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string)
 | ||||
| //
 | ||||
| // 	  switch urlFormat {
 | ||||
| // 	  case "json":
 | ||||
| // 	  	render.JSON(w, r, articles)
 | ||||
| // 	  case "xml:"
 | ||||
| // 	  	render.XML(w, r, articles)
 | ||||
| // 	  default:
 | ||||
| // 	  	render.JSON(w, r, articles)
 | ||||
| // 	  }
 | ||||
| // }
 | ||||
| //
 | ||||
| func URLFormat(next http.Handler) http.Handler { | ||||
| 	fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 		ctx := r.Context() | ||||
| 
 | ||||
| 		var format string | ||||
| 		path := r.URL.Path | ||||
| 
 | ||||
| 		if strings.Index(path, ".") > 0 { | ||||
| 			base := strings.LastIndex(path, "/") | ||||
| 			idx := strings.Index(path[base:], ".") | ||||
| 
 | ||||
| 			if idx > 0 { | ||||
| 				idx += base | ||||
| 				format = path[idx+1:] | ||||
| 
 | ||||
| 				rctx := chi.RouteContext(r.Context()) | ||||
| 				rctx.RoutePath = path[:idx] | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		r = r.WithContext(context.WithValue(ctx, URLFormatCtxKey, format)) | ||||
| 
 | ||||
| 		next.ServeHTTP(w, r) | ||||
| 	} | ||||
| 	return http.HandlerFunc(fn) | ||||
| } | ||||
|  | @ -0,0 +1,17 @@ | |||
| package middleware | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"net/http" | ||||
| ) | ||||
| 
 | ||||
| // WithValue is a middleware that sets a given key/value in a context chain.
 | ||||
| func WithValue(key interface{}, val interface{}) func(next http.Handler) http.Handler { | ||||
| 	return func(next http.Handler) http.Handler { | ||||
| 		fn := func(w http.ResponseWriter, r *http.Request) { | ||||
| 			r = r.WithContext(context.WithValue(r.Context(), key, val)) | ||||
| 			next.ServeHTTP(w, r) | ||||
| 		} | ||||
| 		return http.HandlerFunc(fn) | ||||
| 	} | ||||
| } | ||||
|  | @ -0,0 +1,180 @@ | |||
| package middleware | ||||
| 
 | ||||
| // The original work was derived from Goji's middleware, source:
 | ||||
| // https://github.com/zenazn/goji/tree/master/web/middleware
 | ||||
| 
 | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"io" | ||||
| 	"net" | ||||
| 	"net/http" | ||||
| ) | ||||
| 
 | ||||
| // NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to
 | ||||
| // hook into various parts of the response process.
 | ||||
| func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter { | ||||
| 	_, fl := w.(http.Flusher) | ||||
| 
 | ||||
| 	bw := basicWriter{ResponseWriter: w} | ||||
| 
 | ||||
| 	if protoMajor == 2 { | ||||
| 		_, ps := w.(http.Pusher) | ||||
| 		if fl && ps { | ||||
| 			return &http2FancyWriter{bw} | ||||
| 		} | ||||
| 	} else { | ||||
| 		_, hj := w.(http.Hijacker) | ||||
| 		_, rf := w.(io.ReaderFrom) | ||||
| 		if fl && hj && rf { | ||||
| 			return &httpFancyWriter{bw} | ||||
| 		} | ||||
| 	} | ||||
| 	if fl { | ||||
| 		return &flushWriter{bw} | ||||
| 	} | ||||
| 
 | ||||
| 	return &bw | ||||
| } | ||||
| 
 | ||||
// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook
// into various parts of the response process.
type WrapResponseWriter interface {
	http.ResponseWriter
	// Status returns the HTTP status of the request, or 0 if one has not
	// yet been sent.
	Status() int
	// BytesWritten returns the total number of bytes sent to the client.
	BytesWritten() int
	// Tee causes the response body to be written to the given io.Writer in
	// addition to proxying the writes through. Only one io.Writer can be
	// tee'd to at once: setting a second one will overwrite the first.
	// Writes will be sent to the proxy before being written to this
	// io.Writer. It is illegal for the tee'd writer to be modified
	// concurrently with writes.
	Tee(io.Writer)
	// Unwrap returns the original proxied target, i.e. the writer passed to
	// NewWrapResponseWriter.
	Unwrap() http.ResponseWriter
}
| 
 | ||||
// basicWriter wraps a http.ResponseWriter that implements the minimal
// http.ResponseWriter interface. It records the status code and byte count,
// and optionally tees the body to a secondary writer.
type basicWriter struct {
	http.ResponseWriter
	wroteHeader bool      // true once WriteHeader has been forwarded
	code        int       // recorded status code, 0 until a header is written
	bytes       int       // total bytes written to the client so far
	tee         io.Writer // optional secondary body destination, may be nil
}
| 
 | ||||
| func (b *basicWriter) WriteHeader(code int) { | ||||
| 	if !b.wroteHeader { | ||||
| 		b.code = code | ||||
| 		b.wroteHeader = true | ||||
| 		b.ResponseWriter.WriteHeader(code) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (b *basicWriter) Write(buf []byte) (int, error) { | ||||
| 	b.maybeWriteHeader() | ||||
| 	n, err := b.ResponseWriter.Write(buf) | ||||
| 	if b.tee != nil { | ||||
| 		_, err2 := b.tee.Write(buf[:n]) | ||||
| 		// Prefer errors generated by the proxied writer.
 | ||||
| 		if err == nil { | ||||
| 			err = err2 | ||||
| 		} | ||||
| 	} | ||||
| 	b.bytes += n | ||||
| 	return n, err | ||||
| } | ||||
| 
 | ||||
| func (b *basicWriter) maybeWriteHeader() { | ||||
| 	if !b.wroteHeader { | ||||
| 		b.WriteHeader(http.StatusOK) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (b *basicWriter) Status() int { | ||||
| 	return b.code | ||||
| } | ||||
| 
 | ||||
| func (b *basicWriter) BytesWritten() int { | ||||
| 	return b.bytes | ||||
| } | ||||
| 
 | ||||
| func (b *basicWriter) Tee(w io.Writer) { | ||||
| 	b.tee = w | ||||
| } | ||||
| 
 | ||||
| func (b *basicWriter) Unwrap() http.ResponseWriter { | ||||
| 	return b.ResponseWriter | ||||
| } | ||||
| 
 | ||||
// flushWriter is a basicWriter whose underlying writer also supports
// http.Flusher; it is returned by NewWrapResponseWriter when flushing is the
// only optional capability available.
type flushWriter struct {
	basicWriter
}

// Flush forwards the flush to the underlying http.Flusher. The header is
// marked as written because flushing commits the response headers.
func (f *flushWriter) Flush() {
	f.wroteHeader = true
	fl := f.basicWriter.ResponseWriter.(http.Flusher)
	fl.Flush()
}

var _ http.Flusher = &flushWriter{}
| 
 | ||||
// httpFancyWriter is a HTTP writer that additionally satisfies
// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case
// of wrapping the http.ResponseWriter that package http gives you, in order to
// make the proxied object support the full method set of the proxied object.
type httpFancyWriter struct {
	basicWriter
}
| 
 | ||||
| func (f *httpFancyWriter) Flush() { | ||||
| 	f.wroteHeader = true | ||||
| 	fl := f.basicWriter.ResponseWriter.(http.Flusher) | ||||
| 	fl.Flush() | ||||
| } | ||||
| 
 | ||||
| func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { | ||||
| 	hj := f.basicWriter.ResponseWriter.(http.Hijacker) | ||||
| 	return hj.Hijack() | ||||
| } | ||||
| 
 | ||||
// Push delegates HTTP/2 server push to the underlying http.Pusher.
// NOTE(review): this http2FancyWriter method sits among the httpFancyWriter
// methods; the http2FancyWriter type itself is declared further down the file.
func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error {
	return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts)
}
| 
 | ||||
| func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) { | ||||
| 	if f.basicWriter.tee != nil { | ||||
| 		n, err := io.Copy(&f.basicWriter, r) | ||||
| 		f.basicWriter.bytes += int(n) | ||||
| 		return n, err | ||||
| 	} | ||||
| 	rf := f.basicWriter.ResponseWriter.(io.ReaderFrom) | ||||
| 	f.basicWriter.maybeWriteHeader() | ||||
| 	n, err := rf.ReadFrom(r) | ||||
| 	f.basicWriter.bytes += int(n) | ||||
| 	return n, err | ||||
| } | ||||
| 
 | ||||
// Compile-time checks that the fancy writers satisfy the optional
// net/http and io interfaces they claim to proxy.
var _ http.Flusher = &httpFancyWriter{}
var _ http.Hijacker = &httpFancyWriter{}
var _ http.Pusher = &http2FancyWriter{}
var _ io.ReaderFrom = &httpFancyWriter{}
| 
 | ||||
// http2FancyWriter is a HTTP2 writer that additionally satisfies
// http.Flusher, and io.ReaderFrom. It exists for the common case
// of wrapping the http.ResponseWriter that package http gives you, in order to
// make the proxied object support the full method set of the proxied object.
// (Its Push method is defined above, next to the httpFancyWriter methods.)
type http2FancyWriter struct {
	basicWriter
}
| 
 | ||||
| func (f *http2FancyWriter) Flush() { | ||||
| 	f.wroteHeader = true | ||||
| 	fl := f.basicWriter.ResponseWriter.(http.Flusher) | ||||
| 	fl.Flush() | ||||
| } | ||||
| 
 | ||||
| var _ http.Flusher = &http2FancyWriter{} | ||||
|  | @ -0,0 +1,466 @@ | |||
| package chi | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| ) | ||||
| 
 | ||||
| var _ Router = &Mux{} | ||||
| 
 | ||||
// Mux is a simple HTTP route multiplexer that parses a request path,
// records any URL params, and executes an end handler. It implements
// the http.Handler interface and is friendly with the standard library.
//
// Mux is designed to be fast, minimal and offer a powerful API for building
// modular and composable HTTP services with a large set of handlers. It's
// particularly useful for writing large REST API services that break a handler
// into many smaller parts composed of middlewares and end handlers.
type Mux struct {
	// The radix trie router
	tree *node

	// The middleware stack, applied in registration order before routing.
	middlewares []func(http.Handler) http.Handler

	// Controls the behaviour of middleware chain generation when a mux
	// is registered as an inline group inside another mux.
	inline bool
	parent *Mux

	// The computed mux handler made of the chained middleware stack and
	// the tree router. Built lazily on the first route registration; once
	// set, no further middlewares may be added (see Use).
	handler http.Handler

	// Routing context pool, used by ServeHTTP to avoid allocating a
	// *Context per request.
	pool *sync.Pool

	// Custom route not found handler; nil means http.NotFound.
	notFoundHandler http.HandlerFunc

	// Custom method not allowed handler; nil means the package default 405.
	methodNotAllowedHandler http.HandlerFunc
}
| 
 | ||||
| // NewMux returns a newly initialized Mux object that implements the Router
 | ||||
| // interface.
 | ||||
| func NewMux() *Mux { | ||||
| 	mux := &Mux{tree: &node{}, pool: &sync.Pool{}} | ||||
| 	mux.pool.New = func() interface{} { | ||||
| 		return NewRouteContext() | ||||
| 	} | ||||
| 	return mux | ||||
| } | ||||
| 
 | ||||
// ServeHTTP is the single method of the http.Handler interface that makes
// Mux interoperable with the standard library. It uses a sync.Pool to get and
// reuse routing contexts for each request.
func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Ensure the mux has some routes defined on the mux; the handler is only
	// built once the first route is registered.
	if mx.handler == nil {
		mx.NotFoundHandler().ServeHTTP(w, r)
		return
	}

	// Check if a routing context already exists from a parent router; if so,
	// reuse it rather than taking another context from the pool.
	rctx, _ := r.Context().Value(RouteCtxKey).(*Context)
	if rctx != nil {
		mx.handler.ServeHTTP(w, r)
		return
	}

	// Fetch a RouteContext object from the sync pool, and call the computed
	// mx.handler that is comprised of mx.middlewares + mx.routeHTTP.
	// Once the request is finished, reset the routing context and put it back
	// into the pool for reuse from another request.
	rctx = mx.pool.Get().(*Context)
	rctx.Reset()
	rctx.Routes = mx

	// NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation
	r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx))

	// Serve the request and once its done, put the request context back in the
	// sync pool. Put is deliberately not deferred: if the handler panics, the
	// context is simply dropped rather than returned to the pool.
	mx.handler.ServeHTTP(w, r)
	mx.pool.Put(rctx)
}
| 
 | ||||
| // Use appends a middleware handler to the Mux middleware stack.
 | ||||
| //
 | ||||
| // The middleware stack for any Mux will execute before searching for a matching
 | ||||
| // route to a specific handler, which provides opportunity to respond early,
 | ||||
| // change the course of the request execution, or set request-scoped values for
 | ||||
| // the next http.Handler.
 | ||||
| func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) { | ||||
| 	if mx.handler != nil { | ||||
| 		panic("chi: all middlewares must be defined before routes on a mux") | ||||
| 	} | ||||
| 	mx.middlewares = append(mx.middlewares, middlewares...) | ||||
| } | ||||
| 
 | ||||
| // Handle adds the route `pattern` that matches any http method to
 | ||||
| // execute the `handler` http.Handler.
 | ||||
| func (mx *Mux) Handle(pattern string, handler http.Handler) { | ||||
| 	mx.handle(mALL, pattern, handler) | ||||
| } | ||||
| 
 | ||||
| // HandleFunc adds the route `pattern` that matches any http method to
 | ||||
| // execute the `handlerFn` http.HandlerFunc.
 | ||||
| func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) { | ||||
| 	mx.handle(mALL, pattern, handlerFn) | ||||
| } | ||||
| 
 | ||||
| // Method adds the route `pattern` that matches `method` http method to
 | ||||
| // execute the `handler` http.Handler.
 | ||||
| func (mx *Mux) Method(method, pattern string, handler http.Handler) { | ||||
| 	m, ok := methodMap[strings.ToUpper(method)] | ||||
| 	if !ok { | ||||
| 		panic(fmt.Sprintf("chi: '%s' http method is not supported.", method)) | ||||
| 	} | ||||
| 	mx.handle(m, pattern, handler) | ||||
| } | ||||
| 
 | ||||
| // MethodFunc adds the route `pattern` that matches `method` http method to
 | ||||
| // execute the `handlerFn` http.HandlerFunc.
 | ||||
| func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) { | ||||
| 	mx.Method(method, pattern, handlerFn) | ||||
| } | ||||
| 
 | ||||
// Connect adds the route `pattern` that matches a CONNECT http method to
// execute the `handlerFn` http.HandlerFunc.
// Like all registrars below, it panics if `pattern` does not begin with '/'.
func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mCONNECT, pattern, handlerFn)
}

// Delete adds the route `pattern` that matches a DELETE http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mDELETE, pattern, handlerFn)
}

// Get adds the route `pattern` that matches a GET http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mGET, pattern, handlerFn)
}

// Head adds the route `pattern` that matches a HEAD http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mHEAD, pattern, handlerFn)
}

// Options adds the route `pattern` that matches an OPTIONS http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mOPTIONS, pattern, handlerFn)
}

// Patch adds the route `pattern` that matches a PATCH http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mPATCH, pattern, handlerFn)
}

// Post adds the route `pattern` that matches a POST http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mPOST, pattern, handlerFn)
}

// Put adds the route `pattern` that matches a PUT http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mPUT, pattern, handlerFn)
}

// Trace adds the route `pattern` that matches a TRACE http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) {
	mx.handle(mTRACE, pattern, handlerFn)
}
| 
 | ||||
// NotFound sets a custom http.HandlerFunc for routing paths that could
// not be found. The default 404 handler is `http.NotFound`.
func (mx *Mux) NotFound(handlerFn http.HandlerFunc) {
	// Build NotFound handler chain. For an inline mux the handler is wrapped
	// in this mux's middlewares and stored on the parent instead.
	m := mx
	hFn := handlerFn
	if mx.inline && mx.parent != nil {
		m = mx.parent
		hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
	}

	// Update the notFoundHandler from this point forward, propagating to any
	// mounted sub-routers that have not set their own handler.
	m.notFoundHandler = hFn
	m.updateSubRoutes(func(subMux *Mux) {
		if subMux.notFoundHandler == nil {
			subMux.NotFound(hFn)
		}
	})
}

// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the
// method is unresolved. The default handler returns a 405 with an empty body.
func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) {
	// Build MethodNotAllowed handler chain (same inline-group logic as
	// NotFound above).
	m := mx
	hFn := handlerFn
	if mx.inline && mx.parent != nil {
		m = mx.parent
		hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
	}

	// Update the methodNotAllowedHandler from this point forward, propagating
	// to any mounted sub-routers that have not set their own handler.
	m.methodNotAllowedHandler = hFn
	m.updateSubRoutes(func(subMux *Mux) {
		if subMux.methodNotAllowedHandler == nil {
			subMux.MethodNotAllowed(hFn)
		}
	})
}
| 
 | ||||
// With adds inline middlewares for an endpoint handler. It returns a new
// inline Mux that shares this mux's tree, pool and fallback handlers.
func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router {
	// Similarly as in handle(), we must build the mux handler once additional
	// middleware registration isn't allowed for this stack, like now.
	if !mx.inline && mx.handler == nil {
		mx.buildRouteHandler()
	}

	// Copy middlewares from parent inline muxes so the new stack is
	// independent of later mutations.
	var mws Middlewares
	if mx.inline {
		mws = make(Middlewares, len(mx.middlewares))
		copy(mws, mx.middlewares)
	}
	mws = append(mws, middlewares...)

	im := &Mux{
		pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws,
		notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler,
	}

	return im
}
| 
 | ||||
| // Group creates a new inline-Mux with a fresh middleware stack. It's useful
 | ||||
| // for a group of handlers along the same routing path that use an additional
 | ||||
| // set of middlewares. See _examples/.
 | ||||
| func (mx *Mux) Group(fn func(r Router)) Router { | ||||
| 	im := mx.With().(*Mux) | ||||
| 	if fn != nil { | ||||
| 		fn(im) | ||||
| 	} | ||||
| 	return im | ||||
| } | ||||
| 
 | ||||
| // Route creates a new Mux with a fresh middleware stack and mounts it
 | ||||
| // along the `pattern` as a subrouter. Effectively, this is a short-hand
 | ||||
| // call to Mount. See _examples/.
 | ||||
| func (mx *Mux) Route(pattern string, fn func(r Router)) Router { | ||||
| 	subRouter := NewRouter() | ||||
| 	if fn != nil { | ||||
| 		fn(subRouter) | ||||
| 	} | ||||
| 	mx.Mount(pattern, subRouter) | ||||
| 	return subRouter | ||||
| } | ||||
| 
 | ||||
// Mount attaches another http.Handler or chi Router as a subrouter along a routing
// path. It's very useful to split up a large API as many independent routers and
// compose them as a single service using Mount. See _examples/.
//
// Note that Mount() simply sets a wildcard along the `pattern` that will continue
// routing at the `handler`, which in most cases is another chi.Router. As a result,
// if you define two Mount() routes on the exact same pattern the mount will panic.
func (mx *Mux) Mount(pattern string, handler http.Handler) {
	// Provide runtime safety for ensuring a pattern isn't mounted on an existing
	// routing pattern.
	if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") {
		panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern))
	}

	// Assign sub-Router's with the parent not found & method not allowed handler if not specified.
	subr, ok := handler.(*Mux)
	if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil {
		subr.NotFound(mx.notFoundHandler)
	}
	if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil {
		subr.MethodNotAllowed(mx.methodNotAllowedHandler)
	}

	// Wrap the handler so the sub-router sees only the remaining (wildcard)
	// portion of the request path.
	mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rctx := RouteContext(r.Context())
		rctx.RoutePath = mx.nextRoutePath(rctx)
		handler.ServeHTTP(w, r)
	})

	// Register both "pattern" and "pattern/" so either form routes into the
	// sub-handler, then register the trailing wildcard below.
	if pattern == "" || pattern[len(pattern)-1] != '/' {
		mx.handle(mALL|mSTUB, pattern, mountHandler)
		mx.handle(mALL|mSTUB, pattern+"/", mountHandler)
		pattern += "/"
	}

	method := mALL
	subroutes, _ := handler.(Routes)
	if subroutes != nil {
		method |= mSTUB
	}
	n := mx.handle(method, pattern+"*", mountHandler)

	// Record the subroutes on the tree node for traversal/Match.
	if subroutes != nil {
		n.subroutes = subroutes
	}
}
| 
 | ||||
// Routes returns a slice of routing information from the tree,
// useful for traversing available routes of a router.
func (mx *Mux) Routes() []Route {
	return mx.tree.routes()
}

// Middlewares returns a slice of middleware handler functions.
func (mx *Mux) Middlewares() Middlewares {
	return mx.middlewares
}

// Match searches the routing tree for a handler that matches the method/path.
// It's similar to routing a http request, but without executing the handler
// thereafter.
//
// Note: the *Context state is updated during execution, so manage
// the state carefully or make a NewRouteContext().
func (mx *Mux) Match(rctx *Context, method, path string) bool {
	m, ok := methodMap[method]
	if !ok {
		return false
	}

	node, _, h := mx.tree.FindRoute(rctx, m, path)

	// Recurse into mounted sub-routers with the remaining path portion.
	if node != nil && node.subroutes != nil {
		rctx.RoutePath = mx.nextRoutePath(rctx)
		return node.subroutes.Match(rctx, method, rctx.RoutePath)
	}

	return h != nil
}
| 
 | ||||
| // NotFoundHandler returns the default Mux 404 responder whenever a route
 | ||||
| // cannot be found.
 | ||||
| func (mx *Mux) NotFoundHandler() http.HandlerFunc { | ||||
| 	if mx.notFoundHandler != nil { | ||||
| 		return mx.notFoundHandler | ||||
| 	} | ||||
| 	return http.NotFound | ||||
| } | ||||
| 
 | ||||
| // MethodNotAllowedHandler returns the default Mux 405 responder whenever
 | ||||
| // a method cannot be resolved for a route.
 | ||||
| func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc { | ||||
| 	if mx.methodNotAllowedHandler != nil { | ||||
| 		return mx.methodNotAllowedHandler | ||||
| 	} | ||||
| 	return methodNotAllowedHandler | ||||
| } | ||||
| 
 | ||||
// buildRouteHandler builds the single mux handler that is a chain of the middleware
// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this
// point, no other middlewares can be registered on this Mux's stack. But you can still
// compose additional middlewares via Group()'s or using a chained middleware handler.
func (mx *Mux) buildRouteHandler() {
	mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP))
}

// handle registers a http.Handler in the routing tree for a particular http method
// and routing pattern. It returns the tree node the endpoint was attached to,
// and panics if the pattern does not begin with '/'.
func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node {
	if len(pattern) == 0 || pattern[0] != '/' {
		panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern))
	}

	// Build the computed routing handler for this routing pattern; from this
	// point on, Use() will panic (see buildRouteHandler above).
	if !mx.inline && mx.handler == nil {
		mx.buildRouteHandler()
	}

	// Build endpoint handler with inline middlewares for the route
	var h http.Handler
	if mx.inline {
		mx.handler = http.HandlerFunc(mx.routeHTTP)
		h = Chain(mx.middlewares...).Handler(handler)
	} else {
		h = handler
	}

	// Add the endpoint to the tree and return the node
	return mx.tree.InsertRoute(method, pattern, h)
}
| 
 | ||||
// routeHTTP routes a http.Request through the Mux routing tree to serve
// the matching handler for a particular http method.
func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) {
	// Grab the route context object; ServeHTTP guarantees it is present.
	rctx := r.Context().Value(RouteCtxKey).(*Context)

	// The request routing path. A parent router sets RoutePath to the
	// remaining sub-path; otherwise prefer the raw (escaped) URL path.
	routePath := rctx.RoutePath
	if routePath == "" {
		if r.URL.RawPath != "" {
			routePath = r.URL.RawPath
		} else {
			routePath = r.URL.Path
		}
	}

	// Check if method is supported by chi
	if rctx.RouteMethod == "" {
		rctx.RouteMethod = r.Method
	}
	method, ok := methodMap[rctx.RouteMethod]
	if !ok {
		mx.MethodNotAllowedHandler().ServeHTTP(w, r)
		return
	}

	// Find the route
	if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil {
		h.ServeHTTP(w, r)
		return
	}
	// Distinguish "path exists but not for this method" (405) from a plain 404.
	if rctx.methodNotAllowed {
		mx.MethodNotAllowedHandler().ServeHTTP(w, r)
	} else {
		mx.NotFoundHandler().ServeHTTP(w, r)
	}
}
| 
 | ||||
| func (mx *Mux) nextRoutePath(rctx *Context) string { | ||||
| 	routePath := "/" | ||||
| 	nx := len(rctx.routeParams.Keys) - 1 // index of last param in list
 | ||||
| 	if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx { | ||||
| 		routePath = "/" + rctx.routeParams.Values[nx] | ||||
| 	} | ||||
| 	return routePath | ||||
| } | ||||
| 
 | ||||
| // Recursively update data on child routers.
 | ||||
| func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) { | ||||
| 	for _, r := range mx.tree.routes() { | ||||
| 		subMux, ok := r.SubRoutes.(*Mux) | ||||
| 		if !ok { | ||||
| 			continue | ||||
| 		} | ||||
| 		fn(subMux) | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // methodNotAllowedHandler is a helper function to respond with a 405,
 | ||||
| // method not allowed.
 | ||||
| func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) { | ||||
| 	w.WriteHeader(405) | ||||
| 	w.Write(nil) | ||||
| } | ||||
|  | @ -0,0 +1,865 @@ | |||
| package chi | ||||
| 
 | ||||
| // Radix tree implementation below is a based on the original work by
 | ||||
| // Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
 | ||||
| // (MIT licensed). It's been heavily modified for use as a HTTP routing tree.
 | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| 	"net/http" | ||||
| 	"regexp" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
| 
 | ||||
// methodTyp is a bitmask of HTTP method flags; a route entry may be
// registered for any combination of methods.
type methodTyp int

const (
	// mSTUB marks intermediate mount nodes; it is deliberately absent
	// from methodMap below.
	mSTUB methodTyp = 1 << iota
	mCONNECT
	mDELETE
	mGET
	mHEAD
	mOPTIONS
	mPATCH
	mPOST
	mPUT
	mTRACE
)

// mALL is the union of all real (non-stub) built-in method flags.
// RegisterMethod extends it with custom method bits at runtime.
var mALL = mCONNECT | mDELETE | mGET | mHEAD |
	mOPTIONS | mPATCH | mPOST | mPUT | mTRACE

// methodMap maps uppercase method names to their bit flags. mSTUB is not
// listed here, so len(methodMap) is one less than the number of bits in use.
var methodMap = map[string]methodTyp{
	http.MethodConnect: mCONNECT,
	http.MethodDelete:  mDELETE,
	http.MethodGet:     mGET,
	http.MethodHead:    mHEAD,
	http.MethodOptions: mOPTIONS,
	http.MethodPatch:   mPATCH,
	http.MethodPost:    mPOST,
	http.MethodPut:     mPUT,
	http.MethodTrace:   mTRACE,
}
| 
 | ||||
| // RegisterMethod adds support for custom HTTP method handlers, available
 | ||||
| // via Router#Method and Router#MethodFunc
 | ||||
| func RegisterMethod(method string) { | ||||
| 	if method == "" { | ||||
| 		return | ||||
| 	} | ||||
| 	method = strings.ToUpper(method) | ||||
| 	if _, ok := methodMap[method]; ok { | ||||
| 		return | ||||
| 	} | ||||
| 	n := len(methodMap) | ||||
| 	if n > strconv.IntSize { | ||||
| 		panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize)) | ||||
| 	} | ||||
| 	mt := methodTyp(math.Exp2(float64(n))) | ||||
| 	methodMap[method] = mt | ||||
| 	mALL |= mt | ||||
| } | ||||
| 
 | ||||
// nodeTyp classifies a radix tree node by how its segment matches a path.
// The values are ordered from most to least specific; children are grouped
// and searched in this order.
type nodeTyp uint8

const (
	ntStatic   nodeTyp = iota // /home
	ntRegexp                  // /{id:[0-9]+}
	ntParam                   // /{user}
	ntCatchAll                // /api/v1/*
)
| 
 | ||||
// node is a single vertex of the routing radix tree.
type node struct {
	// node type: static, regexp, param, catchAll
	typ nodeTyp

	// first byte of the prefix, used as the search label among siblings
	label byte

	// first byte of the child prefix (the delimiter that terminates a
	// param/regexp segment)
	tail byte

	// prefix is the common prefix we ignore
	prefix string

	// regexp matcher for regexp nodes; nil for other node types
	rex *regexp.Regexp

	// HTTP handler endpoints on the leaf node
	endpoints endpoints

	// subroutes on the leaf node (set by Mount for nested routers)
	subroutes Routes

	// child nodes should be stored in-order for iteration,
	// in groups of the node type.
	children [ntCatchAll + 1]nodes
}
| 
 | ||||
// endpoints is a mapping of http method constants to handlers
// for a given route.
type endpoints map[methodTyp]*endpoint

// endpoint holds the handler and pattern metadata attached to a leaf node
// for one HTTP method.
type endpoint struct {
	// endpoint handler
	handler http.Handler

	// pattern is the routing pattern for handler nodes
	pattern string

	// parameter keys recorded on handler nodes, in pattern order
	paramKeys []string
}
| 
 | ||||
| func (s endpoints) Value(method methodTyp) *endpoint { | ||||
| 	mh, ok := s[method] | ||||
| 	if !ok { | ||||
| 		mh = &endpoint{} | ||||
| 		s[method] = mh | ||||
| 	} | ||||
| 	return mh | ||||
| } | ||||
| 
 | ||||
// InsertRoute adds pattern to the radix tree rooted at n, attaching handler
// for the given method bitmask, and returns the leaf node holding the
// endpoint. It walks the tree, splitting static nodes at the longest common
// prefix as needed.
func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node {
	var parent *node
	search := pattern

	for {
		// Handle key exhaustion
		if len(search) == 0 {
			// Insert or update the node's leaf handler
			n.setEndpoint(method, handler, pattern)
			return n
		}

		// We're going to be searching for a wild node next,
		// in this case, we need to get the tail
		var label = search[0]
		var segTail byte
		var segEndIdx int
		var segTyp nodeTyp
		var segRexpat string
		if label == '{' || label == '*' {
			segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search)
		}

		// Regexp nodes are distinguished by their pattern, not just label/tail.
		var prefix string
		if segTyp == ntRegexp {
			prefix = segRexpat
		}

		// Look for the edge to attach to
		parent = n
		n = n.getEdge(segTyp, label, segTail, prefix)

		// No edge, create one
		if n == nil {
			child := &node{label: label, tail: segTail, prefix: search}
			hn := parent.addChild(child, search)
			hn.setEndpoint(method, handler, pattern)

			return hn
		}

		// Found an edge to match the pattern

		if n.typ > ntStatic {
			// We found a param node, trim the param from the search path and continue.
			// This param/wild pattern segment would already be on the tree from a previous
			// call to addChild when creating a new node.
			search = search[segEndIdx:]
			continue
		}

		// Static nodes fall below here.
		// Determine longest prefix of the search key on match.
		commonPrefix := longestPrefix(search, n.prefix)
		if commonPrefix == len(n.prefix) {
			// the common prefix is as long as the current node's prefix we're attempting to insert.
			// keep the search going.
			search = search[commonPrefix:]
			continue
		}

		// Split the node: a fresh static node takes over the shared prefix,
		// and the existing node becomes its child with the remainder.
		child := &node{
			typ:    ntStatic,
			prefix: search[:commonPrefix],
		}
		parent.replaceChild(search[0], segTail, child)

		// Restore the existing node
		n.label = n.prefix[commonPrefix]
		n.prefix = n.prefix[commonPrefix:]
		child.addChild(n, n.prefix)

		// If the new key is a subset, set the method/handler on this node and finish.
		search = search[commonPrefix:]
		if len(search) == 0 {
			child.setEndpoint(method, handler, pattern)
			return child
		}

		// Create a new edge for the node
		subchild := &node{
			typ:    ntStatic,
			label:  search[0],
			prefix: search,
		}
		hn := child.addChild(subchild, search)
		hn.setEndpoint(method, handler, pattern)
		return hn
	}
}
| 
 | ||||
// addChild appends the new `child` node to the tree using the `pattern` as the trie key.
// For a URL router like chi's, we split the static, param, regexp and wildcard segments
// into different nodes. In addition, addChild will recursively call itself until every
// pattern segment is added to the url pattern tree as individual nodes, depending on type.
// Returns the leaf node on which the handler should ultimately be registered.
func (n *node) addChild(child *node, prefix string) *node {
	search := prefix

	// handler leaf node added to the tree is the child.
	// this may be overridden later down the flow
	hn := child

	// Parse next segment
	segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search)

	// Add child depending on next up segment
	switch segTyp {

	case ntStatic:
		// Search prefix is all static (that is, has no params in path)
		// noop

	default:
		// Search prefix contains a param, regexp or wildcard

		if segTyp == ntRegexp {
			rex, err := regexp.Compile(segRexpat)
			if err != nil {
				panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat))
			}
			// For regexp nodes the prefix holds the pattern text itself.
			child.prefix = segRexpat
			child.rex = rex
		}

		if segStartIdx == 0 {
			// Route starts with a param
			child.typ = segTyp

			// Catch-all consumes the rest of the pattern; otherwise stop at
			// the end of this param segment.
			if segTyp == ntCatchAll {
				segStartIdx = -1
			} else {
				segStartIdx = segEndIdx
			}
			if segStartIdx < 0 {
				segStartIdx = len(search)
			}
			child.tail = segTail // for params, we set the tail

			if segStartIdx != len(search) {
				// add static edge for the remaining part, split the end.
				// its not possible to have adjacent param nodes, so its certainly
				// going to be a static node next.

				search = search[segStartIdx:] // advance search position

				nn := &node{
					typ:    ntStatic,
					label:  search[0],
					prefix: search,
				}
				hn = child.addChild(nn, search)
			}

		} else if segStartIdx > 0 {
			// Route has some param

			// starts with a static segment
			child.typ = ntStatic
			child.prefix = search[:segStartIdx]
			child.rex = nil

			// add the param edge node
			search = search[segStartIdx:]

			nn := &node{
				typ:   segTyp,
				label: search[0],
				tail:  segTail,
			}
			hn = child.addChild(nn, search)

		}
	}

	// Attach the (possibly retyped) child to its type group and keep the
	// group sorted so findEdge's binary search stays valid.
	n.children[child.typ] = append(n.children[child.typ], child)
	n.children[child.typ].Sort()
	return hn
}
| 
 | ||||
| func (n *node) replaceChild(label, tail byte, child *node) { | ||||
| 	for i := 0; i < len(n.children[child.typ]); i++ { | ||||
| 		if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail { | ||||
| 			n.children[child.typ][i] = child | ||||
| 			n.children[child.typ][i].label = label | ||||
| 			n.children[child.typ][i].tail = tail | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| 	panic("chi: replacing missing child") | ||||
| } | ||||
| 
 | ||||
| func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node { | ||||
| 	nds := n.children[ntyp] | ||||
| 	for i := 0; i < len(nds); i++ { | ||||
| 		if nds[i].label == label && nds[i].tail == tail { | ||||
| 			if ntyp == ntRegexp && nds[i].prefix != prefix { | ||||
| 				continue | ||||
| 			} | ||||
| 			return nds[i] | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
// setEndpoint registers handler on this node for the given method type,
// recording the route pattern and its param keys alongside it. For mSTUB
// only the handler is stored; for mALL the handler is fanned out to every
// concrete HTTP method in methodMap as well as the mALL slot.
func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) {
	// Set the handler for the method type on the node
	if n.endpoints == nil {
		n.endpoints = make(endpoints)
	}

	paramKeys := patParamKeys(pattern)

	if method&mSTUB == mSTUB {
		n.endpoints.Value(mSTUB).handler = handler
	}
	if method&mALL == mALL {
		h := n.endpoints.Value(mALL)
		h.handler = handler
		h.pattern = pattern
		h.paramKeys = paramKeys
		// Register the same handler under every concrete method too.
		for _, m := range methodMap {
			h := n.endpoints.Value(m)
			h.handler = handler
			h.pattern = pattern
			h.paramKeys = paramKeys
		}
	} else {
		h := n.endpoints.Value(method)
		h.handler = handler
		h.pattern = pattern
		h.paramKeys = paramKeys
	}
}
| 
 | ||||
// FindRoute searches the tree for a node matching path that has a handler
// registered for method. On success it records the matched URL params and
// routing pattern into rctx and returns the node, its endpoints map, and
// the matched handler. Returns (nil, nil, nil) when no route matches.
func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) {
	// Reset the context routing pattern and params
	rctx.routePattern = ""
	rctx.routeParams.Keys = rctx.routeParams.Keys[:0]
	rctx.routeParams.Values = rctx.routeParams.Values[:0]

	// Find the routing handlers for the path
	rn := n.findRoute(rctx, method, path)
	if rn == nil {
		return nil, nil, nil
	}

	// Record the routing params in the request lifecycle
	rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...)
	rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...)

	// Record the routing pattern in the request lifecycle
	if rn.endpoints[method].pattern != "" {
		rctx.routePattern = rn.endpoints[method].pattern
		rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern)
	}

	return rn, rn.endpoints, rn.endpoints[method].handler
}
| 
 | ||||
// Recursive edge traversal by checking all nodeTyp groups along the way.
// It's like searching through a multi-dimensional radix trie.
// Child groups are tried in nodeTyp order (static first, catch-all last);
// param values are appended to rctx.routeParams.Values tentatively and
// rolled back when a branch fails, so rctx is consistent on return.
func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node {
	nn := n
	search := path

	for t, nds := range nn.children {
		ntyp := nodeTyp(t)
		if len(nds) == 0 {
			continue
		}

		var xn *node
		xsearch := search

		var label byte
		if search != "" {
			label = search[0]
		}

		switch ntyp {
		case ntStatic:
			xn = nds.findEdge(label)
			if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) {
				continue
			}
			xsearch = xsearch[len(xn.prefix):]

		case ntParam, ntRegexp:
			// short-circuit and return no matching route for empty param values
			if xsearch == "" {
				continue
			}

			// serially loop through each node grouped by the tail delimiter
			for idx := 0; idx < len(nds); idx++ {
				xn = nds[idx]

				// label for param nodes is the delimiter byte
				p := strings.IndexByte(xsearch, xn.tail)

				if p < 0 {
					if xn.tail == '/' {
						// No delimiter left: param consumes the rest of the path.
						p = len(xsearch)
					} else {
						continue
					}
				}

				if ntyp == ntRegexp && xn.rex != nil {
					if !xn.rex.Match([]byte(xsearch[:p])) {
						continue
					}
				} else if strings.IndexByte(xsearch[:p], '/') != -1 {
					// avoid a match across path segments
					continue
				}

				// Tentatively record the param value; prevlen lets us roll back.
				prevlen := len(rctx.routeParams.Values)
				rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p])
				xsearch = xsearch[p:]

				if len(xsearch) == 0 {
					if xn.isLeaf() {
						h := xn.endpoints[method]
						if h != nil && h.handler != nil {
							rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
							return xn
						}

						// flag that the routing context found a route, but not a corresponding
						// supported method
						rctx.methodNotAllowed = true
					}
				}

				// recursively find the next node on this branch
				fin := xn.findRoute(rctx, method, xsearch)
				if fin != nil {
					return fin
				}

				// not found on this branch, reset vars
				rctx.routeParams.Values = rctx.routeParams.Values[:prevlen]
				xsearch = search
			}

			// All param branches failed; record an empty value before moving on.
			rctx.routeParams.Values = append(rctx.routeParams.Values, "")

		default:
			// catch-all nodes
			rctx.routeParams.Values = append(rctx.routeParams.Values, search)
			xn = nds[0]
			xsearch = ""
		}

		if xn == nil {
			continue
		}

		// did we find it yet?
		if len(xsearch) == 0 {
			if xn.isLeaf() {
				h := xn.endpoints[method]
				if h != nil && h.handler != nil {
					rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
					return xn
				}

				// flag that the routing context found a route, but not a corresponding
				// supported method
				rctx.methodNotAllowed = true
			}
		}

		// recursively find the next node..
		fin := xn.findRoute(rctx, method, xsearch)
		if fin != nil {
			return fin
		}

		// Did not find final handler, let's remove the param here if it was set
		if xn.typ > ntStatic {
			if len(rctx.routeParams.Values) > 0 {
				rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1]
			}
		}

	}

	return nil
}
| 
 | ||||
// findEdge returns the child of type ntyp whose label matches the given
// byte, or nil when absent. Static/param/regexp groups are kept sorted by
// label, so a binary search is used; for other (catch-all) groups the first
// node is returned directly.
// NOTE(review): the binary-search path indexes nds[idx] unconditionally, so
// callers are expected to ensure the group is non-empty — confirm at call sites.
func (n *node) findEdge(ntyp nodeTyp, label byte) *node {
	nds := n.children[ntyp]
	num := len(nds)
	idx := 0

	switch ntyp {
	case ntStatic, ntParam, ntRegexp:
		// Standard binary search over the label-sorted group.
		i, j := 0, num-1
		for i <= j {
			idx = i + (j-i)/2
			if label > nds[idx].label {
				i = idx + 1
			} else if label < nds[idx].label {
				j = idx - 1
			} else {
				i = num // breaks cond
			}
		}
		if nds[idx].label != label {
			return nil
		}
		return nds[idx]

	default: // catch all
		return nds[idx]
	}
}
| 
 | ||||
// isLeaf reports whether this node has any endpoints registered, i.e. it
// terminates at least one route.
func (n *node) isLeaf() bool {
	return n.endpoints != nil
}
| 
 | ||||
// findPattern walks the tree checking whether the given routing pattern is
// registered, consuming it segment-by-segment against static, param/regexp
// and catch-all child nodes. Returns true once the full pattern is consumed.
func (n *node) findPattern(pattern string) bool {
	nn := n
	for _, nds := range nn.children {
		if len(nds) == 0 {
			continue
		}

		n = nn.findEdge(nds[0].typ, pattern[0])
		if n == nil {
			continue
		}

		var idx int
		var xpattern string

		// Determine how much of the pattern this edge consumes.
		switch n.typ {
		case ntStatic:
			idx = longestPrefix(pattern, n.prefix)
			if idx < len(n.prefix) {
				continue
			}

		case ntParam, ntRegexp:
			// Consume through the closing '}' of the param segment.
			idx = strings.IndexByte(pattern, '}') + 1

		case ntCatchAll:
			idx = longestPrefix(pattern, "*")

		default:
			panic("chi: unknown node type")
		}

		xpattern = pattern[idx:]
		if len(xpattern) == 0 {
			return true
		}

		// Recurse on the remainder of the pattern.
		return n.findPattern(xpattern)
	}
	return false
}
| 
 | ||||
// routes flattens the subtree into a list of Route values, grouping the
// registered method handlers by their unique route patterns. Pure mount
// stubs (an mSTUB handler with no subroutes) are skipped.
func (n *node) routes() []Route {
	rts := []Route{}

	n.walk(func(eps endpoints, subroutes Routes) bool {
		if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil {
			return false
		}

		// Group methodHandlers by unique patterns
		pats := make(map[string]endpoints)

		for mt, h := range eps {
			if h.pattern == "" {
				continue
			}
			p, ok := pats[h.pattern]
			if !ok {
				p = endpoints{}
				pats[h.pattern] = p
			}
			p[mt] = h
		}

		// Build one Route per pattern, mapping method names to handlers.
		for p, mh := range pats {
			hs := make(map[string]http.Handler)
			if mh[mALL] != nil && mh[mALL].handler != nil {
				hs["*"] = mh[mALL].handler
			}

			for mt, h := range mh {
				if h.handler == nil {
					continue
				}
				m := methodTypString(mt)
				if m == "" {
					continue
				}
				hs[m] = h.handler
			}

			rt := Route{p, hs, subroutes}
			rts = append(rts, rt)
		}

		// Always keep walking; we never short-circuit the traversal here.
		return false
	})

	return rts
}
| 
 | ||||
| func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool { | ||||
| 	// Visit the leaf values if any
 | ||||
| 	if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) { | ||||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	// Recurse on the children
 | ||||
| 	for _, ns := range n.children { | ||||
| 		for _, cn := range ns { | ||||
| 			if cn.walk(fn) { | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
| 
 | ||||
// patNextSegment returns the next segment details from a pattern:
// node type, param key, regexp string, param tail byte, param starting index, param ending index.
// A fully static pattern yields (ntStatic, "", "", 0, 0, len(pattern)).
func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) {
	ps := strings.Index(pattern, "{")
	ws := strings.Index(pattern, "*")

	if ps < 0 && ws < 0 {
		return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing
	}

	// Sanity check
	if ps >= 0 && ws >= 0 && ws < ps {
		panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'")
	}

	var tail byte = '/' // Default endpoint tail to / byte

	if ps >= 0 {
		// Param/Regexp pattern is next
		nt := ntParam

		// Read to closing } taking into account opens and closes in curly brace count (cc)
		cc := 0
		pe := ps
		for i, c := range pattern[ps:] {
			if c == '{' {
				cc++
			} else if c == '}' {
				cc--
				if cc == 0 {
					pe = ps + i
					break
				}
			}
		}
		if pe == ps {
			panic("chi: route param closing delimiter '}' is missing")
		}

		key := pattern[ps+1 : pe]
		pe++ // set end to next position

		if pe < len(pattern) {
			tail = pattern[pe]
		}

		// A "{name:regexp}" key upgrades the segment to a regexp node.
		var rexpat string
		if idx := strings.Index(key, ":"); idx >= 0 {
			nt = ntRegexp
			rexpat = key[idx+1:]
			key = key[:idx]
		}

		// Anchor the regexp so it must match the entire segment value.
		if len(rexpat) > 0 {
			if rexpat[0] != '^' {
				rexpat = "^" + rexpat
			}
			if rexpat[len(rexpat)-1] != '$' {
				rexpat += "$"
			}
		}

		return nt, key, rexpat, tail, ps, pe
	}

	// Wildcard pattern as finale
	if ws < len(pattern)-1 {
		panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead")
	}
	return ntCatchAll, "*", "", 0, ws, len(pattern)
}
| 
 | ||||
| func patParamKeys(pattern string) []string { | ||||
| 	pat := pattern | ||||
| 	paramKeys := []string{} | ||||
| 	for { | ||||
| 		ptyp, paramKey, _, _, _, e := patNextSegment(pat) | ||||
| 		if ptyp == ntStatic { | ||||
| 			return paramKeys | ||||
| 		} | ||||
| 		for i := 0; i < len(paramKeys); i++ { | ||||
| 			if paramKeys[i] == paramKey { | ||||
| 				panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey)) | ||||
| 			} | ||||
| 		} | ||||
| 		paramKeys = append(paramKeys, paramKey) | ||||
| 		pat = pat[e:] | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
// longestPrefix reports the length in bytes of the longest common prefix
// shared by k1 and k2.
func longestPrefix(k1, k2 string) int {
	limit := len(k1)
	if len(k2) < limit {
		limit = len(k2)
	}
	i := 0
	for i < limit && k1[i] == k2[i] {
		i++
	}
	return i
}
| 
 | ||||
| func methodTypString(method methodTyp) string { | ||||
| 	for s, t := range methodMap { | ||||
| 		if method == t { | ||||
| 			return s | ||||
| 		} | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
| 
 | ||||
// nodes is a label-sortable list of tree nodes within one nodeTyp group.
type nodes []*node

// Sort sorts the list of nodes by label, then applies tailSort so a param
// node with a '/' tail ends up last; list order drives traversal order.
func (ns nodes) Sort()              { sort.Sort(ns); ns.tailSort() }
func (ns nodes) Len() int           { return len(ns) }
func (ns nodes) Swap(i, j int)      { ns[i], ns[j] = ns[j], ns[i] }
func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label }
| 
 | ||||
// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes.
// The list order determines the traversal order.
func (ns nodes) tailSort() {
	// Scan from the back; only the last such param node is swapped to the end.
	for i := len(ns) - 1; i >= 0; i-- {
		if ns[i].typ > ntStatic && ns[i].tail == '/' {
			ns.Swap(i, len(ns)-1)
			return
		}
	}
}
| 
 | ||||
// findEdge binary-searches the label-sorted node list for the node with the
// given label byte, returning nil when absent.
// NOTE(review): ns[idx] is indexed unconditionally, so an empty list would
// panic — callers check group length before calling; confirm at call sites.
func (ns nodes) findEdge(label byte) *node {
	num := len(ns)
	idx := 0
	i, j := 0, num-1
	for i <= j {
		idx = i + (j-i)/2
		if label > ns[idx].label {
			i = idx + 1
		} else if label < ns[idx].label {
			j = idx - 1
		} else {
			i = num // breaks cond
		}
	}
	if ns[idx].label != label {
		return nil
	}
	return ns[idx]
}
| 
 | ||||
// Route describes the details of a routing handler.
// Handlers map key is an HTTP method.
type Route struct {
	Pattern   string                  // registered routing pattern, e.g. "/users/{id}"
	Handlers  map[string]http.Handler // handlers keyed by method name; "*" covers all methods
	SubRoutes Routes                  // mounted sub-router, nil when this is a terminal route
}
| 
 | ||||
// WalkFunc is the type of the function called for each method and route visited by Walk.
// middlewares carries the middleware chain accumulated from the root router down to the route.
type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error
| 
 | ||||
// Walk walks any router tree that implements Routes interface.
// walkFn is invoked once per (method, route) pair; the first error aborts the walk.
func Walk(r Routes, walkFn WalkFunc) error {
	return walk(r, walkFn, "")
}
| 
 | ||||
// walk recursively visits every route in r, accumulating the full route path
// in parentRoute and the middleware chain in parentMw, and invokes walkFn for
// each concrete (method, route, handler) triple. The first error returned by
// walkFn stops the traversal and is propagated to the caller.
func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error {
	for _, route := range r.Routes() {
		// Copy the inherited middleware chain, then append this router's own,
		// so sibling branches do not share a backing array.
		mws := make([]func(http.Handler) http.Handler, len(parentMw))
		copy(mws, parentMw)
		mws = append(mws, r.Middlewares()...)

		if route.SubRoutes != nil {
			if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil {
				return err
			}
			continue
		}

		for method, handler := range route.Handlers {
			if method == "*" {
				// Ignore a "catchAll" method, since we pass down all the specific methods for each route.
				continue
			}

			fullRoute := parentRoute + route.Pattern
			fullRoute = strings.Replace(fullRoute, "/*/", "/", -1)

			if chain, ok := handler.(*ChainHandler); ok {
				// Unwrap chained handlers so walkFn sees the endpoint plus its middlewares.
				if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil {
					return err
				}
			} else {
				if err := walkFn(method, fullRoute, handler, mws...); err != nil {
					return err
				}
			}
		}
	}

	return nil
}
|  | @ -161,15 +161,22 @@ func (r *request) buildHTTP(mediaType, basePath string, producers map[string]run | |||
| 			}() | ||||
| 			for fn, f := range r.fileFields { | ||||
| 				for _, fi := range f { | ||||
| 					// Need to read the data so that we can detect the content type
 | ||||
| 					buf := make([]byte, 512) | ||||
| 					size, err := fi.Read(buf) | ||||
| 					if err != nil { | ||||
| 						logClose(err, pw) | ||||
| 						return | ||||
| 					var fileContentType string | ||||
| 					if p, ok := fi.(interface { | ||||
| 						ContentType() string | ||||
| 					}); ok { | ||||
| 						fileContentType = p.ContentType() | ||||
| 					} else { | ||||
| 						// Need to read the data so that we can detect the content type
 | ||||
| 						buf := make([]byte, 512) | ||||
| 						size, err := fi.Read(buf) | ||||
| 						if err != nil { | ||||
| 							logClose(err, pw) | ||||
| 							return | ||||
| 						} | ||||
| 						fileContentType = http.DetectContentType(buf) | ||||
| 						fi = runtime.NamedReader(fi.Name(), io.MultiReader(bytes.NewReader(buf[:size]), fi)) | ||||
| 					} | ||||
| 					fileContentType := http.DetectContentType(buf) | ||||
| 					newFi := runtime.NamedReader(fi.Name(), io.MultiReader(bytes.NewReader(buf[:size]), fi)) | ||||
| 
 | ||||
| 					// Create the MIME headers for the new part
 | ||||
| 					h := make(textproto.MIMEHeader) | ||||
|  | @ -183,7 +190,7 @@ func (r *request) buildHTTP(mediaType, basePath string, producers map[string]run | |||
| 						logClose(err, pw) | ||||
| 						return | ||||
| 					} | ||||
| 					if _, err := io.Copy(wrtr, newFi); err != nil { | ||||
| 					if _, err := io.Copy(wrtr, fi); err != nil { | ||||
| 						logClose(err, pw) | ||||
| 					} | ||||
| 				} | ||||
|  |  | |||
|  | @ -16,6 +16,7 @@ package spec | |||
| 
 | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/go-openapi/jsonpointer" | ||||
|  | @ -40,6 +41,24 @@ func (e Extensions) GetString(key string) (string, bool) { | |||
| 	return "", false | ||||
| } | ||||
| 
 | ||||
| // GetInt gets a int value from the extensions
 | ||||
| func (e Extensions) GetInt(key string) (int, bool) { | ||||
| 	realKey := strings.ToLower(key) | ||||
| 
 | ||||
| 	if v, ok := e.GetString(realKey); ok { | ||||
| 		if r, err := strconv.Atoi(v); err == nil { | ||||
| 			return r, true | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	if v, ok := e[realKey]; ok { | ||||
| 		if r, rOk := v.(float64); rOk { | ||||
| 			return int(r), true | ||||
| 		} | ||||
| 	} | ||||
| 	return -1, false | ||||
| } | ||||
| 
 | ||||
| // GetBool gets a string value from the extensions
 | ||||
| func (e Extensions) GetBool(key string) (bool, bool) { | ||||
| 	if v, ok := e[strings.ToLower(key)]; ok { | ||||
|  |  | |||
|  | @ -42,8 +42,8 @@ func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { | |||
| func (items OrderSchemaItems) Len() int      { return len(items) } | ||||
| func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } | ||||
| func (items OrderSchemaItems) Less(i, j int) (ret bool) { | ||||
| 	ii, oki := items[i].Extensions.GetString("x-order") | ||||
| 	ij, okj := items[j].Extensions.GetString("x-order") | ||||
| 	ii, oki := items[i].Extensions.GetInt("x-order") | ||||
| 	ij, okj := items[j].Extensions.GetInt("x-order") | ||||
| 	if oki { | ||||
| 		if okj { | ||||
| 			defer func() { | ||||
|  | @ -56,7 +56,7 @@ func (items OrderSchemaItems) Less(i, j int) (ret bool) { | |||
| 					ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() | ||||
| 				} | ||||
| 			}() | ||||
| 			return reflect.ValueOf(ii).Int() < reflect.ValueOf(ij).Int() | ||||
| 			return ii < ij | ||||
| 		} | ||||
| 		return true | ||||
| 	} else if okj { | ||||
|  |  | |||
|  | @ -19,6 +19,7 @@ import ( | |||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 
 | ||||
| 	"github.com/go-openapi/swag" | ||||
| ) | ||||
|  | @ -62,6 +63,7 @@ func (r *Responses) UnmarshalJSON(data []byte) error { | |||
| 	if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | @ -107,20 +109,31 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { | |||
| 
 | ||||
| // UnmarshalJSON unmarshals responses from JSON
 | ||||
| func (r *ResponsesProps) UnmarshalJSON(data []byte) error { | ||||
| 	var res map[string]Response | ||||
| 	var res map[string]json.RawMessage | ||||
| 	if err := json.Unmarshal(data, &res); err != nil { | ||||
| 		return nil | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	if v, ok := res["default"]; ok { | ||||
| 		r.Default = &v | ||||
| 		var defaultRes Response | ||||
| 		if err := json.Unmarshal(v, &defaultRes); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		r.Default = &defaultRes | ||||
| 		delete(res, "default") | ||||
| 	} | ||||
| 	for k, v := range res { | ||||
| 		if nk, err := strconv.Atoi(k); err == nil { | ||||
| 			if r.StatusCodeResponses == nil { | ||||
| 				r.StatusCodeResponses = map[int]Response{} | ||||
| 		if !strings.HasPrefix(k, "x-") { | ||||
| 			var statusCodeResp Response | ||||
| 			if err := json.Unmarshal(v, &statusCodeResp); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			if nk, err := strconv.Atoi(k); err == nil { | ||||
| 				if r.StatusCodeResponses == nil { | ||||
| 					r.StatusCodeResponses = map[int]Response{} | ||||
| 				} | ||||
| 				r.StatusCodeResponses[nk] = statusCodeResp | ||||
| 			} | ||||
| 			r.StatusCodeResponses[nk] = v | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
|  |  | |||
|  | @ -1,7 +1,7 @@ | |||
| Package validator | ||||
| ================= | ||||
| <img align="right" src="https://raw.githubusercontent.com/go-playground/validator/v10/logo.png">[](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) | ||||
|  | ||||
|  | ||||
| [](https://travis-ci.org/go-playground/validator) | ||||
| [](https://coveralls.io/github/go-playground/validator?branch=master) | ||||
| [](https://goreportcard.com/report/github.com/go-playground/validator) | ||||
|  |  | |||
|  | @ -33,7 +33,7 @@ type Func func(fl FieldLevel) bool | |||
| // validation needs. The return value should be true when validation succeeds.
 | ||||
| type FuncCtx func(ctx context.Context, fl FieldLevel) bool | ||||
| 
 | ||||
| // wrapFunc wraps noramal Func makes it compatible with FuncCtx
 | ||||
| // wrapFunc wraps normal Func makes it compatible with FuncCtx
 | ||||
| func wrapFunc(fn Func) FuncCtx { | ||||
| 	if fn == nil { | ||||
| 		return nil // be sure not to wrap a bad function.
 | ||||
|  | @ -73,6 +73,7 @@ var ( | |||
| 		"required":                      hasValue, | ||||
| 		"required_if":                   requiredIf, | ||||
| 		"required_unless":               requiredUnless, | ||||
| 		"skip_unless":                   skipUnless, | ||||
| 		"required_with":                 requiredWith, | ||||
| 		"required_with_all":             requiredWithAll, | ||||
| 		"required_without":              requiredWithout, | ||||
|  | @ -928,7 +929,7 @@ func isNe(fl FieldLevel) bool { | |||
| 	return !isEq(fl) | ||||
| } | ||||
| 
 | ||||
| // isNe is the validation function for validating that the field's string value does not equal the
 | ||||
| // isNeIgnoreCase is the validation function for validating that the field's string value does not equal the
 | ||||
| // provided param value. The comparison is case-insensitive
 | ||||
| func isNeIgnoreCase(fl FieldLevel) bool { | ||||
| 	return !isEqIgnoreCase(fl) | ||||
|  | @ -1648,7 +1649,7 @@ func hasValue(fl FieldLevel) bool { | |||
| 	} | ||||
| } | ||||
| 
 | ||||
| // requireCheckField is a func for check field kind
 | ||||
| // requireCheckFieldKind is a func for check field kind
 | ||||
| func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue bool) bool { | ||||
| 	field := fl.Field() | ||||
| 	kind := field.Kind() | ||||
|  | @ -1728,10 +1729,10 @@ func excludedIf(fl FieldLevel) bool { | |||
| 
 | ||||
| 	for i := 0; i < len(params); i += 2 { | ||||
| 		if !requireCheckFieldValue(fl, params[i], params[i+1], false) { | ||||
| 			return false | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| 	return !hasValue(fl) | ||||
| } | ||||
| 
 | ||||
| // requiredUnless is the validation function
 | ||||
|  | @ -1750,6 +1751,21 @@ func requiredUnless(fl FieldLevel) bool { | |||
| 	return hasValue(fl) | ||||
| } | ||||
| 
 | ||||
| // skipUnless is the validation function
 | ||||
| // The field under validation must be present and not empty only unless all the other specified fields are equal to the value following with the specified field.
 | ||||
| func skipUnless(fl FieldLevel) bool { | ||||
| 	params := parseOneOfParam2(fl.Param()) | ||||
| 	if len(params)%2 != 0 { | ||||
| 		panic(fmt.Sprintf("Bad param number for skip_unless %s", fl.FieldName())) | ||||
| 	} | ||||
| 	for i := 0; i < len(params); i += 2 { | ||||
| 		if !requireCheckFieldValue(fl, params[i], params[i+1], false) { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return hasValue(fl) | ||||
| } | ||||
| 
 | ||||
| // excludedUnless is the validation function
 | ||||
| // The field under validation must not be present or is empty unless all the other specified fields are equal to the value following with the specified field.
 | ||||
| func excludedUnless(fl FieldLevel) bool { | ||||
|  | @ -2593,13 +2609,13 @@ func isIso3166Alpha2(fl FieldLevel) bool { | |||
| 	return iso3166_1_alpha2[val] | ||||
| } | ||||
| 
 | ||||
| // isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code.
 | ||||
| // isIso3166Alpha3 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code.
 | ||||
| func isIso3166Alpha3(fl FieldLevel) bool { | ||||
| 	val := fl.Field().String() | ||||
| 	return iso3166_1_alpha3[val] | ||||
| } | ||||
| 
 | ||||
| // isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code.
 | ||||
| // isIso3166AlphaNumeric is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code.
 | ||||
| func isIso3166AlphaNumeric(fl FieldLevel) bool { | ||||
| 	field := fl.Field() | ||||
| 
 | ||||
|  |  | |||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -146,7 +146,7 @@ use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe, | |||
| so the above will become excludesall=0x7C | ||||
| 
 | ||||
| 	type Test struct { | ||||
| 		Field `validate:"excludesall=|"`    // BAD! Do not include a a pipe!
 | ||||
| 		Field `validate:"excludesall=|"`    // BAD! Do not include a pipe!
 | ||||
| 		Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
 | ||||
| 	} | ||||
| 
 | ||||
|  | @ -239,7 +239,7 @@ Example #2 | |||
| 
 | ||||
| 	map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required" | ||||
| 	// gt=0 will be applied to the map itself
 | ||||
| 	// eq=1|eq=2 will be applied to each array element in the the map keys
 | ||||
| 	// eq=1|eq=2 will be applied to each array element in the map keys
 | ||||
| 	// required will be applied to map values
 | ||||
| 
 | ||||
| # Required | ||||
|  | @ -916,7 +916,7 @@ this with the omitempty tag. | |||
| # Base64URL String | ||||
| 
 | ||||
| This validates that a string value contains a valid base64 URL safe value | ||||
| according the the RFC4648 spec. | ||||
| according the RFC4648 spec. | ||||
| Although an empty string is a valid base64 URL safe value, this will report | ||||
| an empty string as an error, if you wish to accept an empty string as valid | ||||
| you can use this with the omitempty tag. | ||||
|  | @ -927,7 +927,7 @@ you can use this with the omitempty tag. | |||
| # Base64RawURL String | ||||
| 
 | ||||
| This validates that a string value contains a valid base64 URL safe value, | ||||
| but without = padding, according the the RFC4648 spec, section 3.2. | ||||
| but without = padding, according the RFC4648 spec, section 3.2. | ||||
| Although an empty string is a valid base64 URL safe value, this will report | ||||
| an empty string as an error, if you wish to accept an empty string as valid | ||||
| you can use this with the omitempty tag. | ||||
|  | @ -1361,7 +1361,7 @@ More information on https://cve.mitre.org/ | |||
| 
 | ||||
| # Credit Card | ||||
| 
 | ||||
| This validates that a string value contains a valid credit card number using Luhn algoritm. | ||||
| This validates that a string value contains a valid credit card number using Luhn algorithm. | ||||
| 
 | ||||
| 	Usage: credit_card | ||||
| 
 | ||||
|  | @ -1372,8 +1372,7 @@ This validates that a string value contains a valid credit card number using Luh | |||
| 
 | ||||
| This validates that a string or (u)int value contains a valid checksum using the Luhn algorithm. | ||||
| 
 | ||||
| 
 | ||||
| #MongoDb ObjectID | ||||
| # MongoDb ObjectID | ||||
| 
 | ||||
| This validates that a string is a valid 24 character hexadecimal string. | ||||
| 
 | ||||
|  |  | |||
|  | @ -62,7 +62,7 @@ type StructLevel interface { | |||
| 	// existing namespace that validator is on.
 | ||||
| 	// e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending
 | ||||
| 	// on the nesting. most of the time they will be blank, unless you validate
 | ||||
| 	// at a level lower the the current field depth
 | ||||
| 	// at a level lower the current field depth
 | ||||
| 	ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors) | ||||
| } | ||||
| 
 | ||||
|  | @ -74,7 +74,7 @@ var _ StructLevel = new(validate) | |||
| // if not is a nested struct.
 | ||||
| //
 | ||||
| // this is only called when within Struct and Field Level validation and
 | ||||
| // should not be relied upon for an acurate value otherwise.
 | ||||
| // should not be relied upon for an accurate value otherwise.
 | ||||
| func (v *validate) Top() reflect.Value { | ||||
| 	return v.top | ||||
| } | ||||
|  | @ -85,7 +85,7 @@ func (v *validate) Top() reflect.Value { | |||
| // if not is a nested struct.
 | ||||
| //
 | ||||
| // this is only called when within Struct and Field Level validation and
 | ||||
| // should not be relied upon for an acurate value otherwise.
 | ||||
| // should not be relied upon for an accurate value otherwise.
 | ||||
| func (v *validate) Parent() reflect.Value { | ||||
| 	return v.slflParent | ||||
| } | ||||
|  |  | |||
|  | @ -234,7 +234,7 @@ func asInt(param string) int64 { | |||
| func asIntFromTimeDuration(param string) int64 { | ||||
| 	d, err := time.ParseDuration(param) | ||||
| 	if err != nil { | ||||
| 		// attempt parsing as an an integer assuming nanosecond precision
 | ||||
| 		// attempt parsing as an integer assuming nanosecond precision
 | ||||
| 		return asInt(param) | ||||
| 	} | ||||
| 	return int64(d) | ||||
|  |  | |||
|  | @ -29,6 +29,7 @@ const ( | |||
| 	requiredWithAllTag    = "required_with_all" | ||||
| 	requiredIfTag         = "required_if" | ||||
| 	requiredUnlessTag     = "required_unless" | ||||
| 	skipUnlessTag         = "skip_unless" | ||||
| 	excludedWithoutAllTag = "excluded_without_all" | ||||
| 	excludedWithoutTag    = "excluded_without" | ||||
| 	excludedWithTag       = "excluded_with" | ||||
|  | @ -57,7 +58,7 @@ var ( | |||
| 
 | ||||
| // FilterFunc is the type used to filter fields using
 | ||||
| // StructFiltered(...) function.
 | ||||
| // returning true results in the field being filtered/skiped from
 | ||||
| // returning true results in the field being filtered/skipped from
 | ||||
| // validation
 | ||||
| type FilterFunc func(ns []byte) bool | ||||
| 
 | ||||
|  | @ -123,7 +124,8 @@ func New() *Validate { | |||
| 		switch k { | ||||
| 		// these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour
 | ||||
| 		case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag, | ||||
| 			excludedIfTag, excludedUnlessTag, excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag: | ||||
| 			excludedIfTag, excludedUnlessTag, excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag, | ||||
| 			skipUnlessTag: | ||||
| 			_ = v.registerValidation(k, wrapFunc(val), true, true) | ||||
| 		default: | ||||
| 			// no need to error check here, baked in will always be valid
 | ||||
|  | @ -151,7 +153,7 @@ func (v *Validate) SetTagName(name string) { | |||
| } | ||||
| 
 | ||||
| // ValidateMapCtx validates a map using a map of validation rules and allows passing of contextual
 | ||||
| // validation validation information via context.Context.
 | ||||
| // validation information via context.Context.
 | ||||
| func (v Validate) ValidateMapCtx(ctx context.Context, data map[string]interface{}, rules map[string]interface{}) map[string]interface{} { | ||||
| 	errs := make(map[string]interface{}) | ||||
| 	for field, rule := range rules { | ||||
|  | @ -451,7 +453,7 @@ func (v *Validate) StructPartial(s interface{}, fields ...string) error { | |||
| } | ||||
| 
 | ||||
| // StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual
 | ||||
| // validation validation information via context.Context
 | ||||
| // validation information via context.Context
 | ||||
| // Fields may be provided in a namespaced fashion relative to the  struct provided
 | ||||
| // eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
 | ||||
| //
 | ||||
|  | @ -541,7 +543,7 @@ func (v *Validate) StructExcept(s interface{}, fields ...string) error { | |||
| } | ||||
| 
 | ||||
| // StructExceptCtx validates all fields except the ones passed in and allows passing of contextual
 | ||||
| // validation validation information via context.Context
 | ||||
| // validation information via context.Context
 | ||||
| // Fields may be provided in a namespaced fashion relative to the  struct provided
 | ||||
| // i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
 | ||||
| //
 | ||||
|  |  | |||
|  | @ -15,6 +15,7 @@ | |||
| package name | ||||
| 
 | ||||
| import ( | ||||
| 	// nolint: depguard
 | ||||
| 	_ "crypto/sha256" // Recommended by go-digest.
 | ||||
| 	"strings" | ||||
| 
 | ||||
|  |  | |||
|  | @ -0,0 +1,124 @@ | |||
| // Copyright 2018 Google LLC. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package client | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/google/trillian" | ||||
| 	"github.com/google/trillian/client/backoff" | ||||
| 	"google.golang.org/grpc/codes" | ||||
| 	"google.golang.org/grpc/status" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
| 
 | ||||
| // CreateAndInitTree uses the adminClient and logClient to create the tree
 | ||||
| // described by req.
 | ||||
| // If req describes a LOG tree, then this function will also call the InitLog
 | ||||
| // function using logClient.
 | ||||
| // Internally, the function will continue to retry failed requests until either
 | ||||
| // the tree is created (and if necessary, initialised) successfully, or ctx is
 | ||||
| // cancelled.
 | ||||
| func CreateAndInitTree( | ||||
| 	ctx context.Context, | ||||
| 	req *trillian.CreateTreeRequest, | ||||
| 	adminClient trillian.TrillianAdminClient, | ||||
| 	logClient trillian.TrillianLogClient) (*trillian.Tree, error) { | ||||
| 	b := &backoff.Backoff{ | ||||
| 		Min:    100 * time.Millisecond, | ||||
| 		Max:    10 * time.Second, | ||||
| 		Factor: 2, | ||||
| 		Jitter: true, | ||||
| 	} | ||||
| 
 | ||||
| 	var tree *trillian.Tree | ||||
| 	err := b.Retry(ctx, func() error { | ||||
| 		klog.Info("CreateTree...") | ||||
| 		var err error | ||||
| 		tree, err = adminClient.CreateTree(ctx, req) | ||||
| 		switch code := status.Code(err); code { | ||||
| 		case codes.Unavailable: | ||||
| 			klog.Errorf("Admin server unavailable: %v", err) | ||||
| 			return err | ||||
| 		case codes.OK: | ||||
| 			return nil | ||||
| 		default: | ||||
| 			klog.Errorf("failed to CreateTree(%+v): %T %v", req, err, err) | ||||
| 			return err | ||||
| 		} | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	switch tree.TreeType { | ||||
| 	case trillian.TreeType_LOG, trillian.TreeType_PREORDERED_LOG: | ||||
| 		if err := InitLog(ctx, tree, logClient); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	default: | ||||
| 		return nil, fmt.Errorf("don't know how or whether to initialise tree type %v", tree.TreeType) | ||||
| 	} | ||||
| 
 | ||||
| 	return tree, nil | ||||
| } | ||||
| 
 | ||||
| // InitLog initialises a freshly created Log tree.
 | ||||
| func InitLog(ctx context.Context, tree *trillian.Tree, logClient trillian.TrillianLogClient) error { | ||||
| 	if tree.TreeType != trillian.TreeType_LOG && | ||||
| 		tree.TreeType != trillian.TreeType_PREORDERED_LOG { | ||||
| 		return fmt.Errorf("InitLog called with tree of type %v", tree.TreeType) | ||||
| 	} | ||||
| 
 | ||||
| 	b := &backoff.Backoff{ | ||||
| 		Min:    100 * time.Millisecond, | ||||
| 		Max:    10 * time.Second, | ||||
| 		Factor: 2, | ||||
| 		Jitter: true, | ||||
| 	} | ||||
| 
 | ||||
| 	err := b.Retry(ctx, func() error { | ||||
| 		klog.Infof("Initialising Log %v...", tree.TreeId) | ||||
| 		req := &trillian.InitLogRequest{LogId: tree.TreeId} | ||||
| 		resp, err := logClient.InitLog(ctx, req) | ||||
| 		switch code := status.Code(err); code { | ||||
| 		case codes.Unavailable: | ||||
| 			klog.Errorf("Log server unavailable: %v", err) | ||||
| 			return err | ||||
| 		case codes.AlreadyExists: | ||||
| 			klog.Warningf("Bizarrely, the just-created Log (%v) is already initialised!: %v", tree.TreeId, err) | ||||
| 			return err | ||||
| 		case codes.OK: | ||||
| 			klog.Infof("Initialised Log (%v) with new SignedTreeHead:\n%+v", | ||||
| 				tree.TreeId, resp.Created) | ||||
| 			return nil | ||||
| 		default: | ||||
| 			klog.Errorf("failed to InitLog(%+v): %T %v", req, err, err) | ||||
| 			return err | ||||
| 		} | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 
 | ||||
| 	// Wait for log root to become available.
 | ||||
| 	return b.Retry(ctx, func() error { | ||||
| 		_, err := logClient.GetLatestSignedLogRoot(ctx, | ||||
| 			&trillian.GetLatestSignedLogRootRequest{LogId: tree.TreeId}) | ||||
| 		return err | ||||
| 	}, codes.FailedPrecondition) | ||||
| } | ||||
|  | @ -0,0 +1,130 @@ | |||
| // Copyright 2017 Google LLC. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| // Package backoff allows retrying an operation with backoff.
 | ||||
| package backoff | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"math/rand" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"google.golang.org/grpc/codes" | ||||
| 	"google.golang.org/grpc/status" | ||||
| ) | ||||
| 
 | ||||
| // RetriableError explicitly instructs Backoff to retry.
 | ||||
| type RetriableError string | ||||
| 
 | ||||
| // Error returns string representation of the retriable error.
 | ||||
| func (re RetriableError) Error() string { | ||||
| 	return string(re) | ||||
| } | ||||
| 
 | ||||
| // RetriableErrorf wraps a formatted string into a RetriableError.
 | ||||
| func RetriableErrorf(format string, a ...interface{}) error { | ||||
| 	return RetriableError(fmt.Sprintf(format, a...)) | ||||
| } | ||||
| 
 | ||||
| // Backoff specifies the parameters of the backoff algorithm. Works correctly
 | ||||
| // if 0 < Min <= Max <= 2^62 (nanosec), and Factor >= 1.
 | ||||
| type Backoff struct { | ||||
| 	Min    time.Duration // Duration of the first pause.
 | ||||
| 	Max    time.Duration // Max duration of a pause.
 | ||||
| 	Factor float64       // The factor of duration increase between iterations.
 | ||||
| 	Jitter bool          // Add random noise to pauses.
 | ||||
| 
 | ||||
| 	delta time.Duration // Current pause duration relative to Min, no jitter.
 | ||||
| } | ||||
| 
 | ||||
| // Duration returns the time to wait on current retry iteration. Every time
 | ||||
| // Duration is called, the returned value will exponentially increase by Factor
 | ||||
| // until Backoff.Max. If Jitter is enabled, will add an additional random value
 | ||||
| // between 0 and the duration, so the result can at most double.
 | ||||
| func (b *Backoff) Duration() time.Duration { | ||||
| 	base := b.Min + b.delta | ||||
| 	pause := base | ||||
| 	if b.Jitter { // Add a number in the range [0, pause).
 | ||||
| 		pause += time.Duration(rand.Int63n(int64(pause))) | ||||
| 	} | ||||
| 
 | ||||
| 	nextPause := time.Duration(float64(base) * b.Factor) | ||||
| 	if nextPause > b.Max || nextPause < b.Min { // Multiplication could overflow.
 | ||||
| 		nextPause = b.Max | ||||
| 	} | ||||
| 	b.delta = nextPause - b.Min | ||||
| 
 | ||||
| 	return pause | ||||
| } | ||||
| 
 | ||||
| // Reset sets the internal state back to first retry iteration.
 | ||||
| func (b *Backoff) Reset() { | ||||
| 	b.delta = 0 | ||||
| } | ||||
| 
 | ||||
| // Retry calls a function until it succeeds or the context is done.
 | ||||
| // It will backoff if the function returns a retryable error.
 | ||||
| // Once the context is done, retries will end and the most recent error will be returned.
 | ||||
| // Backoff is not reset by this function.
 | ||||
| func (b *Backoff) Retry(ctx context.Context, f func() error, retry ...codes.Code) error { | ||||
| 	// If the context is already done, don't make any attempts to call f.
 | ||||
| 	if ctx.Err() != nil { | ||||
| 		return ctx.Err() | ||||
| 	} | ||||
| 
 | ||||
| 	// Try calling f while the error is retryable and ctx is not done.
 | ||||
| 	for { | ||||
| 		if err := f(); !IsRetryable(err, retry...) { | ||||
| 			return err | ||||
| 		} | ||||
| 		select { | ||||
| 		case <-time.After(b.Duration()): | ||||
| 		case <-ctx.Done(): | ||||
| 			return ctx.Err() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // IsRetryable returns false unless the error is explicitly retriable per
 | ||||
| // https://godoc.org/google.golang.org/grpc/codes,
 | ||||
| // or if the error codes is in retry. codes.OK is not retryable.
 | ||||
| func IsRetryable(err error, retry ...codes.Code) bool { | ||||
| 	code := status.Code(err) | ||||
| 	switch code { | ||||
| 	// Fast path.
 | ||||
| 	case codes.OK: | ||||
| 		return false | ||||
| 
 | ||||
| 	// Debatable cases:
 | ||||
| 	case codes.DeadlineExceeded, | ||||
| 		codes.ResourceExhausted: // Retry with backoff.
 | ||||
| 		return true | ||||
| 
 | ||||
| 	// Errors that are explicitly retryable:
 | ||||
| 	case codes.Unavailable, // Client can just retry the call.
 | ||||
| 		codes.Aborted: // Client can retry the read-modify-write function.
 | ||||
| 		return true | ||||
| 	} | ||||
| 
 | ||||
| 	for _, c := range retry { | ||||
| 		if code == c { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// Don't retry for all other errors, unless it is a RetriableError.
 | ||||
| 	_, ok := err.(RetriableError) | ||||
| 	return ok | ||||
| } | ||||
|  | @ -0,0 +1,343 @@ | |||
| // Copyright 2017 Google LLC. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| // Package client verifies responses from the Trillian log.
 | ||||
| package client | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"sort" | ||||
| 	"sync" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/google/trillian" | ||||
| 	"github.com/google/trillian/client/backoff" | ||||
| 	"github.com/google/trillian/types" | ||||
| 	"github.com/transparency-dev/merkle" | ||||
| 	"google.golang.org/grpc/codes" | ||||
| 	"google.golang.org/grpc/status" | ||||
| ) | ||||
| 
 | ||||
| // LogClient represents a client for a given Trillian log instance.
 | ||||
| type LogClient struct { | ||||
| 	*LogVerifier | ||||
| 	LogID         int64 | ||||
| 	MinMergeDelay time.Duration | ||||
| 	client        trillian.TrillianLogClient | ||||
| 	root          types.LogRootV1 | ||||
| 	rootLock      sync.Mutex | ||||
| 	updateLock    sync.Mutex | ||||
| } | ||||
| 
 | ||||
| // New returns a new LogClient.
 | ||||
| func New(logID int64, client trillian.TrillianLogClient, verifier *LogVerifier, root types.LogRootV1) *LogClient { | ||||
| 	return &LogClient{ | ||||
| 		LogVerifier: verifier, | ||||
| 		LogID:       logID, | ||||
| 		client:      client, | ||||
| 		root:        root, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // NewFromTree creates a new LogClient given a tree config.
 | ||||
| func NewFromTree(client trillian.TrillianLogClient, config *trillian.Tree, root types.LogRootV1) (*LogClient, error) { | ||||
| 	verifier, err := NewLogVerifierFromTree(config) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return New(config.GetTreeId(), client, verifier, root), nil | ||||
| } | ||||
| 
 | ||||
| // AddLeaf adds leaf to the append only log.
 | ||||
| // Blocks and continuously updates the trusted root until a successful inclusion proof
 | ||||
| // can be retrieved.
 | ||||
| func (c *LogClient) AddLeaf(ctx context.Context, data []byte) error { | ||||
| 	if err := c.QueueLeaf(ctx, data); err != nil { | ||||
| 		return fmt.Errorf("QueueLeaf(): %v", err) | ||||
| 	} | ||||
| 	if err := c.WaitForInclusion(ctx, data); err != nil { | ||||
| 		return fmt.Errorf("WaitForInclusion(): %v", err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // ListByIndex returns the requested leaves by index.
 | ||||
| func (c *LogClient) ListByIndex(ctx context.Context, start, count int64) ([]*trillian.LogLeaf, error) { | ||||
| 	resp, err := c.client.GetLeavesByRange(ctx, | ||||
| 		&trillian.GetLeavesByRangeRequest{ | ||||
| 			LogId:      c.LogID, | ||||
| 			StartIndex: start, | ||||
| 			Count:      count, | ||||
| 		}) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	// Verify that we got back the requested leaves.
 | ||||
| 	if len(resp.Leaves) < int(count) { | ||||
| 		return nil, fmt.Errorf("len(Leaves)=%d, want %d", len(resp.Leaves), count) | ||||
| 	} | ||||
| 	for i, l := range resp.Leaves { | ||||
| 		if want := start + int64(i); l.LeafIndex != want { | ||||
| 			return nil, fmt.Errorf("Leaves[%d].LeafIndex=%d, want %d", i, l.LeafIndex, want) | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return resp.Leaves, nil | ||||
| } | ||||
| 
 | ||||
| // WaitForRootUpdate repeatedly fetches the latest root until there is an
 | ||||
| // update, which it then applies, or until ctx times out.
 | ||||
| func (c *LogClient) WaitForRootUpdate(ctx context.Context) (*types.LogRootV1, error) { | ||||
| 	b := &backoff.Backoff{ | ||||
| 		Min:    100 * time.Millisecond, | ||||
| 		Max:    10 * time.Second, | ||||
| 		Factor: 2, | ||||
| 		Jitter: true, | ||||
| 	} | ||||
| 
 | ||||
| 	for { | ||||
| 		newTrusted, err := c.UpdateRoot(ctx) | ||||
| 		switch status.Code(err) { | ||||
| 		case codes.OK: | ||||
| 			if newTrusted != nil { | ||||
| 				return newTrusted, nil | ||||
| 			} | ||||
| 		case codes.Unavailable, codes.NotFound, codes.FailedPrecondition: | ||||
| 			// Retry.
 | ||||
| 		default: | ||||
| 			return nil, err | ||||
| 		} | ||||
| 
 | ||||
| 		select { | ||||
| 		case <-ctx.Done(): | ||||
| 			return nil, status.Errorf(codes.DeadlineExceeded, "%v", ctx.Err()) | ||||
| 		case <-time.After(b.Duration()): | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| // getAndVerifyLatestRoot fetches and verifies the latest root against a trusted root, seen in the past.
 | ||||
| // Pass nil for trusted if this is the first time querying this log.
 | ||||
| func (c *LogClient) getAndVerifyLatestRoot(ctx context.Context, trusted *types.LogRootV1) (*types.LogRootV1, error) { | ||||
| 	resp, err := c.client.GetLatestSignedLogRoot(ctx, | ||||
| 		&trillian.GetLatestSignedLogRootRequest{ | ||||
| 			LogId:         c.LogID, | ||||
| 			FirstTreeSize: int64(trusted.TreeSize), | ||||
| 		}) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	// TODO(gbelvin): Turn on root verification.
 | ||||
| 	/* | ||||
| 		logRoot, err := c.VerifyRoot(&types.LogRootV1{}, resp.GetSignedLogRoot(), nil) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	*/ | ||||
| 	// TODO(gbelvin): Remove this hack when all implementations store digital signatures.
 | ||||
| 	var logRoot types.LogRootV1 | ||||
| 	if err := logRoot.UnmarshalBinary(resp.GetSignedLogRoot().LogRoot); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	if trusted.TreeSize > 0 && | ||||
| 		logRoot.TreeSize == trusted.TreeSize && | ||||
| 		bytes.Equal(logRoot.RootHash, trusted.RootHash) { | ||||
| 		// Tree has not been updated.
 | ||||
| 		return &logRoot, nil | ||||
| 	} | ||||
| 
 | ||||
| 	// Verify root update if the tree / the latest signed log root isn't empty.
 | ||||
| 	if logRoot.TreeSize > 0 { | ||||
| 		if _, err := c.VerifyRoot(trusted, resp.GetSignedLogRoot(), resp.GetProof().GetHashes()); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 	return &logRoot, nil | ||||
| } | ||||
| 
 | ||||
| // GetRoot returns a copy of the latest trusted root.
 | ||||
| func (c *LogClient) GetRoot() *types.LogRootV1 { | ||||
| 	c.rootLock.Lock() | ||||
| 	defer c.rootLock.Unlock() | ||||
| 
 | ||||
| 	// Copy the internal trusted root in order to prevent clients from modifying it.
 | ||||
| 	ret := c.root | ||||
| 	return &ret | ||||
| } | ||||
| 
 | ||||
| // UpdateRoot retrieves the current SignedLogRoot, verifying it against roots this client has
 | ||||
| // seen in the past, and updating the currently trusted root if the new root verifies, and is
 | ||||
| // newer than the currently trusted root.
 | ||||
| func (c *LogClient) UpdateRoot(ctx context.Context) (*types.LogRootV1, error) { | ||||
| 	// Only one root update should be running at any point in time, because
 | ||||
| 	// the update involves a consistency proof from the old value, and if the
 | ||||
| 	// old value could change along the way (in another goroutine) then the
 | ||||
| 	// result could be inconsistent.
 | ||||
| 	//
 | ||||
| 	// For example, if the current root is A and two root updates A->B and A->C
 | ||||
| 	// happen in parallel, then we might end up with the transitions A->B->C:
 | ||||
| 	//     cur := A            cur := A
 | ||||
| 	//    getRoot() => B      getRoot() => C
 | ||||
| 	//    proof(A->B) ok      proof(A->C) ok
 | ||||
| 	//    c.root = B
 | ||||
| 	//                        c.root = C
 | ||||
| 	// and the last step (B->C) has no proof and so could hide a forked tree.
 | ||||
| 	c.updateLock.Lock() | ||||
| 	defer c.updateLock.Unlock() | ||||
| 
 | ||||
| 	currentlyTrusted := c.GetRoot() | ||||
| 	newTrusted, err := c.getAndVerifyLatestRoot(ctx, currentlyTrusted) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	// Lock "rootLock" for the "root" update.
 | ||||
| 	c.rootLock.Lock() | ||||
| 	defer c.rootLock.Unlock() | ||||
| 
 | ||||
| 	if newTrusted.TimestampNanos > currentlyTrusted.TimestampNanos && | ||||
| 		newTrusted.TreeSize >= currentlyTrusted.TreeSize { | ||||
| 
 | ||||
| 		// Take a copy of the new trusted root in order to prevent clients from modifying it.
 | ||||
| 		c.root = *newTrusted | ||||
| 
 | ||||
| 		return newTrusted, nil | ||||
| 	} | ||||
| 
 | ||||
| 	return nil, nil | ||||
| } | ||||
| 
 | ||||
| // WaitForInclusion blocks until the requested data has been verified with an
 | ||||
| // inclusion proof.
 | ||||
| //
 | ||||
| // It will continuously update the root to the latest one available until the
 | ||||
| // data is found, or an error is returned.
 | ||||
| //
 | ||||
| // It is best to call this method with a context that will timeout to avoid
 | ||||
| // waiting forever.
 | ||||
| func (c *LogClient) WaitForInclusion(ctx context.Context, data []byte) error { | ||||
| 	leaf := prepareLeaf(c.hasher, data) | ||||
| 
 | ||||
| 	// If a minimum merge delay has been configured, wait at least that long before
 | ||||
| 	// starting to poll
 | ||||
| 	if c.MinMergeDelay > 0 { | ||||
| 		select { | ||||
| 		case <-ctx.Done(): | ||||
| 			return status.Errorf(codes.DeadlineExceeded, "%v", ctx.Err()) | ||||
| 		case <-time.After(c.MinMergeDelay): | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	var root *types.LogRootV1 | ||||
| 	for { | ||||
| 		root = c.GetRoot() | ||||
| 
 | ||||
| 		// It is illegal to ask for an inclusion proof with TreeSize = 0.
 | ||||
| 		if root.TreeSize >= 1 { | ||||
| 			ok, err := c.getAndVerifyInclusionProof(ctx, leaf.MerkleLeafHash, root) | ||||
| 			if err != nil && status.Code(err) != codes.NotFound { | ||||
| 				return err | ||||
| 			} else if ok { | ||||
| 				return nil | ||||
| 			} | ||||
| 		} | ||||
| 
 | ||||
| 		// If not found or tree is empty, wait for a root update before retrying again.
 | ||||
| 		if _, err := c.WaitForRootUpdate(ctx); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 
 | ||||
| 		// Retry
 | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (c *LogClient) getAndVerifyInclusionProof(ctx context.Context, leafHash []byte, sth *types.LogRootV1) (bool, error) { | ||||
| 	resp, err := c.client.GetInclusionProofByHash(ctx, | ||||
| 		&trillian.GetInclusionProofByHashRequest{ | ||||
| 			LogId:    c.LogID, | ||||
| 			LeafHash: leafHash, | ||||
| 			TreeSize: int64(sth.TreeSize), | ||||
| 		}) | ||||
| 	if err != nil { | ||||
| 		return false, err | ||||
| 	} | ||||
| 	if len(resp.Proof) < 1 { | ||||
| 		return false, nil | ||||
| 	} | ||||
| 	for _, proof := range resp.Proof { | ||||
| 		if err := c.VerifyInclusionByHash(sth, leafHash, proof); err != nil { | ||||
| 			return false, fmt.Errorf("VerifyInclusionByHash(): %v", err) | ||||
| 		} | ||||
| 	} | ||||
| 	return true, nil | ||||
| } | ||||
| 
 | ||||
| // AddSequencedLeaves adds any number of pre-sequenced leaves to the log.
 | ||||
| // Indexes must be contiguous.
 | ||||
| func (c *LogClient) AddSequencedLeaves(ctx context.Context, dataByIndex map[int64][]byte) error { | ||||
| 	if len(dataByIndex) == 0 { | ||||
| 		return nil | ||||
| 	} | ||||
| 	leaves := make([]*trillian.LogLeaf, 0, len(dataByIndex)) | ||||
| 	indexes := make([]int64, 0, len(dataByIndex)) | ||||
| 	for index := range dataByIndex { | ||||
| 		indexes = append(indexes, index) | ||||
| 	} | ||||
| 	sort.Slice(indexes, func(a, b int) bool { return indexes[a] < indexes[b] }) | ||||
| 
 | ||||
| 	for i, index := range indexes { | ||||
| 		// Check index continuity.
 | ||||
| 		if want := indexes[0] + int64(i); index != want { | ||||
| 			return fmt.Errorf("missing index in contiugous index range. got: %v, want: %v", index, want) | ||||
| 		} | ||||
| 		leaf := prepareLeaf(c.hasher, dataByIndex[index]) | ||||
| 		leaf.LeafIndex = index | ||||
| 		leaves = append(leaves, leaf) | ||||
| 	} | ||||
| 	resp, err := c.client.AddSequencedLeaves(ctx, &trillian.AddSequencedLeavesRequest{ | ||||
| 		LogId:  c.LogID, | ||||
| 		Leaves: leaves, | ||||
| 	}) | ||||
| 	for _, leaf := range resp.GetResults() { | ||||
| 		if s := status.FromProto(leaf.GetStatus()); s.Code() != codes.OK && s.Code() != codes.AlreadyExists { | ||||
| 			return status.Errorf(s.Code(), "unexpected fail status in AddSequencedLeaves: %+v, err: %v", leaf, s.Message()) | ||||
| 		} | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| // QueueLeaf adds a leaf to a Trillian log without blocking.
 | ||||
| // AlreadyExists is considered a success case by this function.
 | ||||
| func (c *LogClient) QueueLeaf(ctx context.Context, data []byte) error { | ||||
| 	leaf := prepareLeaf(c.hasher, data) | ||||
| 	_, err := c.client.QueueLeaf(ctx, &trillian.QueueLeafRequest{ | ||||
| 		LogId: c.LogID, | ||||
| 		Leaf:  leaf, | ||||
| 	}) | ||||
| 	return err | ||||
| } | ||||
| 
 | ||||
| // prepareLeaf returns a trillian.LogLeaf prepopulated with leaf data and hash.
 | ||||
| func prepareLeaf(hasher merkle.LogHasher, data []byte) *trillian.LogLeaf { | ||||
| 	leafHash := hasher.HashLeaf(data) | ||||
| 	return &trillian.LogLeaf{ | ||||
| 		LeafValue:      data, | ||||
| 		MerkleLeafHash: leafHash, | ||||
| 	} | ||||
| } | ||||
|  | @ -0,0 +1,91 @@ | |||
| // Copyright 2017 Google LLC. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package client | ||||
| 
 | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 
 | ||||
| 	"github.com/google/trillian" | ||||
| 	"github.com/google/trillian/types" | ||||
| 	"github.com/transparency-dev/merkle" | ||||
| 	"github.com/transparency-dev/merkle/proof" | ||||
| 	"github.com/transparency-dev/merkle/rfc6962" | ||||
| ) | ||||
| 
 | ||||
// LogVerifier allows verification of output from Trillian Logs, both regular
// and pre-ordered; it is safe for concurrent use (as its contents are fixed
// after construction). Build one with NewLogVerifier or
// NewLogVerifierFromTree.
type LogVerifier struct {
	// hasher is the hash strategy used to compute nodes in the Merkle tree;
	// it must match the hasher the log itself uses.
	hasher merkle.LogHasher
}
| 
 | ||||
| // NewLogVerifier returns an object that can verify output from Trillian Logs.
 | ||||
| func NewLogVerifier(hasher merkle.LogHasher) *LogVerifier { | ||||
| 	return &LogVerifier{hasher: hasher} | ||||
| } | ||||
| 
 | ||||
| // NewLogVerifierFromTree creates a new LogVerifier using the algorithms
 | ||||
| // specified by a Trillian Tree object.
 | ||||
| func NewLogVerifierFromTree(config *trillian.Tree) (*LogVerifier, error) { | ||||
| 	if config == nil { | ||||
| 		return nil, errors.New("client: NewLogVerifierFromTree(): nil config") | ||||
| 	} | ||||
| 	log, pLog := trillian.TreeType_LOG, trillian.TreeType_PREORDERED_LOG | ||||
| 	if got := config.TreeType; got != log && got != pLog { | ||||
| 		return nil, fmt.Errorf("client: NewLogVerifierFromTree(): TreeType: %v, want %v or %v", got, log, pLog) | ||||
| 	} | ||||
| 
 | ||||
| 	return NewLogVerifier(rfc6962.DefaultHasher), nil | ||||
| } | ||||
| 
 | ||||
| // VerifyRoot verifies that newRoot is a valid append-only operation from
 | ||||
| // trusted. If trusted.TreeSize is zero, a consistency proof is not needed.
 | ||||
| func (c *LogVerifier) VerifyRoot(trusted *types.LogRootV1, newRoot *trillian.SignedLogRoot, consistency [][]byte) (*types.LogRootV1, error) { | ||||
| 	if trusted == nil { | ||||
| 		return nil, fmt.Errorf("VerifyRoot() error: trusted == nil") | ||||
| 	} | ||||
| 	if newRoot == nil { | ||||
| 		return nil, fmt.Errorf("VerifyRoot() error: newRoot == nil") | ||||
| 	} | ||||
| 
 | ||||
| 	var r types.LogRootV1 | ||||
| 	if err := r.UnmarshalBinary(newRoot.LogRoot); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	// Implicitly trust the first root we get.
 | ||||
| 	if trusted.TreeSize != 0 { | ||||
| 		// Verify consistency proof.
 | ||||
| 		if err := proof.VerifyConsistency(c.hasher, trusted.TreeSize, r.TreeSize, consistency, trusted.RootHash, r.RootHash); err != nil { | ||||
| 			return nil, fmt.Errorf("failed to verify consistency proof from %d->%d %x->%x: %v", trusted.TreeSize, r.TreeSize, trusted.RootHash, r.RootHash, err) | ||||
| 		} | ||||
| 	} | ||||
| 	return &r, nil | ||||
| } | ||||
| 
 | ||||
| // VerifyInclusionByHash verifies that the inclusion proof for the given Merkle leafHash
 | ||||
| // matches the given trusted root.
 | ||||
| func (c *LogVerifier) VerifyInclusionByHash(trusted *types.LogRootV1, leafHash []byte, pf *trillian.Proof) error { | ||||
| 	if trusted == nil { | ||||
| 		return fmt.Errorf("VerifyInclusionByHash() error: trusted == nil") | ||||
| 	} | ||||
| 	if pf == nil { | ||||
| 		return fmt.Errorf("VerifyInclusionByHash() error: proof == nil") | ||||
| 	} | ||||
| 
 | ||||
| 	return proof.VerifyInclusion(c.hasher, uint64(pf.LeafIndex), trusted.TreeSize, leafHash, pf.Hashes, trusted.RootHash) | ||||
| } | ||||
|  | @ -16,6 +16,15 @@ This package provides various compression algorithms. | |||
| 
 | ||||
| # changelog | ||||
| 
 | ||||
| * Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) | ||||
| 	* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 | ||||
| 	* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 | ||||
| 	* zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 | ||||
| 	* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 | ||||
| 	* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 | ||||
| 	* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 | ||||
| 	* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 | ||||
| 
 | ||||
| * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) | ||||
| 	* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 | ||||
| 	* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 | ||||
|  |  | |||
|  | @ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { | |||
| } | ||||
| 
 | ||||
| func (r *readerWrapper) readByte() (byte, error) { | ||||
| 	n2, err := r.r.Read(r.tmp[:1]) | ||||
| 	n2, err := io.ReadFull(r.r, r.tmp[:1]) | ||||
| 	if err != nil { | ||||
| 		if err == io.EOF { | ||||
| 			err = io.ErrUnexpectedEOF | ||||
|  |  | |||
|  | @ -17,7 +17,7 @@ images: docs/urn.png | |||
| 
 | ||||
| .PHONY: removecomments | ||||
| removecomments: | ||||
| 	@go build ./tools/removecomments | ||||
| 	@cd ./tools/removecomments; go build -o ../../removecomments ./main.go | ||||
| 
 | ||||
| machine.go: machine.go.rl | ||||
| 
 | ||||
|  |  | |||
|  | @ -65,7 +65,4 @@ const ( | |||
| 
 | ||||
| 	// AnnotationArtifactDescription is the annotation key for the human readable description for the artifact.
 | ||||
| 	AnnotationArtifactDescription = "org.opencontainers.artifact.description" | ||||
| 
 | ||||
| 	// AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing.
 | ||||
| 	AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" | ||||
| ) | ||||
|  |  | |||
|  | @ -1,34 +0,0 @@ | |||
| // Copyright 2022 The Linux Foundation
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package v1 | ||||
| 
 | ||||
// Artifact describes an artifact manifest.
// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON.
//
// NOTE(review): this diff shows the type being deleted (image-spec
// moved away from the dedicated artifact manifest); it is kept here only
// as the removed content of the hunk.
type Artifact struct {
	// MediaType is the media type of the object this schema refers to.
	MediaType string `json:"mediaType"`

	// ArtifactType is the IANA media type of the artifact this schema refers to.
	ArtifactType string `json:"artifactType"`

	// Blobs is a collection of blobs referenced by this manifest.
	Blobs []Descriptor `json:"blobs,omitempty"`

	// Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest.
	Subject *Descriptor `json:"subject,omitempty"`

	// Annotations contains arbitrary metadata for the artifact manifest.
	Annotations map[string]string `json:"annotations,omitempty"`
}
|  | @ -49,13 +49,15 @@ type ImageConfig struct { | |||
| 	// StopSignal contains the system call signal that will be sent to the container to exit.
 | ||||
| 	StopSignal string `json:"StopSignal,omitempty"` | ||||
| 
 | ||||
| 	// ArgsEscaped `[Deprecated]` - This field is present only for legacy
 | ||||
| 	// compatibility with Docker and should not be used by new image builders.
 | ||||
| 	// It is used by Docker for Windows images to indicate that the `Entrypoint`
 | ||||
| 	// or `Cmd` or both, contains only a single element array, that is a
 | ||||
| 	// pre-escaped, and combined into a single string `CommandLine`. If `true`
 | ||||
| 	// the value in `Entrypoint` or `Cmd` should be used as-is to avoid double
 | ||||
| 	// escaping.
 | ||||
| 	// ArgsEscaped
 | ||||
| 	//
 | ||||
| 	// Deprecated: This field is present only for legacy compatibility with
 | ||||
| 	// Docker and should not be used by new image builders.  It is used by Docker
 | ||||
| 	// for Windows images to indicate that the `Entrypoint` or `Cmd` or both,
 | ||||
| 	// contains only a single element array, that is a pre-escaped, and combined
 | ||||
| 	// into a single string `CommandLine`. If `true` the value in `Entrypoint` or
 | ||||
| 	// `Cmd` should be used as-is to avoid double escaping.
 | ||||
| 	// https://github.com/opencontainers/image-spec/pull/892
 | ||||
| 	ArgsEscaped bool `json:"ArgsEscaped,omitempty"` | ||||
| } | ||||
| 
 | ||||
|  | @ -95,22 +97,8 @@ type Image struct { | |||
| 	// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
 | ||||
| 	Author string `json:"author,omitempty"` | ||||
| 
 | ||||
| 	// Architecture is the CPU architecture which the binaries in this image are built to run on.
 | ||||
| 	Architecture string `json:"architecture"` | ||||
| 
 | ||||
| 	// Variant is the variant of the specified CPU architecture which image binaries are intended to run on.
 | ||||
| 	Variant string `json:"variant,omitempty"` | ||||
| 
 | ||||
| 	// OS is the name of the operating system which the image is built to run on.
 | ||||
| 	OS string `json:"os"` | ||||
| 
 | ||||
| 	// OSVersion is an optional field specifying the operating system
 | ||||
| 	// version, for example on Windows `10.0.14393.1066`.
 | ||||
| 	OSVersion string `json:"os.version,omitempty"` | ||||
| 
 | ||||
| 	// OSFeatures is an optional field specifying an array of strings,
 | ||||
| 	// each listing a required OS feature (for example on Windows `win32k`).
 | ||||
| 	OSFeatures []string `json:"os.features,omitempty"` | ||||
| 	// Platform describes the platform which the image in the manifest runs on.
 | ||||
| 	Platform | ||||
| 
 | ||||
| 	// Config defines the execution parameters which should be used as a base when running a container using the image.
 | ||||
| 	Config ImageConfig `json:"config,omitempty"` | ||||
|  |  | |||
|  | @ -23,6 +23,9 @@ type Manifest struct { | |||
| 	// MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
 | ||||
| 	MediaType string `json:"mediaType,omitempty"` | ||||
| 
 | ||||
| 	// ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact.
 | ||||
| 	ArtifactType string `json:"artifactType,omitempty"` | ||||
| 
 | ||||
| 	// Config references a configuration object for a container, by digest.
 | ||||
| 	// The referenced configuration object is a JSON blob that the runtime uses to set up the container.
 | ||||
| 	Config Descriptor `json:"config"` | ||||
|  | @ -36,3 +39,11 @@ type Manifest struct { | |||
| 	// Annotations contains arbitrary metadata for the image manifest.
 | ||||
| 	Annotations map[string]string `json:"annotations,omitempty"` | ||||
| } | ||||
| 
 | ||||
// ScratchDescriptor is the descriptor of a blob with content of `{}`.
var ScratchDescriptor = Descriptor{
	MediaType: MediaTypeScratch,
	// Digest is the SHA-256 of the two-byte payload `{}`.
	Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`,
	Size:   2,
	Data:   []byte(`{}`),
}
|  |  | |||
|  | @ -40,21 +40,36 @@ const ( | |||
| 
 | ||||
| 	// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
 | ||||
| 	// the manifest but with distribution restrictions.
 | ||||
| 	//
 | ||||
| 	// Deprecated: Non-distributable layers are deprecated, and not recommended
 | ||||
| 	// for future use. Implementations SHOULD NOT produce new non-distributable
 | ||||
| 	// layers.
 | ||||
| 	// https://github.com/opencontainers/image-spec/pull/965
 | ||||
| 	MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" | ||||
| 
 | ||||
| 	// MediaTypeImageLayerNonDistributableGzip is the media type for
 | ||||
| 	// gzipped layers referenced by the manifest but with distribution
 | ||||
| 	// restrictions.
 | ||||
| 	//
 | ||||
| 	// Deprecated: Non-distributable layers are deprecated, and not recommended
 | ||||
| 	// for future use. Implementations SHOULD NOT produce new non-distributable
 | ||||
| 	// layers.
 | ||||
| 	// https://github.com/opencontainers/image-spec/pull/965
 | ||||
| 	MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" | ||||
| 
 | ||||
| 	// MediaTypeImageLayerNonDistributableZstd is the media type for zstd
 | ||||
| 	// compressed layers referenced by the manifest but with distribution
 | ||||
| 	// restrictions.
 | ||||
| 	//
 | ||||
| 	// Deprecated: Non-distributable layers are deprecated, and not recommended
 | ||||
| 	// for future use. Implementations SHOULD NOT produce new non-distributable
 | ||||
| 	// layers.
 | ||||
| 	// https://github.com/opencontainers/image-spec/pull/965
 | ||||
| 	MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" | ||||
| 
 | ||||
| 	// MediaTypeImageConfig specifies the media type for the image configuration.
 | ||||
| 	MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" | ||||
| 
 | ||||
| 	// MediaTypeArtifactManifest specifies the media type for a content descriptor.
 | ||||
| 	MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" | ||||
| 	// MediaTypeScratch specifies the media type for an unused blob containing the value `{}`
 | ||||
| 	MediaTypeScratch = "application/vnd.oci.scratch.v1+json" | ||||
| ) | ||||
|  |  | |||
|  | @ -25,7 +25,7 @@ const ( | |||
| 	VersionPatch = 0 | ||||
| 
 | ||||
| 	// VersionDev indicates development branch. Releases will be empty string.
 | ||||
| 	VersionDev = "-dev" | ||||
| 	VersionDev = "-rc.3" | ||||
| ) | ||||
| 
 | ||||
| // Version is the specification version that the package types support.
 | ||||
|  |  | |||
|  | @ -344,59 +344,59 @@ func parseExtensions(ext []pkix.Extension) (Extensions, error) { | |||
| 			out.GithubWorkflowRef = string(e.Value) | ||||
| 		// END: Deprecated
 | ||||
| 		case e.Id.Equal(OIDIssuerV2): | ||||
| 			if err := parseDERString(e.Value, &out.Issuer); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.Issuer); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDBuildSignerURI): | ||||
| 			if err := parseDERString(e.Value, &out.BuildSignerURI); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDBuildSignerDigest): | ||||
| 			if err := parseDERString(e.Value, &out.BuildSignerDigest); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDRunnerEnvironment): | ||||
| 			if err := parseDERString(e.Value, &out.RunnerEnvironment); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDSourceRepositoryURI): | ||||
| 			if err := parseDERString(e.Value, &out.SourceRepositoryURI); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDSourceRepositoryDigest): | ||||
| 			if err := parseDERString(e.Value, &out.SourceRepositoryDigest); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDSourceRepositoryRef): | ||||
| 			if err := parseDERString(e.Value, &out.SourceRepositoryRef); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDSourceRepositoryIdentifier): | ||||
| 			if err := parseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDSourceRepositoryOwnerURI): | ||||
| 			if err := parseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier): | ||||
| 			if err := parseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDBuildConfigURI): | ||||
| 			if err := parseDERString(e.Value, &out.BuildConfigURI); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDBuildConfigDigest): | ||||
| 			if err := parseDERString(e.Value, &out.BuildConfigDigest); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDBuildTrigger): | ||||
| 			if err := parseDERString(e.Value, &out.BuildTrigger); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		case e.Id.Equal(OIDRunInvocationURI): | ||||
| 			if err := parseDERString(e.Value, &out.RunInvocationURI); err != nil { | ||||
| 			if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil { | ||||
| 				return Extensions{}, err | ||||
| 			} | ||||
| 		} | ||||
|  | @ -407,9 +407,9 @@ func parseExtensions(ext []pkix.Extension) (Extensions, error) { | |||
| 	return out, nil | ||||
| } | ||||
| 
 | ||||
| // parseDERString decodes a DER-encoded string and puts the value in parsedVal.
 | ||||
| // Rerturns an error if the unmarshalling fails or if there are trailing bytes in the encoding.
 | ||||
| func parseDERString(val []byte, parsedVal *string) error { | ||||
| // ParseDERString decodes a DER-encoded string and puts the value in parsedVal.
 | ||||
| // Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding.
 | ||||
| func ParseDERString(val []byte, parsedVal *string) error { | ||||
| 	rest, err := asn1.Unmarshal(val, parsedVal) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %v", err) | ||||
|  |  | |||
|  | @ -0,0 +1,115 @@ | |||
| //
 | ||||
| // Copyright 2021 The Sigstore Authors.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package log | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"log" | ||||
| 
 | ||||
| 	"github.com/go-chi/chi/middleware" | ||||
| 	"go.uber.org/zap" | ||||
| 	"go.uber.org/zap/zapcore" | ||||
| ) | ||||
| 
 | ||||
// Logger is the package-wide sugared logger. It defaults to the
// development configuration (see init) and can be switched with
// ConfigureLogger.
var Logger *zap.SugaredLogger

func init() {
	// Default to development mode until the caller selects a configuration.
	ConfigureLogger("dev")
}
| 
 | ||||
| func ConfigureLogger(logType string) { | ||||
| 	var cfg zap.Config | ||||
| 	if logType == "prod" { | ||||
| 		cfg = zap.NewProductionConfig() | ||||
| 		cfg.EncoderConfig.LevelKey = "severity" | ||||
| 		cfg.EncoderConfig.MessageKey = "message" | ||||
| 		cfg.EncoderConfig.TimeKey = "time" | ||||
| 		cfg.EncoderConfig.EncodeLevel = encodeLevel() | ||||
| 		cfg.EncoderConfig.EncodeTime = zapcore.RFC3339TimeEncoder | ||||
| 		cfg.EncoderConfig.EncodeDuration = zapcore.SecondsDurationEncoder | ||||
| 		cfg.EncoderConfig.EncodeCaller = zapcore.FullCallerEncoder | ||||
| 	} else { | ||||
| 		cfg = zap.NewDevelopmentConfig() | ||||
| 		cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder | ||||
| 	} | ||||
| 	logger, err := cfg.Build() | ||||
| 	if err != nil { | ||||
| 		log.Fatalln("createLogger", err) | ||||
| 	} | ||||
| 	Logger = logger.Sugar() | ||||
| } | ||||
| 
 | ||||
| func encodeLevel() zapcore.LevelEncoder { | ||||
| 	return func(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) { | ||||
| 		switch l { | ||||
| 		case zapcore.DebugLevel: | ||||
| 			enc.AppendString("DEBUG") | ||||
| 		case zapcore.InfoLevel: | ||||
| 			enc.AppendString("INFO") | ||||
| 		case zapcore.WarnLevel: | ||||
| 			enc.AppendString("WARNING") | ||||
| 		case zapcore.ErrorLevel: | ||||
| 			enc.AppendString("ERROR") | ||||
| 		case zapcore.DPanicLevel: | ||||
| 			enc.AppendString("CRITICAL") | ||||
| 		case zapcore.PanicLevel: | ||||
| 			enc.AppendString("ALERT") | ||||
| 		case zapcore.FatalLevel: | ||||
| 			enc.AppendString("EMERGENCY") | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| var CliLogger = createCliLogger() | ||||
| 
 | ||||
| func createCliLogger() *zap.SugaredLogger { | ||||
| 	cfg := zap.NewDevelopmentConfig() | ||||
| 	cfg.EncoderConfig.TimeKey = "" | ||||
| 	cfg.EncoderConfig.LevelKey = "" | ||||
| 	cfg.DisableCaller = true | ||||
| 	cfg.DisableStacktrace = true | ||||
| 	logger, err := cfg.Build() | ||||
| 	if err != nil { | ||||
| 		log.Fatalln("createLogger", err) | ||||
| 	} | ||||
| 
 | ||||
| 	return logger.Sugar() | ||||
| } | ||||
| 
 | ||||
// WithRequestID returns a copy of ctx carrying id under chi's
// middleware.RequestIDKey, so ContextLogger can later tag log lines
// with the request ID.
func WithRequestID(ctx context.Context, id string) context.Context {
	return context.WithValue(ctx, middleware.RequestIDKey, id)
}
| 
 | ||||
// operation wraps a request ID so it can be emitted as a structured
// zap object field by ContextLogger.
type operation struct {
	// id is the request ID taken from the context.
	id string
}

// MarshalLogObject implements zapcore.ObjectMarshaler, emitting the
// operation ID under the "id" key.
func (o operation) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("id", o.id)
	return nil
}
| 
 | ||||
| func ContextLogger(ctx context.Context) *zap.SugaredLogger { | ||||
| 	proposedLogger := Logger | ||||
| 	if ctx != nil { | ||||
| 		if ctxRequestID, ok := ctx.Value(middleware.RequestIDKey).(string); ok { | ||||
| 			requestID := operation{ctxRequestID} | ||||
| 			proposedLogger = proposedLogger.With(zap.Object("operation", requestID)) | ||||
| 		} | ||||
| 	} | ||||
| 	return proposedLogger | ||||
| } | ||||
|  | @ -0,0 +1,380 @@ | |||
| //
 | ||||
| // Copyright 2021 The Sigstore Authors.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package util | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/hex" | ||||
| 	"fmt" | ||||
| 	"time" | ||||
| 
 | ||||
| 	"github.com/sigstore/rekor/pkg/log" | ||||
| 	"github.com/transparency-dev/merkle/proof" | ||||
| 	"github.com/transparency-dev/merkle/rfc6962" | ||||
| 
 | ||||
| 	"google.golang.org/grpc/codes" | ||||
| 	"google.golang.org/grpc/status" | ||||
| 	"google.golang.org/protobuf/types/known/durationpb" | ||||
| 
 | ||||
| 	"github.com/google/trillian" | ||||
| 	"github.com/google/trillian/client" | ||||
| 	"github.com/google/trillian/types" | ||||
| ) | ||||
| 
 | ||||
// TrillianClient provides a wrapper around the Trillian client
type TrillianClient struct {
	// client is the gRPC client for the Trillian log service.
	client trillian.TrillianLogClient
	// logID identifies the Trillian tree that all calls operate on.
	logID int64
	// context is used for every RPC issued through this wrapper.
	// NOTE(review): storing a context in a struct is discouraged in Go
	// (contexts should normally be passed per call); confirm the caller
	// keeps this ctx alive for the client's whole lifetime.
	context context.Context
}

// NewTrillianClient creates a TrillianClient with the given Trillian client and log/tree ID.
func NewTrillianClient(ctx context.Context, logClient trillian.TrillianLogClient, logID int64) TrillianClient {
	return TrillianClient{
		client:  logClient,
		logID:   logID,
		context: ctx,
	}
}
| 
 | ||||
// Response includes a status code, an optional error message, and one of the results based on the API call
type Response struct {
	// Status is the status code of the response
	Status codes.Code
	// Err contains an error on request or client failure
	Err error
	// GetAddResult contains the response from queueing a leaf in Trillian
	GetAddResult *trillian.QueueLeafResponse
	// GetLeafAndProofResult contains the response for fetching an inclusion proof and leaf
	GetLeafAndProofResult *trillian.GetEntryAndProofResponse
	// GetLatestResult contains the response for the latest checkpoint
	GetLatestResult *trillian.GetLatestSignedLogRootResponse
	// GetConsistencyProofResult contains the response for a consistency proof between two log sizes
	GetConsistencyProofResult *trillian.GetConsistencyProofResponse
	// getProofResult contains the response for an inclusion proof fetched
	// by leaf hash (unexported: only used internally by AddLeaf and
	// GetLeafAndProofByHash)
	getProofResult *trillian.GetInclusionProofByHashResponse
}
| 
 | ||||
| func unmarshalLogRoot(logRoot []byte) (types.LogRootV1, error) { | ||||
| 	var root types.LogRootV1 | ||||
| 	if err := root.UnmarshalBinary(logRoot); err != nil { | ||||
| 		return types.LogRootV1{}, err | ||||
| 	} | ||||
| 	return root, nil | ||||
| } | ||||
| 
 | ||||
| func (t *TrillianClient) root() (types.LogRootV1, error) { | ||||
| 	rqst := &trillian.GetLatestSignedLogRootRequest{ | ||||
| 		LogId: t.logID, | ||||
| 	} | ||||
| 	resp, err := t.client.GetLatestSignedLogRoot(t.context, rqst) | ||||
| 	if err != nil { | ||||
| 		return types.LogRootV1{}, err | ||||
| 	} | ||||
| 	return unmarshalLogRoot(resp.SignedLogRoot.LogRoot) | ||||
| } | ||||
| 
 | ||||
// AddLeaf queues a leaf in the Trillian log, blocks until the leaf is
// incorporated into the tree, and returns the integrated leaf together
// with its inclusion proof (in GetLeafAndProofResult).
func (t *TrillianClient) AddLeaf(byteValue []byte) *Response {
	leaf := &trillian.LogLeaf{
		LeafValue: byteValue,
	}
	rqst := &trillian.QueueLeafRequest{
		LogId: t.logID,
		Leaf:  leaf,
	}
	resp, err := t.client.QueueLeaf(t.context, rqst)

	// check for error
	// NOTE(review): when err == nil this dereferences resp.QueuedLeaf
	// without a nil check — confirm Trillian always populates QueuedLeaf
	// on a successful QueueLeaf response.
	if err != nil || (resp.QueuedLeaf.Status != nil && resp.QueuedLeaf.Status.Code != int32(codes.OK)) {
		return &Response{
			Status:       status.Code(err),
			Err:          err,
			GetAddResult: resp,
		}
	}

	// Fetch the current signed root so the log client below starts from a
	// verified checkpoint.
	root, err := t.root()
	if err != nil {
		return &Response{
			Status:       status.Code(err),
			Err:          err,
			GetAddResult: resp,
		}
	}
	v := client.NewLogVerifier(rfc6962.DefaultHasher)
	logClient := client.New(t.logID, t.client, v, root)

	// waitForInclusion polls for an inclusion proof of leafHash, waiting
	// for root updates between attempts, until the proof appears, a
	// non-NotFound error occurs, or ctx is cancelled.
	waitForInclusion := func(ctx context.Context, leafHash []byte) *Response {
		// Honor the log's minimum merge delay before the first attempt.
		if logClient.MinMergeDelay > 0 {
			select {
			case <-ctx.Done():
				return &Response{
					Status: codes.DeadlineExceeded,
					Err:    ctx.Err(),
				}
			case <-time.After(logClient.MinMergeDelay):
			}
		}
		for {
			root = *logClient.GetRoot()
			if root.TreeSize >= 1 {
				proofResp := t.getProofByHash(resp.QueuedLeaf.Leaf.MerkleLeafHash)
				// if this call succeeds or returns an error other than "not found", return
				if proofResp.Err == nil || (proofResp.Err != nil && status.Code(proofResp.Err) != codes.NotFound) {
					return proofResp
				}
				// otherwise wait for a root update before trying again
			}

			if _, err := logClient.WaitForRootUpdate(ctx); err != nil {
				return &Response{
					Status: codes.Unknown,
					Err:    err,
				}
			}
		}
	}

	proofResp := waitForInclusion(t.context, resp.QueuedLeaf.Leaf.MerkleLeafHash)
	if proofResp.Err != nil {
		return &Response{
			Status:       status.Code(proofResp.Err),
			Err:          proofResp.Err,
			GetAddResult: resp,
		}
	}

	// Exactly one proof is expected for the queued leaf's hash.
	proofs := proofResp.getProofResult.Proof
	if len(proofs) != 1 {
		err := fmt.Errorf("expected 1 proof from getProofByHash for %v, found %v", hex.EncodeToString(resp.QueuedLeaf.Leaf.MerkleLeafHash), len(proofs))
		return &Response{
			Status:       status.Code(err),
			Err:          err,
			GetAddResult: resp,
		}
	}

	// Re-fetch the leaf by its assigned index so the returned leaf carries
	// the integrated metadata.
	leafIndex := proofs[0].LeafIndex
	leafResp := t.GetLeafAndProofByIndex(leafIndex)
	if leafResp.Err != nil {
		return &Response{
			Status:       status.Code(leafResp.Err),
			Err:          leafResp.Err,
			GetAddResult: resp,
		}
	}

	// overwrite queued leaf that doesn't have index set
	resp.QueuedLeaf.Leaf = leafResp.GetLeafAndProofResult.Leaf

	// err is nil at this point, so Status is codes.OK.
	return &Response{
		Status:       status.Code(err),
		Err:          err,
		GetAddResult: resp,
		// include getLeafAndProofResult for inclusion proof
		GetLeafAndProofResult: leafResp.GetLeafAndProofResult,
	}
}
| 
 | ||||
| func (t *TrillianClient) GetLeafAndProofByHash(hash []byte) *Response { | ||||
| 	// get inclusion proof for hash, extract index, then fetch leaf using index
 | ||||
| 	proofResp := t.getProofByHash(hash) | ||||
| 	if proofResp.Err != nil { | ||||
| 		return &Response{ | ||||
| 			Status: status.Code(proofResp.Err), | ||||
| 			Err:    proofResp.Err, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	proofs := proofResp.getProofResult.Proof | ||||
| 	if len(proofs) != 1 { | ||||
| 		err := fmt.Errorf("expected 1 proof from getProofByHash for %v, found %v", hex.EncodeToString(hash), len(proofs)) | ||||
| 		return &Response{ | ||||
| 			Status: status.Code(err), | ||||
| 			Err:    err, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return t.GetLeafAndProofByIndex(proofs[0].LeafIndex) | ||||
| } | ||||
| 
 | ||||
| func (t *TrillianClient) GetLeafAndProofByIndex(index int64) *Response { | ||||
| 	ctx, cancel := context.WithTimeout(t.context, 20*time.Second) | ||||
| 	defer cancel() | ||||
| 
 | ||||
| 	rootResp := t.GetLatest(0) | ||||
| 	if rootResp.Err != nil { | ||||
| 		return &Response{ | ||||
| 			Status: status.Code(rootResp.Err), | ||||
| 			Err:    rootResp.Err, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	root, err := unmarshalLogRoot(rootResp.GetLatestResult.SignedLogRoot.LogRoot) | ||||
| 	if err != nil { | ||||
| 		return &Response{ | ||||
| 			Status: status.Code(rootResp.Err), | ||||
| 			Err:    rootResp.Err, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	resp, err := t.client.GetEntryAndProof(ctx, | ||||
| 		&trillian.GetEntryAndProofRequest{ | ||||
| 			LogId:     t.logID, | ||||
| 			LeafIndex: index, | ||||
| 			TreeSize:  int64(root.TreeSize), | ||||
| 		}) | ||||
| 
 | ||||
| 	if resp != nil && resp.Proof != nil { | ||||
| 		if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(index), root.TreeSize, resp.GetLeaf().MerkleLeafHash, resp.Proof.Hashes, root.RootHash); err != nil { | ||||
| 			return &Response{ | ||||
| 				Status: status.Code(err), | ||||
| 				Err:    err, | ||||
| 			} | ||||
| 		} | ||||
| 		return &Response{ | ||||
| 			Status: status.Code(err), | ||||
| 			Err:    err, | ||||
| 			GetLeafAndProofResult: &trillian.GetEntryAndProofResponse{ | ||||
| 				Proof:         resp.Proof, | ||||
| 				Leaf:          resp.Leaf, | ||||
| 				SignedLogRoot: rootResp.GetLatestResult.SignedLogRoot, | ||||
| 			}, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return &Response{ | ||||
| 		Status: status.Code(err), | ||||
| 		Err:    err, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (t *TrillianClient) GetLatest(leafSizeInt int64) *Response { | ||||
| 
 | ||||
| 	ctx, cancel := context.WithTimeout(t.context, 20*time.Second) | ||||
| 	defer cancel() | ||||
| 
 | ||||
| 	resp, err := t.client.GetLatestSignedLogRoot(ctx, | ||||
| 		&trillian.GetLatestSignedLogRootRequest{ | ||||
| 			LogId:         t.logID, | ||||
| 			FirstTreeSize: leafSizeInt, | ||||
| 		}) | ||||
| 
 | ||||
| 	return &Response{ | ||||
| 		Status:          status.Code(err), | ||||
| 		Err:             err, | ||||
| 		GetLatestResult: resp, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (t *TrillianClient) GetConsistencyProof(firstSize, lastSize int64) *Response { | ||||
| 
 | ||||
| 	ctx, cancel := context.WithTimeout(t.context, 20*time.Second) | ||||
| 	defer cancel() | ||||
| 
 | ||||
| 	resp, err := t.client.GetConsistencyProof(ctx, | ||||
| 		&trillian.GetConsistencyProofRequest{ | ||||
| 			LogId:          t.logID, | ||||
| 			FirstTreeSize:  firstSize, | ||||
| 			SecondTreeSize: lastSize, | ||||
| 		}) | ||||
| 
 | ||||
| 	return &Response{ | ||||
| 		Status:                    status.Code(err), | ||||
| 		Err:                       err, | ||||
| 		GetConsistencyProofResult: resp, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func (t *TrillianClient) getProofByHash(hashValue []byte) *Response { | ||||
| 	ctx, cancel := context.WithTimeout(t.context, 20*time.Second) | ||||
| 	defer cancel() | ||||
| 
 | ||||
| 	rootResp := t.GetLatest(0) | ||||
| 	if rootResp.Err != nil { | ||||
| 		return &Response{ | ||||
| 			Status: status.Code(rootResp.Err), | ||||
| 			Err:    rootResp.Err, | ||||
| 		} | ||||
| 	} | ||||
| 	root, err := unmarshalLogRoot(rootResp.GetLatestResult.SignedLogRoot.LogRoot) | ||||
| 	if err != nil { | ||||
| 		return &Response{ | ||||
| 			Status: status.Code(rootResp.Err), | ||||
| 			Err:    rootResp.Err, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	// issue 1308: if the tree is empty, there's no way we can return a proof
 | ||||
| 	if root.TreeSize == 0 { | ||||
| 		return &Response{ | ||||
| 			Status: codes.NotFound, | ||||
| 			Err:    status.Error(codes.NotFound, "tree is empty"), | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	resp, err := t.client.GetInclusionProofByHash(ctx, | ||||
| 		&trillian.GetInclusionProofByHashRequest{ | ||||
| 			LogId:    t.logID, | ||||
| 			LeafHash: hashValue, | ||||
| 			TreeSize: int64(root.TreeSize), | ||||
| 		}) | ||||
| 
 | ||||
| 	if resp != nil { | ||||
| 		v := client.NewLogVerifier(rfc6962.DefaultHasher) | ||||
| 		for _, proof := range resp.Proof { | ||||
| 			if err := v.VerifyInclusionByHash(&root, hashValue, proof); err != nil { | ||||
| 				return &Response{ | ||||
| 					Status: status.Code(err), | ||||
| 					Err:    err, | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		// Return an inclusion proof response with the requested
 | ||||
| 		return &Response{ | ||||
| 			Status: status.Code(err), | ||||
| 			Err:    err, | ||||
| 			getProofResult: &trillian.GetInclusionProofByHashResponse{ | ||||
| 				Proof:         resp.Proof, | ||||
| 				SignedLogRoot: rootResp.GetLatestResult.SignedLogRoot, | ||||
| 			}, | ||||
| 		} | ||||
| 	} | ||||
| 
 | ||||
| 	return &Response{ | ||||
| 		Status: status.Code(err), | ||||
| 		Err:    err, | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func CreateAndInitTree(ctx context.Context, adminClient trillian.TrillianAdminClient, logClient trillian.TrillianLogClient) (*trillian.Tree, error) { | ||||
| 	t, err := adminClient.CreateTree(ctx, &trillian.CreateTreeRequest{ | ||||
| 		Tree: &trillian.Tree{ | ||||
| 			TreeType:        trillian.TreeType_LOG, | ||||
| 			TreeState:       trillian.TreeState_ACTIVE, | ||||
| 			MaxRootDuration: durationpb.New(time.Hour), | ||||
| 		}, | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("create tree: %w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	if err := client.InitLog(ctx, t, logClient); err != nil { | ||||
| 		return nil, fmt.Errorf("init log: %w", err) | ||||
| 	} | ||||
| 	log.Logger.Infof("Created new tree with ID: %v", t.TreeId) | ||||
| 	return t, nil | ||||
| } | ||||
|  | @ -179,6 +179,6 @@ func validateEcdsaKey(pub *ecdsa.PublicKey) error { | |||
| } | ||||
| 
 | ||||
// validateEd25519Key performs no checks: ED25519 defines exactly one key
// size, so any ed25519.PublicKey value is accepted as-is.
func validateEd25519Key(_ ed25519.PublicKey) error {
	return nil
}
|  |  | |||
|  | @ -25,25 +25,25 @@ import ( | |||
| type NoOpOptionImpl struct{} | ||||
| 
 | ||||
| // ApplyContext is a no-op required to fully implement the requisite interfaces
 | ||||
| func (NoOpOptionImpl) ApplyContext(ctx *context.Context) {} | ||||
| func (NoOpOptionImpl) ApplyContext(_ *context.Context) {} | ||||
| 
 | ||||
| // ApplyCryptoSignerOpts is a no-op required to fully implement the requisite interfaces
 | ||||
| func (NoOpOptionImpl) ApplyCryptoSignerOpts(opts *crypto.SignerOpts) {} | ||||
| func (NoOpOptionImpl) ApplyCryptoSignerOpts(_ *crypto.SignerOpts) {} | ||||
| 
 | ||||
| // ApplyDigest is a no-op required to fully implement the requisite interfaces
 | ||||
| func (NoOpOptionImpl) ApplyDigest(digest *[]byte) {} | ||||
| func (NoOpOptionImpl) ApplyDigest(_ *[]byte) {} | ||||
| 
 | ||||
| // ApplyRand is a no-op required to fully implement the requisite interfaces
 | ||||
| func (NoOpOptionImpl) ApplyRand(rand *io.Reader) {} | ||||
| func (NoOpOptionImpl) ApplyRand(_ *io.Reader) {} | ||||
| 
 | ||||
| // ApplyRemoteVerification is a no-op required to fully implement the requisite interfaces
 | ||||
| func (NoOpOptionImpl) ApplyRemoteVerification(remoteVerification *bool) {} | ||||
| func (NoOpOptionImpl) ApplyRemoteVerification(_ *bool) {} | ||||
| 
 | ||||
| // ApplyRPCAuthOpts is a no-op required to fully implement the requisite interfaces
 | ||||
| func (NoOpOptionImpl) ApplyRPCAuthOpts(opts *RPCAuth) {} | ||||
| func (NoOpOptionImpl) ApplyRPCAuthOpts(_ *RPCAuth) {} | ||||
| 
 | ||||
| // ApplyKeyVersion is a no-op required to fully implement the requisite interfaces
 | ||||
| func (NoOpOptionImpl) ApplyKeyVersion(keyVersion *string) {} | ||||
| func (NoOpOptionImpl) ApplyKeyVersion(_ *string) {} | ||||
| 
 | ||||
| // ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces
 | ||||
| func (NoOpOptionImpl) ApplyKeyVersionUsed(keyVersion **string) {} | ||||
| func (NoOpOptionImpl) ApplyKeyVersionUsed(_ **string) {} | ||||
|  |  | |||
|  | @ -622,60 +622,52 @@ func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error { | |||
| 		return fmt.Errorf("%w", errNotPartition) | ||||
| 	} | ||||
| 
 | ||||
| 	fs, pt, arch, err := descr.getPartitionMetadata() | ||||
| 	if err != nil { | ||||
| 	var p partition | ||||
| 	if err := descr.getExtra(binaryUnmarshaler{&p}); err != nil { | ||||
| 		return fmt.Errorf("%w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	// if already primary system partition, nothing to do
 | ||||
| 	if pt == PartPrimSys { | ||||
| 	if p.Parttype == PartPrimSys { | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	if pt != PartSystem { | ||||
| 	if p.Parttype != PartSystem { | ||||
| 		return fmt.Errorf("%w", errNotSystem) | ||||
| 	} | ||||
| 
 | ||||
| 	olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys)) | ||||
| 	if err != nil && !errors.Is(err, ErrObjectNotFound) { | ||||
| 	// If there is currently a primary system partition, update it.
 | ||||
| 	if d, err := f.getDescriptor(WithPartitionType(PartPrimSys)); err == nil { | ||||
| 		var p partition | ||||
| 		if err := d.getExtra(binaryUnmarshaler{&p}); err != nil { | ||||
| 			return fmt.Errorf("%w", err) | ||||
| 		} | ||||
| 
 | ||||
| 		p.Parttype = PartSystem | ||||
| 
 | ||||
| 		if err := d.setExtra(p); err != nil { | ||||
| 			return fmt.Errorf("%w", err) | ||||
| 		} | ||||
| 
 | ||||
| 		d.ModifiedAt = so.t.Unix() | ||||
| 	} else if !errors.Is(err, ErrObjectNotFound) { | ||||
| 		return fmt.Errorf("%w", err) | ||||
| 	} | ||||
| 	extra := partition{ | ||||
| 		Fstype:   fs, | ||||
| 		Parttype: PartPrimSys, | ||||
| 	} | ||||
| 	copy(extra.Arch[:], arch) | ||||
| 
 | ||||
| 	if err := descr.setExtra(extra); err != nil { | ||||
| 	// Update the descriptor of the new primary system partition.
 | ||||
| 	p.Parttype = PartPrimSys | ||||
| 
 | ||||
| 	if err := descr.setExtra(p); err != nil { | ||||
| 		return fmt.Errorf("%w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	descr.ModifiedAt = so.t.Unix() | ||||
| 
 | ||||
| 	if olddescr != nil { | ||||
| 		oldfs, _, oldarch, err := olddescr.getPartitionMetadata() | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("%w", err) | ||||
| 		} | ||||
| 
 | ||||
| 		oldextra := partition{ | ||||
| 			Fstype:   oldfs, | ||||
| 			Parttype: PartSystem, | ||||
| 			Arch:     getSIFArch(oldarch), | ||||
| 		} | ||||
| 
 | ||||
| 		if err := olddescr.setExtra(oldextra); err != nil { | ||||
| 			return fmt.Errorf("%w", err) | ||||
| 		} | ||||
| 
 | ||||
| 		olddescr.ModifiedAt = so.t.Unix() | ||||
| 	} | ||||
| 
 | ||||
| 	if err := f.writeDescriptors(); err != nil { | ||||
| 		return fmt.Errorf("%w", err) | ||||
| 	} | ||||
| 
 | ||||
| 	f.h.Arch = getSIFArch(arch) | ||||
| 	f.h.Arch = p.Arch | ||||
| 	f.h.ModifiedAt = so.t.Unix() | ||||
| 
 | ||||
| 	if err := f.writeHeader(); err != nil { | ||||
|  |  | |||
|  | @ -0,0 +1,58 @@ | |||
| # How to contribute # | ||||
| 
 | ||||
| We'd love to accept your patches and contributions to this project.  There are | ||||
| just a few small guidelines you need to follow. | ||||
| 
 | ||||
| 
 | ||||
| ## Contributor License Agreement ## | ||||
| 
 | ||||
| Contributions to any Google project must be accompanied by a Contributor | ||||
| License Agreement.  This is not a copyright **assignment**, it simply gives | ||||
| Google permission to use and redistribute your contributions as part of the | ||||
| project. | ||||
| 
 | ||||
|   * If you are an individual writing original source code and you're sure you | ||||
|     own the intellectual property, then you'll need to sign an [individual | ||||
|     CLA][]. | ||||
| 
 | ||||
|   * If you work for a company that wants to allow you to contribute your work, | ||||
|     then you'll need to sign a [corporate CLA][]. | ||||
| 
 | ||||
| You generally only need to submit a CLA once, so if you've already submitted | ||||
| one (even if it was for a different project), you probably don't need to do it | ||||
| again. | ||||
| 
 | ||||
| [individual CLA]: https://developers.google.com/open-source/cla/individual | ||||
| [corporate CLA]: https://developers.google.com/open-source/cla/corporate | ||||
| 
 | ||||
| Once your CLA is submitted (or if you already submitted one for | ||||
| another Google project), make a commit adding yourself to the | ||||
| [AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part | ||||
| of your first [pull request][]. | ||||
| 
 | ||||
| [AUTHORS]: AUTHORS | ||||
| [CONTRIBUTORS]: CONTRIBUTORS | ||||
| 
 | ||||
| 
 | ||||
| ## Submitting a patch ## | ||||
| 
 | ||||
|   1. It's generally best to start by opening a new issue describing the bug or | ||||
|      feature you're intending to fix.  Even if you think it's relatively minor, | ||||
|      it's helpful to know what people are working on.  Mention in the initial | ||||
|      issue that you are planning to work on that bug or feature so that it can | ||||
|      be assigned to you. | ||||
| 
 | ||||
|   1. Follow the normal process of [forking][] the project, and setup a new | ||||
|      branch to work in.  It's important that each group of changes be done in | ||||
|      separate branches in order to ensure that a pull request only includes the | ||||
|      commits related to that bug or feature. | ||||
| 
 | ||||
|   1. Do your best to have [well-formed commit messages][] for each change. | ||||
|      This provides consistency throughout the project, and ensures that commit | ||||
|      messages are able to be formatted properly by various git tools. | ||||
| 
 | ||||
|   1. Finally, push the commits to your fork and submit a [pull request][]. | ||||
| 
 | ||||
| [forking]: https://help.github.com/articles/fork-a-repo | ||||
| [well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html | ||||
| [pull request]: https://help.github.com/articles/creating-a-pull-request | ||||
|  | @ -0,0 +1,202 @@ | |||
| 
 | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
| 
 | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
| 
 | ||||
|    1. Definitions. | ||||
| 
 | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
| 
 | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
| 
 | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
| 
 | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
| 
 | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
| 
 | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
| 
 | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
| 
 | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
| 
 | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
| 
 | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
| 
 | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
| 
 | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
| 
 | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
| 
 | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
| 
 | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
| 
 | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
| 
 | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
| 
 | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
| 
 | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
| 
 | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
| 
 | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
| 
 | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
| 
 | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
| 
 | ||||
|    END OF TERMS AND CONDITIONS | ||||
| 
 | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
| 
 | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "[]" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
| 
 | ||||
|    Copyright [yyyy] [name of copyright owner] | ||||
| 
 | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
| 
 | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
| 
 | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
|  | @ -0,0 +1,25 @@ | |||
| # Merkle | ||||
| 
 | ||||
| [](https://pkg.go.dev/github.com/transparency-dev/merkle) | ||||
| [](https://goreportcard.com/report/github.com/transparency-dev/merkle) | ||||
| [](https://codecov.io/gh/transparency-dev/merkle) | ||||
| [](https://gtrillian.slack.com/) | ||||
| 
 | ||||
| ## Overview | ||||
| 
 | ||||
| This repository contains Go code to help create and manipulate Merkle trees, as | ||||
| well as constructing and verifying various types of proof. | ||||
| 
 | ||||
| This is the data structure which is used by projects such as | ||||
| [Trillian](https://github.com/google/trillian) to provide | ||||
| [verifiable logs](https://transparency.dev/verifiable-data-structures/#verifiable-log). | ||||
| 
 | ||||
| 
 | ||||
| ## Support | ||||
| * Mailing list: https://groups.google.com/forum/#!forum/trillian-transparency | ||||
| * Slack: https://gtrillian.slack.com/ (invitation) | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
|  | @ -0,0 +1,89 @@ | |||
| // Copyright 2019 Google LLC. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package compact | ||||
| 
 | ||||
| import "math/bits" | ||||
| 
 | ||||
// NodeID identifies a node of a Merkle tree.
//
// The ID consists of a level and index within this level. Levels are numbered
// from 0, which corresponds to the tree leaves. Within each level, nodes are
// numbered with consecutive indices starting from 0.
//
//	L4:         ┌───────0───────┐                ...
//	L3:     ┌───0───┐       ┌───1───┐       ┌─── ...
//	L2:   ┌─0─┐   ┌─1─┐   ┌─2─┐   ┌─3─┐   ┌─4─┐  ...
//	L1:  ┌0┐ ┌1┐ ┌2┐ ┌3┐ ┌4┐ ┌5┐ ┌6┐ ┌7┐ ┌8┐ ┌9┐ ...
//	L0:  0 1 2 3 4 5 6 7 8 9 ... ... ... ... ... ...
//
// When the tree is not perfect, the nodes that would complement it to perfect
// are called ephemeral. Algorithms that operate with ephemeral nodes still map
// them to the same address space.
type NodeID struct {
	Level uint
	Index uint64
}

// NewNodeID returns a NodeID with the passed in node coordinates.
func NewNodeID(level uint, index uint64) NodeID {
	return NodeID{Level: level, Index: index}
}

// Parent returns the ID of the parent node: one level up, index halved.
func (id NodeID) Parent() NodeID {
	return NodeID{Level: id.Level + 1, Index: id.Index >> 1}
}

// Sibling returns the ID of the node sharing a parent with this one, i.e.
// the same level with the lowest index bit flipped.
func (id NodeID) Sibling() NodeID {
	return NodeID{Level: id.Level, Index: id.Index ^ 1}
}

// Coverage returns the [begin, end) range of leaves covered by the node.
func (id NodeID) Coverage() (uint64, uint64) {
	begin := id.Index << id.Level
	return begin, begin + 1<<id.Level
}
| 
 | ||||
// RangeNodes appends the IDs of the nodes that comprise the [begin, end)
// compact range to the given slice, and returns the new slice. The caller may
// pre-allocate space with the help of the RangeSize function.
func RangeNodes(begin, end uint64, ids []NodeID) []NodeID {
	// left/right are bitmasks of perfect-subtree sizes; see Decompose.
	left, right := Decompose(begin, end)

	pos := begin
	// Iterate over perfect subtrees along the left border of the range, ordered
	// from lower to upper levels. Each set bit in left is one subtree; its bit
	// position is the subtree level, and clearing it (left^bit) consumes it.
	for bit := uint64(0); left != 0; pos, left = pos+bit, left^bit {
		level := uint(bits.TrailingZeros64(left))
		bit = uint64(1) << level
		ids = append(ids, NewNodeID(level, pos>>level))
	}

	// Iterate over perfect subtrees along the right border of the range, ordered
	// from upper to lower levels (highest set bit of right first).
	for bit := uint64(0); right != 0; pos, right = pos+bit, right^bit {
		level := uint(bits.Len64(right)) - 1
		bit = uint64(1) << level
		ids = append(ids, NewNodeID(level, pos>>level))
	}

	return ids
}
| 
 | ||||
| // RangeSize returns the number of nodes in the [begin, end) compact range.
 | ||||
| func RangeSize(begin, end uint64) int { | ||||
| 	left, right := Decompose(begin, end) | ||||
| 	return bits.OnesCount64(left) + bits.OnesCount64(right) | ||||
| } | ||||
|  | @ -0,0 +1,264 @@ | |||
| // Copyright 2019 Google LLC. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| // Package compact provides compact Merkle tree data structures.
 | ||||
| package compact | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"math/bits" | ||||
| ) | ||||
| 
 | ||||
// HashFn computes an internal node's hash using the hashes of its child nodes.
type HashFn func(left, right []byte) []byte

// VisitFn visits the node with the specified ID and hash.
type VisitFn func(id NodeID, hash []byte)

// RangeFactory allows creating compact ranges with the specified hash
// function, which must not be nil, and must not be changed.
type RangeFactory struct {
	// Hash combines two child hashes into their parent's hash.
	Hash HashFn
}
| 
 | ||||
| // NewRange creates a Range for [begin, end) with the given set of hashes. The
 | ||||
| // hashes correspond to the roots of the minimal set of perfect sub-trees
 | ||||
| // covering the [begin, end) leaves range, ordered left to right.
 | ||||
| func (f *RangeFactory) NewRange(begin, end uint64, hashes [][]byte) (*Range, error) { | ||||
| 	if end < begin { | ||||
| 		return nil, fmt.Errorf("invalid range: end=%d, want >= %d", end, begin) | ||||
| 	} | ||||
| 	if got, want := len(hashes), RangeSize(begin, end); got != want { | ||||
| 		return nil, fmt.Errorf("invalid hashes: got %d values, want %d", got, want) | ||||
| 	} | ||||
| 	return &Range{f: f, begin: begin, end: end, hashes: hashes}, nil | ||||
| } | ||||
| 
 | ||||
| // NewEmptyRange returns a new Range for an empty [begin, begin) range. The
 | ||||
| // value of begin defines where the range will start growing from when entries
 | ||||
| // are appended to it.
 | ||||
| func (f *RangeFactory) NewEmptyRange(begin uint64) *Range { | ||||
| 	return &Range{f: f, begin: begin, end: begin} | ||||
| } | ||||
| 
 | ||||
// Range represents a compact Merkle tree range for leaf indices [begin, end).
//
// It contains the minimal set of perfect subtrees whose leaves comprise this
// range. The structure is efficiently mergeable with other compact ranges that
// share one of the endpoints with it.
//
// For more details, see
// https://github.com/transparency-dev/merkle/blob/main/docs/compact_ranges.md.
type Range struct {
	f      *RangeFactory // Supplies the hash function for node merges.
	begin  uint64        // First leaf index covered (inclusive).
	end    uint64        // One past the last leaf index covered (exclusive).
	hashes [][]byte      // Perfect sub-tree root hashes, ordered left to right.
}
| 
 | ||||
// Begin returns the first leaf index covered by the range (inclusive).
func (r *Range) Begin() uint64 {
	return r.begin
}

// End returns the leaf index just past the range, i.e. the range covers
// leaf indices [Begin, End).
func (r *Range) End() uint64 {
	return r.end
}

// Hashes returns sub-tree hashes corresponding to the minimal set of perfect
// sub-trees covering the [begin, end) range, ordered left to right. The
// returned slice is the Range's internal storage, not a copy.
func (r *Range) Hashes() [][]byte {
	return r.hashes
}
| 
 | ||||
// Append extends the compact range by appending the passed in hash to it. It
// reports all the added nodes through the visitor function (if non-nil).
func (r *Range) Append(hash []byte, visitor VisitFn) error {
	// The new hash becomes leaf node (level 0, index r.end); report it first.
	if visitor != nil {
		visitor(NewNodeID(0, r.end), hash)
	}
	// Merge the single-leaf range [r.end, r.end+1) into this range.
	return r.appendImpl(r.end+1, hash, nil, visitor)
}
| 
 | ||||
| // AppendRange extends the compact range by merging in the other compact range
 | ||||
| // from the right. It uses the tree hasher to calculate hashes of newly created
 | ||||
| // nodes, and reports them through the visitor function (if non-nil).
 | ||||
| func (r *Range) AppendRange(other *Range, visitor VisitFn) error { | ||||
| 	if other.f != r.f { | ||||
| 		return errors.New("incompatible ranges") | ||||
| 	} | ||||
| 	if got, want := other.begin, r.end; got != want { | ||||
| 		return fmt.Errorf("ranges are disjoint: other.begin=%d, want %d", got, want) | ||||
| 	} | ||||
| 	if len(other.hashes) == 0 { // The other range is empty, merging is trivial.
 | ||||
| 		return nil | ||||
| 	} | ||||
| 	return r.appendImpl(other.end, other.hashes[0], other.hashes[1:], visitor) | ||||
| } | ||||
| 
 | ||||
// GetRootHash returns the root hash of the Merkle tree represented by this
// compact range. Requires the range to start at index 0. If the range is
// empty, returns nil.
//
// If visitor is not nil, it is called with all "ephemeral" nodes (i.e. the
// ones rooting imperfect subtrees) along the right border of the tree.
func (r *Range) GetRootHash(visitor VisitFn) ([]byte, error) {
	if r.begin != 0 {
		return nil, fmt.Errorf("begin=%d, want 0", r.begin)
	}
	ln := len(r.hashes)
	if ln == 0 {
		// Empty tree: by convention the root is nil, with no error.
		return nil, nil
	}
	// Fold the subtree hashes right-to-left, starting from the lowest one.
	hash := r.hashes[ln-1]
	// All non-perfect subtree hashes along the right border of the tree
	// correspond to the parents of all perfect subtree nodes except the lowest
	// one (therefore the loop skips it).
	for i, size := ln-2, r.end; i >= 0; i-- {
		hash = r.f.Hash(r.hashes[i], hash)
		if visitor != nil {
			// size's set bits mirror the perfect subtrees in r.hashes.
			size &= size - 1                              // Delete the previous node.
			level := uint(bits.TrailingZeros64(size)) + 1 // Compute the parent level.
			index := size >> level                        // And its horizontal index.
			visitor(NewNodeID(level, index), hash)
		}
	}
	return hash, nil
}
| 
 | ||||
| // Equal compares two Ranges for equality.
 | ||||
| func (r *Range) Equal(other *Range) bool { | ||||
| 	if r.f != other.f || r.begin != other.begin || r.end != other.end { | ||||
| 		return false | ||||
| 	} | ||||
| 	if len(r.hashes) != len(other.hashes) { | ||||
| 		return false | ||||
| 	} | ||||
| 	for i := range r.hashes { | ||||
| 		if !bytes.Equal(r.hashes[i], other.hashes[i]) { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
| 
 | ||||
// appendImpl extends the compact range by merging the [r.end, end) compact
// range into it. The other compact range is decomposed into a seed hash and
// all the other hashes (possibly none). The method uses the tree hasher to
// calculate hashes of newly created nodes, and reports them through the
// visitor function (if non-nil).
func (r *Range) appendImpl(end uint64, seed []byte, hashes [][]byte, visitor VisitFn) error {
	// Bits [low, high) of r.end encode the merge path, i.e. the sequence of node
	// merges that transforms the two compact ranges into one.
	low, high := getMergePath(r.begin, r.end, end)
	if high < low {
		high = low // Empty merge path: no merges are needed.
	}
	index := r.end >> low
	// Now bits [0, high-low) of index encode the merge path.

	// The number of one bits in index is the number of nodes from the left range
	// that will be merged, and zero bits correspond to the nodes in the right
	// range. Below we make sure that both ranges have enough hashes, which can
	// be false only in case the data is corrupted in some way.
	ones := bits.OnesCount64(index & (1<<(high-low) - 1))
	if ln := len(r.hashes); ln < ones {
		return fmt.Errorf("corrupted lhs range: got %d hashes, want >= %d", ln, ones)
	}
	// The +1 in the message accounts for the seed hash of the rhs range.
	if ln, zeros := len(hashes), int(high-low)-ones; ln < zeros {
		return fmt.Errorf("corrupted rhs range: got %d hashes, want >= %d", ln+1, zeros+1)
	}

	// Some of the trailing nodes of the left compact range, and some of the
	// leading nodes of the right range, are sequentially merged with the seed,
	// according to the mask. All new nodes are reported through the visitor.
	idx1, idx2 := len(r.hashes), 0
	for h := low; h < high; h++ {
		if index&1 == 0 {
			// A zero bit: merge with the next node of the right range.
			seed = r.f.Hash(seed, hashes[idx2])
			idx2++
		} else {
			// A one bit: merge with the last unconsumed node of the left range.
			idx1--
			seed = r.f.Hash(r.hashes[idx1], seed)
		}
		index >>= 1
		if visitor != nil {
			visitor(NewNodeID(h+1, index), seed)
		}
	}

	// All nodes from both ranges that have not been merged are bundled together
	// with the "merged" seed node.
	r.hashes = append(append(r.hashes[:idx1], seed), hashes[idx2:]...)
	r.end = end
	return nil
}
| 
 | ||||
// getMergePath returns the merging path between the compact range [begin, mid)
// and [mid, end). The path is represented as a range of bits within mid, with
// bit indices [low, high). A bit value of 1 on level i of mid means that the
// node on this level merges with the corresponding node in the left compact
// range, whereas 0 represents merging with the right compact range. If the
// path is empty then high <= low.
//
// The output is not specified if begin <= mid <= end doesn't hold, but the
// function never panics.
func getMergePath(begin, mid, end uint64) (uint, uint) {
	// The path starts at the level of the lowest perfect subtree ending at mid.
	low := uint(bits.TrailingZeros64(mid))
	// It is bounded above by where mid's path diverges from leaf #end, and,
	// when begin > 0, from leaf #begin-1.
	high := bits.Len64((mid - 1) ^ end)
	if begin != 0 {
		if h := bits.Len64(mid ^ (begin - 1)); h < high {
			high = h
		}
	}
	return low, uint(high - 1)
}
| 
 | ||||
// Decompose splits the [begin, end) range into a minimal number of sub-ranges,
// each of which is of the form [m * 2^k, (m+1) * 2^k), i.e. of length 2^k, for
// some integers m, k >= 0.
//
// The sequence of sizes is returned encoded as bitmasks left and right, where:
//  - a 1 bit in a bitmask denotes a sub-range of the corresponding size 2^k
//  - left mask bits in LSB-to-MSB order encode the left part of the sequence
//  - right mask bits in MSB-to-LSB order encode the right part
//
// The corresponding values of m are not returned (they can be calculated from
// begin and the sub-range sizes).
//
// For example, (begin, end) values of (0b110, 0b11101) would indicate a
// sequence of tree sizes: 2,8; 8,4,1.
//
// The output is not specified if begin > end, but the function never panics.
func Decompose(begin, end uint64) (uint64, uint64) {
	// The general case below needs begin != 0 (or end < 2^63); handle 0 here.
	if begin == 0 {
		return 0, end
	}
	last := begin - 1 // The leaf just before the range.
	// Find where the paths to leaves #begin-1 and #end diverge, and keep only
	// the bits strictly below that point: those are the nodes in the range.
	top := bits.Len64(last^end) - 1
	mask := uint64(1)<<uint(top) - 1
	// Zero bits of begin-1 under the mask mark nodes to the right of its path
	// (the left part of the range); one bits of end mark nodes to the left of
	// its path (the right part).
	left := ^last & mask
	right := end & mask
	return left, right
}
|  | @ -0,0 +1,32 @@ | |||
| // Copyright 2017 Google LLC. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| // Package merkle provides Merkle tree interfaces and implementation.
 | ||||
| package merkle | ||||
| 
 | ||||
| // TODO(pavelkalinnikov): Remove this root package. The only interface provided
 | ||||
| // here does not have to exist, and can be [re-]defined on the user side, such
 | ||||
| // as in compact or proof package.
 | ||||
| 
 | ||||
// LogHasher provides the hash functions needed to compute dense merkle trees.
type LogHasher interface {
	// EmptyRoot supports returning a special case for the root of an empty tree.
	EmptyRoot() []byte
	// HashLeaf computes the hash of a leaf that exists.
	HashLeaf(leaf []byte) []byte
	// HashChildren computes interior nodes, combining the left and right
	// child hashes into the parent's hash.
	HashChildren(l, r []byte) []byte
	// Size returns the number of bytes the Hash* functions will return.
	Size() int
}
|  | @ -0,0 +1,191 @@ | |||
| // Copyright 2022 Google LLC. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| // Package proof contains helpers for constructing log Merkle tree proofs.
 | ||||
| package proof | ||||
| 
 | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math/bits" | ||||
| 
 | ||||
| 	"github.com/transparency-dev/merkle/compact" | ||||
| ) | ||||
| 
 | ||||
// Nodes contains information on how to construct a log Merkle tree proof. It
// supports any proof that has at most one ephemeral node, such as inclusion
// and consistency proofs defined in RFC 6962.
type Nodes struct {
	// IDs contains the IDs of non-ephemeral nodes sufficient to build the proof.
	// If an ephemeral node is needed for a proof, it can be recomputed based on
	// a subset of nodes in this list.
	IDs []compact.NodeID
	// begin is the beginning index (inclusive) into the IDs[begin:end] subslice
	// of the nodes which will be used to re-create the ephemeral node.
	begin int
	// end is the ending (exclusive) index into the IDs[begin:end] subslice of
	// the nodes which will be used to re-create the ephemeral node.
	end int
	// ephem is the ID of the ephemeral node in the proof. This node is a common
	// ancestor of all nodes in IDs[begin:end]. It is the node that otherwise
	// would have been used in the proof if the tree was perfect.
	ephem compact.NodeID
}
| 
 | ||||
| // Inclusion returns the information on how to fetch and construct an inclusion
 | ||||
| // proof for the given leaf index in a log Merkle tree of the given size. It
 | ||||
| // requires 0 <= index < size.
 | ||||
| func Inclusion(index, size uint64) (Nodes, error) { | ||||
| 	if index >= size { | ||||
| 		return Nodes{}, fmt.Errorf("index %d out of bounds for tree size %d", index, size) | ||||
| 	} | ||||
| 	return nodes(index, 0, size).skipFirst(), nil | ||||
| } | ||||
| 
 | ||||
// Consistency returns the information on how to fetch and construct a
// consistency proof between the two given tree sizes of a log Merkle tree. It
// requires 0 <= size1 <= size2.
func Consistency(size1, size2 uint64) (Nodes, error) {
	if size1 > size2 {
		return Nodes{}, fmt.Errorf("tree size %d > %d", size1, size2)
	}
	if size1 == size2 || size1 == 0 {
		// Equal sizes, or growth from the empty tree: the proof is empty.
		return Nodes{IDs: []compact.NodeID{}}, nil
	}

	// Find the root of the biggest perfect subtree that ends at size1.
	level := uint(bits.TrailingZeros64(size1))
	index := (size1 - 1) >> level
	// The consistency proof consists of this node (except if size1 is a power of
	// two, in which case adding this node would be redundant because the client
	// is assumed to know it from a checkpoint), and nodes of the inclusion proof
	// into this node in the tree of size2.
	p := nodes(index, level, size2)

	// Handle the case when size1 is a power of 2.
	if index == 0 {
		return p.skipFirst(), nil
	}
	return p, nil
}
| 
 | ||||
// nodes returns the node IDs necessary to prove that the (level, index) node
// is included in the Merkle tree of the given size.
func nodes(index uint64, level uint, size uint64) Nodes {
	// Compute the `fork` node, where the path from root to (level, index) node
	// diverges from the path to (0, size).
	//
	// The sibling of this node is the ephemeral node which represents a subtree
	// that is not complete in the tree of the given size. To compute the hash
	// of the ephemeral node, we need all the non-ephemeral nodes that cover the
	// same range of leaves.
	//
	// The `inner` variable is how many layers up from (level, index) the `fork`
	// and the ephemeral nodes are.
	inner := bits.Len64(index^(size>>level)) - 1
	fork := compact.NewNodeID(level+uint(inner), index>>inner)

	// left/right count the nodes needed to cover the leaves outside the fork's
	// coverage, on its left and right respectively.
	begin, end := fork.Coverage()
	left := compact.RangeSize(0, begin)
	right := compact.RangeSize(end, size)

	node := compact.NewNodeID(level, index)
	// Pre-allocate the exact number of nodes for the proof, in order:
	// - The seed node for which we are building the proof.
	// - The `inner` nodes at each level up to the fork node.
	// - The `right` nodes, comprising the ephemeral node.
	// - The `left` nodes, completing the coverage of the whole [0, size) range.
	nodes := append(make([]compact.NodeID, 0, 1+inner+right+left), node)

	// The first portion of the proof consists of the siblings for nodes of the
	// path going up to the level at which the ephemeral node appears.
	for ; node.Level < fork.Level; node = node.Parent() {
		nodes = append(nodes, node.Sibling())
	}
	// This portion of the proof covers the range [begin, end) under it. The
	// ranges to the left and to the right from it remain to be covered.

	// Add all the nodes (potentially none) that cover the right range, and
	// represent the ephemeral node. Reverse them so that the Rehash method can
	// process hashes in the convenient order, from lower to upper levels.
	len1 := len(nodes)
	nodes = compact.RangeNodes(end, size, nodes)
	reverse(nodes[len(nodes)-right:])
	len2 := len(nodes)
	// Add the nodes that cover the left range, ordered increasingly by level.
	nodes = compact.RangeNodes(0, begin, nodes)
	reverse(nodes[len(nodes)-left:])

	// nodes[len1:len2] contains the nodes representing the ephemeral node. If
	// it's empty, make it zero. Note that it can also contain a single node.
	// Depending on the preference of the layer above, it may or may not be
	// considered ephemeral.
	if len1 >= len2 {
		len1, len2 = 0, 0
	}

	return Nodes{IDs: nodes, begin: len1, end: len2, ephem: fork.Sibling()}
}
| 
 | ||||
// Ephem returns the ephemeral node, and indices begin and end, such that
// IDs[begin:end] slice contains the child nodes of the ephemeral node.
//
// The list is empty iff there are no ephemeral nodes in the proof. Some
// examples of when this can happen: a proof in a perfect tree; an inclusion
// proof for a leaf in a perfect subtree at the right edge of the tree.
func (n Nodes) Ephem() (compact.NodeID, int, int) {
	return n.ephem, n.begin, n.end
}
| 
 | ||||
// Rehash computes the proof based on the slice of node hashes corresponding to
// their IDs in the n.IDs field. The slices must be of the same length. The hc
// parameter computes a node's hash based on hashes of its children.
//
// Warning: The passed-in slice of hashes can be modified in-place.
func (n Nodes) Rehash(h [][]byte, hc func(left, right []byte) []byte) ([][]byte, error) {
	if got, want := len(h), len(n.IDs); got != want {
		return nil, fmt.Errorf("got %d hashes but expected %d", got, want)
	}
	cursor := 0
	// Scan the list of node hashes, and store the rehashed list in-place.
	// Invariant: cursor <= i, and h[:cursor] contains all the hashes of the
	// rehashed list after scanning h up to index i-1.
	for i, ln := 0, len(h); i < ln; i, cursor = i+1, cursor+1 {
		hash := h[i]
		if i >= n.begin && i < n.end {
			// Scan the block of node hashes that need rehashing, folding them
			// into a single ephemeral-node hash (children ordered low to high).
			for i++; i < n.end; i++ {
				hash = hc(h[i], hash)
			}
			i-- // Compensate for the outer loop's increment.
		}
		h[cursor] = hash
	}
	// h[:cursor] is the rehashed proof; the tail of h is leftover storage.
	return h[:cursor], nil
}
| 
 | ||||
| func (n Nodes) skipFirst() Nodes { | ||||
| 	n.IDs = n.IDs[1:] | ||||
| 	// Fixup the indices into the IDs slice.
 | ||||
| 	if n.begin < n.end { | ||||
| 		n.begin-- | ||||
| 		n.end-- | ||||
| 	} | ||||
| 	return n | ||||
| } | ||||
| 
 | ||||
| func reverse(ids []compact.NodeID) { | ||||
| 	for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 { | ||||
| 		ids[i], ids[j] = ids[j], ids[i] | ||||
| 	} | ||||
| } | ||||
|  | @ -0,0 +1,176 @@ | |||
| // Copyright 2017 Google LLC. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package proof | ||||
| 
 | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"math/bits" | ||||
| 
 | ||||
| 	"github.com/transparency-dev/merkle" | ||||
| ) | ||||
| 
 | ||||
// RootMismatchError occurs when an inclusion proof fails.
type RootMismatchError struct {
	ExpectedRoot   []byte // The root hash the caller supplied.
	CalculatedRoot []byte // The root hash computed from the proof.
}

// Error implements the error interface.
func (e RootMismatchError) Error() string {
	return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot)
}
| 
 | ||||
| func verifyMatch(calculated, expected []byte) error { | ||||
| 	if !bytes.Equal(calculated, expected) { | ||||
| 		return RootMismatchError{ExpectedRoot: expected, CalculatedRoot: calculated} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| // VerifyInclusion verifies the correctness of the inclusion proof for the leaf
 | ||||
| // with the specified hash and index, relatively to the tree of the given size
 | ||||
| // and root hash. Requires 0 <= index < size.
 | ||||
| func VerifyInclusion(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte, root []byte) error { | ||||
| 	calcRoot, err := RootFromInclusionProof(hasher, index, size, leafHash, proof) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return verifyMatch(calcRoot, root) | ||||
| } | ||||
| 
 | ||||
// RootFromInclusionProof calculates the expected root hash for a tree of the
// given size, provided a leaf index and hash with the corresponding inclusion
// proof. Requires 0 <= index < size.
func RootFromInclusionProof(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte) ([]byte, error) {
	if index >= size {
		return nil, fmt.Errorf("index is beyond size: %d >= %d", index, size)
	}
	if got, want := len(leafHash), hasher.Size(); got != want {
		return nil, fmt.Errorf("leafHash has unexpected size %d, want %d", got, want)
	}

	// Split the proof into the inner part (below the divergence point of the
	// paths to |index| and |size-1|) and the right-border part above it.
	inner, border := decompInclProof(index, size)
	if got, want := len(proof), inner+border; got != want {
		return nil, fmt.Errorf("wrong proof size %d, want %d", got, want)
	}

	res := chainInner(hasher, leafHash, proof[:inner], index)
	res = chainBorderRight(hasher, res, proof[inner:])
	return res, nil
}
| 
 | ||||
// VerifyConsistency checks that the passed-in consistency proof is valid
// between the passed in tree sizes, with respect to the corresponding root
// hashes. Requires 0 <= size1 <= size2.
func VerifyConsistency(hasher merkle.LogHasher, size1, size2 uint64, proof [][]byte, root1, root2 []byte) error {
	switch {
	case size2 < size1:
		return fmt.Errorf("size2 (%d) < size1 (%d)", size1, size2)
	case size1 == size2:
		if len(proof) > 0 {
			return errors.New("size1=size2, but proof is not empty")
		}
		// Equal sizes: the roots themselves must match.
		return verifyMatch(root1, root2)
	case size1 == 0:
		// Any size greater than 0 is consistent with size 0.
		if len(proof) > 0 {
			return fmt.Errorf("expected empty proof, but got %d components", len(proof))
		}
		return nil // Proof OK.
	case len(proof) == 0:
		return errors.New("empty proof")
	}

	inner, border := decompInclProof(size1-1, size2)
	shift := bits.TrailingZeros64(size1)
	inner -= shift // Note: shift < inner if size1 < size2.

	// The proof includes the root hash for the sub-tree of size 2^shift.
	seed, start := proof[0], 1
	if size1 == 1<<uint(shift) { // Unless size1 is that very 2^shift.
		seed, start = root1, 0
	}
	if got, want := len(proof), start+inner+border; got != want {
		return fmt.Errorf("wrong proof size %d, want %d", got, want)
	}
	proof = proof[start:]
	// Now len(proof) == inner+border, and proof is effectively a suffix of
	// inclusion proof for entry |size1-1| in a tree of size |size2|.

	// Verify the first root.
	mask := (size1 - 1) >> uint(shift) // Start chaining from level |shift|.
	hash1 := chainInnerRight(hasher, seed, proof[:inner], mask)
	hash1 = chainBorderRight(hasher, hash1, proof[inner:])
	if err := verifyMatch(hash1, root1); err != nil {
		return err
	}

	// Verify the second root.
	hash2 := chainInner(hasher, seed, proof[:inner], mask)
	hash2 = chainBorderRight(hasher, hash2, proof[inner:])
	return verifyMatch(hash2, root2)
}
| 
 | ||||
// decompInclProof breaks down inclusion proof for a leaf at the specified
// |index| in a tree of the specified |size| into 2 components. The splitting
// point between them is where paths to leaves |index| and |size-1| diverge.
// Returns lengths of the bottom and upper proof parts correspondingly. The sum
// of the two determines the correct length of the inclusion proof.
func decompInclProof(index, size uint64) (int, int) {
	inner := innerProofSize(index, size)
	// Above the divergence point, only the one bits of index contribute a
	// left-sibling hash to the proof.
	border := bits.OnesCount64(index >> uint(inner))
	return inner, border
}
| 
 | ||||
// innerProofSize returns the length of the "inner" part of an inclusion proof
// for leaf |index| in a tree of |size|: the number of levels below the point
// where the paths to leaves |index| and |size-1| diverge.
func innerProofSize(index, size uint64) int {
	last := size - 1 // Index of the last leaf in the tree.
	return bits.Len64(index ^ last)
}
| 
 | ||||
| // chainInner computes a subtree hash for a node on or below the tree's right
 | ||||
| // border. Assumes |proof| hashes are ordered from lower levels to upper, and
 | ||||
| // |seed| is the initial subtree/leaf hash on the path located at the specified
 | ||||
| // |index| on its level.
 | ||||
| func chainInner(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte { | ||||
| 	for i, h := range proof { | ||||
| 		if (index>>uint(i))&1 == 0 { | ||||
| 			seed = hasher.HashChildren(seed, h) | ||||
| 		} else { | ||||
| 			seed = hasher.HashChildren(h, seed) | ||||
| 		} | ||||
| 	} | ||||
| 	return seed | ||||
| } | ||||
| 
 | ||||
| // chainInnerRight computes a subtree hash like chainInner, but only takes
 | ||||
| // hashes to the left from the path into consideration, which effectively means
 | ||||
| // the result is a hash of the corresponding earlier version of this subtree.
 | ||||
| func chainInnerRight(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte { | ||||
| 	for i, h := range proof { | ||||
| 		if (index>>uint(i))&1 == 1 { | ||||
| 			seed = hasher.HashChildren(h, seed) | ||||
| 		} | ||||
| 	} | ||||
| 	return seed | ||||
| } | ||||
| 
 | ||||
| // chainBorderRight chains proof hashes along tree borders. This differs from
 | ||||
| // inner chaining because |proof| contains only left-side subtree hashes.
 | ||||
| func chainBorderRight(hasher merkle.LogHasher, seed []byte, proof [][]byte) []byte { | ||||
| 	for _, h := range proof { | ||||
| 		seed = hasher.HashChildren(h, seed) | ||||
| 	} | ||||
| 	return seed | ||||
| } | ||||
|  | @ -0,0 +1,68 @@ | |||
| // Copyright 2016 Google LLC. All Rights Reserved.
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| // Package rfc6962 provides hashing functionality according to RFC6962.
 | ||||
| package rfc6962 | ||||
| 
 | ||||
| import ( | ||||
| 	"crypto" | ||||
| 	_ "crypto/sha256" // SHA256 is the default algorithm.
 | ||||
| ) | ||||
| 
 | ||||
// Domain separation prefixes, as specified by RFC 6962 section 2.1, used to
// distinguish leaf hashes from interior node hashes.
const (
	// RFC6962LeafHashPrefix is prepended to leaf data before hashing.
	RFC6962LeafHashPrefix = 0
	// RFC6962NodeHashPrefix is prepended to concatenated child hashes.
	RFC6962NodeHashPrefix = 1
)
| 
 | ||||
// DefaultHasher is a SHA256-based LogHasher.
var DefaultHasher = New(crypto.SHA256)
| 
 | ||||
// Hasher implements the RFC 6962 tree hashing algorithm on top of an
// embedded crypto.Hash function.
type Hasher struct {
	crypto.Hash
}
| 
 | ||||
// New creates a new RFC 6962 Hasher backed by the passed-in hash function.
func New(h crypto.Hash) *Hasher {
	return &Hasher{Hash: h}
}
| 
 | ||||
| // EmptyRoot returns a special case for an empty tree.
 | ||||
| func (t *Hasher) EmptyRoot() []byte { | ||||
| 	return t.New().Sum(nil) | ||||
| } | ||||
| 
 | ||||
| // HashLeaf returns the Merkle tree leaf hash of the data passed in through leaf.
 | ||||
| // The data in leaf is prefixed by the LeafHashPrefix.
 | ||||
| func (t *Hasher) HashLeaf(leaf []byte) []byte { | ||||
| 	h := t.New() | ||||
| 	h.Write([]byte{RFC6962LeafHashPrefix}) | ||||
| 	h.Write(leaf) | ||||
| 	return h.Sum(nil) | ||||
| } | ||||
| 
 | ||||
| // HashChildren returns the inner Merkle tree node hash of the two child nodes l and r.
 | ||||
| // The hashed structure is NodeHashPrefix||l||r.
 | ||||
| func (t *Hasher) HashChildren(l, r []byte) []byte { | ||||
| 	h := t.New() | ||||
| 	b := append(append(append( | ||||
| 		make([]byte, 0, 1+len(l)+len(r)), | ||||
| 		RFC6962NodeHashPrefix), | ||||
| 		l...), | ||||
| 		r...) | ||||
| 
 | ||||
| 	h.Write(b) | ||||
| 	return h.Sum(nil) | ||||
| } | ||||
|  | @ -7,6 +7,8 @@ Thumbs.db | |||
| *.iml | ||||
| *.so | ||||
| coverage.* | ||||
| go.work | ||||
| go.work.sum | ||||
| 
 | ||||
| gen/ | ||||
| 
 | ||||
|  |  | |||
|  | @ -85,6 +85,8 @@ linters-settings: | |||
|           - "**/internal/matchers/*.go" | ||||
|   godot: | ||||
|     exclude: | ||||
|       # Exclude links. | ||||
|       - '^ *\[[^]]+\]:' | ||||
|       # Exclude sentence fragments for lists. | ||||
|       - '^[ ]*[-•]' | ||||
|       # Exclude sentences prefixing a list. | ||||
|  |  | |||
|  | @ -8,6 +8,141 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm | |||
| 
 | ||||
| ## [Unreleased] | ||||
| 
 | ||||
| ## [1.15.0/0.38.0] 2023-04-27 | ||||
| 
 | ||||
| ### Added | ||||
| 
 | ||||
| - The `go.opentelemetry.io/otel/metric/embedded` package. (#3916) | ||||
| - The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949) | ||||
| - Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970) | ||||
| - The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971) | ||||
|   - The `AddConfig` used to hold configuration for addition measurements | ||||
|     - `NewAddConfig` used to create a new `AddConfig` | ||||
|     - `AddOption` used to configure an `AddConfig` | ||||
|   - The `RecordConfig` used to hold configuration for recorded measurements | ||||
|     - `NewRecordConfig` used to create a new `RecordConfig` | ||||
|     - `RecordOption` used to configure a `RecordConfig` | ||||
|   - The `ObserveConfig` used to hold configuration for observed measurements | ||||
|     - `NewObserveConfig` used to create a new `ObserveConfig` | ||||
|     - `ObserveOption` used to configure an `ObserveConfig` | ||||
| - `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`. | ||||
|   They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971) | ||||
| - The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956) | ||||
| - The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956) | ||||
| 
 | ||||
| ### Changed | ||||
| 
 | ||||
| - The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870) | ||||
| - Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`. | ||||
|   This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916) | ||||
| - Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941) | ||||
|   - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` | ||||
| - Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966) | ||||
| - Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974) | ||||
| - The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) | ||||
|   - The `Int64Counter.Add` method now accepts `...AddOption` | ||||
|   - The `Float64Counter.Add` method now accepts `...AddOption` | ||||
|   - The `Int64UpDownCounter.Add` method now accepts `...AddOption` | ||||
|   - The `Float64UpDownCounter.Add` method now accepts `...AddOption` | ||||
|   - The `Int64Histogram.Record` method now accepts `...RecordOption` | ||||
|   - The `Float64Histogram.Record` method now accepts `...RecordOption` | ||||
|   - The `Int64Observer.Observe` method now accepts `...ObserveOption` | ||||
|   - The `Float64Observer.Observe` method now accepts `...ObserveOption` | ||||
| - The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) | ||||
|   - The `Observer.ObserveInt64` method now accepts `...ObserveOption` | ||||
|   - The `Observer.ObserveFloat64` method now accepts `...ObserveOption` | ||||
| - Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986) | ||||
| 
 | ||||
| ### Fixed | ||||
| 
 | ||||
| - `TracerProvider` allows calling `Tracer()` while it's shutting down. | ||||
|   It used to deadlock. (#3924) | ||||
| - Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949) | ||||
| - Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951) | ||||
| - Automatically figure out the default aggregation with `aggregation.Default`. (#3967) | ||||
| 
 | ||||
| ### Deprecated | ||||
| 
 | ||||
| - The `go.opentelemetry.io/otel/metric/instrument` package is deprecated. | ||||
|   Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018) | ||||
| 
 | ||||
| ## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23 | ||||
| 
 | ||||
| This is a release candidate for the v1.15.0/v0.38.0 release. | ||||
| That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. | ||||
| See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. | ||||
| 
 | ||||
| ### Added | ||||
| 
 | ||||
| - The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812) | ||||
| - The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828) | ||||
| - The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`. | ||||
|   Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849) | ||||
| - Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895) | ||||
| - The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900) | ||||
| - Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854) | ||||
| 
 | ||||
| ### Changed | ||||
| 
 | ||||
| - Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832) | ||||
| - Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832) | ||||
| - Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833) | ||||
| - The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844) | ||||
| - Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849) | ||||
| - The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853) | ||||
| - Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892) | ||||
| - Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) | ||||
| - Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) | ||||
| - The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900) | ||||
| 
 | ||||
| ### Fixed | ||||
| 
 | ||||
| - `TracerProvider` consistently doesn't allow to register a `SpanProcessor` after shutdown. (#3845) | ||||
| 
 | ||||
| ### Removed | ||||
| 
 | ||||
| - The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829) | ||||
| - The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892) | ||||
| - The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. | ||||
|   Use the added `float64` instrument configuration instead. (#3895) | ||||
| - The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. | ||||
|   Use the added `int64` instrument configuration instead. (#3895) | ||||
| - The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893) | ||||
| 
 | ||||
| ## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01 | ||||
| 
 | ||||
| This is a release candidate for the v1.15.0/v0.38.0 release. | ||||
| That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. | ||||
| See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. | ||||
| 
 | ||||
| This release drops the compatibility guarantee of [Go 1.18]. | ||||
| 
 | ||||
| ### Added | ||||
| 
 | ||||
| - Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818) | ||||
|   - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. | ||||
|   - Use `GetMeterProvider` for a global `metric.MeterProvider`. | ||||
|   - Use `SetMeterProvider` to set the global `metric.MeterProvider`. | ||||
| 
 | ||||
| ### Changed | ||||
| 
 | ||||
| - Dropped compatibility testing for [Go 1.18]. | ||||
|   The project no longer guarantees support for this version of Go. (#3813) | ||||
| 
 | ||||
| ### Fixed | ||||
| 
 | ||||
| - Handle empty environment variables as if they were not set. (#3764) | ||||
| - Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823) | ||||
| 
 | ||||
| ### Deprecated | ||||
| 
 | ||||
| - The `go.opentelemetry.io/otel/metric/global` package is deprecated. | ||||
|   Use `go.opentelemetry.io/otel` instead. (#3818) | ||||
| 
 | ||||
| ### Removed | ||||
| 
 | ||||
| - The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814) | ||||
| 
 | ||||
| ## [1.14.0/0.37.0/0.0.4] 2023-02-27 | ||||
| 
 | ||||
| This release is the last to support [Go 1.18]. | ||||
|  | @ -121,7 +256,7 @@ The next release will require at least [Go 1.19]. | |||
| - The `go.opentelemetry.io/otel/semconv/v1.16.0` package. | ||||
|   The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) | ||||
| - Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. | ||||
|   These instruments are use as replacements of the depreacted `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) | ||||
|   These instruments are use as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) | ||||
|   - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` | ||||
|   - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` | ||||
|   - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` | ||||
|  | @ -144,7 +279,7 @@ The next release will require at least [Go 1.19]. | |||
| ### Changed | ||||
| 
 | ||||
| - Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) | ||||
| - Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and confguration based on the instrument type. (#3507) | ||||
| - Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507) | ||||
|   - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. | ||||
|   - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. | ||||
|   - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. | ||||
|  | @ -157,7 +292,7 @@ The next release will require at least [Go 1.19]. | |||
| - The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) | ||||
| - The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. | ||||
|   This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) | ||||
| - Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in erros identifying their signal name. | ||||
| - Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name. | ||||
|   Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) | ||||
| - Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) | ||||
| - The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562) | ||||
|  | @ -266,7 +401,7 @@ The next release will require at least [Go 1.19]. | |||
| - Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) | ||||
| - Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) | ||||
| - `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) | ||||
| - Reenabled Attribute Filters in the Metric SDK. (#3396) | ||||
| - Re-enabled Attribute Filters in the Metric SDK. (#3396) | ||||
| - Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408) | ||||
| - Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) | ||||
| - Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) | ||||
|  | @ -847,7 +982,7 @@ This release includes an API and SDK for the tracing signal that will comply wit | |||
| - Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140) | ||||
| - The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169) | ||||
| - Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120) | ||||
| - The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly ommit timestamps. (#2195) | ||||
| - The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195) | ||||
| - Fixed typos in resources.go. (#2201) | ||||
| 
 | ||||
| ## [1.0.0-RC2] - 2021-07-26 | ||||
|  | @ -1293,7 +1428,7 @@ with major version 0. | |||
| - `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) | ||||
| - Added documentation about the project's versioning policy. (#1388) | ||||
| - Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418) | ||||
| - Added codeql worfklow to GitHub Actions (#1428) | ||||
| - Added codeql workflow to GitHub Actions (#1428) | ||||
| - Added Gosec workflow to GitHub Actions (#1429) | ||||
| - Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420) | ||||
| - Add an OpenCensus exporter bridge. (#1444) | ||||
|  | @ -2136,7 +2271,7 @@ There is still a possibility of breaking changes. | |||
| 
 | ||||
| ### Fixed | ||||
| 
 | ||||
| - Use stateful batcher on Prometheus exporter fixing regresion introduced in #395. (#428) | ||||
| - Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) | ||||
| 
 | ||||
| ## [0.2.1] - 2020-01-08 | ||||
| 
 | ||||
|  | @ -2302,7 +2437,10 @@ It contains api and sdk for trace and meter. | |||
| - CircleCI build CI manifest files. | ||||
| - CODEOWNERS file to track owners of this project. | ||||
| 
 | ||||
| [Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.14.0...HEAD | ||||
| [Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.15.0...HEAD | ||||
| [1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 | ||||
| [1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 | ||||
| [1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 | ||||
| [1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 | ||||
| [1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 | ||||
| [1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 | ||||
|  |  | |||
|  | @ -12,6 +12,6 @@ | |||
| #  https://help.github.com/en/articles/about-code-owners | ||||
| # | ||||
| 
 | ||||
| * @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu | ||||
| * @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu | ||||
| 
 | ||||
| CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod | ||||
|  |  | |||
|  | @ -6,7 +6,7 @@ OpenTelemetry | |||
| repo for information on this and other language SIGs. | ||||
| 
 | ||||
| See the [public meeting | ||||
| notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b) | ||||
| notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit) | ||||
| for a summary description of past meetings. To request edit access, | ||||
| join the meeting or get in touch on | ||||
| [Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). | ||||
|  | @ -94,30 +94,58 @@ request ID to the entry you added to `CHANGELOG.md`. | |||
| 
 | ||||
| ### How to Get PRs Merged | ||||
| 
 | ||||
| A PR is considered to be **ready to merge** when: | ||||
| A PR is considered **ready to merge** when: | ||||
| 
 | ||||
| * It has received two approvals from Collaborators/Maintainers (at | ||||
|   different companies). This is not enforced through technical means | ||||
|   and a PR may be **ready to merge** with a single approval if the change | ||||
|   and its approach have been discussed and consensus reached. | ||||
| * Feedback has been addressed. | ||||
| * Any substantive changes to your PR will require that you clear any prior | ||||
|   Approval reviews, this includes changes resulting from other feedback. Unless | ||||
|   the approver explicitly stated that their approval will persist across | ||||
|   changes it should be assumed that the PR needs their review again. Other | ||||
|   project members (e.g. approvers, maintainers) can help with this if there are | ||||
|   any questions or if you forget to clear reviews. | ||||
| * It has been open for review for at least one working day. This gives | ||||
|   people reasonable time to review. | ||||
| * Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for | ||||
|   one day and may be merged with a single Maintainer's approval. | ||||
| * `CHANGELOG.md` has been updated to reflect what has been | ||||
|   added, changed, removed, or fixed. | ||||
| * `README.md` has been updated if necessary. | ||||
| * Urgent fix can take exception as long as it has been actively | ||||
|   communicated. | ||||
| * It has received two qualified approvals[^1]. | ||||
| 
 | ||||
| Any Maintainer can merge the PR once it is **ready to merge**. | ||||
|   This is not enforced through automation, but needs to be validated by the | ||||
|   maintainer merging. | ||||
|   * The qualified approvals need to be from [Approver]s/[Maintainer]s | ||||
|     affiliated with different companies. Two qualified approvals from | ||||
|     [Approver]s or [Maintainer]s affiliated with the same company counts as a | ||||
|     single qualified approval. | ||||
|   * PRs introducing changes that have already been discussed and consensus | ||||
|     reached only need one qualified approval. The discussion and resolution | ||||
|     needs to be linked to the PR. | ||||
|   * Trivial changes[^2] only need one qualified approval. | ||||
| 
 | ||||
| * All feedback has been addressed. | ||||
|   * All PR comments and suggestions are resolved. | ||||
|   * All GitHub Pull Request reviews with a status of "Request changes" have | ||||
|     been addressed. Another review by the objecting reviewer with a different | ||||
|     status can be submitted to clear the original review, or the review can be | ||||
|     dismissed by a [Maintainer] when the issues from the original review have | ||||
|     been addressed. | ||||
|   * Any comments or reviews that cannot be resolved between the PR author and | ||||
|     reviewers can be submitted to the community [Approver]s and [Maintainer]s | ||||
|     during the weekly SIG meeting. If consensus is reached among the | ||||
|     [Approver]s and [Maintainer]s during the SIG meeting the objections to the | ||||
|     PR may be dismissed or resolved or the PR closed by a [Maintainer]. | ||||
|   * Any substantive changes to the PR require existing Approval reviews be | ||||
|     cleared unless the approver explicitly states that their approval persists | ||||
|     across changes. This includes changes resulting from other feedback. | ||||
|     [Approver]s and [Maintainer]s can help in clearing reviews and they should | ||||
|     be consulted if there are any questions. | ||||
| 
 | ||||
| * The PR branch is up to date with the base branch it is merging into. | ||||
|   * To ensure this does not block the PR, it should be configured to allow | ||||
|     maintainers to update it. | ||||
| 
 | ||||
| * It has been open for review for at least one working day. This gives people | ||||
|   reasonable time to review. | ||||
|   * Trivial changes[^2] do not have to wait for one day and may be merged with | ||||
|     a single [Maintainer]'s approval. | ||||
| 
 | ||||
| * All required GitHub workflows have succeeded. | ||||
| * Urgent fix can take exception as long as it has been actively communicated | ||||
|   among [Maintainer]s. | ||||
| 
 | ||||
| Any [Maintainer] can merge the PR once the above criteria have been met. | ||||
| 
 | ||||
| [^1]: A qualified approval is a GitHub Pull Request review with "Approve" | ||||
|   status from an OpenTelemetry Go [Approver] or [Maintainer]. | ||||
| [^2]: Trivial changes include: typo corrections, cosmetic non-substantive | ||||
|   changes, documentation corrections or updates, dependency updates, etc. | ||||
| 
 | ||||
| ## Design Choices | ||||
| 
 | ||||
|  | @ -216,7 +244,7 @@ Meaning a `config` from one package should not be directly used by another. The | |||
| one exception is the API packages.  The configs from the base API, eg. | ||||
| `go.opentelemetry.io/otel/trace.TracerConfig` and | ||||
| `go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed | ||||
| by the SDK therefor it is expected that these are exported. | ||||
| by the SDK therefore it is expected that these are exported. | ||||
| 
 | ||||
| When a config is exported we want to maintain forward and backward | ||||
| compatibility, to achieve this no fields should be exported but should | ||||
|  | @ -234,12 +262,12 @@ func newConfig(options ...Option) config { | |||
| 	for _, option := range options { | ||||
| 		config = option.apply(config) | ||||
| 	} | ||||
| 	// Preform any validation here. | ||||
| 	// Perform any validation here. | ||||
| 	return config | ||||
| } | ||||
| ``` | ||||
| 
 | ||||
| If validation of the `config` options is also preformed this can return an | ||||
| If validation of the `config` options is also performed this can return an | ||||
| error as well that is expected to be handled by the instantiation function | ||||
| or propagated to the user. | ||||
| 
 | ||||
|  | @ -438,7 +466,7 @@ their parameters appropriately named. | |||
| #### Interface Stability | ||||
| 
 | ||||
| All exported stable interfaces that include the following warning in their | ||||
| doumentation are allowed to be extended with additional methods. | ||||
| documentation are allowed to be extended with additional methods. | ||||
| 
 | ||||
| > Warning: methods may be added to this interface in minor releases. | ||||
| 
 | ||||
|  | @ -500,27 +528,30 @@ interface that defines the specific functionality should be preferred. | |||
| 
 | ||||
| ## Approvers and Maintainers | ||||
| 
 | ||||
| Approvers: | ||||
| ### Approvers | ||||
| 
 | ||||
| - [Evan Torrie](https://github.com/evantorrie), Verizon Media | ||||
| - [Josh MacDonald](https://github.com/jmacd), LightStep | ||||
| - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics | ||||
| - [David Ashpole](https://github.com/dashpole), Google | ||||
| - [Robert Pająk](https://github.com/pellared), Splunk | ||||
| - [Chester Cheung](https://github.com/hanyuancheung), Tencent | ||||
| - [Damien Mathieu](https://github.com/dmathieu), Elastic | ||||
| 
 | ||||
| Maintainers: | ||||
| ### Maintainers | ||||
| 
 | ||||
| - [Aaron Clawson](https://github.com/MadVikingGod), LightStep | ||||
| - [Anthony Mirabella](https://github.com/Aneurysm9), AWS | ||||
| - [Tyler Yahn](https://github.com/MrAlias), Splunk | ||||
| 
 | ||||
| Emeritus: | ||||
| ### Emeritus | ||||
| 
 | ||||
| - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep | ||||
| - [Josh MacDonald](https://github.com/jmacd), LightStep | ||||
| 
 | ||||
| ### Become an Approver or a Maintainer | ||||
| 
 | ||||
| See the [community membership document in OpenTelemetry community | ||||
| repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). | ||||
| 
 | ||||
| [Approver]: #approvers | ||||
| [Maintainer]: #maintainers | ||||
|  |  | |||
|  | @ -156,7 +156,7 @@ go-mod-tidy/%: DIR=$* | |||
| go-mod-tidy/%: | crosslink | ||||
| 	@echo "$(GO) mod tidy in $(DIR)" \
 | ||||
| 		&& cd $(DIR) \
 | ||||
| 		&& $(GO) mod tidy -compat=1.18 | ||||
| 		&& $(GO) mod tidy -compat=1.19 | ||||
| 
 | ||||
| .PHONY: lint-modules | ||||
| lint-modules: go-mod-tidy | ||||
|  |  | |||
|  | @ -14,7 +14,7 @@ It provides a set of APIs to directly measure performance and behavior of your s | |||
| | Signal  | Status     | Project | | ||||
| | ------- | ---------- | ------- | | ||||
| | Traces  | Stable     | N/A     | | ||||
| | Metrics | Alpha      | N/A     | | ||||
| | Metrics | Beta       | N/A     | | ||||
| | Logs    | Frozen [1] | N/A     | | ||||
| 
 | ||||
| - [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics. | ||||
|  | @ -52,19 +52,14 @@ Currently, this project supports the following environments. | |||
| | ------- | ---------- | ------------ | | ||||
| | Ubuntu  | 1.20       | amd64        | | ||||
| | Ubuntu  | 1.19       | amd64        | | ||||
| | Ubuntu  | 1.18       | amd64        | | ||||
| | Ubuntu  | 1.20       | 386          | | ||||
| | Ubuntu  | 1.19       | 386          | | ||||
| | Ubuntu  | 1.18       | 386          | | ||||
| | MacOS   | 1.20       | amd64        | | ||||
| | MacOS   | 1.19       | amd64        | | ||||
| | MacOS   | 1.18       | amd64        | | ||||
| | Windows | 1.20       | amd64        | | ||||
| | Windows | 1.19       | amd64        | | ||||
| | Windows | 1.18       | amd64        | | ||||
| | Windows | 1.20       | 386          | | ||||
| | Windows | 1.19       | 386          | | ||||
| | Windows | 1.18       | 386          | | ||||
| 
 | ||||
| While this project should work for other systems, no compatibility guarantees | ||||
| are made for those systems currently. | ||||
|  |  | |||
|  | @ -18,6 +18,7 @@ import ( | |||
| 	"encoding/json" | ||||
| 	"reflect" | ||||
| 	"sort" | ||||
| 	"sync" | ||||
| ) | ||||
| 
 | ||||
| type ( | ||||
|  | @ -62,6 +63,12 @@ var ( | |||
| 			iface: [0]KeyValue{}, | ||||
| 		}, | ||||
| 	} | ||||
| 
 | ||||
| 	// sortables is a pool of Sortables used to create Sets with a user does
 | ||||
| 	// not provide one.
 | ||||
| 	sortables = sync.Pool{ | ||||
| 		New: func() interface{} { return new(Sortable) }, | ||||
| 	} | ||||
| ) | ||||
| 
 | ||||
| // EmptySet returns a reference to a Set with no elements.
 | ||||
|  | @ -91,7 +98,7 @@ func (l *Set) Len() int { | |||
| 
 | ||||
| // Get returns the KeyValue at ordered position idx in this set.
 | ||||
| func (l *Set) Get(idx int) (KeyValue, bool) { | ||||
| 	if l == nil { | ||||
| 	if l == nil || !l.equivalent.Valid() { | ||||
| 		return KeyValue{}, false | ||||
| 	} | ||||
| 	value := l.equivalent.reflectValue() | ||||
|  | @ -107,7 +114,7 @@ func (l *Set) Get(idx int) (KeyValue, bool) { | |||
| 
 | ||||
| // Value returns the value of a specified key in this set.
 | ||||
| func (l *Set) Value(k Key) (Value, bool) { | ||||
| 	if l == nil { | ||||
| 	if l == nil || !l.equivalent.Valid() { | ||||
| 		return Value{}, false | ||||
| 	} | ||||
| 	rValue := l.equivalent.reflectValue() | ||||
|  | @ -191,7 +198,9 @@ func NewSet(kvs ...KeyValue) Set { | |||
| 	if len(kvs) == 0 { | ||||
| 		return empty() | ||||
| 	} | ||||
| 	s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil) | ||||
| 	srt := sortables.Get().(*Sortable) | ||||
| 	s, _ := NewSetWithSortableFiltered(kvs, srt, nil) | ||||
| 	sortables.Put(srt) | ||||
| 	return s | ||||
| } | ||||
| 
 | ||||
|  | @ -218,7 +227,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { | |||
| 	if len(kvs) == 0 { | ||||
| 		return empty(), nil | ||||
| 	} | ||||
| 	return NewSetWithSortableFiltered(kvs, new(Sortable), filter) | ||||
| 	srt := sortables.Get().(*Sortable) | ||||
| 	s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) | ||||
| 	sortables.Put(srt) | ||||
| 	return s, filtered | ||||
| } | ||||
| 
 | ||||
| // NewSetWithSortableFiltered returns a new Set.
 | ||||
|  |  | |||
|  | @ -15,58 +15,16 @@ | |||
| package otel // import "go.opentelemetry.io/otel"
 | ||||
| 
 | ||||
| import ( | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"sync/atomic" | ||||
| 	"unsafe" | ||||
| 	"go.opentelemetry.io/otel/internal/global" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	// globalErrorHandler provides an ErrorHandler that can be used
 | ||||
| 	// throughout an OpenTelemetry instrumented project. When a user
 | ||||
| 	// specified ErrorHandler is registered (`SetErrorHandler`) all calls to
 | ||||
| 	// `Handle` and will be delegated to the registered ErrorHandler.
 | ||||
| 	globalErrorHandler = defaultErrorHandler() | ||||
| 
 | ||||
| 	// Compile-time check that delegator implements ErrorHandler.
 | ||||
| 	_ ErrorHandler = (*delegator)(nil) | ||||
| 	// Compile-time check that errLogger implements ErrorHandler.
 | ||||
| 	_ ErrorHandler = (*errLogger)(nil) | ||||
| 	// Compile-time check global.ErrDelegator implements ErrorHandler.
 | ||||
| 	_ ErrorHandler = (*global.ErrDelegator)(nil) | ||||
| 	// Compile-time check global.ErrLogger implements ErrorHandler.
 | ||||
| 	_ ErrorHandler = (*global.ErrLogger)(nil) | ||||
| ) | ||||
| 
 | ||||
| type delegator struct { | ||||
| 	delegate unsafe.Pointer | ||||
| } | ||||
| 
 | ||||
| func (d *delegator) Handle(err error) { | ||||
| 	d.getDelegate().Handle(err) | ||||
| } | ||||
| 
 | ||||
| func (d *delegator) getDelegate() ErrorHandler { | ||||
| 	return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate)) | ||||
| } | ||||
| 
 | ||||
| // setDelegate sets the ErrorHandler delegate.
 | ||||
| func (d *delegator) setDelegate(eh ErrorHandler) { | ||||
| 	atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh)) | ||||
| } | ||||
| 
 | ||||
| func defaultErrorHandler() *delegator { | ||||
| 	d := &delegator{} | ||||
| 	d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) | ||||
| 	return d | ||||
| } | ||||
| 
 | ||||
| // errLogger logs errors if no delegate is set, otherwise they are delegated.
 | ||||
| type errLogger struct { | ||||
| 	l *log.Logger | ||||
| } | ||||
| 
 | ||||
| // Handle logs err if no delegate is set, otherwise it is delegated.
 | ||||
| func (h *errLogger) Handle(err error) { | ||||
| 	h.l.Print(err) | ||||
| } | ||||
| 
 | ||||
| // GetErrorHandler returns the global ErrorHandler instance.
 | ||||
| //
 | ||||
| // The default ErrorHandler instance returned will log all errors to STDERR
 | ||||
|  | @ -76,9 +34,7 @@ func (h *errLogger) Handle(err error) { | |||
| //
 | ||||
| // Subsequent calls to SetErrorHandler after the first will not forward errors
 | ||||
| // to the new ErrorHandler for prior returned instances.
 | ||||
| func GetErrorHandler() ErrorHandler { | ||||
| 	return globalErrorHandler | ||||
| } | ||||
| func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } | ||||
| 
 | ||||
| // SetErrorHandler sets the global ErrorHandler to h.
 | ||||
| //
 | ||||
|  | @ -86,11 +42,7 @@ func GetErrorHandler() ErrorHandler { | |||
| // GetErrorHandler will send errors to h instead of the default logging
 | ||||
| // ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
 | ||||
| // delegate errors to h.
 | ||||
| func SetErrorHandler(h ErrorHandler) { | ||||
| 	globalErrorHandler.setDelegate(h) | ||||
| } | ||||
| func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } | ||||
| 
 | ||||
| // Handle is a convenience function for ErrorHandler().Handle(err).
 | ||||
| func Handle(err error) { | ||||
| 	GetErrorHandler().Handle(err) | ||||
| } | ||||
| func Handle(err error) { global.Handle(err) } | ||||
|  |  | |||
|  | @ -0,0 +1,103 @@ | |||
| // Copyright The OpenTelemetry Authors
 | ||||
| //
 | ||||
| // Licensed under the Apache License, Version 2.0 (the "License");
 | ||||
| // you may not use this file except in compliance with the License.
 | ||||
| // You may obtain a copy of the License at
 | ||||
| //
 | ||||
| //     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| //
 | ||||
| // Unless required by applicable law or agreed to in writing, software
 | ||||
| // distributed under the License is distributed on an "AS IS" BASIS,
 | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | ||||
| // See the License for the specific language governing permissions and
 | ||||
| // limitations under the License.
 | ||||
| 
 | ||||
| package global // import "go.opentelemetry.io/otel/internal/global"
 | ||||
| 
 | ||||
| import ( | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"sync/atomic" | ||||
| 	"unsafe" | ||||
| ) | ||||
| 
 | ||||
| var ( | ||||
| 	// GlobalErrorHandler provides an ErrorHandler that can be used
 | ||||
| 	// throughout an OpenTelemetry instrumented project. When a user
 | ||||
| 	// specified ErrorHandler is registered (`SetErrorHandler`) all calls to
 | ||||
| 	// `Handle` and will be delegated to the registered ErrorHandler.
 | ||||
| 	GlobalErrorHandler = defaultErrorHandler() | ||||
| 
 | ||||
| 	// Compile-time check that delegator implements ErrorHandler.
 | ||||
| 	_ ErrorHandler = (*ErrDelegator)(nil) | ||||
| 	// Compile-time check that errLogger implements ErrorHandler.
 | ||||
| 	_ ErrorHandler = (*ErrLogger)(nil) | ||||
| ) | ||||
| 
 | ||||
| // ErrorHandler handles irremediable events.
 | ||||
| type ErrorHandler interface { | ||||
| 	// Handle handles any error deemed irremediable by an OpenTelemetry
 | ||||
| 	// component.
 | ||||
| 	Handle(error) | ||||
| } | ||||
| 
 | ||||
| type ErrDelegator struct { | ||||
| 	delegate unsafe.Pointer | ||||
| } | ||||
| 
 | ||||
| func (d *ErrDelegator) Handle(err error) { | ||||
| 	d.getDelegate().Handle(err) | ||||
| } | ||||
| 
 | ||||
| func (d *ErrDelegator) getDelegate() ErrorHandler { | ||||
| 	return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate)) | ||||
| } | ||||
| 
 | ||||
| // setDelegate sets the ErrorHandler delegate.
 | ||||
| func (d *ErrDelegator) setDelegate(eh ErrorHandler) { | ||||
| 	atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh)) | ||||
| } | ||||
| 
 | ||||
| func defaultErrorHandler() *ErrDelegator { | ||||
| 	d := &ErrDelegator{} | ||||
| 	d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) | ||||
| 	return d | ||||
| } | ||||
| 
 | ||||
| // ErrLogger logs errors if no delegate is set, otherwise they are delegated.
 | ||||
| type ErrLogger struct { | ||||
| 	l *log.Logger | ||||
| } | ||||
| 
 | ||||
| // Handle logs err if no delegate is set, otherwise it is delegated.
 | ||||
| func (h *ErrLogger) Handle(err error) { | ||||
| 	h.l.Print(err) | ||||
| } | ||||
| 
 | ||||
| // GetErrorHandler returns the global ErrorHandler instance.
 | ||||
| //
 | ||||
| // The default ErrorHandler instance returned will log all errors to STDERR
 | ||||
| // until an override ErrorHandler is set with SetErrorHandler. All
 | ||||
| // ErrorHandler returned prior to this will automatically forward errors to
 | ||||
| // the set instance instead of logging.
 | ||||
| //
 | ||||
| // Subsequent calls to SetErrorHandler after the first will not forward errors
 | ||||
| // to the new ErrorHandler for prior returned instances.
 | ||||
| func GetErrorHandler() ErrorHandler { | ||||
| 	return GlobalErrorHandler | ||||
| } | ||||
| 
 | ||||
| // SetErrorHandler sets the global ErrorHandler to h.
 | ||||
| //
 | ||||
| // The first time this is called all ErrorHandler previously returned from
 | ||||
| // GetErrorHandler will send errors to h instead of the default logging
 | ||||
| // ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
 | ||||
| // delegate errors to h.
 | ||||
| func SetErrorHandler(h ErrorHandler) { | ||||
| 	GlobalErrorHandler.setDelegate(h) | ||||
| } | ||||
| 
 | ||||
| // Handle is a convenience function for ErrorHandler().Handle(err).
 | ||||
| func Handle(err error) { | ||||
| 	GetErrorHandler().Handle(err) | ||||
| } | ||||
|  | @ -24,7 +24,7 @@ import ( | |||
| 	"github.com/go-logr/stdr" | ||||
| ) | ||||
| 
 | ||||
| // globalLogger is the logging interface used within the otel api and sdk provide deatails of the internals.
 | ||||
| // globalLogger is the logging interface used within the otel api and sdk provide details of the internals.
 | ||||
| //
 | ||||
| // The default logger uses stdr which is backed by the standard `log.Logger`
 | ||||
| // interface. This logger will only show messages at the Error Level.
 | ||||
|  | @ -36,8 +36,9 @@ func init() { | |||
| 
 | ||||
| // SetLogger overrides the globalLogger with l.
 | ||||
| //
 | ||||
| // To see Info messages use a logger with `l.V(1).Enabled() == true`
 | ||||
| // To see Debug messages use a logger with `l.V(5).Enabled() == true`.
 | ||||
| // To see Warn messages use a logger with `l.V(1).Enabled() == true`
 | ||||
| // To see Info messages use a logger with `l.V(4).Enabled() == true`
 | ||||
| // To see Debug messages use a logger with `l.V(8).Enabled() == true`.
 | ||||
| func SetLogger(l logr.Logger) { | ||||
| 	atomic.StorePointer(&globalLogger, unsafe.Pointer(&l)) | ||||
| } | ||||
|  | @ -47,9 +48,9 @@ func getLogger() logr.Logger { | |||
| } | ||||
| 
 | ||||
| // Info prints messages about the general state of the API or SDK.
 | ||||
| // This should usually be less then 5 messages a minute.
 | ||||
| // This should usually be less than 5 messages a minute.
 | ||||
| func Info(msg string, keysAndValues ...interface{}) { | ||||
| 	getLogger().V(1).Info(msg, keysAndValues...) | ||||
| 	getLogger().V(4).Info(msg, keysAndValues...) | ||||
| } | ||||
| 
 | ||||
| // Error prints messages about exceptional states of the API or SDK.
 | ||||
|  | @ -59,5 +60,11 @@ func Error(err error, msg string, keysAndValues ...interface{}) { | |||
| 
 | ||||
| // Debug prints messages about all internal changes in the API or SDK.
 | ||||
| func Debug(msg string, keysAndValues ...interface{}) { | ||||
| 	getLogger().V(5).Info(msg, keysAndValues...) | ||||
| 	getLogger().V(8).Info(msg, keysAndValues...) | ||||
| } | ||||
| 
 | ||||
| // Warn prints messages about warnings in the API or SDK.
 | ||||
| // Not an error but is likely more important than an informational event.
 | ||||
| func Warn(msg string, keysAndValues ...interface{}) { | ||||
| 	getLogger().V(1).Info(msg, keysAndValues...) | ||||
| } | ||||
|  |  | |||
|  | @ -37,7 +37,7 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { | |||
| 	return noopTracer{} | ||||
| } | ||||
| 
 | ||||
| // noopTracer is an implementation of Tracer that preforms no operations.
 | ||||
| // noopTracer is an implementation of Tracer that performs no operations.
 | ||||
| type noopTracer struct{} | ||||
| 
 | ||||
| var _ Tracer = noopTracer{} | ||||
|  | @ -53,7 +53,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption | |||
| 	return ContextWithSpan(ctx, span), span | ||||
| } | ||||
| 
 | ||||
| // noopSpan is an implementation of Span that preforms no operations.
 | ||||
| // noopSpan is an implementation of Span that performs no operations.
 | ||||
| type noopSpan struct{} | ||||
| 
 | ||||
| var _ Span = noopSpan{} | ||||
|  |  | |||
|  | @ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" | |||
| 
 | ||||
| // Version is the current release version of OpenTelemetry in use.
 | ||||
| func Version() string { | ||||
| 	return "1.14.0" | ||||
| 	return "1.15.0" | ||||
| } | ||||
|  |  | |||
Some files were not shown because too many files have changed in this diff Show More
		Loading…
	
		Reference in New Issue