Merge pull request #18528 from mtrmac/rekor-bloat

Update sigstore/rekor after https://github.com/sigstore/rekor/pull/1469
OpenShift Merge Robot 2023-05-10 14:55:31 -04:00 committed by GitHub
commit 6f7c9465b7
235 changed files with 226 additions and 35284 deletions

14
go.mod

@@ -94,7 +94,6 @@ require (
github.com/docker/docker-credential-helpers v0.7.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsouza/go-dockerclient v1.9.7 // indirect
github.com/go-chi/chi v4.1.2+incompatible // indirect
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -109,9 +108,6 @@
github.com/go-openapi/strfmt v0.21.7 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-openapi/validate v0.22.1 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.13.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -131,7 +127,6 @@
github.com/klauspost/compress v1.16.5 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/leodido/go-urn v1.2.3 // indirect
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/manifoldco/promptui v0.9.0 // indirect
@@ -156,7 +151,7 @@
github.com/seccomp/libseccomp-golang v0.10.0 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sigstore/fulcio v1.3.1 // indirect
github.com/sigstore/rekor v1.1.1 // indirect
github.com/sigstore/rekor v1.1.2-0.20230508234306-ad288b385a44 // indirect
github.com/sigstore/sigstore v1.6.4 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
@@ -164,7 +159,6 @@
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/theupdateframework/go-tuf v0.5.2 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/transparency-dev/merkle v0.0.1 // indirect
github.com/vbatts/tar-split v0.11.3 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
go.mongodb.org/mongo-driver v1.11.3 // indirect
@@ -172,9 +166,6 @@
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel v1.15.0 // indirect
go.opentelemetry.io/otel/trace v1.15.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/crypto v0.8.0 // indirect
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 // indirect
golang.org/x/mod v0.10.0 // indirect
@@ -182,12 +173,11 @@
golang.org/x/tools v0.8.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.54.0 // indirect
google.golang.org/grpc v1.55.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/klog/v2 v2.90.1 // indirect
)
replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc

36
go.sum

@@ -91,7 +91,6 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@@ -373,8 +372,6 @@ github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXt
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -387,7 +384,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -439,15 +435,7 @@ github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.13.0 h1:cFRQdfaSMCOSfGCCLB20MHvuoHb/s5G8L5pu2ppK5AQ=
github.com/go-playground/validator/v10 v10.13.0/go.mod h1:dwu7+CG8/CtBiJFZDz4e+5Upb6OLw04gtBYw0mcG/z4=
github.com/go-rod/rod v0.112.9 h1:uA/yLbB+t0UlqJcLJtK2pZrCNPzd15dOKRUEOnmnt9k=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
@@ -508,7 +496,6 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -682,11 +669,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -849,7 +833,7 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.15.0 h1:5fCgGYogn0hFdhyhLbw7hEsWxufKtY9klyvdNfFlFhM=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -902,8 +886,8 @@ github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y=
github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
github.com/sigstore/rekor v1.1.1 h1:JCeSss+qUHnCATmwAZh4zT9k0Frdyq0BjmRwewSfEy4=
github.com/sigstore/rekor v1.1.1/go.mod h1:x/xK+HK08MiuJv+v4OxY/Oo3bhuz1DtJXNJrV7hrzvs=
github.com/sigstore/rekor v1.1.2-0.20230508234306-ad288b385a44 h1:03iBveStHVfP97eB54lMjkgrSMYWW7F7X9IlOI9N+A0=
github.com/sigstore/rekor v1.1.2-0.20230508234306-ad288b385a44/go.mod h1:4/htUXiP2z4KwvZOXTalCkh5C9I7msJbVWrdjWyD+fU=
github.com/sigstore/sigstore v1.6.4 h1:jH4AzR7qlEH/EWzm+opSpxCfuUcjHL+LJPuQE7h40WE=
github.com/sigstore/sigstore v1.6.4/go.mod h1:pjR64lBxnjoSrAr+Ydye/FV73IfrgtoYlAI11a8xMfA=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -978,8 +962,6 @@ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/transparency-dev/merkle v0.0.1 h1:T9/9gYB8uZl7VOJIhdwjALeRWlxUxSfDEysjfmx+L9E=
github.com/transparency-dev/merkle v0.0.1/go.mod h1:B8FIw5LTq6DaULoHsVFRzYIUDkl8yuSwCdZnOZGKL/A=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
@@ -1064,15 +1046,9 @@ go.opentelemetry.io/otel/trace v1.15.0/go.mod h1:CUsmE2Ht1CRkvE8OsMESvraoZrrcgD1
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1435,8 +1411,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1531,11 +1507,11 @@ k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=

3
vendor/github.com/go-chi/chi/.gitignore generated vendored

@@ -1,3 +0,0 @@
.idea
*.sw?
.vscode

20
vendor/github.com/go-chi/chi/.travis.yml generated vendored

@@ -1,20 +0,0 @@
language: go
go:
- 1.10.x
- 1.11.x
- 1.12.x
- 1.13.x
- 1.14.x
script:
- go get -d -t ./...
- go vet ./...
- go test ./...
- >
go_version=$(go version);
if [ ${go_version:13:4} = "1.12" ]; then
go get -u golang.org/x/tools/cmd/goimports;
goimports -d -e ./ | grep '.*' && { echo; echo "Aborting due to non-empty goimports output."; exit 1; } || :;
fi

190
vendor/github.com/go-chi/chi/CHANGELOG.md generated vendored

@@ -1,190 +0,0 @@
# Changelog
## v4.1.2 (2020-06-02)
- fix that handles MethodNotAllowed with path variables, thank you @caseyhadden for your contribution
- fix to replace nested wildcards correctly in RoutePattern, thank you @unmultimedio for your contribution
- History of changes: see https://github.com/go-chi/chi/compare/v4.1.1...v4.1.2
## v4.1.1 (2020-04-16)
- fix for issue https://github.com/go-chi/chi/issues/411 which allows for overlapping regexp
route to the correct handler through a recursive tree search, thanks to @Jahaja for the PR/fix!
- new middleware.RouteHeaders as a simple router for request headers with wildcard support
- History of changes: see https://github.com/go-chi/chi/compare/v4.1.0...v4.1.1
## v4.1.0 (2020-04-1)
- middleware.LogEntry: Write method on interface now passes the response header
and an extra interface type useful for custom logger implementations.
- middleware.WrapResponseWriter: minor fix
- middleware.Recoverer: a bit prettier
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.4...v4.1.0
## v4.0.4 (2020-03-24)
- middleware.Recoverer: new pretty stack trace printing (https://github.com/go-chi/chi/pull/496)
- a few minor improvements and fixes
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.3...v4.0.4
## v4.0.3 (2020-01-09)
- core: fix regexp routing to include default value when param is not matched
- middleware: rewrite of middleware.Compress
- middleware: suppress http.ErrAbortHandler in middleware.Recoverer
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.2...v4.0.3
## v4.0.2 (2019-02-26)
- Minor fixes
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.1...v4.0.2
## v4.0.1 (2019-01-21)
- Fixes issue with compress middleware: #382 #385
- History of changes: see https://github.com/go-chi/chi/compare/v4.0.0...v4.0.1
## v4.0.0 (2019-01-10)
- chi v4 requires Go 1.10.3+ (or Go 1.9.7+) - we have deprecated support for Go 1.7 and 1.8
- router: respond with 404 on router with no routes (#362)
- router: additional check to ensure wildcard is at the end of a url pattern (#333)
- middleware: deprecate use of http.CloseNotifier (#347)
- middleware: fix RedirectSlashes to include query params on redirect (#334)
- History of changes: see https://github.com/go-chi/chi/compare/v3.3.4...v4.0.0
## v3.3.4 (2019-01-07)
- Minor middleware improvements. No changes to core library/router. Moving v3 into its
- own branch as a version of chi for Go 1.7, 1.8, 1.9, 1.10, 1.11
- History of changes: see https://github.com/go-chi/chi/compare/v3.3.3...v3.3.4
## v3.3.3 (2018-08-27)
- Minor release
- See https://github.com/go-chi/chi/compare/v3.3.2...v3.3.3
## v3.3.2 (2017-12-22)
- Support to route trailing slashes on mounted sub-routers (#281)
- middleware: new `ContentCharset` to check matching charsets. Thank you
@csucu for your community contribution!
## v3.3.1 (2017-11-20)
- middleware: new `AllowContentType` handler for explicit whitelist of accepted request Content-Types
- middleware: new `SetHeader` handler for short-hand middleware to set a response header key/value
- Minor bug fixes
## v3.3.0 (2017-10-10)
- New chi.RegisterMethod(method) to add support for custom HTTP methods, see _examples/custom-method for usage
- Deprecated LINK and UNLINK methods from the default list, please use `chi.RegisterMethod("LINK")` and `chi.RegisterMethod("UNLINK")` in an `init()` function
## v3.2.1 (2017-08-31)
- Add new `Match(rctx *Context, method, path string) bool` method to `Routes` interface
and `Mux`. Match searches the mux's routing tree for a handler that matches the method/path
- Add new `RouteMethod` to `*Context`
- Add new `Routes` pointer to `*Context`
- Add new `middleware.GetHead` to route missing HEAD requests to GET handler
- Updated benchmarks (see README)
## v3.1.5 (2017-08-02)
- Setup golint and go vet for the project
- As per golint, we've redefined `func ServerBaseContext(h http.Handler, baseCtx context.Context) http.Handler`
to `func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler`
## v3.1.0 (2017-07-10)
- Fix a few minor issues after v3 release
- Move `docgen` sub-pkg to https://github.com/go-chi/docgen
- Move `render` sub-pkg to https://github.com/go-chi/render
- Add new `URLFormat` handler to chi/middleware sub-pkg to make working with url mime
suffixes easier, ie. parsing `/articles/1.json` and `/articles/1.xml`. See comments in
https://github.com/go-chi/chi/blob/master/middleware/url_format.go for example usage.
## v3.0.0 (2017-06-21)
- Major update to chi library with many exciting updates, but also some *breaking changes*
- URL parameter syntax changed from `/:id` to `/{id}` for even more flexible routing, such as
`/articles/{month}-{day}-{year}-{slug}`, `/articles/{id}`, and `/articles/{id}.{ext}` on the
same router
- Support for regexp for routing patterns, in the form of `/{paramKey:regExp}` for example:
`r.Get("/articles/{name:[a-z]+}", h)` and `chi.URLParam(r, "name")`
- Add `Method` and `MethodFunc` to `chi.Router` to allow routing definitions such as
`r.Method("GET", "/", h)` which provides a cleaner interface for custom handlers like
in `_examples/custom-handler`
- Deprecating `mux#FileServer` helper function. Instead, we encourage users to create their
own using file handler with the stdlib, see `_examples/fileserver` for an example
- Add support for LINK/UNLINK http methods via `r.Method()` and `r.MethodFunc()`
- Moved the chi project to its own organization, to allow chi-related community packages to
be easily discovered and supported, at: https://github.com/go-chi
- *NOTE:* please update your import paths to `"github.com/go-chi/chi"`
- *NOTE:* chi v2 is still available at https://github.com/go-chi/chi/tree/v2
## v2.1.0 (2017-03-30)
- Minor improvements and update to the chi core library
- Introduced a brand new `chi/render` sub-package to complete the story of building
APIs to offer a pattern for managing well-defined request / response payloads. Please
check out the updated `_examples/rest` example for how it works.
- Added `MethodNotAllowed(h http.HandlerFunc)` to chi.Router interface
## v2.0.0 (2017-01-06)
- After many months of v2 being in an RC state with many companies and users running it in
production, the inclusion of some improvements to the middlewares, we are very pleased to
announce v2.0.0 of chi.
## v2.0.0-rc1 (2016-07-26)
- Huge update! chi v2 is a large refactor targeting Go 1.7+. As of Go 1.7, the popular
community `"net/context"` package has been included in the standard library as `"context"` and
utilized by `"net/http"` and `http.Request` to manage deadlines, cancelation signals and other
request-scoped values. We're very excited about the new context addition and are proud to
introduce chi v2, a minimal and powerful routing package for building large HTTP services,
with zero external dependencies. Chi focuses on idiomatic design and encourages the use of
stdlib HTTP handlers and middlewares.
- chi v2 deprecates its `chi.Handler` interface and requires `http.Handler` or `http.HandlerFunc`
- chi v2 stores URL routing parameters and patterns in the standard request context: `r.Context()`
- chi v2 lower-level routing context is accessible by `chi.RouteContext(r.Context()) *chi.Context`,
which provides direct access to URL routing parameters, the routing path and the matching
routing patterns.
- Users upgrading from chi v1 to v2, need to:
1. Update the old chi.Handler signature, `func(ctx context.Context, w http.ResponseWriter, r *http.Request)` to
the standard http.Handler: `func(w http.ResponseWriter, r *http.Request)`
2. Use `chi.URLParam(r *http.Request, paramKey string) string`
or `URLParamFromCtx(ctx context.Context, paramKey string) string` to access a url parameter value
## v1.0.0 (2016-07-01)
- Released chi v1 stable https://github.com/go-chi/chi/tree/v1.0.0 for Go 1.6 and older.
## v0.9.0 (2016-03-31)
- Reuse context objects via sync.Pool for zero-allocation routing [#33](https://github.com/go-chi/chi/pull/33)
- BREAKING NOTE: due to subtle API changes, previously `chi.URLParams(ctx)["id"]` used to access url parameters
has changed to: `chi.URLParam(ctx, "id")`

31
vendor/github.com/go-chi/chi/CONTRIBUTING.md generated vendored

@@ -1,31 +0,0 @@
# Contributing
## Prerequisites
1. [Install Go][go-install].
2. Download the sources and switch the working directory:
```bash
go get -u -d github.com/go-chi/chi
cd $GOPATH/src/github.com/go-chi/chi
```
## Submitting a Pull Request
A typical workflow is:
1. [Fork the repository.][fork] [This tip maybe also helpful.][go-fork-tip]
2. [Create a topic branch.][branch]
3. Add tests for your change.
4. Run `go test`. If your tests pass, return to the step 3.
5. Implement the change and ensure the steps from the previous step pass.
6. Run `goimports -w .`, to ensure the new code conforms to Go formatting guideline.
7. [Add, commit and push your changes.][git-help]
8. [Submit a pull request.][pull-req]
[go-install]: https://golang.org/doc/install
[go-fork-tip]: http://blog.campoy.cat/2014/03/github-and-go-forking-pull-requests-and.html
[fork]: https://help.github.com/articles/fork-a-repo
[branch]: http://learn.github.com/p/branching.html
[git-help]: https://guides.github.com
[pull-req]: https://help.github.com/articles/using-pull-requests

20
vendor/github.com/go-chi/chi/LICENSE generated vendored

@@ -1,20 +0,0 @@
Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

441
vendor/github.com/go-chi/chi/README.md generated vendored

@@ -1,441 +0,0 @@
# <img alt="chi" src="https://cdn.rawgit.com/go-chi/chi/master/_examples/chi.svg" width="220" />
[![GoDoc Widget]][GoDoc] [![Travis Widget]][Travis]
`chi` is a lightweight, idiomatic and composable router for building Go HTTP services. It's
especially good at helping you write large REST API services that are kept maintainable as your
project grows and changes. `chi` is built on the new `context` package introduced in Go 1.7 to
handle signaling, cancelation and request-scoped values across a handler chain.
The focus of the project has been to seek out an elegant and comfortable design for writing
REST API servers, written during the development of the Pressly API service that powers our
public API service, which in turn powers all of our client-side applications.
The key considerations of chi's design are: project structure, maintainability, standard http
handlers (stdlib-only), developer productivity, and deconstructing a large system into many small
parts. The core router `github.com/go-chi/chi` is quite small (less than 1000 LOC), but we've also
included some useful/optional subpackages: [middleware](/middleware), [render](https://github.com/go-chi/render) and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too!
## Install
`go get -u github.com/go-chi/chi`
## Features
* **Lightweight** - cloc'd in ~1000 LOC for the chi router
* **Fast** - yes, see [benchmarks](#benchmarks)
* **100% compatible with net/http** - use any http or middleware pkg in the ecosystem that is also compatible with `net/http`
* **Designed for modular/composable APIs** - middlewares, inline middlewares, route groups and subrouter mounting
* **Context control** - built on new `context` package, providing value chaining, cancellations and timeouts
* **Robust** - in production at Pressly, CloudFlare, Heroku, 99Designs, and many others (see [discussion](https://github.com/go-chi/chi/issues/91))
* **Doc generation** - `docgen` auto-generates routing documentation from your source to JSON or Markdown
* **No external dependencies** - plain ol' Go stdlib + net/http
## Examples
See [_examples/](https://github.com/go-chi/chi/blob/master/_examples/) for a variety of examples.
**As easy as:**
```go
package main
import (
"net/http"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
)
func main() {
r := chi.NewRouter()
r.Use(middleware.Logger)
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("welcome"))
})
http.ListenAndServe(":3000", r)
}
```
**REST Preview:**
Here is a little preview of what routing looks like with chi. Also take a look at the generated routing docs
in JSON ([routes.json](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.json)) and in
Markdown ([routes.md](https://github.com/go-chi/chi/blob/master/_examples/rest/routes.md)).
I highly recommend reading the source of the [examples](https://github.com/go-chi/chi/blob/master/_examples/) listed
above, they will show you all the features of chi and serve as a good form of documentation.
```go
import (
//...
"context"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
)
func main() {
r := chi.NewRouter()
// A good base middleware stack
r.Use(middleware.RequestID)
r.Use(middleware.RealIP)
r.Use(middleware.Logger)
r.Use(middleware.Recoverer)
// Set a timeout value on the request context (ctx), that will signal
// through ctx.Done() that the request has timed out and further
// processing should be stopped.
r.Use(middleware.Timeout(60 * time.Second))
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("hi"))
})
// RESTy routes for "articles" resource
r.Route("/articles", func(r chi.Router) {
r.With(paginate).Get("/", listArticles) // GET /articles
r.With(paginate).Get("/{month}-{day}-{year}", listArticlesByDate) // GET /articles/01-16-2017
r.Post("/", createArticle) // POST /articles
r.Get("/search", searchArticles) // GET /articles/search
// Regexp url parameters:
r.Get("/{articleSlug:[a-z-]+}", getArticleBySlug) // GET /articles/home-is-toronto
// Subrouters:
r.Route("/{articleID}", func(r chi.Router) {
r.Use(ArticleCtx)
r.Get("/", getArticle) // GET /articles/123
r.Put("/", updateArticle) // PUT /articles/123
r.Delete("/", deleteArticle) // DELETE /articles/123
})
})
// Mount the admin sub-router
r.Mount("/admin", adminRouter())
http.ListenAndServe(":3333", r)
}
func ArticleCtx(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
articleID := chi.URLParam(r, "articleID")
article, err := dbGetArticle(articleID)
if err != nil {
http.Error(w, http.StatusText(404), 404)
return
}
ctx := context.WithValue(r.Context(), "article", article)
next.ServeHTTP(w, r.WithContext(ctx))
})
}
func getArticle(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
article, ok := ctx.Value("article").(*Article)
if !ok {
http.Error(w, http.StatusText(422), 422)
return
}
w.Write([]byte(fmt.Sprintf("title:%s", article.Title)))
}
// A completely separate router for administrator routes
func adminRouter() http.Handler {
r := chi.NewRouter()
r.Use(AdminOnly)
r.Get("/", adminIndex)
r.Get("/accounts", adminListAccounts)
return r
}
func AdminOnly(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
perm, ok := ctx.Value("acl.permission").(YourPermissionType)
if !ok || !perm.IsAdmin() {
http.Error(w, http.StatusText(403), 403)
return
}
next.ServeHTTP(w, r)
})
}
```
## Router design
chi's router is based on a kind of [Patricia Radix trie](https://en.wikipedia.org/wiki/Radix_tree).
The router is fully compatible with `net/http`.
Built on top of the tree is the `Router` interface:
```go
// Router consisting of the core routing methods used by chi's Mux,
// using only the standard net/http.
type Router interface {
http.Handler
Routes
// Use appends one or more middlewares onto the Router stack.
Use(middlewares ...func(http.Handler) http.Handler)
// With adds inline middlewares for an endpoint handler.
With(middlewares ...func(http.Handler) http.Handler) Router
// Group adds a new inline-Router along the current routing
// path, with a fresh middleware stack for the inline-Router.
Group(fn func(r Router)) Router
// Route mounts a sub-Router along a `pattern` string.
Route(pattern string, fn func(r Router)) Router
// Mount attaches another http.Handler along ./pattern/*
Mount(pattern string, h http.Handler)
// Handle and HandleFunc adds routes for `pattern` that matches
// all HTTP methods.
Handle(pattern string, h http.Handler)
HandleFunc(pattern string, h http.HandlerFunc)
// Method and MethodFunc adds routes for `pattern` that matches
// the `method` HTTP method.
Method(method, pattern string, h http.Handler)
MethodFunc(method, pattern string, h http.HandlerFunc)
// HTTP-method routing along `pattern`
Connect(pattern string, h http.HandlerFunc)
Delete(pattern string, h http.HandlerFunc)
Get(pattern string, h http.HandlerFunc)
Head(pattern string, h http.HandlerFunc)
Options(pattern string, h http.HandlerFunc)
Patch(pattern string, h http.HandlerFunc)
Post(pattern string, h http.HandlerFunc)
Put(pattern string, h http.HandlerFunc)
Trace(pattern string, h http.HandlerFunc)
// NotFound defines a handler to respond whenever a route could
// not be found.
NotFound(h http.HandlerFunc)
// MethodNotAllowed defines a handler to respond whenever a method is
// not allowed.
MethodNotAllowed(h http.HandlerFunc)
}
// Routes interface adds two methods for router traversal, which is also
// used by the github.com/go-chi/docgen package to generate documentation for Routers.
type Routes interface {
// Routes returns the routing tree in an easily traversable structure.
Routes() []Route
// Middlewares returns the list of middlewares in use by the router.
Middlewares() Middlewares
// Match searches the routing tree for a handler that matches
// the method/path - similar to routing a http request, but without
// executing the handler thereafter.
Match(rctx *Context, method, path string) bool
}
```
Each routing method accepts a URL `pattern` and chain of `handlers`. The URL pattern
supports named params (ie. `/users/{userID}`) and wildcards (ie. `/admin/*`). URL parameters
can be fetched at runtime by calling `chi.URLParam(r, "userID")` for named parameters
and `chi.URLParam(r, "*")` for a wildcard parameter.
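For illustration (not part of the removed README), a minimal sketch of reading both kinds of parameters; the route and handler below are hypothetical:
```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi"
)

func main() {
	r := chi.NewRouter()
	// "/users/{userID}/files/*": {userID} is a named param, "*" captures the rest of the path.
	r.Get("/users/{userID}/files/*", func(w http.ResponseWriter, r *http.Request) {
		userID := chi.URLParam(r, "userID") // e.g. "42"
		rest := chi.URLParam(r, "*")        // e.g. "docs/report.pdf"
		fmt.Fprintf(w, "user=%s file=%s", userID, rest)
	})
	http.ListenAndServe(":3000", r)
}
```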
### Middleware handlers
chi's middlewares are just stdlib net/http middleware handlers. There is nothing special
about them, which means the router and all the tooling is designed to be compatible and
friendly with any middleware in the community. This offers much better extensibility and reuse
of packages and is at the heart of chi's purpose.
Here is an example of a standard net/http middleware handler using the new request context
available in Go. This middleware sets a hypothetical user identifier on the request
context and calls the next handler in the chain.
```go
// HTTP middleware setting a value on the request context
func MyMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := context.WithValue(r.Context(), "user", "123")
next.ServeHTTP(w, r.WithContext(ctx))
})
}
```
### Request handlers
chi uses standard net/http request handlers. This little snippet is an example of a http.Handler
func that reads a user identifier from the request context - hypothetically, identifying
the user sending an authenticated request, validated+set by a previous middleware handler.
```go
// HTTP handler accessing data from the request context.
func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
user := r.Context().Value("user").(string)
w.Write([]byte(fmt.Sprintf("hi %s", user)))
}
```
### URL parameters
chi's router parses and stores URL parameters right onto the request context. Here is
an example of how to access URL params in your net/http handlers. And of course, middlewares
are able to access the same information.
```go
// HTTP handler accessing the url routing parameters.
func MyRequestHandler(w http.ResponseWriter, r *http.Request) {
userID := chi.URLParam(r, "userID") // from a route like /users/{userID}
ctx := r.Context()
key := ctx.Value("key").(string)
w.Write([]byte(fmt.Sprintf("hi %v, %v", userID, key)))
}
```
## Middlewares
chi comes equipped with an optional `middleware` package, providing a suite of standard
`net/http` middlewares. Please note, any middleware in the ecosystem that is also compatible
with `net/http` can be used with chi's mux.
### Core middlewares
-----------------------------------------------------------------------------------------------------------
| chi/middleware Handler | description |
|:----------------------|:---------------------------------------------------------------------------------
| AllowContentType | Explicit whitelist of accepted request Content-Types |
| BasicAuth | Basic HTTP authentication |
| Compress | Gzip compression for clients that accept compressed responses |
| GetHead | Automatically route undefined HEAD requests to GET handlers |
| Heartbeat | Monitoring endpoint to check the server's pulse |
| Logger | Logs the start and end of each request with the elapsed processing time |
| NoCache | Sets response headers to prevent clients from caching |
| Profiler | Easily attach net/http/pprof to your routers |
| RealIP | Sets a http.Request's RemoteAddr to either X-Forwarded-For or X-Real-IP |
| Recoverer | Gracefully absorb panics and prints the stack trace |
| RequestID | Injects a request ID into the context of each request |
| RedirectSlashes | Redirect slashes on routing paths |
| SetHeader | Short-hand middleware to set a response header key/value |
| StripSlashes | Strip slashes on routing paths |
| Throttle | Puts a ceiling on the number of concurrent requests |
| Timeout | Signals to the request context when the timeout deadline is reached |
| URLFormat | Parse extension from url and put it on request context |
| WithValue | Short-hand middleware to set a key/value on the request context |
-----------------------------------------------------------------------------------------------------------
### Extra middlewares & packages
Please see https://github.com/go-chi for additional packages.
--------------------------------------------------------------------------------------------------------------------
| package | description |
|:---------------------------------------------------|:-------------------------------------------------------------
| [cors](https://github.com/go-chi/cors) | Cross-origin resource sharing (CORS) |
| [docgen](https://github.com/go-chi/docgen) | Print chi.Router routes at runtime |
| [jwtauth](https://github.com/go-chi/jwtauth) | JWT authentication |
| [hostrouter](https://github.com/go-chi/hostrouter) | Domain/host based request routing |
| [httplog](https://github.com/go-chi/httplog) | Small but powerful structured HTTP request logging |
| [httprate](https://github.com/go-chi/httprate) | HTTP request rate limiter |
| [httptracer](https://github.com/go-chi/httptracer) | HTTP request performance tracing library |
| [httpvcr](https://github.com/go-chi/httpvcr) | Write deterministic tests for external sources |
| [stampede](https://github.com/go-chi/stampede) | HTTP request coalescer |
--------------------------------------------------------------------------------------------------------------------
please [submit a PR](./CONTRIBUTING.md) if you'd like to include a link to a chi-compatible middleware
## context?
`context` is a tiny pkg that provides simple interface to signal context across call stacks
and goroutines. It was originally written by [Sameer Ajmani](https://github.com/Sajmani)
and is available in stdlib since go1.7.
Learn more at https://blog.golang.org/context
and..
* Docs: https://golang.org/pkg/context
* Source: https://github.com/golang/go/tree/master/src/context
## Benchmarks
The benchmark suite: https://github.com/pkieltyka/go-http-routing-benchmark
Results as of Jan 9, 2019 with Go 1.11.4 on Linux X1 Carbon laptop
```shell
BenchmarkChi_Param 3000000 475 ns/op 432 B/op 3 allocs/op
BenchmarkChi_Param5 2000000 696 ns/op 432 B/op 3 allocs/op
BenchmarkChi_Param20 1000000 1275 ns/op 432 B/op 3 allocs/op
BenchmarkChi_ParamWrite 3000000 505 ns/op 432 B/op 3 allocs/op
BenchmarkChi_GithubStatic 3000000 508 ns/op 432 B/op 3 allocs/op
BenchmarkChi_GithubParam 2000000 669 ns/op 432 B/op 3 allocs/op
BenchmarkChi_GithubAll 10000 134627 ns/op 87699 B/op 609 allocs/op
BenchmarkChi_GPlusStatic 3000000 402 ns/op 432 B/op 3 allocs/op
BenchmarkChi_GPlusParam 3000000 500 ns/op 432 B/op 3 allocs/op
BenchmarkChi_GPlus2Params 3000000 586 ns/op 432 B/op 3 allocs/op
BenchmarkChi_GPlusAll 200000 7237 ns/op 5616 B/op 39 allocs/op
BenchmarkChi_ParseStatic 3000000 408 ns/op 432 B/op 3 allocs/op
BenchmarkChi_ParseParam 3000000 488 ns/op 432 B/op 3 allocs/op
BenchmarkChi_Parse2Params 3000000 551 ns/op 432 B/op 3 allocs/op
BenchmarkChi_ParseAll 100000 13508 ns/op 11232 B/op 78 allocs/op
BenchmarkChi_StaticAll 20000 81933 ns/op 67826 B/op 471 allocs/op
```
Comparison with other routers: https://gist.github.com/pkieltyka/123032f12052520aaccab752bd3e78cc
NOTE: the allocs in the benchmark above are from the calls to http.Request's
`WithContext(context.Context)` method that clones the http.Request, sets the `Context()`
on the duplicated (alloc'd) request and returns the new request object. This is just
how setting context on a request in Go works.
## Credits
* Carl Jackson for https://github.com/zenazn/goji
* Parts of chi's thinking comes from goji, and chi's middleware package
sources from goji.
* Armon Dadgar for https://github.com/armon/go-radix
* Contributions: [@VojtechVitek](https://github.com/VojtechVitek)
We'll be more than happy to see [your contributions](./CONTRIBUTING.md)!
## Beyond REST
chi is just a http router that lets you decompose request handling into many smaller layers.
Many companies use chi to write REST services for their public APIs. But, REST is just a convention
for managing state via HTTP, and there's a lot of other pieces required to write a complete client-server
system or network of microservices.
Looking beyond REST, I also recommend some newer works in the field:
* [webrpc](https://github.com/webrpc/webrpc) - Web-focused RPC client+server framework with code-gen
* [gRPC](https://github.com/grpc/grpc-go) - Google's RPC framework via protobufs
* [graphql](https://github.com/99designs/gqlgen) - Declarative query language
* [NATS](https://nats.io) - lightweight pub-sub
## License
Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)
Licensed under [MIT License](./LICENSE)
[GoDoc]: https://godoc.org/github.com/go-chi/chi
[GoDoc Widget]: https://godoc.org/github.com/go-chi/chi?status.svg
[Travis]: https://travis-ci.org/go-chi/chi
[Travis Widget]: https://travis-ci.org/go-chi/chi.svg?branch=master

49
vendor/github.com/go-chi/chi/chain.go generated vendored

@@ -1,49 +0,0 @@
package chi
import "net/http"
// Chain returns a Middlewares type from a slice of middleware handlers.
func Chain(middlewares ...func(http.Handler) http.Handler) Middlewares {
return Middlewares(middlewares)
}
// Handler builds and returns a http.Handler from the chain of middlewares,
// with `h http.Handler` as the final handler.
func (mws Middlewares) Handler(h http.Handler) http.Handler {
return &ChainHandler{mws, h, chain(mws, h)}
}
// HandlerFunc builds and returns a http.Handler from the chain of middlewares,
// with `h http.Handler` as the final handler.
func (mws Middlewares) HandlerFunc(h http.HandlerFunc) http.Handler {
return &ChainHandler{mws, h, chain(mws, h)}
}
// ChainHandler is a http.Handler with support for handler composition and
// execution.
type ChainHandler struct {
Middlewares Middlewares
Endpoint http.Handler
chain http.Handler
}
func (c *ChainHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
c.chain.ServeHTTP(w, r)
}
// chain builds a http.Handler composed of an inline middleware stack and endpoint
// handler in the order they are passed.
func chain(middlewares []func(http.Handler) http.Handler, endpoint http.Handler) http.Handler {
// Return ahead of time if there aren't any middlewares for the chain
if len(middlewares) == 0 {
return endpoint
}
// Wrap the end handler with the middleware chain
h := middlewares[len(middlewares)-1](endpoint)
for i := len(middlewares) - 2; i >= 0; i-- {
h = middlewares[i](h)
}
return h
}
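As a usage sketch (not part of this changeset; the middleware and endpoint names below are illustrative), `Chain` and `Middlewares.Handler` compose a handler stack without going through a Mux:
```go
package main

import (
	"net/http"

	"github.com/go-chi/chi"
)

// setHeader is an illustrative middleware constructor that stamps a response header.
func setHeader(key, value string) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set(key, value)
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	endpoint := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	// Chain composes the middlewares in order; Handler appends the final endpoint.
	h := chi.Chain(setHeader("X-A", "1"), setHeader("X-B", "2")).Handler(endpoint)
	http.ListenAndServe(":3000", h)
}
```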

134
vendor/github.com/go-chi/chi/chi.go generated vendored

@@ -1,134 +0,0 @@
//
// Package chi is a small, idiomatic and composable router for building HTTP services.
//
// chi requires Go 1.10 or newer.
//
// Example:
// package main
//
// import (
// "net/http"
//
// "github.com/go-chi/chi"
// "github.com/go-chi/chi/middleware"
// )
//
// func main() {
// r := chi.NewRouter()
// r.Use(middleware.Logger)
// r.Use(middleware.Recoverer)
//
// r.Get("/", func(w http.ResponseWriter, r *http.Request) {
// w.Write([]byte("root."))
// })
//
// http.ListenAndServe(":3333", r)
// }
//
// See github.com/go-chi/chi/_examples/ for more in-depth examples.
//
// URL patterns allow for easy matching of path components in HTTP
// requests. The matching components can then be accessed using
// chi.URLParam(). All patterns must begin with a slash.
//
// A simple named placeholder {name} matches any sequence of characters
// up to the next / or the end of the URL. Trailing slashes on paths must
// be handled explicitly.
//
// A placeholder with a name followed by a colon allows a regular
// expression match, for example {number:\\d+}. The regular expression
// syntax is Go's normal regexp RE2 syntax, except that regular expressions
// including { or } are not supported, and / will never be
// matched. An anonymous regexp pattern is allowed, using an empty string
// before the colon in the placeholder, such as {:\\d+}
//
// The special placeholder of asterisk matches the rest of the requested
// URL. Any trailing characters in the pattern are ignored. This is the only
// placeholder which will match / characters.
//
// Examples:
// "/user/{name}" matches "/user/jsmith" but not "/user/jsmith/info" or "/user/jsmith/"
// "/user/{name}/info" matches "/user/jsmith/info"
// "/page/*" matches "/page/intro/latest"
// "/page/*/index" also matches "/page/intro/latest"
// "/date/{yyyy:\\d\\d\\d\\d}/{mm:\\d\\d}/{dd:\\d\\d}" matches "/date/2017/04/01"
//
package chi
import "net/http"
// NewRouter returns a new Mux object that implements the Router interface.
func NewRouter() *Mux {
return NewMux()
}
// Router consisting of the core routing methods used by chi's Mux,
// using only the standard net/http.
type Router interface {
http.Handler
Routes
// Use appends one or more middlewares onto the Router stack.
Use(middlewares ...func(http.Handler) http.Handler)
// With adds inline middlewares for an endpoint handler.
With(middlewares ...func(http.Handler) http.Handler) Router
// Group adds a new inline-Router along the current routing
// path, with a fresh middleware stack for the inline-Router.
Group(fn func(r Router)) Router
// Route mounts a sub-Router along a `pattern` string.
Route(pattern string, fn func(r Router)) Router
// Mount attaches another http.Handler along ./pattern/*
Mount(pattern string, h http.Handler)
// Handle and HandleFunc adds routes for `pattern` that matches
// all HTTP methods.
Handle(pattern string, h http.Handler)
HandleFunc(pattern string, h http.HandlerFunc)
// Method and MethodFunc adds routes for `pattern` that matches
// the `method` HTTP method.
Method(method, pattern string, h http.Handler)
MethodFunc(method, pattern string, h http.HandlerFunc)
// HTTP-method routing along `pattern`
Connect(pattern string, h http.HandlerFunc)
Delete(pattern string, h http.HandlerFunc)
Get(pattern string, h http.HandlerFunc)
Head(pattern string, h http.HandlerFunc)
Options(pattern string, h http.HandlerFunc)
Patch(pattern string, h http.HandlerFunc)
Post(pattern string, h http.HandlerFunc)
Put(pattern string, h http.HandlerFunc)
Trace(pattern string, h http.HandlerFunc)
// NotFound defines a handler to respond whenever a route could
// not be found.
NotFound(h http.HandlerFunc)
// MethodNotAllowed defines a handler to respond whenever a method is
// not allowed.
MethodNotAllowed(h http.HandlerFunc)
}
// Routes interface adds two methods for router traversal, which is also
// used by the `docgen` subpackage to generate documentation for Routers.
type Routes interface {
// Routes returns the routing tree in an easily traversable structure.
Routes() []Route
// Middlewares returns the list of middlewares in use by the router.
Middlewares() Middlewares
// Match searches the routing tree for a handler that matches
// the method/path - similar to routing a http request, but without
// executing the handler thereafter.
Match(rctx *Context, method, path string) bool
}
// Middlewares type is a slice of standard middleware handlers with methods
// to compose middleware chains and http.Handler's.
type Middlewares []func(http.Handler) http.Handler
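A small sketch of the `Routes.Match` method declared above (the registered route is hypothetical): it walks the routing tree for a method/path without executing the handler, which is handy in tests.
```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-chi/chi"
)

func main() {
	r := chi.NewRouter()
	r.Get("/users/{userID}", func(w http.ResponseWriter, r *http.Request) {})

	// Match reports whether a handler exists for the method/path, without invoking it.
	rctx := chi.NewRouteContext()
	fmt.Println(r.Match(rctx, http.MethodGet, "/users/123")) // true
}
```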

172
vendor/github.com/go-chi/chi/context.go generated vendored

@@ -1,172 +0,0 @@
package chi
import (
"context"
"net"
"net/http"
"strings"
)
// URLParam returns the url parameter from a http.Request object.
func URLParam(r *http.Request, key string) string {
if rctx := RouteContext(r.Context()); rctx != nil {
return rctx.URLParam(key)
}
return ""
}
// URLParamFromCtx returns the url parameter from a http.Request Context.
func URLParamFromCtx(ctx context.Context, key string) string {
if rctx := RouteContext(ctx); rctx != nil {
return rctx.URLParam(key)
}
return ""
}
// RouteContext returns chi's routing Context object from a
// http.Request Context.
func RouteContext(ctx context.Context) *Context {
val, _ := ctx.Value(RouteCtxKey).(*Context)
return val
}
// ServerBaseContext wraps an http.Handler to set the request context to the
// `baseCtx`.
func ServerBaseContext(baseCtx context.Context, h http.Handler) http.Handler {
fn := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
baseCtx := baseCtx
// Copy over default net/http server context keys
if v, ok := ctx.Value(http.ServerContextKey).(*http.Server); ok {
baseCtx = context.WithValue(baseCtx, http.ServerContextKey, v)
}
if v, ok := ctx.Value(http.LocalAddrContextKey).(net.Addr); ok {
baseCtx = context.WithValue(baseCtx, http.LocalAddrContextKey, v)
}
h.ServeHTTP(w, r.WithContext(baseCtx))
})
return fn
}
// NewRouteContext returns a new routing Context object.
func NewRouteContext() *Context {
return &Context{}
}
var (
// RouteCtxKey is the context.Context key to store the request context.
RouteCtxKey = &contextKey{"RouteContext"}
)
// Context is the default routing context set on the root node of a
// request context to track route patterns, URL parameters and
// an optional routing path.
type Context struct {
Routes Routes
// Routing path/method override used during the route search.
// See Mux#routeHTTP method.
RoutePath string
RouteMethod string
// Routing pattern stack throughout the lifecycle of the request,
// across all connected routers. It is a record of all matching
// patterns across a stack of sub-routers.
RoutePatterns []string
// URLParams are the stack of routeParams captured during the
// routing lifecycle across a stack of sub-routers.
URLParams RouteParams
// The endpoint routing pattern that matched the request URI path
// or `RoutePath` of the current sub-router. This value will update
// during the lifecycle of a request passing through a stack of
// sub-routers.
routePattern string
// Route parameters matched for the current sub-router. It is
// intentionally unexported so it can't be tampered with.
routeParams RouteParams
// methodNotAllowed hint
methodNotAllowed bool
}
// Reset a routing context to its initial state.
func (x *Context) Reset() {
x.Routes = nil
x.RoutePath = ""
x.RouteMethod = ""
x.RoutePatterns = x.RoutePatterns[:0]
x.URLParams.Keys = x.URLParams.Keys[:0]
x.URLParams.Values = x.URLParams.Values[:0]
x.routePattern = ""
x.routeParams.Keys = x.routeParams.Keys[:0]
x.routeParams.Values = x.routeParams.Values[:0]
x.methodNotAllowed = false
}
// URLParam returns the corresponding URL parameter value from the request
// routing context.
func (x *Context) URLParam(key string) string {
for k := len(x.URLParams.Keys) - 1; k >= 0; k-- {
if x.URLParams.Keys[k] == key {
return x.URLParams.Values[k]
}
}
return ""
}
// RoutePattern builds the routing pattern string for the particular
// request, at the particular point during routing. This means, the value
// will change throughout the execution of a request in a router. That is
// why it's advised to only use this value after calling the next handler.
//
// For example,
//
// func Instrument(next http.Handler) http.Handler {
// return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// next.ServeHTTP(w, r)
// routePattern := chi.RouteContext(r.Context()).RoutePattern()
// measure(w, r, routePattern)
// })
// }
func (x *Context) RoutePattern() string {
routePattern := strings.Join(x.RoutePatterns, "")
return replaceWildcards(routePattern)
}
// replaceWildcards takes a route pattern and recursively replaces all
// occurrences of "/*/" to "/".
func replaceWildcards(p string) string {
if strings.Contains(p, "/*/") {
return replaceWildcards(strings.Replace(p, "/*/", "/", -1))
}
return p
}
// RouteParams is a structure to track URL routing parameters efficiently.
type RouteParams struct {
Keys, Values []string
}
// Add will append a URL parameter to the end of the route param
func (s *RouteParams) Add(key, value string) {
s.Keys = append(s.Keys, key)
s.Values = append(s.Values, value)
}
// contextKey is a value for use with context.WithValue. It's used as
// a pointer so it fits in an interface{} without allocation. This technique
// for defining context keys was copied from Go 1.7's new use of context in net/http.
type contextKey struct {
name string
}
func (k *contextKey) String() string {
return "chi context value " + k.name
}

32
vendor/github.com/go-chi/chi/middleware/basic_auth.go generated vendored

@@ -1,32 +0,0 @@
package middleware
import (
"fmt"
"net/http"
)
// BasicAuth implements a simple middleware handler for adding basic http auth to a route.
func BasicAuth(realm string, creds map[string]string) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
user, pass, ok := r.BasicAuth()
if !ok {
basicAuthFailed(w, realm)
return
}
credPass, credUserOk := creds[user]
if !credUserOk || pass != credPass {
basicAuthFailed(w, realm)
return
}
next.ServeHTTP(w, r)
})
}
}
func basicAuthFailed(w http.ResponseWriter, realm string) {
w.Header().Add("WWW-Authenticate", fmt.Sprintf(`Basic realm="%s"`, realm))
w.WriteHeader(http.StatusUnauthorized)
}
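A minimal wiring sketch for this middleware (not part of the diff; realm and credential values are placeholders). Note the plain-text password comparison in the code above.
```go
package main

import (
	"net/http"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
)

func main() {
	r := chi.NewRouter()
	// Protect every route below with HTTP Basic auth; the map is user -> password.
	r.Use(middleware.BasicAuth("restricted", map[string]string{"admin": "s3cret"}))
	r.Get("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("authenticated"))
	})
	http.ListenAndServe(":3000", r)
}
```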

399
vendor/github.com/go-chi/chi/middleware/compress.go generated vendored

@@ -1,399 +0,0 @@
package middleware
import (
"bufio"
"compress/flate"
"compress/gzip"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"strings"
"sync"
)
var defaultCompressibleContentTypes = []string{
"text/html",
"text/css",
"text/plain",
"text/javascript",
"application/javascript",
"application/x-javascript",
"application/json",
"application/atom+xml",
"application/rss+xml",
"image/svg+xml",
}
// Compress is a middleware that compresses response
// body of a given content types to a data format based
// on Accept-Encoding request header. It uses a given
// compression level.
//
// NOTE: make sure to set the Content-Type header on your response
// otherwise this middleware will not compress the response body. For ex, in
// your handler you should set w.Header().Set("Content-Type", http.DetectContentType(yourBody))
// or set it manually.
//
// Passing a compression level of 5 is a sensible value
func Compress(level int, types ...string) func(next http.Handler) http.Handler {
compressor := NewCompressor(level, types...)
return compressor.Handler
}
// Compressor represents a set of encoding configurations.
type Compressor struct {
level int // The compression level.
// The mapping of encoder names to encoder functions.
encoders map[string]EncoderFunc
// The mapping of pooled encoders to pools.
pooledEncoders map[string]*sync.Pool
// The set of content types allowed to be compressed.
allowedTypes map[string]struct{}
allowedWildcards map[string]struct{}
// The list of encoders in order of decreasing precedence.
encodingPrecedence []string
}
// NewCompressor creates a new Compressor that will handle encoding responses.
//
// The level should be one of the ones defined in the flate package.
// The types are the content types that are allowed to be compressed.
func NewCompressor(level int, types ...string) *Compressor {
// If types are provided, set those as the allowed types. If none are
// provided, use the default list.
allowedTypes := make(map[string]struct{})
allowedWildcards := make(map[string]struct{})
if len(types) > 0 {
for _, t := range types {
if strings.Contains(strings.TrimSuffix(t, "/*"), "*") {
panic(fmt.Sprintf("middleware/compress: Unsupported content-type wildcard pattern '%s'. Only '/*' supported", t))
}
if strings.HasSuffix(t, "/*") {
allowedWildcards[strings.TrimSuffix(t, "/*")] = struct{}{}
} else {
allowedTypes[t] = struct{}{}
}
}
} else {
for _, t := range defaultCompressibleContentTypes {
allowedTypes[t] = struct{}{}
}
}
c := &Compressor{
level: level,
encoders: make(map[string]EncoderFunc),
pooledEncoders: make(map[string]*sync.Pool),
allowedTypes: allowedTypes,
allowedWildcards: allowedWildcards,
}
// Set the default encoders. The precedence order uses the reverse
// ordering that the encoders were added. This means adding new encoders
// will move them to the front of the order.
//
// TODO:
// lzma: Opera.
// sdch: Chrome, Android. Gzip output + dictionary header.
// br: Brotli, see https://github.com/go-chi/chi/pull/326
// HTTP 1.1 "deflate" (RFC 2616) stands for DEFLATE data (RFC 1951)
// wrapped with zlib (RFC 1950). The zlib wrapper uses Adler-32
// checksum compared to CRC-32 used in "gzip" and thus is faster.
//
// But.. some old browsers (MSIE, Safari 5.1) incorrectly expect
// raw DEFLATE data only, without the mentioned zlib wrapper.
// Because of this major confusion, most modern browsers try it
// both ways, first looking for zlib headers.
// Quote by Mark Adler: http://stackoverflow.com/a/9186091/385548
//
// The list of browsers having problems is quite big, see:
// http://zoompf.com/blog/2012/02/lose-the-wait-http-compression
// https://web.archive.org/web/20120321182910/http://www.vervestudios.co/projects/compression-tests/results
//
// That's why we prefer gzip over deflate. It's just more reliable
// and not significantly slower than deflate.
c.SetEncoder("deflate", encoderDeflate)
// TODO: Exception for old MSIE browsers that can't handle non-HTML?
// https://zoompf.com/blog/2012/02/lose-the-wait-http-compression
c.SetEncoder("gzip", encoderGzip)
// NOTE: Not implemented, intentionally:
// case "compress": // LZW. Deprecated.
// case "bzip2": // Too slow on-the-fly.
// case "zopfli": // Too slow on-the-fly.
// case "xz": // Too slow on-the-fly.
return c
}
// SetEncoder can be used to set the implementation of a compression algorithm.
//
// The encoding should be a standardised identifier. See:
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
//
// For example, add the Brotli algorithm:
//
// import brotli_enc "gopkg.in/kothar/brotli-go.v0/enc"
//
// compressor := middleware.NewCompressor(5, "text/html")
// compressor.SetEncoder("br", func(w http.ResponseWriter, level int) io.Writer {
// params := brotli_enc.NewBrotliParams()
// params.SetQuality(level)
// return brotli_enc.NewBrotliWriter(params, w)
// })
func (c *Compressor) SetEncoder(encoding string, fn EncoderFunc) {
encoding = strings.ToLower(encoding)
if encoding == "" {
panic("the encoding can not be empty")
}
if fn == nil {
panic("attempted to set a nil encoder function")
}
// If we are adding a new encoder that is already registered, we have to
// clear that one out first.
if _, ok := c.pooledEncoders[encoding]; ok {
delete(c.pooledEncoders, encoding)
}
if _, ok := c.encoders[encoding]; ok {
delete(c.encoders, encoding)
}
// If the encoder supports resetting (ioResetterWriter), then it can be pooled.
encoder := fn(ioutil.Discard, c.level)
if encoder != nil {
if _, ok := encoder.(ioResetterWriter); ok {
pool := &sync.Pool{
New: func() interface{} {
return fn(ioutil.Discard, c.level)
},
}
c.pooledEncoders[encoding] = pool
}
}
// If the encoder is not in the pooledEncoders, add it to the normal encoders.
if _, ok := c.pooledEncoders[encoding]; !ok {
c.encoders[encoding] = fn
}
for i, v := range c.encodingPrecedence {
if v == encoding {
c.encodingPrecedence = append(c.encodingPrecedence[:i], c.encodingPrecedence[i+1:]...)
}
}
c.encodingPrecedence = append([]string{encoding}, c.encodingPrecedence...)
}
// Handler returns a new middleware that will compress the response based on the
// current Compressor.
func (c *Compressor) Handler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
encoder, encoding, cleanup := c.selectEncoder(r.Header, w)
cw := &compressResponseWriter{
ResponseWriter: w,
w: w,
contentTypes: c.allowedTypes,
contentWildcards: c.allowedWildcards,
encoding: encoding,
compressable: false, // determined in post-handler
}
if encoder != nil {
cw.w = encoder
}
// Re-add the encoder to the pool if applicable.
defer cleanup()
defer cw.Close()
next.ServeHTTP(cw, r)
})
}
// selectEncoder returns the encoder, the name of the encoder, and a closer function.
func (c *Compressor) selectEncoder(h http.Header, w io.Writer) (io.Writer, string, func()) {
header := h.Get("Accept-Encoding")
// Parse the names of all accepted algorithms from the header.
accepted := strings.Split(strings.ToLower(header), ",")
// Find a supported encoder from the accepted list, in precedence order
for _, name := range c.encodingPrecedence {
if matchAcceptEncoding(accepted, name) {
if pool, ok := c.pooledEncoders[name]; ok {
encoder := pool.Get().(ioResetterWriter)
cleanup := func() {
pool.Put(encoder)
}
encoder.Reset(w)
return encoder, name, cleanup
}
if fn, ok := c.encoders[name]; ok {
return fn(w, c.level), name, func() {}
}
}
}
// No encoder found to match the accepted encoding
return nil, "", func() {}
}
func matchAcceptEncoding(accepted []string, encoding string) bool {
for _, v := range accepted {
if strings.Contains(v, encoding) {
return true
}
}
return false
}
// An EncoderFunc is a function that wraps the provided io.Writer with a
// streaming compression algorithm and returns it.
//
// In case of failure, the function should return nil.
type EncoderFunc func(w io.Writer, level int) io.Writer
// Interface for types that allow resetting io.Writers.
type ioResetterWriter interface {
io.Writer
Reset(w io.Writer)
}
type compressResponseWriter struct {
http.ResponseWriter
// The streaming encoder writer to be used if there is one. Otherwise,
// this is just the normal writer.
w io.Writer
encoding string
contentTypes map[string]struct{}
contentWildcards map[string]struct{}
wroteHeader bool
compressable bool
}
func (cw *compressResponseWriter) isCompressable() bool {
// Parse the first part of the Content-Type response header.
contentType := cw.Header().Get("Content-Type")
if idx := strings.Index(contentType, ";"); idx >= 0 {
contentType = contentType[0:idx]
}
// Is the content type compressable?
if _, ok := cw.contentTypes[contentType]; ok {
return true
}
if idx := strings.Index(contentType, "/"); idx > 0 {
contentType = contentType[0:idx]
_, ok := cw.contentWildcards[contentType]
return ok
}
return false
}
func (cw *compressResponseWriter) WriteHeader(code int) {
if cw.wroteHeader {
cw.ResponseWriter.WriteHeader(code) // Allow multiple calls to propagate.
return
}
cw.wroteHeader = true
defer cw.ResponseWriter.WriteHeader(code)
// Already compressed data?
if cw.Header().Get("Content-Encoding") != "" {
return
}
if !cw.isCompressable() {
cw.compressable = false
return
}
if cw.encoding != "" {
cw.compressable = true
cw.Header().Set("Content-Encoding", cw.encoding)
cw.Header().Set("Vary", "Accept-Encoding")
// The content-length after compression is unknown
cw.Header().Del("Content-Length")
}
}
func (cw *compressResponseWriter) Write(p []byte) (int, error) {
if !cw.wroteHeader {
cw.WriteHeader(http.StatusOK)
}
return cw.writer().Write(p)
}
func (cw *compressResponseWriter) writer() io.Writer {
if cw.compressable {
return cw.w
} else {
return cw.ResponseWriter
}
}
type compressFlusher interface {
Flush() error
}
func (cw *compressResponseWriter) Flush() {
if f, ok := cw.writer().(http.Flusher); ok {
f.Flush()
}
// If the underlying writer has a compression-style Flush() error method
// (e.g. gzip.Writer), call that Flush() method instead
if f, ok := cw.writer().(compressFlusher); ok {
f.Flush()
// Also flush the underlying response writer
if f, ok := cw.ResponseWriter.(http.Flusher); ok {
f.Flush()
}
}
}
func (cw *compressResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
if hj, ok := cw.writer().(http.Hijacker); ok {
return hj.Hijack()
}
return nil, nil, errors.New("chi/middleware: http.Hijacker is unavailable on the writer")
}
func (cw *compressResponseWriter) Push(target string, opts *http.PushOptions) error {
if ps, ok := cw.writer().(http.Pusher); ok {
return ps.Push(target, opts)
}
return errors.New("chi/middleware: http.Pusher is unavailable on the writer")
}
func (cw *compressResponseWriter) Close() error {
if c, ok := cw.writer().(io.WriteCloser); ok {
return c.Close()
}
return errors.New("chi/middleware: io.WriteCloser is unavailable on the writer")
}
func encoderGzip(w io.Writer, level int) io.Writer {
gw, err := gzip.NewWriterLevel(w, level)
if err != nil {
return nil
}
return gw
}
func encoderDeflate(w io.Writer, level int) io.Writer {
dw, err := flate.NewWriter(w, level)
if err != nil {
return nil
}
return dw
}
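A usage sketch for the Compress middleware above, using level 5 as suggested in its doc comment; the content types, handler, and port are illustrative only:

package main

import (
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // Compress responses at level 5 for the listed content types.
    r.Use(middleware.Compress(5, "text/html", "application/json"))
    r.Get("/", func(w http.ResponseWriter, r *http.Request) {
        // The middleware only compresses when Content-Type matches an allowed type.
        w.Header().Set("Content-Type", "text/html")
        w.Write([]byte("<html><body>hello</body></html>"))
    })
    http.ListenAndServe(":3000", r)
}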

View File

@ -1,51 +0,0 @@
package middleware
import (
"net/http"
"strings"
)
// ContentCharset generates a handler that writes a 415 Unsupported Media Type response if none of the charsets match.
// An empty charset will allow requests with no Content-Type header or no specified charset.
func ContentCharset(charsets ...string) func(next http.Handler) http.Handler {
for i, c := range charsets {
charsets[i] = strings.ToLower(c)
}
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !contentEncoding(r.Header.Get("Content-Type"), charsets...) {
w.WriteHeader(http.StatusUnsupportedMediaType)
return
}
next.ServeHTTP(w, r)
})
}
}
// Check the Content-Type charset against a list of acceptable values.
func contentEncoding(ce string, charsets ...string) bool {
_, ce = split(strings.ToLower(ce), ";")
_, ce = split(ce, "charset=")
ce, _ = split(ce, ";")
for _, c := range charsets {
if ce == c {
return true
}
}
return false
}
// Split a string in two parts, cleaning any whitespace.
func split(str, sep string) (string, string) {
var a, b string
var parts = strings.SplitN(str, sep, 2)
a = strings.TrimSpace(parts[0])
if len(parts) == 2 {
b = strings.TrimSpace(parts[1])
}
return a, b
}
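A usage sketch for ContentCharset; the accepted charsets and route are illustrative only. Passing an empty string also admits requests that declare no charset:

package main

import (
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // Accept UTF-8, Latin-1, or requests without a declared charset; anything else gets a 415.
    r.Use(middleware.ContentCharset("UTF-8", "Latin-1", ""))
    r.Post("/data", func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusNoContent)
    })
    http.ListenAndServe(":3000", r)
}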

View File

@ -1,34 +0,0 @@
package middleware
import (
"net/http"
"strings"
)
// AllowContentEncoding enforces a whitelist of request Content-Encoding values,
// otherwise responding with a 415 Unsupported Media Type status.
func AllowContentEncoding(contentEncoding ...string) func(next http.Handler) http.Handler {
allowedEncodings := make(map[string]struct{}, len(contentEncoding))
for _, encoding := range contentEncoding {
allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))] = struct{}{}
}
return func(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
requestEncodings := r.Header["Content-Encoding"]
// skip check for empty content body or no Content-Encoding
if r.ContentLength == 0 {
next.ServeHTTP(w, r)
return
}
// All encodings in the request must be allowed
for _, encoding := range requestEncodings {
if _, ok := allowedEncodings[strings.TrimSpace(strings.ToLower(encoding))]; !ok {
w.WriteHeader(http.StatusUnsupportedMediaType)
return
}
}
next.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
}
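A usage sketch for AllowContentEncoding; the allowed encodings and route are illustrative only:

package main

import (
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // Only accept request bodies sent uncompressed ("identity") or gzipped;
    // bodies declaring any other Content-Encoding receive a 415.
    r.Use(middleware.AllowContentEncoding("identity", "gzip"))
    r.Post("/upload", func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusAccepted)
    })
    http.ListenAndServe(":3000", r)
}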

View File

@ -1,51 +0,0 @@
package middleware
import (
"net/http"
"strings"
)
// SetHeader is a convenience handler to set a response header key/value
func SetHeader(key, value string) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
w.Header().Set(key, value)
next.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
}
// AllowContentType enforces a whitelist of request Content-Types,
// otherwise responding with a 415 Unsupported Media Type status.
func AllowContentType(contentTypes ...string) func(next http.Handler) http.Handler {
cT := []string{}
for _, t := range contentTypes {
cT = append(cT, strings.ToLower(t))
}
return func(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
if r.ContentLength == 0 {
// skip check for empty content body
next.ServeHTTP(w, r)
return
}
s := strings.ToLower(strings.TrimSpace(r.Header.Get("Content-Type")))
if i := strings.Index(s, ";"); i > -1 {
s = s[0:i]
}
for _, t := range cT {
if t == s {
next.ServeHTTP(w, r)
return
}
}
w.WriteHeader(http.StatusUnsupportedMediaType)
}
return http.HandlerFunc(fn)
}
}
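A usage sketch combining SetHeader and AllowContentType; the header value, content types, and route are illustrative only:

package main

import (
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // Reject request bodies that are not JSON or XML, and tag every response.
    r.Use(middleware.AllowContentType("application/json", "text/xml"))
    r.Use(middleware.SetHeader("X-Powered-By", "chi"))
    r.Post("/items", func(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusCreated)
    })
    http.ListenAndServe(":3000", r)
}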

View File

@ -1,39 +0,0 @@
package middleware
import (
"net/http"
"github.com/go-chi/chi"
)
// GetHead automatically routes undefined HEAD requests to GET handlers.
func GetHead(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "HEAD" {
rctx := chi.RouteContext(r.Context())
routePath := rctx.RoutePath
if routePath == "" {
if r.URL.RawPath != "" {
routePath = r.URL.RawPath
} else {
routePath = r.URL.Path
}
}
// Temporary routing context to look-ahead before routing the request
tctx := chi.NewRouteContext()
// Attempt to find a HEAD handler for the routing path; if not found, traverse
// the router as though it's a GET route, but proceed with the request
// using the HEAD method.
if !rctx.Routes.Match(tctx, "HEAD", routePath) {
rctx.RouteMethod = "GET"
rctx.RoutePath = routePath
next.ServeHTTP(w, r)
return
}
}
next.ServeHTTP(w, r)
})
}
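A usage sketch for GetHead; the route and handler are illustrative only. HEAD /resource has no explicit handler here, so it is routed to the GET handler, and net/http does not send a body for HEAD responses:

package main

import (
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // Route otherwise-undefined HEAD requests to the matching GET handler.
    r.Use(middleware.GetHead)
    r.Get("/resource", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("resource body"))
    })
    http.ListenAndServe(":3000", r)
}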

View File

@ -1,26 +0,0 @@
package middleware
import (
"net/http"
"strings"
)
// Heartbeat is an endpoint middleware useful for setting up a path like
// `/ping` that load balancers or external uptime-testing services
// can request before hitting any routes. It's also convenient
// to place this above ACL middlewares.
func Heartbeat(endpoint string) func(http.Handler) http.Handler {
f := func(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
if r.Method == "GET" && strings.EqualFold(r.URL.Path, endpoint) {
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte("."))
return
}
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
return f
}
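A usage sketch for Heartbeat; the endpoint path and route are illustrative only:

package main

import (
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // GET /ping is answered directly by the middleware, before any routes
    // or later middlewares run.
    r.Use(middleware.Heartbeat("/ping"))
    r.Get("/", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("home"))
    })
    http.ListenAndServe(":3000", r)
}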

View File

@ -1,155 +0,0 @@
package middleware
import (
"bytes"
"context"
"log"
"net/http"
"os"
"time"
)
var (
// LogEntryCtxKey is the context.Context key to store the request log entry.
LogEntryCtxKey = &contextKey{"LogEntry"}
// DefaultLogger is called by the Logger middleware handler to log each request.
// It's made a package-level variable so that it can be reconfigured for custom
// logging configurations.
DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags), NoColor: false})
)
// Logger is a middleware that logs the start and end of each request, along
// with some useful data about what was requested, what the response status was,
// and how long it took to return. When standard output is a TTY, Logger will
// print in color, otherwise it will print in black and white. Logger prints a
// request ID if one is provided.
//
// Alternatively, look at https://github.com/goware/httplog for a more in-depth
// http logger with structured logging support.
func Logger(next http.Handler) http.Handler {
return DefaultLogger(next)
}
// RequestLogger returns a logger handler using a custom LogFormatter.
func RequestLogger(f LogFormatter) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
entry := f.NewLogEntry(r)
ww := NewWrapResponseWriter(w, r.ProtoMajor)
t1 := time.Now()
defer func() {
entry.Write(ww.Status(), ww.BytesWritten(), ww.Header(), time.Since(t1), nil)
}()
next.ServeHTTP(ww, WithLogEntry(r, entry))
}
return http.HandlerFunc(fn)
}
}
// LogFormatter initiates the beginning of a new LogEntry per request.
// See DefaultLogFormatter for an example implementation.
type LogFormatter interface {
NewLogEntry(r *http.Request) LogEntry
}
// LogEntry records the final log when a request completes.
// See defaultLogEntry for an example implementation.
type LogEntry interface {
Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{})
Panic(v interface{}, stack []byte)
}
// GetLogEntry returns the in-context LogEntry for a request.
func GetLogEntry(r *http.Request) LogEntry {
entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry)
return entry
}
// WithLogEntry sets the in-context LogEntry for a request.
func WithLogEntry(r *http.Request, entry LogEntry) *http.Request {
r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry))
return r
}
// LoggerInterface accepts printing to stdlib logger or compatible logger.
type LoggerInterface interface {
Print(v ...interface{})
}
// DefaultLogFormatter is a simple logger that implements a LogFormatter.
type DefaultLogFormatter struct {
Logger LoggerInterface
NoColor bool
}
// NewLogEntry creates a new LogEntry for the request.
func (l *DefaultLogFormatter) NewLogEntry(r *http.Request) LogEntry {
useColor := !l.NoColor
entry := &defaultLogEntry{
DefaultLogFormatter: l,
request: r,
buf: &bytes.Buffer{},
useColor: useColor,
}
reqID := GetReqID(r.Context())
if reqID != "" {
cW(entry.buf, useColor, nYellow, "[%s] ", reqID)
}
cW(entry.buf, useColor, nCyan, "\"")
cW(entry.buf, useColor, bMagenta, "%s ", r.Method)
scheme := "http"
if r.TLS != nil {
scheme = "https"
}
cW(entry.buf, useColor, nCyan, "%s://%s%s %s\" ", scheme, r.Host, r.RequestURI, r.Proto)
entry.buf.WriteString("from ")
entry.buf.WriteString(r.RemoteAddr)
entry.buf.WriteString(" - ")
return entry
}
type defaultLogEntry struct {
*DefaultLogFormatter
request *http.Request
buf *bytes.Buffer
useColor bool
}
func (l *defaultLogEntry) Write(status, bytes int, header http.Header, elapsed time.Duration, extra interface{}) {
switch {
case status < 200:
cW(l.buf, l.useColor, bBlue, "%03d", status)
case status < 300:
cW(l.buf, l.useColor, bGreen, "%03d", status)
case status < 400:
cW(l.buf, l.useColor, bCyan, "%03d", status)
case status < 500:
cW(l.buf, l.useColor, bYellow, "%03d", status)
default:
cW(l.buf, l.useColor, bRed, "%03d", status)
}
cW(l.buf, l.useColor, bBlue, " %dB", bytes)
l.buf.WriteString(" in ")
if elapsed < 500*time.Millisecond {
cW(l.buf, l.useColor, nGreen, "%s", elapsed)
} else if elapsed < 5*time.Second {
cW(l.buf, l.useColor, nYellow, "%s", elapsed)
} else {
cW(l.buf, l.useColor, nRed, "%s", elapsed)
}
l.Logger.Print(l.buf.String())
}
func (l *defaultLogEntry) Panic(v interface{}, stack []byte) {
PrintPrettyStack(v)
}
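A sketch of wiring RequestLogger with a reconfigured DefaultLogFormatter instead of the package-level DefaultLogger; the log destination, prefix, and route are illustrative only:

package main

import (
    "log"
    "net/http"
    "os"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // Log requests to stderr, without color.
    logger := log.New(os.Stderr, "http: ", log.LstdFlags)
    r.Use(middleware.RequestLogger(&middleware.DefaultLogFormatter{Logger: logger, NoColor: true}))
    r.Get("/", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("logged"))
    })
    http.ListenAndServe(":3000", r)
}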

View File

@ -1,23 +0,0 @@
package middleware
import "net/http"
// New will create a new middleware handler from a http.Handler.
func New(h http.Handler) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
h.ServeHTTP(w, r)
})
}
}
// contextKey is a value for use with context.WithValue. It's used as
// a pointer so it fits in an interface{} without allocation. This technique
// for defining context keys was copied from Go 1.7's new use of context in net/http.
type contextKey struct {
name string
}
func (k *contextKey) String() string {
return "chi/middleware context value " + k.name
}

View File

@ -1,58 +0,0 @@
package middleware
// Ported from Goji's middleware, source:
// https://github.com/zenazn/goji/tree/master/web/middleware
import (
"net/http"
"time"
)
// Unix epoch time
var epoch = time.Unix(0, 0).Format(time.RFC1123)
// Taken from https://github.com/mytrile/nocache
var noCacheHeaders = map[string]string{
"Expires": epoch,
"Cache-Control": "no-cache, no-store, no-transform, must-revalidate, private, max-age=0",
"Pragma": "no-cache",
"X-Accel-Expires": "0",
}
var etagHeaders = []string{
"ETag",
"If-Modified-Since",
"If-Match",
"If-None-Match",
"If-Range",
"If-Unmodified-Since",
}
// NoCache is a simple piece of middleware that sets a number of HTTP headers to prevent
// a router (or subrouter) from being cached by an upstream proxy and/or client.
//
// As per http://wiki.nginx.org/HttpProxyModule - NoCache sets:
// Expires: Thu, 01 Jan 1970 00:00:00 UTC
// Cache-Control: no-cache, private, max-age=0
// X-Accel-Expires: 0
// Pragma: no-cache (for HTTP/1.0 proxies/clients)
func NoCache(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
// Delete any ETag headers that may have been set
for _, v := range etagHeaders {
if r.Header.Get(v) != "" {
r.Header.Del(v)
}
}
// Set our NoCache headers
for k, v := range noCacheHeaders {
w.Header().Set(k, v)
}
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
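A usage sketch applying NoCache to a subrouter; the /api grouping and handler are illustrative only:

package main

import (
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    r.Route("/api", func(api chi.Router) {
        // Responses under /api carry the no-cache headers set by the middleware.
        api.Use(middleware.NoCache)
        api.Get("/status", func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("ok"))
        })
    })
    http.ListenAndServe(":3000", r)
}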

View File

@ -1,55 +0,0 @@
package middleware
import (
"expvar"
"fmt"
"net/http"
"net/http/pprof"
"github.com/go-chi/chi"
)
// Profiler is a convenient subrouter used for mounting net/http/pprof. ie.
//
// func MyService() http.Handler {
// r := chi.NewRouter()
// // ..middlewares
// r.Mount("/debug", middleware.Profiler())
// // ..routes
// return r
// }
func Profiler() http.Handler {
r := chi.NewRouter()
r.Use(NoCache)
r.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, r.RequestURI+"/pprof/", 301)
})
r.HandleFunc("/pprof", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, r.RequestURI+"/", 301)
})
r.HandleFunc("/pprof/*", pprof.Index)
r.HandleFunc("/pprof/cmdline", pprof.Cmdline)
r.HandleFunc("/pprof/profile", pprof.Profile)
r.HandleFunc("/pprof/symbol", pprof.Symbol)
r.HandleFunc("/pprof/trace", pprof.Trace)
r.HandleFunc("/vars", expVars)
return r
}
// Replicated from expvar.go as not public.
func expVars(w http.ResponseWriter, r *http.Request) {
first := true
w.Header().Set("Content-Type", "application/json")
fmt.Fprintf(w, "{\n")
expvar.Do(func(kv expvar.KeyValue) {
if !first {
fmt.Fprintf(w, ",\n")
}
first = false
fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
})
fmt.Fprintf(w, "\n}\n")
}

View File

@ -1,54 +0,0 @@
package middleware
// Ported from Goji's middleware, source:
// https://github.com/zenazn/goji/tree/master/web/middleware
import (
"net/http"
"strings"
)
var xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
var xRealIP = http.CanonicalHeaderKey("X-Real-IP")
// RealIP is a middleware that sets a http.Request's RemoteAddr to the results
// of parsing either the X-Forwarded-For header or the X-Real-IP header (in that
// order).
//
// This middleware should be inserted fairly early in the middleware stack to
// ensure that subsequent layers (e.g., request loggers) which examine the
// RemoteAddr will see the intended value.
//
// You should only use this middleware if you can trust the headers passed to
// you (in particular, the two headers this middleware uses), for example
// because you have placed a reverse proxy like HAProxy or nginx in front of
// chi. If your reverse proxies are configured to pass along arbitrary header
// values from the client, or if you use this middleware without a reverse
// proxy, malicious clients will be able to make you very sad (or, depending on
// how you're using RemoteAddr, vulnerable to an attack of some sort).
func RealIP(h http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
if rip := realIP(r); rip != "" {
r.RemoteAddr = rip
}
h.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
func realIP(r *http.Request) string {
var ip string
if xrip := r.Header.Get(xRealIP); xrip != "" {
ip = xrip
} else if xff := r.Header.Get(xForwardedFor); xff != "" {
i := strings.Index(xff, ", ")
if i == -1 {
i = len(xff)
}
ip = xff[:i]
}
return ip
}
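A usage sketch for RealIP; the handler is illustrative only, and as the doc comment above warns, this is only safe behind a trusted reverse proxy that sets X-Forwarded-For or X-Real-IP:

package main

import (
    "log"
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // RealIP must come before any middleware that reads RemoteAddr, such as the logger.
    r.Use(middleware.RealIP)
    r.Use(middleware.Logger)
    r.Get("/", func(w http.ResponseWriter, r *http.Request) {
        log.Printf("client ip: %s", r.RemoteAddr)
        w.Write([]byte("ok"))
    })
    http.ListenAndServe(":3000", r)
}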

View File

@ -1,192 +0,0 @@
package middleware
// The original work was derived from Goji's middleware, source:
// https://github.com/zenazn/goji/tree/master/web/middleware
import (
"bytes"
"errors"
"fmt"
"net/http"
"os"
"runtime/debug"
"strings"
)
// Recoverer is a middleware that recovers from panics, logs the panic (and a
// backtrace), and returns an HTTP 500 (Internal Server Error) status if
// possible. Recoverer prints a request ID if one is provided.
//
// Alternatively, look at https://github.com/pressly/lg middleware pkgs.
func Recoverer(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
defer func() {
if rvr := recover(); rvr != nil && rvr != http.ErrAbortHandler {
logEntry := GetLogEntry(r)
if logEntry != nil {
logEntry.Panic(rvr, debug.Stack())
} else {
PrintPrettyStack(rvr)
}
w.WriteHeader(http.StatusInternalServerError)
}
}()
next.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
func PrintPrettyStack(rvr interface{}) {
debugStack := debug.Stack()
s := prettyStack{}
out, err := s.parse(debugStack, rvr)
if err == nil {
os.Stderr.Write(out)
} else {
// print stdlib output as a fallback
os.Stderr.Write(debugStack)
}
}
type prettyStack struct {
}
func (s prettyStack) parse(debugStack []byte, rvr interface{}) ([]byte, error) {
var err error
useColor := true
buf := &bytes.Buffer{}
cW(buf, false, bRed, "\n")
cW(buf, useColor, bCyan, " panic: ")
cW(buf, useColor, bBlue, "%v", rvr)
cW(buf, false, bWhite, "\n \n")
// process debug stack info
stack := strings.Split(string(debugStack), "\n")
lines := []string{}
// locate panic line, as we may have nested panics
for i := len(stack) - 1; i > 0; i-- {
lines = append(lines, stack[i])
if strings.HasPrefix(stack[i], "panic(0x") {
lines = lines[0 : len(lines)-2] // remove boilerplate
break
}
}
// reverse
for i := len(lines)/2 - 1; i >= 0; i-- {
opp := len(lines) - 1 - i
lines[i], lines[opp] = lines[opp], lines[i]
}
// decorate
for i, line := range lines {
lines[i], err = s.decorateLine(line, useColor, i)
if err != nil {
return nil, err
}
}
for _, l := range lines {
fmt.Fprintf(buf, "%s", l)
}
return buf.Bytes(), nil
}
func (s prettyStack) decorateLine(line string, useColor bool, num int) (string, error) {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "\t") || strings.Contains(line, ".go:") {
return s.decorateSourceLine(line, useColor, num)
} else if strings.HasSuffix(line, ")") {
return s.decorateFuncCallLine(line, useColor, num)
} else {
if strings.HasPrefix(line, "\t") {
return strings.Replace(line, "\t", " ", 1), nil
} else {
return fmt.Sprintf(" %s\n", line), nil
}
}
}
func (s prettyStack) decorateFuncCallLine(line string, useColor bool, num int) (string, error) {
idx := strings.LastIndex(line, "(")
if idx < 0 {
return "", errors.New("not a func call line")
}
buf := &bytes.Buffer{}
pkg := line[0:idx]
// addr := line[idx:]
method := ""
idx = strings.LastIndex(pkg, string(os.PathSeparator))
if idx < 0 {
idx = strings.Index(pkg, ".")
method = pkg[idx:]
pkg = pkg[0:idx]
} else {
method = pkg[idx+1:]
pkg = pkg[0 : idx+1]
idx = strings.Index(method, ".")
pkg += method[0:idx]
method = method[idx:]
}
pkgColor := nYellow
methodColor := bGreen
if num == 0 {
cW(buf, useColor, bRed, " -> ")
pkgColor = bMagenta
methodColor = bRed
} else {
cW(buf, useColor, bWhite, " ")
}
cW(buf, useColor, pkgColor, "%s", pkg)
cW(buf, useColor, methodColor, "%s\n", method)
// cW(buf, useColor, nBlack, "%s", addr)
return buf.String(), nil
}
func (s prettyStack) decorateSourceLine(line string, useColor bool, num int) (string, error) {
idx := strings.LastIndex(line, ".go:")
if idx < 0 {
return "", errors.New("not a source line")
}
buf := &bytes.Buffer{}
path := line[0 : idx+3]
lineno := line[idx+3:]
idx = strings.LastIndex(path, string(os.PathSeparator))
dir := path[0 : idx+1]
file := path[idx+1:]
idx = strings.Index(lineno, " ")
if idx > 0 {
lineno = lineno[0:idx]
}
fileColor := bCyan
lineColor := bGreen
if num == 1 {
cW(buf, useColor, bRed, " -> ")
fileColor = bRed
lineColor = bMagenta
} else {
cW(buf, false, bWhite, " ")
}
cW(buf, useColor, bWhite, "%s", dir)
cW(buf, useColor, fileColor, "%s", file)
cW(buf, useColor, lineColor, "%s", lineno)
if num == 1 {
cW(buf, false, bWhite, "\n")
}
cW(buf, false, bWhite, "\n")
return buf.String(), nil
}
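A usage sketch for Recoverer; the panicking route is illustrative only:

package main

import (
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // The panic below is caught by Recoverer: the stack trace is printed
    // and the client receives a 500 instead of a dropped connection.
    r.Use(middleware.Recoverer)
    r.Get("/boom", func(w http.ResponseWriter, r *http.Request) {
        panic("something went wrong")
    })
    http.ListenAndServe(":3000", r)
}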

View File

@ -1,96 +0,0 @@
package middleware
// Ported from Goji's middleware, source:
// https://github.com/zenazn/goji/tree/master/web/middleware
import (
"context"
"crypto/rand"
"encoding/base64"
"fmt"
"net/http"
"os"
"strings"
"sync/atomic"
)
// Key to use when setting the request ID.
type ctxKeyRequestID int
// RequestIDKey is the key that holds the unique request ID in a request context.
const RequestIDKey ctxKeyRequestID = 0
// RequestIDHeader is the name of the HTTP Header which contains the request id.
// Exported so that it can be changed by developers
var RequestIDHeader = "X-Request-Id"
var prefix string
var reqid uint64
// A quick note on the statistics here: we're trying to calculate the chance that
// two randomly generated base62 prefixes will collide. We use the formula from
// http://en.wikipedia.org/wiki/Birthday_problem
//
// P[m, n] \approx 1 - e^{-m^2/2n}
//
// We ballpark an upper bound for $m$ by imagining (for whatever reason) a server
// that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$
//
// For a $k$ character base-62 identifier, we have $n(k) = 62^k$
//
// Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for
// our purposes, and is surely more than anyone would ever need in practice -- a
// process that is rebooted a handful of times a day for a hundred years has less
// than a millionth of a percent chance of generating two colliding IDs.
func init() {
hostname, err := os.Hostname()
if hostname == "" || err != nil {
hostname = "localhost"
}
var buf [12]byte
var b64 string
for len(b64) < 10 {
rand.Read(buf[:])
b64 = base64.StdEncoding.EncodeToString(buf[:])
b64 = strings.NewReplacer("+", "", "/", "").Replace(b64)
}
prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10])
}
// RequestID is a middleware that injects a request ID into the context of each
// request. A request ID is a string of the form "host.example.com/random-0001",
// where "random" is a base62 random string that uniquely identifies this go
// process, and where the last number is an atomically incremented request
// counter.
func RequestID(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
requestID := r.Header.Get(RequestIDHeader)
if requestID == "" {
myid := atomic.AddUint64(&reqid, 1)
requestID = fmt.Sprintf("%s-%06d", prefix, myid)
}
ctx = context.WithValue(ctx, RequestIDKey, requestID)
next.ServeHTTP(w, r.WithContext(ctx))
}
return http.HandlerFunc(fn)
}
// GetReqID returns a request ID from the given context if one is present.
// Returns the empty string if a request ID cannot be found.
func GetReqID(ctx context.Context) string {
if ctx == nil {
return ""
}
if reqID, ok := ctx.Value(RequestIDKey).(string); ok {
return reqID
}
return ""
}
// NextRequestID generates the next request ID in the sequence.
func NextRequestID() uint64 {
return atomic.AddUint64(&reqid, 1)
}
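A usage sketch for RequestID together with GetReqID; the route is illustrative only:

package main

import (
    "fmt"
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // RequestID must run before anything that calls GetReqID (e.g. the logger).
    r.Use(middleware.RequestID)
    r.Get("/", func(w http.ResponseWriter, r *http.Request) {
        // Either the inbound X-Request-Id header or a generated "host/prefix-000001" ID.
        fmt.Fprintf(w, "request id: %s", middleware.GetReqID(r.Context()))
    })
    http.ListenAndServe(":3000", r)
}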

View File

@ -1,160 +0,0 @@
package middleware
import (
"net/http"
"strings"
)
// RouteHeaders is a neat little header-based router that allows you to direct
// the flow of a request through a middleware stack based on a request header.
//
// For example, let's say you'd like to set up multiple routers depending on the
// request Host header, you could then do something like so:
//
// r := chi.NewRouter()
// rSubdomain := chi.NewRouter()
//
// r.Use(middleware.RouteHeaders().
// Route("Host", "example.com", middleware.New(r)).
// Route("Host", "*.example.com", middleware.New(rSubdomain)).
// Handler)
//
// r.Get("/", h)
// rSubdomain.Get("/", h2)
//
//
// Another example: imagine you want to set up multiple CORS handlers, where for
// your origin servers you allow authorized requests, but for third-party public
// requests, authorization is disabled.
//
// r := chi.NewRouter()
//
// r.Use(middleware.RouteHeaders().
// Route("Origin", "https://app.skyweaver.net", cors.Handler(cors.Options{
// AllowedOrigins: []string{"https://api.skyweaver.net"},
// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
// AllowedHeaders: []string{"Accept", "Authorization", "Content-Type"},
// AllowCredentials: true, // <----------<<< allow credentials
// })).
// Route("Origin", "*", cors.Handler(cors.Options{
// AllowedOrigins: []string{"*"},
// AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
// AllowedHeaders: []string{"Accept", "Content-Type"},
// AllowCredentials: false, // <----------<<< do not allow credentials
// })).
// Handler)
//
func RouteHeaders() HeaderRouter {
return HeaderRouter{}
}
type HeaderRouter map[string][]HeaderRoute
func (hr HeaderRouter) Route(header string, match string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter {
header = strings.ToLower(header)
k := hr[header]
if k == nil {
hr[header] = []HeaderRoute{}
}
hr[header] = append(hr[header], HeaderRoute{MatchOne: NewPattern(match), Middleware: middlewareHandler})
return hr
}
func (hr HeaderRouter) RouteAny(header string, match []string, middlewareHandler func(next http.Handler) http.Handler) HeaderRouter {
header = strings.ToLower(header)
k := hr[header]
if k == nil {
hr[header] = []HeaderRoute{}
}
patterns := []Pattern{}
for _, m := range match {
patterns = append(patterns, NewPattern(m))
}
hr[header] = append(hr[header], HeaderRoute{MatchAny: patterns, Middleware: middlewareHandler})
return hr
}
func (hr HeaderRouter) RouteDefault(handler func(next http.Handler) http.Handler) HeaderRouter {
hr["*"] = []HeaderRoute{{Middleware: handler}}
return hr
}
func (hr HeaderRouter) Handler(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if len(hr) == 0 {
// skip if no routes set; return so the request isn't served twice
next.ServeHTTP(w, r)
return
}
// find first matching header route, and continue
for header, matchers := range hr {
headerValue := r.Header.Get(header)
if headerValue == "" {
continue
}
headerValue = strings.ToLower(headerValue)
for _, matcher := range matchers {
if matcher.IsMatch(headerValue) {
matcher.Middleware(next).ServeHTTP(w, r)
return
}
}
}
// if no match, check for "*" default route
matcher, ok := hr["*"]
if !ok || matcher[0].Middleware == nil {
next.ServeHTTP(w, r)
return
}
matcher[0].Middleware(next).ServeHTTP(w, r)
})
}
type HeaderRoute struct {
MatchAny []Pattern
MatchOne Pattern
Middleware func(next http.Handler) http.Handler
}
func (r HeaderRoute) IsMatch(value string) bool {
if len(r.MatchAny) > 0 {
for _, m := range r.MatchAny {
if m.Match(value) {
return true
}
}
} else if r.MatchOne.Match(value) {
return true
}
return false
}
type Pattern struct {
prefix string
suffix string
wildcard bool
}
func NewPattern(value string) Pattern {
p := Pattern{}
if i := strings.IndexByte(value, '*'); i >= 0 {
p.wildcard = true
p.prefix = value[0:i]
p.suffix = value[i+1:]
} else {
p.prefix = value
}
return p
}
func (p Pattern) Match(v string) bool {
if !p.wildcard {
if p.prefix == v {
return true
} else {
return false
}
}
return len(v) >= len(p.prefix+p.suffix) && strings.HasPrefix(v, p.prefix) && strings.HasSuffix(v, p.suffix)
}

View File

@ -1,56 +0,0 @@
package middleware
import (
"fmt"
"net/http"
"github.com/go-chi/chi"
)
// StripSlashes is a middleware that will match request paths with a trailing
// slash, strip it from the path and continue routing through the mux; if a route
// matches, it will serve the handler.
func StripSlashes(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
var path string
rctx := chi.RouteContext(r.Context())
if rctx.RoutePath != "" {
path = rctx.RoutePath
} else {
path = r.URL.Path
}
if len(path) > 1 && path[len(path)-1] == '/' {
rctx.RoutePath = path[:len(path)-1]
}
next.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
// RedirectSlashes is a middleware that will match request paths with a trailing
// slash and redirect to the same path, less the trailing slash.
//
// NOTE: RedirectSlashes middleware is *incompatible* with http.FileServer,
// see https://github.com/go-chi/chi/issues/343
func RedirectSlashes(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
var path string
rctx := chi.RouteContext(r.Context())
if rctx.RoutePath != "" {
path = rctx.RoutePath
} else {
path = r.URL.Path
}
if len(path) > 1 && path[len(path)-1] == '/' {
if r.URL.RawQuery != "" {
path = fmt.Sprintf("%s?%s", path[:len(path)-1], r.URL.RawQuery)
} else {
path = path[:len(path)-1]
}
http.Redirect(w, r, path, 301)
return
}
next.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
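A usage sketch for StripSlashes; the route is illustrative only. RedirectSlashes (a 301 to the slash-less path) would be the alternative, but as noted above it does not play well with http.FileServer:

package main

import (
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // With StripSlashes, GET /users/ is routed to the /users handler below.
    r.Use(middleware.StripSlashes)
    r.Get("/users", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("user list"))
    })
    http.ListenAndServe(":3000", r)
}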

View File

@ -1,63 +0,0 @@
package middleware
// Ported from Goji's middleware, source:
// https://github.com/zenazn/goji/tree/master/web/middleware
import (
"fmt"
"io"
"os"
)
var (
// Normal colors
nBlack = []byte{'\033', '[', '3', '0', 'm'}
nRed = []byte{'\033', '[', '3', '1', 'm'}
nGreen = []byte{'\033', '[', '3', '2', 'm'}
nYellow = []byte{'\033', '[', '3', '3', 'm'}
nBlue = []byte{'\033', '[', '3', '4', 'm'}
nMagenta = []byte{'\033', '[', '3', '5', 'm'}
nCyan = []byte{'\033', '[', '3', '6', 'm'}
nWhite = []byte{'\033', '[', '3', '7', 'm'}
// Bright colors
bBlack = []byte{'\033', '[', '3', '0', ';', '1', 'm'}
bRed = []byte{'\033', '[', '3', '1', ';', '1', 'm'}
bGreen = []byte{'\033', '[', '3', '2', ';', '1', 'm'}
bYellow = []byte{'\033', '[', '3', '3', ';', '1', 'm'}
bBlue = []byte{'\033', '[', '3', '4', ';', '1', 'm'}
bMagenta = []byte{'\033', '[', '3', '5', ';', '1', 'm'}
bCyan = []byte{'\033', '[', '3', '6', ';', '1', 'm'}
bWhite = []byte{'\033', '[', '3', '7', ';', '1', 'm'}
reset = []byte{'\033', '[', '0', 'm'}
)
var IsTTY bool
func init() {
// This is sort of cheating: if stdout is a character device, we assume
// that means it's a TTY. Unfortunately, there are many non-TTY
// character devices, but fortunately stdout is rarely set to any of
// them.
//
// We could solve this properly by pulling in a dependency on
// code.google.com/p/go.crypto/ssh/terminal, for instance, but as a
// heuristic for whether to print in color or in black-and-white, I'd
// really rather not.
fi, err := os.Stdout.Stat()
if err == nil {
m := os.ModeDevice | os.ModeCharDevice
IsTTY = fi.Mode()&m == m
}
}
// colorWrite
func cW(w io.Writer, useColor bool, color []byte, s string, args ...interface{}) {
if IsTTY && useColor {
w.Write(color)
}
fmt.Fprintf(w, s, args...)
if IsTTY && useColor {
w.Write(reset)
}
}

View File

@ -1,132 +0,0 @@
package middleware
import (
"net/http"
"strconv"
"time"
)
const (
errCapacityExceeded = "Server capacity exceeded."
errTimedOut = "Timed out while waiting for a pending request to complete."
errContextCanceled = "Context was canceled."
)
var (
defaultBacklogTimeout = time.Second * 60
)
// ThrottleOpts represents a set of throttling options.
type ThrottleOpts struct {
Limit int
BacklogLimit int
BacklogTimeout time.Duration
RetryAfterFn func(ctxDone bool) time.Duration
}
// Throttle is a middleware that limits the number of currently processed requests
// at a time across all users. Note: Throttle is not a per-user rate-limiter;
// instead it just puts a ceiling on the number of currently in-flight requests
// being processed from the point where the Throttle middleware is mounted.
func Throttle(limit int) func(http.Handler) http.Handler {
return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogTimeout: defaultBacklogTimeout})
}
// ThrottleBacklog is a middleware that limits number of currently processed
// requests at a time and provides a backlog for holding a finite number of
// pending requests.
func ThrottleBacklog(limit int, backlogLimit int, backlogTimeout time.Duration) func(http.Handler) http.Handler {
return ThrottleWithOpts(ThrottleOpts{Limit: limit, BacklogLimit: backlogLimit, BacklogTimeout: backlogTimeout})
}
// ThrottleWithOpts is a middleware that limits the number of currently processed requests using the passed ThrottleOpts.
func ThrottleWithOpts(opts ThrottleOpts) func(http.Handler) http.Handler {
if opts.Limit < 1 {
panic("chi/middleware: Throttle expects limit > 0")
}
if opts.BacklogLimit < 0 {
panic("chi/middleware: Throttle expects backlogLimit to be positive")
}
t := throttler{
tokens: make(chan token, opts.Limit),
backlogTokens: make(chan token, opts.Limit+opts.BacklogLimit),
backlogTimeout: opts.BacklogTimeout,
retryAfterFn: opts.RetryAfterFn,
}
// Filling tokens.
for i := 0; i < opts.Limit+opts.BacklogLimit; i++ {
if i < opts.Limit {
t.tokens <- token{}
}
t.backlogTokens <- token{}
}
return func(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
select {
case <-ctx.Done():
t.setRetryAfterHeaderIfNeeded(w, true)
http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
return
case btok := <-t.backlogTokens:
timer := time.NewTimer(t.backlogTimeout)
defer func() {
t.backlogTokens <- btok
}()
select {
case <-timer.C:
t.setRetryAfterHeaderIfNeeded(w, false)
http.Error(w, errTimedOut, http.StatusServiceUnavailable)
return
case <-ctx.Done():
timer.Stop()
t.setRetryAfterHeaderIfNeeded(w, true)
http.Error(w, errContextCanceled, http.StatusServiceUnavailable)
return
case tok := <-t.tokens:
defer func() {
timer.Stop()
t.tokens <- tok
}()
next.ServeHTTP(w, r)
}
return
default:
t.setRetryAfterHeaderIfNeeded(w, false)
http.Error(w, errCapacityExceeded, http.StatusServiceUnavailable)
return
}
}
return http.HandlerFunc(fn)
}
}
// token represents a request that is being processed.
type token struct{}
// throttler limits number of currently processed requests at a time.
type throttler struct {
tokens chan token
backlogTokens chan token
backlogTimeout time.Duration
retryAfterFn func(ctxDone bool) time.Duration
}
// setRetryAfterHeaderIfNeeded sets Retry-After HTTP header if corresponding retryAfterFn option of throttler is initialized.
func (t throttler) setRetryAfterHeaderIfNeeded(w http.ResponseWriter, ctxDone bool) {
if t.retryAfterFn == nil {
return
}
w.Header().Set("Retry-After", strconv.Itoa(int(t.retryAfterFn(ctxDone).Seconds())))
}
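A usage sketch for ThrottleBacklog; the limits, timeout, and slow handler are illustrative only:

package main

import (
    "net/http"
    "time"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

func main() {
    r := chi.NewRouter()
    // At most 10 requests are processed concurrently; up to 50 more wait in the
    // backlog for 30 seconds before receiving 503 Service Unavailable.
    r.Use(middleware.ThrottleBacklog(10, 50, 30*time.Second))
    r.Get("/work", func(w http.ResponseWriter, r *http.Request) {
        time.Sleep(2 * time.Second) // simulate a slow handler
        w.Write([]byte("done"))
    })
    http.ListenAndServe(":3000", r)
}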

View File

@ -1,49 +0,0 @@
package middleware
import (
"context"
"net/http"
"time"
)
// Timeout is a middleware that cancels ctx after a given timeout and returns
// a 504 Gateway Timeout error to the client.
//
// It's required that you select on the ctx.Done() channel to check whether
// the context has reached its deadline and return; otherwise the timeout
// signal will just be ignored.
//
// ie. a route/handler may look like:
//
// r.Get("/long", func(w http.ResponseWriter, r *http.Request) {
// ctx := r.Context()
// processTime := time.Duration(rand.Intn(4)+1) * time.Second
//
// select {
// case <-ctx.Done():
// return
//
// case <-time.After(processTime):
// // The above channel simulates some hard work.
// }
//
// w.Write([]byte("done"))
// })
//
func Timeout(timeout time.Duration) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), timeout)
defer func() {
cancel()
if ctx.Err() == context.DeadlineExceeded {
w.WriteHeader(http.StatusGatewayTimeout)
}
}()
r = r.WithContext(ctx)
next.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
}

View File

@ -1,72 +0,0 @@
package middleware
import (
"context"
"net/http"
"strings"
"github.com/go-chi/chi"
)
var (
// URLFormatCtxKey is the context.Context key to store the URL format data
// for a request.
URLFormatCtxKey = &contextKey{"URLFormat"}
)
// URLFormat is a middleware that parses the url extension from a request path and stores it
// on the context as a string under the key `middleware.URLFormatCtxKey`. The middleware will
// trim the suffix from the routing path and continue routing.
//
// Routers should not include a url parameter for the suffix when using this middleware.
//
// Sample usage, for URL paths `/articles/1`, `/articles/1.json` and `/articles/1.xml`:
//
// func routes() http.Handler {
// r := chi.NewRouter()
// r.Use(middleware.URLFormat)
//
// r.Get("/articles/{id}", ListArticles)
//
// return r
// }
//
// func ListArticles(w http.ResponseWriter, r *http.Request) {
// urlFormat, _ := r.Context().Value(middleware.URLFormatCtxKey).(string)
//
// switch urlFormat {
// case "json":
// render.JSON(w, r, articles)
// case "xml:"
// render.XML(w, r, articles)
// default:
// render.JSON(w, r, articles)
// }
// }
//
func URLFormat(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var format string
path := r.URL.Path
if strings.Index(path, ".") > 0 {
base := strings.LastIndex(path, "/")
idx := strings.Index(path[base:], ".")
if idx > 0 {
idx += base
format = path[idx+1:]
rctx := chi.RouteContext(r.Context())
rctx.RoutePath = path[:idx]
}
}
r = r.WithContext(context.WithValue(ctx, URLFormatCtxKey, format))
next.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}

View File

@ -1,17 +0,0 @@
package middleware
import (
"context"
"net/http"
)
// WithValue is a middleware that sets a given key/value in a context chain.
func WithValue(key interface{}, val interface{}) func(next http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
fn := func(w http.ResponseWriter, r *http.Request) {
r = r.WithContext(context.WithValue(r.Context(), key, val))
next.ServeHTTP(w, r)
}
return http.HandlerFunc(fn)
}
}
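A usage sketch for WithValue; the key type, key name, and value are illustrative only and follow the unexported-key pattern used elsewhere in this package:

package main

import (
    "fmt"
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

// ctxKey is an illustrative unexported context key type.
type ctxKey string

func main() {
    r := chi.NewRouter()
    // Inject a build version into every request context; handlers read it back
    // with the same key.
    r.Use(middleware.WithValue(ctxKey("version"), "v1.2.3"))
    r.Get("/version", func(w http.ResponseWriter, r *http.Request) {
        v, _ := r.Context().Value(ctxKey("version")).(string)
        fmt.Fprintf(w, "running %s", v)
    })
    http.ListenAndServe(":3000", r)
}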

View File

@ -1,180 +0,0 @@
package middleware
// The original work was derived from Goji's middleware, source:
// https://github.com/zenazn/goji/tree/master/web/middleware
import (
"bufio"
"io"
"net"
"net/http"
)
// NewWrapResponseWriter wraps an http.ResponseWriter, returning a proxy that allows you to
// hook into various parts of the response process.
func NewWrapResponseWriter(w http.ResponseWriter, protoMajor int) WrapResponseWriter {
_, fl := w.(http.Flusher)
bw := basicWriter{ResponseWriter: w}
if protoMajor == 2 {
_, ps := w.(http.Pusher)
if fl && ps {
return &http2FancyWriter{bw}
}
} else {
_, hj := w.(http.Hijacker)
_, rf := w.(io.ReaderFrom)
if fl && hj && rf {
return &httpFancyWriter{bw}
}
}
if fl {
return &flushWriter{bw}
}
return &bw
}
// WrapResponseWriter is a proxy around an http.ResponseWriter that allows you to hook
// into various parts of the response process.
type WrapResponseWriter interface {
http.ResponseWriter
// Status returns the HTTP status of the response, or 0 if one has not
// yet been sent.
Status() int
// BytesWritten returns the total number of bytes sent to the client.
BytesWritten() int
// Tee causes the response body to be written to the given io.Writer in
// addition to proxying the writes through. Only one io.Writer can be
// tee'd to at once: setting a second one will overwrite the first.
// Writes will be sent to the proxy before being written to this
// io.Writer. It is illegal for the tee'd writer to be modified
// concurrently with writes.
Tee(io.Writer)
// Unwrap returns the original proxied target.
Unwrap() http.ResponseWriter
}
// basicWriter wraps an http.ResponseWriter and implements the minimal
// WrapResponseWriter interface.
type basicWriter struct {
http.ResponseWriter
wroteHeader bool
code int
bytes int
tee io.Writer
}
func (b *basicWriter) WriteHeader(code int) {
if !b.wroteHeader {
b.code = code
b.wroteHeader = true
b.ResponseWriter.WriteHeader(code)
}
}
func (b *basicWriter) Write(buf []byte) (int, error) {
b.maybeWriteHeader()
n, err := b.ResponseWriter.Write(buf)
if b.tee != nil {
_, err2 := b.tee.Write(buf[:n])
// Prefer errors generated by the proxied writer.
if err == nil {
err = err2
}
}
b.bytes += n
return n, err
}
func (b *basicWriter) maybeWriteHeader() {
if !b.wroteHeader {
b.WriteHeader(http.StatusOK)
}
}
func (b *basicWriter) Status() int {
return b.code
}
func (b *basicWriter) BytesWritten() int {
return b.bytes
}
func (b *basicWriter) Tee(w io.Writer) {
b.tee = w
}
func (b *basicWriter) Unwrap() http.ResponseWriter {
return b.ResponseWriter
}
type flushWriter struct {
basicWriter
}
func (f *flushWriter) Flush() {
f.wroteHeader = true
fl := f.basicWriter.ResponseWriter.(http.Flusher)
fl.Flush()
}
var _ http.Flusher = &flushWriter{}
// httpFancyWriter is an HTTP writer that additionally satisfies
// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case
// of wrapping the http.ResponseWriter that package http gives you, in order to
// make the wrapper support the full method set of the underlying writer.
type httpFancyWriter struct {
basicWriter
}
func (f *httpFancyWriter) Flush() {
f.wroteHeader = true
fl := f.basicWriter.ResponseWriter.(http.Flusher)
fl.Flush()
}
func (f *httpFancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hj := f.basicWriter.ResponseWriter.(http.Hijacker)
return hj.Hijack()
}
func (f *http2FancyWriter) Push(target string, opts *http.PushOptions) error {
return f.basicWriter.ResponseWriter.(http.Pusher).Push(target, opts)
}
func (f *httpFancyWriter) ReadFrom(r io.Reader) (int64, error) {
if f.basicWriter.tee != nil {
n, err := io.Copy(&f.basicWriter, r)
f.basicWriter.bytes += int(n)
return n, err
}
rf := f.basicWriter.ResponseWriter.(io.ReaderFrom)
f.basicWriter.maybeWriteHeader()
n, err := rf.ReadFrom(r)
f.basicWriter.bytes += int(n)
return n, err
}
var _ http.Flusher = &httpFancyWriter{}
var _ http.Hijacker = &httpFancyWriter{}
var _ http.Pusher = &http2FancyWriter{}
var _ io.ReaderFrom = &httpFancyWriter{}
// http2FancyWriter is an HTTP/2 writer that additionally satisfies
// http.Flusher and http.Pusher. It exists for the common case
// of wrapping the http.ResponseWriter that package http gives you, in order to
// make the wrapper support the full method set of the underlying writer.
type http2FancyWriter struct {
basicWriter
}
func (f *http2FancyWriter) Flush() {
f.wroteHeader = true
fl := f.basicWriter.ResponseWriter.(http.Flusher)
fl.Flush()
}
var _ http.Flusher = &http2FancyWriter{}
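A sketch of a small custom middleware built on NewWrapResponseWriter, mirroring what Logger does internally; the middleware name, log format, and route are illustrative only:

package main

import (
    "log"
    "net/http"

    "github.com/go-chi/chi"
    "github.com/go-chi/chi/middleware"
)

// statusRecorder wraps the response writer so the final status and byte count
// can be inspected after the handler has run.
func statusRecorder(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        ww := middleware.NewWrapResponseWriter(w, r.ProtoMajor)
        next.ServeHTTP(ww, r)
        // Status() and BytesWritten() are only meaningful once the handler returned.
        log.Printf("%s %s -> %d (%d bytes)", r.Method, r.URL.Path, ww.Status(), ww.BytesWritten())
    })
}

func main() {
    r := chi.NewRouter()
    r.Use(statusRecorder)
    r.Get("/", func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("hello"))
    })
    http.ListenAndServe(":3000", r)
}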

466
vendor/github.com/go-chi/chi/mux.go generated vendored
View File

@ -1,466 +0,0 @@
package chi
import (
"context"
"fmt"
"net/http"
"strings"
"sync"
)
var _ Router = &Mux{}
// Mux is a simple HTTP route multiplexer that parses a request path,
// records any URL params, and executes an end handler. It implements
// the http.Handler interface and is friendly with the standard library.
//
// Mux is designed to be fast, minimal and offer a powerful API for building
// modular and composable HTTP services with a large set of handlers. It's
// particularly useful for writing large REST API services that break a handler
// into many smaller parts composed of middlewares and end handlers.
type Mux struct {
// The radix trie router
tree *node
// The middleware stack
middlewares []func(http.Handler) http.Handler
// Controls the behaviour of middleware chain generation when a mux
// is registered as an inline group inside another mux.
inline bool
parent *Mux
// The computed mux handler made of the chained middleware stack and
// the tree router
handler http.Handler
// Routing context pool
pool *sync.Pool
// Custom route not found handler
notFoundHandler http.HandlerFunc
// Custom method not allowed handler
methodNotAllowedHandler http.HandlerFunc
}
// NewMux returns a newly initialized Mux object that implements the Router
// interface.
func NewMux() *Mux {
mux := &Mux{tree: &node{}, pool: &sync.Pool{}}
mux.pool.New = func() interface{} {
return NewRouteContext()
}
return mux
}
// ServeHTTP is the single method of the http.Handler interface that makes
// Mux interoperable with the standard library. It uses a sync.Pool to get and
// reuse routing contexts for each request.
func (mx *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Ensure the mux has some routes defined
if mx.handler == nil {
mx.NotFoundHandler().ServeHTTP(w, r)
return
}
// Check if a routing context already exists from a parent router.
rctx, _ := r.Context().Value(RouteCtxKey).(*Context)
if rctx != nil {
mx.handler.ServeHTTP(w, r)
return
}
// Fetch a RouteContext object from the sync pool, and call the computed
// mx.handler that is comprised of mx.middlewares + mx.routeHTTP.
// Once the request is finished, reset the routing context and put it back
// into the pool for reuse from another request.
rctx = mx.pool.Get().(*Context)
rctx.Reset()
rctx.Routes = mx
// NOTE: r.WithContext() causes 2 allocations and context.WithValue() causes 1 allocation
r = r.WithContext(context.WithValue(r.Context(), RouteCtxKey, rctx))
// Serve the request and, once it's done, put the routing context back in the sync pool
mx.handler.ServeHTTP(w, r)
mx.pool.Put(rctx)
}
// Use appends a middleware handler to the Mux middleware stack.
//
// The middleware stack for any Mux will execute before searching for a matching
// route to a specific handler, which provides opportunity to respond early,
// change the course of the request execution, or set request-scoped values for
// the next http.Handler.
func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) {
if mx.handler != nil {
panic("chi: all middlewares must be defined before routes on a mux")
}
mx.middlewares = append(mx.middlewares, middlewares...)
}
// Handle adds the route `pattern` that matches any http method to
// execute the `handler` http.Handler.
func (mx *Mux) Handle(pattern string, handler http.Handler) {
mx.handle(mALL, pattern, handler)
}
// HandleFunc adds the route `pattern` that matches any http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {
mx.handle(mALL, pattern, handlerFn)
}
// Method adds the route `pattern` that matches `method` http method to
// execute the `handler` http.Handler.
func (mx *Mux) Method(method, pattern string, handler http.Handler) {
m, ok := methodMap[strings.ToUpper(method)]
if !ok {
panic(fmt.Sprintf("chi: '%s' http method is not supported.", method))
}
mx.handle(m, pattern, handler)
}
// MethodFunc adds the route `pattern` that matches `method` http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) MethodFunc(method, pattern string, handlerFn http.HandlerFunc) {
mx.Method(method, pattern, handlerFn)
}
// Connect adds the route `pattern` that matches a CONNECT http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Connect(pattern string, handlerFn http.HandlerFunc) {
mx.handle(mCONNECT, pattern, handlerFn)
}
// Delete adds the route `pattern` that matches a DELETE http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Delete(pattern string, handlerFn http.HandlerFunc) {
mx.handle(mDELETE, pattern, handlerFn)
}
// Get adds the route `pattern` that matches a GET http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Get(pattern string, handlerFn http.HandlerFunc) {
mx.handle(mGET, pattern, handlerFn)
}
// Head adds the route `pattern` that matches a HEAD http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Head(pattern string, handlerFn http.HandlerFunc) {
mx.handle(mHEAD, pattern, handlerFn)
}
// Options adds the route `pattern` that matches a OPTIONS http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Options(pattern string, handlerFn http.HandlerFunc) {
mx.handle(mOPTIONS, pattern, handlerFn)
}
// Patch adds the route `pattern` that matches a PATCH http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Patch(pattern string, handlerFn http.HandlerFunc) {
mx.handle(mPATCH, pattern, handlerFn)
}
// Post adds the route `pattern` that matches a POST http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Post(pattern string, handlerFn http.HandlerFunc) {
mx.handle(mPOST, pattern, handlerFn)
}
// Put adds the route `pattern` that matches a PUT http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Put(pattern string, handlerFn http.HandlerFunc) {
mx.handle(mPUT, pattern, handlerFn)
}
// Trace adds the route `pattern` that matches a TRACE http method to
// execute the `handlerFn` http.HandlerFunc.
func (mx *Mux) Trace(pattern string, handlerFn http.HandlerFunc) {
mx.handle(mTRACE, pattern, handlerFn)
}
// NotFound sets a custom http.HandlerFunc for routing paths that could
// not be found. The default 404 handler is `http.NotFound`.
func (mx *Mux) NotFound(handlerFn http.HandlerFunc) {
// Build NotFound handler chain
m := mx
hFn := handlerFn
if mx.inline && mx.parent != nil {
m = mx.parent
hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
}
// Update the notFoundHandler from this point forward
m.notFoundHandler = hFn
m.updateSubRoutes(func(subMux *Mux) {
if subMux.notFoundHandler == nil {
subMux.NotFound(hFn)
}
})
}
// MethodNotAllowed sets a custom http.HandlerFunc for routing paths where the
// method is unresolved. The default handler returns a 405 with an empty body.
func (mx *Mux) MethodNotAllowed(handlerFn http.HandlerFunc) {
// Build MethodNotAllowed handler chain
m := mx
hFn := handlerFn
if mx.inline && mx.parent != nil {
m = mx.parent
hFn = Chain(mx.middlewares...).HandlerFunc(hFn).ServeHTTP
}
// Update the methodNotAllowedHandler from this point forward
m.methodNotAllowedHandler = hFn
m.updateSubRoutes(func(subMux *Mux) {
if subMux.methodNotAllowedHandler == nil {
subMux.MethodNotAllowed(hFn)
}
})
}
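// Editor's note: illustrative sketch only, not part of the vendored chi source.
// It wires the NotFound and MethodNotAllowed setters above; the handler bodies
// are hypothetical.
func exampleCustomErrorHandlers() *Mux {
	r := NewRouter()
	r.NotFound(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "no such route", http.StatusNotFound)
	})
	r.MethodNotAllowed(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	})
	return r
}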
// With adds inline middlewares for an endpoint handler.
func (mx *Mux) With(middlewares ...func(http.Handler) http.Handler) Router {
// As in handle(), we must build the mux handler once additional middleware
// registration is no longer allowed for this stack, which is the case here.
if !mx.inline && mx.handler == nil {
mx.buildRouteHandler()
}
// Copy middlewares from parent inline muxes
var mws Middlewares
if mx.inline {
mws = make(Middlewares, len(mx.middlewares))
copy(mws, mx.middlewares)
}
mws = append(mws, middlewares...)
im := &Mux{
pool: mx.pool, inline: true, parent: mx, tree: mx.tree, middlewares: mws,
notFoundHandler: mx.notFoundHandler, methodNotAllowedHandler: mx.methodNotAllowedHandler,
}
return im
}
// Group creates a new inline-Mux with a fresh middleware stack. It's useful
// for a group of handlers along the same routing path that use an additional
// set of middlewares. See _examples/.
func (mx *Mux) Group(fn func(r Router)) Router {
im := mx.With().(*Mux)
if fn != nil {
fn(im)
}
return im
}
// Route creates a new Mux with a fresh middleware stack and mounts it
// along the `pattern` as a subrouter. Effectively, this is a short-hand
// call to Mount. See _examples/.
func (mx *Mux) Route(pattern string, fn func(r Router)) Router {
subRouter := NewRouter()
if fn != nil {
fn(subRouter)
}
mx.Mount(pattern, subRouter)
return subRouter
}
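// Editor's note: illustrative sketch only, not part of the vendored chi source.
// Route mounts a fresh sub-router under a pattern, and Group applies extra
// middleware to a subset of its routes; requireAuth is a hypothetical middleware.
func exampleRouteAndGroup(requireAuth func(http.Handler) http.Handler) *Mux {
	r := NewRouter()
	r.Route("/articles", func(sr Router) {
		sr.Get("/", func(w http.ResponseWriter, _ *http.Request) {
			w.Write([]byte("article list"))
		})
		sr.Group(func(g Router) {
			g.Use(requireAuth)
			g.Post("/", func(w http.ResponseWriter, _ *http.Request) {
				w.WriteHeader(http.StatusCreated)
			})
		})
	})
	return r
}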
// Mount attaches another http.Handler or chi Router as a subrouter along a routing
// path. It's very useful for splitting up a large API into many independent routers
// and composing them into a single service using Mount. See _examples/.
//
// Note that Mount() simply sets a wildcard along the `pattern` that will continue
// routing at the `handler`, which in most cases is another chi.Router. As a result,
// if you define two Mount() routes on the exact same pattern the mount will panic.
func (mx *Mux) Mount(pattern string, handler http.Handler) {
// Provide runtime safety for ensuring a pattern isn't mounted on an existing
// routing pattern.
if mx.tree.findPattern(pattern+"*") || mx.tree.findPattern(pattern+"/*") {
panic(fmt.Sprintf("chi: attempting to Mount() a handler on an existing path, '%s'", pattern))
}
// Assign the sub-router the parent's NotFound and MethodNotAllowed handlers if they aren't already set.
subr, ok := handler.(*Mux)
if ok && subr.notFoundHandler == nil && mx.notFoundHandler != nil {
subr.NotFound(mx.notFoundHandler)
}
if ok && subr.methodNotAllowedHandler == nil && mx.methodNotAllowedHandler != nil {
subr.MethodNotAllowed(mx.methodNotAllowedHandler)
}
mountHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
rctx := RouteContext(r.Context())
rctx.RoutePath = mx.nextRoutePath(rctx)
handler.ServeHTTP(w, r)
})
if pattern == "" || pattern[len(pattern)-1] != '/' {
mx.handle(mALL|mSTUB, pattern, mountHandler)
mx.handle(mALL|mSTUB, pattern+"/", mountHandler)
pattern += "/"
}
method := mALL
subroutes, _ := handler.(Routes)
if subroutes != nil {
method |= mSTUB
}
n := mx.handle(method, pattern+"*", mountHandler)
if subroutes != nil {
n.subroutes = subroutes
}
}
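// Editor's note: illustrative sketch only, not part of the vendored chi source.
// An independently defined sub-router is mounted under /admin; the dashboard
// route is hypothetical.
func exampleMount() *Mux {
	admin := NewRouter()
	admin.Get("/dashboard", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("admin dashboard"))
	})
	r := NewRouter()
	r.Mount("/admin", admin)
	return r
}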
// Routes returns a slice of routing information from the tree,
// useful for traversing available routes of a router.
func (mx *Mux) Routes() []Route {
return mx.tree.routes()
}
// Middlewares returns a slice of middleware handler functions.
func (mx *Mux) Middlewares() Middlewares {
return mx.middlewares
}
// Match searches the routing tree for a handler that matches the method/path.
// It's similar to routing an http request, but without executing the handler
// thereafter.
//
// Note: the *Context state is updated during execution, so manage
// the state carefully or make a NewRouteContext().
func (mx *Mux) Match(rctx *Context, method, path string) bool {
m, ok := methodMap[method]
if !ok {
return false
}
node, _, h := mx.tree.FindRoute(rctx, m, path)
if node != nil && node.subroutes != nil {
rctx.RoutePath = mx.nextRoutePath(rctx)
return node.subroutes.Match(rctx, method, rctx.RoutePath)
}
return h != nil
}
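// Editor's note: illustrative sketch only, not part of the vendored chi source.
// Match probes the routing tree without serving a request; NewRouteContext is
// defined elsewhere in this package, and the path is hypothetical.
func exampleMatch(mx *Mux) bool {
	rctx := NewRouteContext()
	return mx.Match(rctx, http.MethodGet, "/users/123")
}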
// NotFoundHandler returns the default Mux 404 responder whenever a route
// cannot be found.
func (mx *Mux) NotFoundHandler() http.HandlerFunc {
if mx.notFoundHandler != nil {
return mx.notFoundHandler
}
return http.NotFound
}
// MethodNotAllowedHandler returns the default Mux 405 responder whenever
// a method cannot be resolved for a route.
func (mx *Mux) MethodNotAllowedHandler() http.HandlerFunc {
if mx.methodNotAllowedHandler != nil {
return mx.methodNotAllowedHandler
}
return methodNotAllowedHandler
}
// buildRouteHandler builds the single mux handler that is a chain of the middleware
// stack, as defined by calls to Use(), and the tree router (Mux) itself. After this
// point, no other middlewares can be registered on this Mux's stack. But you can still
// compose additional middlewares via Group()'s or using a chained middleware handler.
func (mx *Mux) buildRouteHandler() {
mx.handler = chain(mx.middlewares, http.HandlerFunc(mx.routeHTTP))
}
// handle registers an http.Handler in the routing tree for a particular http method
// and routing pattern.
func (mx *Mux) handle(method methodTyp, pattern string, handler http.Handler) *node {
if len(pattern) == 0 || pattern[0] != '/' {
panic(fmt.Sprintf("chi: routing pattern must begin with '/' in '%s'", pattern))
}
// Build the computed routing handler for this routing pattern.
if !mx.inline && mx.handler == nil {
mx.buildRouteHandler()
}
// Build endpoint handler with inline middlewares for the route
var h http.Handler
if mx.inline {
mx.handler = http.HandlerFunc(mx.routeHTTP)
h = Chain(mx.middlewares...).Handler(handler)
} else {
h = handler
}
// Add the endpoint to the tree and return the node
return mx.tree.InsertRoute(method, pattern, h)
}
// routeHTTP routes an http.Request through the Mux routing tree to serve
// the matching handler for a particular http method.
func (mx *Mux) routeHTTP(w http.ResponseWriter, r *http.Request) {
// Grab the route context object
rctx := r.Context().Value(RouteCtxKey).(*Context)
// The request routing path
routePath := rctx.RoutePath
if routePath == "" {
if r.URL.RawPath != "" {
routePath = r.URL.RawPath
} else {
routePath = r.URL.Path
}
}
// Check if method is supported by chi
if rctx.RouteMethod == "" {
rctx.RouteMethod = r.Method
}
method, ok := methodMap[rctx.RouteMethod]
if !ok {
mx.MethodNotAllowedHandler().ServeHTTP(w, r)
return
}
// Find the route
if _, _, h := mx.tree.FindRoute(rctx, method, routePath); h != nil {
h.ServeHTTP(w, r)
return
}
if rctx.methodNotAllowed {
mx.MethodNotAllowedHandler().ServeHTTP(w, r)
} else {
mx.NotFoundHandler().ServeHTTP(w, r)
}
}
func (mx *Mux) nextRoutePath(rctx *Context) string {
routePath := "/"
nx := len(rctx.routeParams.Keys) - 1 // index of last param in list
if nx >= 0 && rctx.routeParams.Keys[nx] == "*" && len(rctx.routeParams.Values) > nx {
routePath = "/" + rctx.routeParams.Values[nx]
}
return routePath
}
// Recursively update data on child routers.
func (mx *Mux) updateSubRoutes(fn func(subMux *Mux)) {
for _, r := range mx.tree.routes() {
subMux, ok := r.SubRoutes.(*Mux)
if !ok {
continue
}
fn(subMux)
}
}
// methodNotAllowedHandler is a helper function to respond with a 405,
// method not allowed.
func methodNotAllowedHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(405)
w.Write(nil)
}

865
vendor/github.com/go-chi/chi/tree.go generated vendored
View File

@ -1,865 +0,0 @@
package chi
// Radix tree implementation below is based on the original work by
// Armon Dadgar in https://github.com/armon/go-radix/blob/master/radix.go
// (MIT licensed). It's been heavily modified for use as an HTTP routing tree.
import (
"fmt"
"math"
"net/http"
"regexp"
"sort"
"strconv"
"strings"
)
type methodTyp int
const (
mSTUB methodTyp = 1 << iota
mCONNECT
mDELETE
mGET
mHEAD
mOPTIONS
mPATCH
mPOST
mPUT
mTRACE
)
var mALL = mCONNECT | mDELETE | mGET | mHEAD |
mOPTIONS | mPATCH | mPOST | mPUT | mTRACE
var methodMap = map[string]methodTyp{
http.MethodConnect: mCONNECT,
http.MethodDelete: mDELETE,
http.MethodGet: mGET,
http.MethodHead: mHEAD,
http.MethodOptions: mOPTIONS,
http.MethodPatch: mPATCH,
http.MethodPost: mPOST,
http.MethodPut: mPUT,
http.MethodTrace: mTRACE,
}
// RegisterMethod adds support for custom HTTP method handlers, available
// via Router#Method and Router#MethodFunc
func RegisterMethod(method string) {
if method == "" {
return
}
method = strings.ToUpper(method)
if _, ok := methodMap[method]; ok {
return
}
n := len(methodMap)
if n > strconv.IntSize {
panic(fmt.Sprintf("chi: max number of methods reached (%d)", strconv.IntSize))
}
mt := methodTyp(math.Exp2(float64(n)))
methodMap[method] = mt
mALL |= mt
}
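// Editor's note: illustrative sketch only, not part of the vendored chi source.
// A custom HTTP verb is registered and then routed via Method; "REPORT" and the
// /calendar pattern are hypothetical.
func exampleRegisterMethod() *Mux {
	RegisterMethod("REPORT")
	r := NewRouter()
	r.Method("REPORT", "/calendar", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	return r
}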
type nodeTyp uint8
const (
ntStatic nodeTyp = iota // /home
ntRegexp // /{id:[0-9]+}
ntParam // /{user}
ntCatchAll // /api/v1/*
)
type node struct {
// node type: static, regexp, param, catchAll
typ nodeTyp
// first byte of the prefix
label byte
// first byte of the child prefix
tail byte
// prefix is the common prefix we ignore
prefix string
// regexp matcher for regexp nodes
rex *regexp.Regexp
// HTTP handler endpoints on the leaf node
endpoints endpoints
// subroutes on the leaf node
subroutes Routes
// child nodes should be stored in-order for iteration,
// in groups of the node type.
children [ntCatchAll + 1]nodes
}
// endpoints is a mapping of http method constants to handlers
// for a given route.
type endpoints map[methodTyp]*endpoint
type endpoint struct {
// endpoint handler
handler http.Handler
// pattern is the routing pattern for handler nodes
pattern string
// parameter keys recorded on handler nodes
paramKeys []string
}
func (s endpoints) Value(method methodTyp) *endpoint {
mh, ok := s[method]
if !ok {
mh = &endpoint{}
s[method] = mh
}
return mh
}
func (n *node) InsertRoute(method methodTyp, pattern string, handler http.Handler) *node {
var parent *node
search := pattern
for {
// Handle key exhaustion
if len(search) == 0 {
// Insert or update the node's leaf handler
n.setEndpoint(method, handler, pattern)
return n
}
// We're going to be searching for a wild node next,
// in this case, we need to get the tail
var label = search[0]
var segTail byte
var segEndIdx int
var segTyp nodeTyp
var segRexpat string
if label == '{' || label == '*' {
segTyp, _, segRexpat, segTail, _, segEndIdx = patNextSegment(search)
}
var prefix string
if segTyp == ntRegexp {
prefix = segRexpat
}
// Look for the edge to attach to
parent = n
n = n.getEdge(segTyp, label, segTail, prefix)
// No edge, create one
if n == nil {
child := &node{label: label, tail: segTail, prefix: search}
hn := parent.addChild(child, search)
hn.setEndpoint(method, handler, pattern)
return hn
}
// Found an edge to match the pattern
if n.typ > ntStatic {
// We found a param node, trim the param from the search path and continue.
// This param/wild pattern segment would already be on the tree from a previous
// call to addChild when creating a new node.
search = search[segEndIdx:]
continue
}
// Static nodes fall below here.
// Determine longest prefix of the search key on match.
commonPrefix := longestPrefix(search, n.prefix)
if commonPrefix == len(n.prefix) {
// the common prefix is as long as the current node's prefix we're attempting to insert.
// keep the search going.
search = search[commonPrefix:]
continue
}
// Split the node
child := &node{
typ: ntStatic,
prefix: search[:commonPrefix],
}
parent.replaceChild(search[0], segTail, child)
// Restore the existing node
n.label = n.prefix[commonPrefix]
n.prefix = n.prefix[commonPrefix:]
child.addChild(n, n.prefix)
// If the new key is a subset, set the method/handler on this node and finish.
search = search[commonPrefix:]
if len(search) == 0 {
child.setEndpoint(method, handler, pattern)
return child
}
// Create a new edge for the node
subchild := &node{
typ: ntStatic,
label: search[0],
prefix: search,
}
hn := child.addChild(subchild, search)
hn.setEndpoint(method, handler, pattern)
return hn
}
}
// addChild appends the new `child` node to the tree using the `pattern` as the trie key.
// For a URL router like chi's, we split the static, param, regexp and wildcard segments
// into different nodes. In addition, addChild will recursively call itself until every
// pattern segment is added to the url pattern tree as individual nodes, depending on type.
func (n *node) addChild(child *node, prefix string) *node {
search := prefix
// handler leaf node added to the tree is the child.
// this may be overridden later down the flow
hn := child
// Parse next segment
segTyp, _, segRexpat, segTail, segStartIdx, segEndIdx := patNextSegment(search)
// Add child depending on next up segment
switch segTyp {
case ntStatic:
// Search prefix is all static (that is, has no params in path)
// noop
default:
// Search prefix contains a param, regexp or wildcard
if segTyp == ntRegexp {
rex, err := regexp.Compile(segRexpat)
if err != nil {
panic(fmt.Sprintf("chi: invalid regexp pattern '%s' in route param", segRexpat))
}
child.prefix = segRexpat
child.rex = rex
}
if segStartIdx == 0 {
// Route starts with a param
child.typ = segTyp
if segTyp == ntCatchAll {
segStartIdx = -1
} else {
segStartIdx = segEndIdx
}
if segStartIdx < 0 {
segStartIdx = len(search)
}
child.tail = segTail // for params, we set the tail
if segStartIdx != len(search) {
// add static edge for the remaining part, split the end.
// it's not possible to have adjacent param nodes, so it's certainly
// going to be a static node next.
search = search[segStartIdx:] // advance search position
nn := &node{
typ: ntStatic,
label: search[0],
prefix: search,
}
hn = child.addChild(nn, search)
}
} else if segStartIdx > 0 {
// Route has some param
// starts with a static segment
child.typ = ntStatic
child.prefix = search[:segStartIdx]
child.rex = nil
// add the param edge node
search = search[segStartIdx:]
nn := &node{
typ: segTyp,
label: search[0],
tail: segTail,
}
hn = child.addChild(nn, search)
}
}
n.children[child.typ] = append(n.children[child.typ], child)
n.children[child.typ].Sort()
return hn
}
func (n *node) replaceChild(label, tail byte, child *node) {
for i := 0; i < len(n.children[child.typ]); i++ {
if n.children[child.typ][i].label == label && n.children[child.typ][i].tail == tail {
n.children[child.typ][i] = child
n.children[child.typ][i].label = label
n.children[child.typ][i].tail = tail
return
}
}
panic("chi: replacing missing child")
}
func (n *node) getEdge(ntyp nodeTyp, label, tail byte, prefix string) *node {
nds := n.children[ntyp]
for i := 0; i < len(nds); i++ {
if nds[i].label == label && nds[i].tail == tail {
if ntyp == ntRegexp && nds[i].prefix != prefix {
continue
}
return nds[i]
}
}
return nil
}
func (n *node) setEndpoint(method methodTyp, handler http.Handler, pattern string) {
// Set the handler for the method type on the node
if n.endpoints == nil {
n.endpoints = make(endpoints)
}
paramKeys := patParamKeys(pattern)
if method&mSTUB == mSTUB {
n.endpoints.Value(mSTUB).handler = handler
}
if method&mALL == mALL {
h := n.endpoints.Value(mALL)
h.handler = handler
h.pattern = pattern
h.paramKeys = paramKeys
for _, m := range methodMap {
h := n.endpoints.Value(m)
h.handler = handler
h.pattern = pattern
h.paramKeys = paramKeys
}
} else {
h := n.endpoints.Value(method)
h.handler = handler
h.pattern = pattern
h.paramKeys = paramKeys
}
}
func (n *node) FindRoute(rctx *Context, method methodTyp, path string) (*node, endpoints, http.Handler) {
// Reset the context routing pattern and params
rctx.routePattern = ""
rctx.routeParams.Keys = rctx.routeParams.Keys[:0]
rctx.routeParams.Values = rctx.routeParams.Values[:0]
// Find the routing handlers for the path
rn := n.findRoute(rctx, method, path)
if rn == nil {
return nil, nil, nil
}
// Record the routing params in the request lifecycle
rctx.URLParams.Keys = append(rctx.URLParams.Keys, rctx.routeParams.Keys...)
rctx.URLParams.Values = append(rctx.URLParams.Values, rctx.routeParams.Values...)
// Record the routing pattern in the request lifecycle
if rn.endpoints[method].pattern != "" {
rctx.routePattern = rn.endpoints[method].pattern
rctx.RoutePatterns = append(rctx.RoutePatterns, rctx.routePattern)
}
return rn, rn.endpoints, rn.endpoints[method].handler
}
// Recursive edge traversal by checking all nodeTyp groups along the way.
// It's like searching through a multi-dimensional radix trie.
func (n *node) findRoute(rctx *Context, method methodTyp, path string) *node {
nn := n
search := path
for t, nds := range nn.children {
ntyp := nodeTyp(t)
if len(nds) == 0 {
continue
}
var xn *node
xsearch := search
var label byte
if search != "" {
label = search[0]
}
switch ntyp {
case ntStatic:
xn = nds.findEdge(label)
if xn == nil || !strings.HasPrefix(xsearch, xn.prefix) {
continue
}
xsearch = xsearch[len(xn.prefix):]
case ntParam, ntRegexp:
// short-circuit and return no matching route for empty param values
if xsearch == "" {
continue
}
// serially loop through each node grouped by the tail delimiter
for idx := 0; idx < len(nds); idx++ {
xn = nds[idx]
// label for param nodes is the delimiter byte
p := strings.IndexByte(xsearch, xn.tail)
if p < 0 {
if xn.tail == '/' {
p = len(xsearch)
} else {
continue
}
}
if ntyp == ntRegexp && xn.rex != nil {
if !xn.rex.Match([]byte(xsearch[:p])) {
continue
}
} else if strings.IndexByte(xsearch[:p], '/') != -1 {
// avoid a match across path segments
continue
}
prevlen := len(rctx.routeParams.Values)
rctx.routeParams.Values = append(rctx.routeParams.Values, xsearch[:p])
xsearch = xsearch[p:]
if len(xsearch) == 0 {
if xn.isLeaf() {
h := xn.endpoints[method]
if h != nil && h.handler != nil {
rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
return xn
}
// flag that the routing context found a route, but not a corresponding
// supported method
rctx.methodNotAllowed = true
}
}
// recursively find the next node on this branch
fin := xn.findRoute(rctx, method, xsearch)
if fin != nil {
return fin
}
// not found on this branch, reset vars
rctx.routeParams.Values = rctx.routeParams.Values[:prevlen]
xsearch = search
}
rctx.routeParams.Values = append(rctx.routeParams.Values, "")
default:
// catch-all nodes
rctx.routeParams.Values = append(rctx.routeParams.Values, search)
xn = nds[0]
xsearch = ""
}
if xn == nil {
continue
}
// did we find it yet?
if len(xsearch) == 0 {
if xn.isLeaf() {
h := xn.endpoints[method]
if h != nil && h.handler != nil {
rctx.routeParams.Keys = append(rctx.routeParams.Keys, h.paramKeys...)
return xn
}
// flag that the routing context found a route, but not a corresponding
// supported method
rctx.methodNotAllowed = true
}
}
// recursively find the next node..
fin := xn.findRoute(rctx, method, xsearch)
if fin != nil {
return fin
}
// Did not find final handler, let's remove the param here if it was set
if xn.typ > ntStatic {
if len(rctx.routeParams.Values) > 0 {
rctx.routeParams.Values = rctx.routeParams.Values[:len(rctx.routeParams.Values)-1]
}
}
}
return nil
}
func (n *node) findEdge(ntyp nodeTyp, label byte) *node {
nds := n.children[ntyp]
num := len(nds)
idx := 0
switch ntyp {
case ntStatic, ntParam, ntRegexp:
i, j := 0, num-1
for i <= j {
idx = i + (j-i)/2
if label > nds[idx].label {
i = idx + 1
} else if label < nds[idx].label {
j = idx - 1
} else {
i = num // breaks cond
}
}
if nds[idx].label != label {
return nil
}
return nds[idx]
default: // catch all
return nds[idx]
}
}
func (n *node) isLeaf() bool {
return n.endpoints != nil
}
func (n *node) findPattern(pattern string) bool {
nn := n
for _, nds := range nn.children {
if len(nds) == 0 {
continue
}
n = nn.findEdge(nds[0].typ, pattern[0])
if n == nil {
continue
}
var idx int
var xpattern string
switch n.typ {
case ntStatic:
idx = longestPrefix(pattern, n.prefix)
if idx < len(n.prefix) {
continue
}
case ntParam, ntRegexp:
idx = strings.IndexByte(pattern, '}') + 1
case ntCatchAll:
idx = longestPrefix(pattern, "*")
default:
panic("chi: unknown node type")
}
xpattern = pattern[idx:]
if len(xpattern) == 0 {
return true
}
return n.findPattern(xpattern)
}
return false
}
func (n *node) routes() []Route {
rts := []Route{}
n.walk(func(eps endpoints, subroutes Routes) bool {
if eps[mSTUB] != nil && eps[mSTUB].handler != nil && subroutes == nil {
return false
}
// Group methodHandlers by unique patterns
pats := make(map[string]endpoints)
for mt, h := range eps {
if h.pattern == "" {
continue
}
p, ok := pats[h.pattern]
if !ok {
p = endpoints{}
pats[h.pattern] = p
}
p[mt] = h
}
for p, mh := range pats {
hs := make(map[string]http.Handler)
if mh[mALL] != nil && mh[mALL].handler != nil {
hs["*"] = mh[mALL].handler
}
for mt, h := range mh {
if h.handler == nil {
continue
}
m := methodTypString(mt)
if m == "" {
continue
}
hs[m] = h.handler
}
rt := Route{p, hs, subroutes}
rts = append(rts, rt)
}
return false
})
return rts
}
func (n *node) walk(fn func(eps endpoints, subroutes Routes) bool) bool {
// Visit the leaf values if any
if (n.endpoints != nil || n.subroutes != nil) && fn(n.endpoints, n.subroutes) {
return true
}
// Recurse on the children
for _, ns := range n.children {
for _, cn := range ns {
if cn.walk(fn) {
return true
}
}
}
return false
}
// patNextSegment returns the next segment details from a pattern:
// node type, param key, regexp string, param tail byte, param starting index, param ending index
func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) {
ps := strings.Index(pattern, "{")
ws := strings.Index(pattern, "*")
if ps < 0 && ws < 0 {
return ntStatic, "", "", 0, 0, len(pattern) // we return the entire thing
}
// Sanity check
if ps >= 0 && ws >= 0 && ws < ps {
panic("chi: wildcard '*' must be the last pattern in a route, otherwise use a '{param}'")
}
var tail byte = '/' // Default endpoint tail to / byte
if ps >= 0 {
// Param/Regexp pattern is next
nt := ntParam
// Read to the closing '}', taking into account opens and closes in the curly-brace count (cc)
cc := 0
pe := ps
for i, c := range pattern[ps:] {
if c == '{' {
cc++
} else if c == '}' {
cc--
if cc == 0 {
pe = ps + i
break
}
}
}
if pe == ps {
panic("chi: route param closing delimiter '}' is missing")
}
key := pattern[ps+1 : pe]
pe++ // set end to next position
if pe < len(pattern) {
tail = pattern[pe]
}
var rexpat string
if idx := strings.Index(key, ":"); idx >= 0 {
nt = ntRegexp
rexpat = key[idx+1:]
key = key[:idx]
}
if len(rexpat) > 0 {
if rexpat[0] != '^' {
rexpat = "^" + rexpat
}
if rexpat[len(rexpat)-1] != '$' {
rexpat += "$"
}
}
return nt, key, rexpat, tail, ps, pe
}
// Wildcard pattern as finale
if ws < len(pattern)-1 {
panic("chi: wildcard '*' must be the last value in a route. trim trailing text or use a '{param}' instead")
}
return ntCatchAll, "*", "", 0, ws, len(pattern)
}
func patParamKeys(pattern string) []string {
pat := pattern
paramKeys := []string{}
for {
ptyp, paramKey, _, _, _, e := patNextSegment(pat)
if ptyp == ntStatic {
return paramKeys
}
for i := 0; i < len(paramKeys); i++ {
if paramKeys[i] == paramKey {
panic(fmt.Sprintf("chi: routing pattern '%s' contains duplicate param key, '%s'", pattern, paramKey))
}
}
paramKeys = append(paramKeys, paramKey)
pat = pat[e:]
}
}
// longestPrefix finds the length of the shared prefix
// of two strings
func longestPrefix(k1, k2 string) int {
max := len(k1)
if l := len(k2); l < max {
max = l
}
var i int
for i = 0; i < max; i++ {
if k1[i] != k2[i] {
break
}
}
return i
}
func methodTypString(method methodTyp) string {
for s, t := range methodMap {
if method == t {
return s
}
}
return ""
}
type nodes []*node
// Sort the list of nodes by label
func (ns nodes) Sort() { sort.Sort(ns); ns.tailSort() }
func (ns nodes) Len() int { return len(ns) }
func (ns nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
func (ns nodes) Less(i, j int) bool { return ns[i].label < ns[j].label }
// tailSort pushes nodes with '/' as the tail to the end of the list for param nodes.
// The list order determines the traversal order.
func (ns nodes) tailSort() {
for i := len(ns) - 1; i >= 0; i-- {
if ns[i].typ > ntStatic && ns[i].tail == '/' {
ns.Swap(i, len(ns)-1)
return
}
}
}
func (ns nodes) findEdge(label byte) *node {
num := len(ns)
idx := 0
i, j := 0, num-1
for i <= j {
idx = i + (j-i)/2
if label > ns[idx].label {
i = idx + 1
} else if label < ns[idx].label {
j = idx - 1
} else {
i = num // breaks cond
}
}
if ns[idx].label != label {
return nil
}
return ns[idx]
}
// Route describes the details of a routing handler.
// Handlers map key is an HTTP method
type Route struct {
Pattern string
Handlers map[string]http.Handler
SubRoutes Routes
}
// WalkFunc is the type of the function called for each method and route visited by Walk.
type WalkFunc func(method string, route string, handler http.Handler, middlewares ...func(http.Handler) http.Handler) error
// Walk walks any router tree that implements Routes interface.
func Walk(r Routes, walkFn WalkFunc) error {
return walk(r, walkFn, "")
}
func walk(r Routes, walkFn WalkFunc, parentRoute string, parentMw ...func(http.Handler) http.Handler) error {
for _, route := range r.Routes() {
mws := make([]func(http.Handler) http.Handler, len(parentMw))
copy(mws, parentMw)
mws = append(mws, r.Middlewares()...)
if route.SubRoutes != nil {
if err := walk(route.SubRoutes, walkFn, parentRoute+route.Pattern, mws...); err != nil {
return err
}
continue
}
for method, handler := range route.Handlers {
if method == "*" {
// Ignore a "catchAll" method, since we pass down all the specific methods for each route.
continue
}
fullRoute := parentRoute + route.Pattern
fullRoute = strings.Replace(fullRoute, "/*/", "/", -1)
if chain, ok := handler.(*ChainHandler); ok {
if err := walkFn(method, fullRoute, chain.Endpoint, append(mws, chain.Middlewares...)...); err != nil {
return err
}
} else {
if err := walkFn(method, fullRoute, handler, mws...); err != nil {
return err
}
}
}
}
return nil
}
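// Editor's note: illustrative sketch only, not part of the vendored chi source.
// Walk visits every concrete method/route pair known to a router, which is handy
// for printing a route table at startup.
func examplePrintRoutes(r Routes) error {
	return Walk(r, func(method, route string, _ http.Handler, _ ...func(http.Handler) http.Handler) error {
		fmt.Printf("%s %s\n", method, route)
		return nil
	})
}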

View File

@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

View File

@ -1,26 +0,0 @@
language: go
go:
- 1.13.1
- tip
matrix:
allow_failures:
- go: tip
notifications:
email:
recipients: dean.karn@gmail.com
on_success: change
on_failure: always
before_install:
- go install github.com/mattn/goveralls
# Only clone the most recent commit.
git:
depth: 1
script:
- go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
after_success: |
goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 Go Playground
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,170 +0,0 @@
## locales
<img align="right" src="https://raw.githubusercontent.com/go-playground/locales/master/logo.png">![Project status](https://img.shields.io/badge/version-0.14.1-green.svg)
[![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales)
[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales)
![License](https://img.shields.io/dub/l/vibe-d.svg)
Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within
an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator).
Features
--------
- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1
- [x] Contains Cardinal, Ordinal and Range Plural Rules
- [x] Contains Month, Weekday and Timezone translations built in
- [x] Contains Date & Time formatting functions
- [x] Contains Number, Currency, Accounting and Percent formatting functions
- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
Full Tests
--------------------
I could sure use your help adding tests for every locale; it is a huge undertaking and I just don't have the free time to do it all at the moment.
Any help would be **greatly appreciated!!!!** Please see this [issue](https://github.com/go-playground/locales/issues/1) for details.
Installation
-----------
Use go get
```shell
go get github.com/go-playground/locales
```
NOTES
--------
You'll notice most return types are []byte; this is because most of the time the results will be concatenated with a larger body
of text, which can avoid some allocations if you are already appending to a byte slice. Otherwise, just cast to string.
Usage
-------
```go
package main
import (
"fmt"
"time"
"github.com/go-playground/locales/currency"
"github.com/go-playground/locales/en_CA"
)
func main() {
loc, _ := time.LoadLocation("America/Toronto")
datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc)
l := en_CA.New()
// Dates
fmt.Println(l.FmtDateFull(datetime))
fmt.Println(l.FmtDateLong(datetime))
fmt.Println(l.FmtDateMedium(datetime))
fmt.Println(l.FmtDateShort(datetime))
// Times
fmt.Println(l.FmtTimeFull(datetime))
fmt.Println(l.FmtTimeLong(datetime))
fmt.Println(l.FmtTimeMedium(datetime))
fmt.Println(l.FmtTimeShort(datetime))
// Months Wide
fmt.Println(l.MonthWide(time.January))
fmt.Println(l.MonthWide(time.February))
fmt.Println(l.MonthWide(time.March))
// ...
// Months Abbreviated
fmt.Println(l.MonthAbbreviated(time.January))
fmt.Println(l.MonthAbbreviated(time.February))
fmt.Println(l.MonthAbbreviated(time.March))
// ...
// Months Narrow
fmt.Println(l.MonthNarrow(time.January))
fmt.Println(l.MonthNarrow(time.February))
fmt.Println(l.MonthNarrow(time.March))
// ...
// Weekdays Wide
fmt.Println(l.WeekdayWide(time.Sunday))
fmt.Println(l.WeekdayWide(time.Monday))
fmt.Println(l.WeekdayWide(time.Tuesday))
// ...
// Weekdays Abbreviated
fmt.Println(l.WeekdayAbbreviated(time.Sunday))
fmt.Println(l.WeekdayAbbreviated(time.Monday))
fmt.Println(l.WeekdayAbbreviated(time.Tuesday))
// ...
// Weekdays Short
fmt.Println(l.WeekdayShort(time.Sunday))
fmt.Println(l.WeekdayShort(time.Monday))
fmt.Println(l.WeekdayShort(time.Tuesday))
// ...
// Weekdays Narrow
fmt.Println(l.WeekdayNarrow(time.Sunday))
fmt.Println(l.WeekdayNarrow(time.Monday))
fmt.Println(l.WeekdayNarrow(time.Tuesday))
// ...
var f64 float64
f64 = -10356.4523
// Number
fmt.Println(l.FmtNumber(f64, 2))
// Currency
fmt.Println(l.FmtCurrency(f64, 2, currency.CAD))
fmt.Println(l.FmtCurrency(f64, 2, currency.USD))
// Accounting
fmt.Println(l.FmtAccounting(f64, 2, currency.CAD))
fmt.Println(l.FmtAccounting(f64, 2, currency.USD))
f64 = 78.12
// Percent
fmt.Println(l.FmtPercent(f64, 0))
// Plural Rules for locale, so you know what rules you must cover
fmt.Println(l.PluralsCardinal())
fmt.Println(l.PluralsOrdinal())
// Cardinal Plural Rules
fmt.Println(l.CardinalPluralRule(1, 0))
fmt.Println(l.CardinalPluralRule(1.0, 0))
fmt.Println(l.CardinalPluralRule(1.0, 1))
fmt.Println(l.CardinalPluralRule(3, 0))
// Ordinal Plural Rules
fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st
fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd
fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd
fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th
// Range Plural Rules
fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1
fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2
fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8
}
```
NOTES:
-------
These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/). If you encounter any issues,
I strongly encourage contributing to the CLDR project to get the locale information corrected; the next time
these locales are regenerated, the fix will come along with them.
I do, however, realize that time constraints are often important, so there are two options:
1. Create your own locale: copy, paste and modify, and ensure it complies with the `Translator` interface.
2. Add an exception directly in the locale generation code; once regenerated, the fix will be in place.
Please do not make fixes inside the locale files; they WILL get overwritten when the locales are regenerated.
License
------
Distributed under MIT License, please see license file in code for more details.

View File

@ -1,311 +0,0 @@
package currency
// Type is the currency type associated with the locales currency enum
type Type int
// locale currencies
const (
ADP Type = iota
AED
AFA
AFN
ALK
ALL
AMD
ANG
AOA
AOK
AON
AOR
ARA
ARL
ARM
ARP
ARS
ATS
AUD
AWG
AZM
AZN
BAD
BAM
BAN
BBD
BDT
BEC
BEF
BEL
BGL
BGM
BGN
BGO
BHD
BIF
BMD
BND
BOB
BOL
BOP
BOV
BRB
BRC
BRE
BRL
BRN
BRR
BRZ
BSD
BTN
BUK
BWP
BYB
BYN
BYR
BZD
CAD
CDF
CHE
CHF
CHW
CLE
CLF
CLP
CNH
CNX
CNY
COP
COU
CRC
CSD
CSK
CUC
CUP
CVE
CYP
CZK
DDM
DEM
DJF
DKK
DOP
DZD
ECS
ECV
EEK
EGP
ERN
ESA
ESB
ESP
ETB
EUR
FIM
FJD
FKP
FRF
GBP
GEK
GEL
GHC
GHS
GIP
GMD
GNF
GNS
GQE
GRD
GTQ
GWE
GWP
GYD
HKD
HNL
HRD
HRK
HTG
HUF
IDR
IEP
ILP
ILR
ILS
INR
IQD
IRR
ISJ
ISK
ITL
JMD
JOD
JPY
KES
KGS
KHR
KMF
KPW
KRH
KRO
KRW
KWD
KYD
KZT
LAK
LBP
LKR
LRD
LSL
LTL
LTT
LUC
LUF
LUL
LVL
LVR
LYD
MAD
MAF
MCF
MDC
MDL
MGA
MGF
MKD
MKN
MLF
MMK
MNT
MOP
MRO
MRU
MTL
MTP
MUR
MVP
MVR
MWK
MXN
MXP
MXV
MYR
MZE
MZM
MZN
NAD
NGN
NIC
NIO
NLG
NOK
NPR
NZD
OMR
PAB
PEI
PEN
PES
PGK
PHP
PKR
PLN
PLZ
PTE
PYG
QAR
RHD
ROL
RON
RSD
RUB
RUR
RWF
SAR
SBD
SCR
SDD
SDG
SDP
SEK
SGD
SHP
SIT
SKK
SLL
SOS
SRD
SRG
SSP
STD
STN
SUR
SVC
SYP
SZL
THB
TJR
TJS
TMM
TMT
TND
TOP
TPE
TRL
TRY
TTD
TWD
TZS
UAH
UAK
UGS
UGX
USD
USN
USS
UYI
UYP
UYU
UYW
UZS
VEB
VEF
VES
VND
VNN
VUV
WST
XAF
XAG
XAU
XBA
XBB
XBC
XBD
XCD
XDR
XEU
XFO
XFU
XOF
XPD
XPF
XPT
XRE
XSU
XTS
XUA
XXX
YDD
YER
YUD
YUM
YUN
YUR
ZAL
ZAR
ZMK
ZMW
ZRN
ZRZ
ZWD
ZWL
ZWR
)

Binary file not shown.


View File

@ -1,293 +0,0 @@
package locales
import (
"strconv"
"time"
"github.com/go-playground/locales/currency"
)
// // ErrBadNumberValue is returned when the number passed for
// // plural rule determination cannot be parsed
// type ErrBadNumberValue struct {
// NumberValue string
// InnerError error
// }
// // Error returns ErrBadNumberValue error string
// func (e *ErrBadNumberValue) Error() string {
// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError)
// }
// var _ error = new(ErrBadNumberValue)
// PluralRule denotes the type of plural rules
type PluralRule int
// PluralRule's
const (
PluralRuleUnknown PluralRule = iota
PluralRuleZero // zero
PluralRuleOne // one - singular
PluralRuleTwo // two - dual
PluralRuleFew // few - paucal
PluralRuleMany // many - also used for fractions if they have a separate class
PluralRuleOther // other - required—general plural form—also used if the language only has a single form
)
const (
pluralsString = "UnknownZeroOneTwoFewManyOther"
)
// Translator encapsulates an instance of a locale
// NOTE: some values are returned as a []byte just in case the caller
// wishes to add more and can help avoid allocations; otherwise just cast as string
type Translator interface {
// The following Functions are for overriding, debugging or developing
// with a Translator Locale
// Locale returns the string value of the translator
Locale() string
// returns an array of cardinal plural rules associated
// with this translator
PluralsCardinal() []PluralRule
// returns an array of ordinal plural rules associated
// with this translator
PluralsOrdinal() []PluralRule
// returns an array of range plural rules associated
// with this translator
PluralsRange() []PluralRule
// returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale
CardinalPluralRule(num float64, v uint64) PluralRule
// returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale
OrdinalPluralRule(num float64, v uint64) PluralRule
// returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale
RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule
// returns the locales abbreviated month given the 'month' provided
MonthAbbreviated(month time.Month) string
// returns the locales abbreviated months
MonthsAbbreviated() []string
// returns the locales narrow month given the 'month' provided
MonthNarrow(month time.Month) string
// returns the locales narrow months
MonthsNarrow() []string
// returns the locales wide month given the 'month' provided
MonthWide(month time.Month) string
// returns the locales wide months
MonthsWide() []string
// returns the locales abbreviated weekday given the 'weekday' provided
WeekdayAbbreviated(weekday time.Weekday) string
// returns the locales abbreviated weekdays
WeekdaysAbbreviated() []string
// returns the locales narrow weekday given the 'weekday' provided
WeekdayNarrow(weekday time.Weekday) string
// WeekdaysNarrow returns the locales narrow weekdays
WeekdaysNarrow() []string
// returns the locales short weekday given the 'weekday' provided
WeekdayShort(weekday time.Weekday) string
// returns the locales short weekdays
WeekdaysShort() []string
// returns the locales wide weekday given the 'weekday' provided
WeekdayWide(weekday time.Weekday) string
// returns the locales wide weekdays
WeekdaysWide() []string
// The following Functions are common Formatting functions for the Translator's Locale
// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
FmtNumber(num float64, v uint64) string
// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
// NOTE: 'num' passed into FmtPercent is assumed to be in percent already
FmtPercent(num float64, v uint64) string
// returns the currency representation of 'num' with digits/precision of 'v' for locale
FmtCurrency(num float64, v uint64, currency currency.Type) string
// returns the currency representation of 'num' with digits/precision of 'v' for locale
// in accounting notation.
FmtAccounting(num float64, v uint64, currency currency.Type) string
// returns the short date representation of 't' for locale
FmtDateShort(t time.Time) string
// returns the medium date representation of 't' for locale
FmtDateMedium(t time.Time) string
// returns the long date representation of 't' for locale
FmtDateLong(t time.Time) string
// returns the full date representation of 't' for locale
FmtDateFull(t time.Time) string
// returns the short time representation of 't' for locale
FmtTimeShort(t time.Time) string
// returns the medium time representation of 't' for locale
FmtTimeMedium(t time.Time) string
// returns the long time representation of 't' for locale
FmtTimeLong(t time.Time) string
// returns the full time representation of 't' for locale
FmtTimeFull(t time.Time) string
}
// String returns the string value of PluralRule
func (p PluralRule) String() string {
switch p {
case PluralRuleZero:
return pluralsString[7:11]
case PluralRuleOne:
return pluralsString[11:14]
case PluralRuleTwo:
return pluralsString[14:17]
case PluralRuleFew:
return pluralsString[17:20]
case PluralRuleMany:
return pluralsString[20:24]
case PluralRuleOther:
return pluralsString[24:]
default:
return pluralsString[:7]
}
}
//
// Precision Notes:
//
// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh
//
// v := float64(3.141)
// i := float64(int64(v))
//
// fmt.Println(v - i)
//
// or
//
// s := strconv.FormatFloat(v-i, 'f', -1, 64)
// fmt.Println(s)
//
// these will not print what you'd expect: 0.14100000000000001
// and so this library requires a precision to be specified, or
// inaccurate plural rules could be applied.
//
//
//
// n - absolute value of the source number (integer and decimals).
// i - integer digits of n.
// v - number of visible fraction digits in n, with trailing zeros.
// w - number of visible fraction digits in n, without trailing zeros.
// f - visible fractional digits in n, with trailing zeros.
// t - visible fractional digits in n, without trailing zeros.
//
//
// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision note's above.
//
// n := math.Abs(num)
// i := int64(n)
// v := v
//
//
// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on string until no more zero's....
// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64
// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zero's....
//
//
//
// General Inclusion Rules
// - v will always be available inherently
// - all require n
// - w requires i
//
// W returns the number of visible fraction digits in N, without trailing zeros.
func W(n float64, v uint64) (w int64) {
s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
// will either be '0' or '0.xxxx', so if 1 then w will be zero
// otherwise need to parse
if len(s) != 1 {
s = s[2:]
end := len(s) + 1
for i := end; i >= 0; i-- {
if s[i] != '0' {
end = i + 1
break
}
}
w = int64(len(s[:end]))
}
return
}
// F returns the visible fractional digits in N, with trailing zeros.
func F(n float64, v uint64) (f int64) {
s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
// will either be '0' or '0.xxxx', so if 1 then f will be zero
// otherwise need to parse
if len(s) != 1 {
// ignoring error, because it can't fail as we generated
// the string internally from a real number
f, _ = strconv.ParseInt(s[2:], 10, 64)
}
return
}
// T returns the visible fractional digits in N, without trailing zeros.
func T(n float64, v uint64) (t int64) {
s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
// will either be '0' or '0.xxxx', so if 1 then t will be zero
// otherwise need to parse
if len(s) != 1 {
s = s[2:]
end := len(s) + 1
for i := end; i >= 0; i-- {
if s[i] != '0' {
end = i + 1
break
}
}
// ignoring error, because it can't fail as we generated
// the string internally from a real number
t, _ = strconv.ParseInt(s[:end], 10, 64)
}
return
}
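// Editor's note: illustrative sketch only, not part of the vendored locales source.
// For n = 1.230 rendered with precision v = 3, the visible fraction digits are
// "230", so F is expected to report 230 (per the CLDR operand definitions, t
// would be 23 and w would be 2).
func exampleOperandF() int64 {
	return F(1.230, 3)
}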

View File

@ -1,25 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
*.coverprofile

View File

@ -1,27 +0,0 @@
language: go
go:
- 1.13.4
- tip
matrix:
allow_failures:
- go: tip
notifications:
email:
recipients: dean.karn@gmail.com
on_success: change
on_failure: always
before_install:
- go install github.com/mattn/goveralls
# Only clone the most recent commit.
git:
depth: 1
script:
- go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
after_success: |
[ $TRAVIS_GO_VERSION = 1.13.4 ] &&
goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2016 Go Playground
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,18 +0,0 @@
GOCMD=GO111MODULE=on go
linters-install:
@golangci-lint --version >/dev/null 2>&1 || { \
echo "installing linting tools..."; \
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \
}
lint: linters-install
golangci-lint run
test:
$(GOCMD) test -cover -race ./...
bench:
$(GOCMD) test -bench=. -benchmem ./...
.PHONY: test lint linters-install

View File

@ -1,87 +0,0 @@
## universal-translator
<img align="right" src="https://raw.githubusercontent.com/go-playground/universal-translator/master/logo.png">![Project status](https://img.shields.io/badge/version-0.18.1-green.svg)
[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator)
[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator)
![License](https://img.shields.io/dub/l/vibe-d.svg)
Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules
Why another i18n library?
--------------------------
Because none of the plural rules out there seem to be correct, including the previous implementation of this package,
I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package
is a thin wrapper around [locales](https://github.com/go-playground/locales) in order to store and translate text for
use in your applications.
Features
--------
- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1
- [x] Contains Cardinal, Ordinal and Range Plural Rules
- [x] Contains Month, Weekday and Timezone translations built in
- [x] Contains Date & Time formatting functions
- [x] Contains Number, Currency, Accounting and Percent formatting functions
- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
- [x] Support loading translations from files
- [x] Exporting translations to file(s), mainly for getting them professionally translated
- [ ] Code Generation for translation files -> Go code.. i.e. after it has been professionally translated
- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1)
Installation
-----------
Use go get
```shell
go get github.com/go-playground/universal-translator
```
Usage & Documentation
-------
Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs
##### Examples:
- [Basic](https://github.com/go-playground/universal-translator/tree/master/_examples/basic)
- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-no-files)
- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-with-files)
File formatting
--------------
All types, Plain substitution, Cardinal, Ordinal and Range translations can all be contained within the same file(s);
they are only separated for easy viewing.
##### Examples:
- [Formats](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats)
##### Basic Makeup
NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats)
```json
{
"locale": "en",
"key": "days-left",
"trans": "You have {0} day left.",
"type": "Cardinal",
"rule": "One",
"override": false
}
```
|Field|Description|
|---|---|
|locale|The locale for which the translation is for.|
|key|The translation key that will be used to store and lookup each translation; normally it is a string or integer.|
|trans|The actual translation text.|
|type|The type of translation: Cardinal, Ordinal, Range or "" for a plain substitution (not required if a plain substitution is used)|
|rule|The plural rule the translation is for, e.g. One, Two, Few, Many or Other (not required if a plain substitution is used)|
|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.|
Help With Tests
---------------
To anyone interested in helping or contributing: I sure could use some help creating tests for each language.
Please see issue [here](https://github.com/go-playground/locales/issues/1) for details.
License
------
Distributed under MIT License, please see license file in code for more details.

View File

@ -1,148 +0,0 @@
package ut
import (
"errors"
"fmt"
"github.com/go-playground/locales"
)
var (
// ErrUnknowTranslation indicates the translation could not be found
ErrUnknowTranslation = errors.New("Unknown Translation")
)
var _ error = new(ErrConflictingTranslation)
var _ error = new(ErrRangeTranslation)
var _ error = new(ErrOrdinalTranslation)
var _ error = new(ErrCardinalTranslation)
var _ error = new(ErrMissingPluralTranslation)
var _ error = new(ErrExistingTranslator)
// ErrExistingTranslator is the error representing a conflicting translator
type ErrExistingTranslator struct {
locale string
}
// Error returns ErrExistingTranslator's internal error text
func (e *ErrExistingTranslator) Error() string {
return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale)
}
// ErrConflictingTranslation is the error representing a conflicting translation
type ErrConflictingTranslation struct {
locale string
key interface{}
rule locales.PluralRule
text string
}
// Error returns ErrConflictingTranslation's internal error text
func (e *ErrConflictingTranslation) Error() string {
if _, ok := e.key.(string); !ok {
return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
}
return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
}
// ErrRangeTranslation is the error representing a range translation error
type ErrRangeTranslation struct {
text string
}
// Error returns ErrRangeTranslation's internal error text
func (e *ErrRangeTranslation) Error() string {
return e.text
}
// ErrOrdinalTranslation is the error representing an ordinal translation error
type ErrOrdinalTranslation struct {
text string
}
// Error returns ErrOrdinalTranslation's internal error text
func (e *ErrOrdinalTranslation) Error() string {
return e.text
}
// ErrCardinalTranslation is the error representing a cardinal translation error
type ErrCardinalTranslation struct {
text string
}
// Error returns ErrCardinalTranslation's internal error text
func (e *ErrCardinalTranslation) Error() string {
return e.text
}
// ErrMissingPluralTranslation is the error signifying a missing translation given
// the locales plural rules.
type ErrMissingPluralTranslation struct {
locale string
key interface{}
rule locales.PluralRule
translationType string
}
// Error returns ErrMissingPluralTranslation's internal error text
func (e *ErrMissingPluralTranslation) Error() string {
if _, ok := e.key.(string); !ok {
return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
}
return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
}
// ErrMissingBracket is the error representing a missing bracket in a translation
// eg. This is a {0 <-- missing ending '}'
type ErrMissingBracket struct {
locale string
key interface{}
text string
}
// Error returns ErrMissingBracket error message
func (e *ErrMissingBracket) Error() string {
return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text)
}
// ErrBadParamSyntax is the error representing a bad parameter definition in a translation
// eg. This is a {must-be-int}
type ErrBadParamSyntax struct {
locale string
param string
key interface{}
text string
}
// Error returns ErrBadParamSyntax error message
func (e *ErrBadParamSyntax) Error() string {
return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text)
}
// import/export errors
// ErrMissingLocale is the error representing an expected locale that could
// not be found aka locale not registered with the UniversalTranslator Instance
type ErrMissingLocale struct {
locale string
}
// Error returns ErrMissingLocale's internal error text
func (e *ErrMissingLocale) Error() string {
return fmt.Sprintf("error: locale '%s' not registered.", e.locale)
}
// ErrBadPluralDefinition is the error representing an incorrect plural definition
// usually found within translations defined within files during the import process.
type ErrBadPluralDefinition struct {
tl translation
}
// Error returns ErrBadPluralDefinition's internal error text
func (e *ErrBadPluralDefinition) Error() string {
return fmt.Sprintf("error: bad plural definition '%#v'", e.tl)
}

View File

@ -1,274 +0,0 @@
package ut
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"io"
"github.com/go-playground/locales"
)
type translation struct {
Locale string `json:"locale"`
Key interface{} `json:"key"` // either string or integer
Translation string `json:"trans"`
PluralType string `json:"type,omitempty"`
PluralRule string `json:"rule,omitempty"`
OverrideExisting bool `json:"override,omitempty"`
}
const (
cardinalType = "Cardinal"
ordinalType = "Ordinal"
rangeType = "Range"
)
// ImportExportFormat is the format of the file import or export
type ImportExportFormat uint8
// supported Export Formats
const (
FormatJSON ImportExportFormat = iota
)
// Export writes the translations out to a file on disk.
//
// NOTE: this currently only works with string or int translations keys.
func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error {
_, err := os.Stat(dirname)
if err != nil {
if !os.IsNotExist(err) {
return err
}
if err = os.MkdirAll(dirname, 0744); err != nil {
return err
}
}
// build up translations
var trans []translation
var b []byte
var ext string
for _, locale := range t.translators {
for k, v := range locale.(*translator).translations {
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k,
Translation: v.text,
})
}
for k, pluralTrans := range locale.(*translator).cardinalTanslations {
for i, plural := range pluralTrans {
// leave enough for all plural rules
// but not all are set for all languages.
if plural == nil {
continue
}
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k.(string),
Translation: plural.text,
PluralType: cardinalType,
PluralRule: locales.PluralRule(i).String(),
})
}
}
for k, pluralTrans := range locale.(*translator).ordinalTanslations {
for i, plural := range pluralTrans {
// leave enough for all plural rules
// but not all are set for all languages.
if plural == nil {
continue
}
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k.(string),
Translation: plural.text,
PluralType: ordinalType,
PluralRule: locales.PluralRule(i).String(),
})
}
}
for k, pluralTrans := range locale.(*translator).rangeTanslations {
for i, plural := range pluralTrans {
// leave enough for all plural rules
// but not all are set for all languages.
if plural == nil {
continue
}
trans = append(trans, translation{
Locale: locale.Locale(),
Key: k.(string),
Translation: plural.text,
PluralType: rangeType,
PluralRule: locales.PluralRule(i).String(),
})
}
}
switch format {
case FormatJSON:
b, err = json.MarshalIndent(trans, "", " ")
ext = ".json"
}
if err != nil {
return err
}
err = os.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
if err != nil {
return err
}
trans = trans[0:0]
}
return nil
}
// Import reads the translations out of a file or directory on disk.
//
// NOTE: this currently only works with string or int translation keys.
func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error {
fi, err := os.Stat(dirnameOrFilename)
if err != nil {
return err
}
processFn := func(filename string) error {
f, err := os.Open(filename)
if err != nil {
return err
}
defer f.Close()
return t.ImportByReader(format, f)
}
if !fi.IsDir() {
return processFn(dirnameOrFilename)
}
// recursively go through directory
walker := func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
switch format {
case FormatJSON:
// skip non JSON files
if filepath.Ext(info.Name()) != ".json" {
return nil
}
}
return processFn(path)
}
return filepath.Walk(dirnameOrFilename, walker)
}
// ImportByReader imports the translations found within the contents read from the supplied reader.
//
// NOTE: generally used when assets have been embedded into the binary and are already in memory.
func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error {
b, err := io.ReadAll(reader)
if err != nil {
return err
}
var trans []translation
switch format {
case FormatJSON:
err = json.Unmarshal(b, &trans)
}
if err != nil {
return err
}
for _, tl := range trans {
locale, found := t.FindTranslator(tl.Locale)
if !found {
return &ErrMissingLocale{locale: tl.Locale}
}
pr := stringToPR(tl.PluralRule)
if pr == locales.PluralRuleUnknown {
err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting)
if err != nil {
return err
}
continue
}
switch tl.PluralType {
case cardinalType:
err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
case ordinalType:
err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
case rangeType:
err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting)
default:
return &ErrBadPluralDefinition{tl: tl}
}
if err != nil {
return err
}
}
return nil
}
func stringToPR(s string) locales.PluralRule {
switch s {
case "Zero":
return locales.PluralRuleZero
case "One":
return locales.PluralRuleOne
case "Two":
return locales.PluralRuleTwo
case "Few":
return locales.PluralRuleFew
case "Many":
return locales.PluralRuleMany
case "Other":
return locales.PluralRuleOther
default:
return locales.PluralRuleUnknown
}
}
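A minimal round-trip sketch of the Export/Import API above, assuming the `en` locale package from github.com/go-playground/locales and a writable `translations` directory:

```go
package main

import (
	"log"

	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	enLocale := en.New()
	uni := ut.New(enLocale, enLocale)

	trans, _ := uni.GetTranslator("en")
	if err := trans.Add("welcome", "Welcome {0}", false); err != nil {
		log.Fatal(err)
	}

	// Export writes one <locale>.json file per registered locale.
	if err := uni.Export(ut.FormatJSON, "translations"); err != nil {
		log.Fatal(err)
	}

	// Import loads them back into a fresh instance (a directory or a single file works).
	uni2 := ut.New(enLocale, enLocale)
	if err := uni2.Import(ut.FormatJSON, "translations"); err != nil {
		log.Fatal(err)
	}
}
```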

Binary file not shown (image, 16 KiB)

View File

@ -1,420 +0,0 @@
package ut
import (
"fmt"
"strconv"
"strings"
"github.com/go-playground/locales"
)
const (
paramZero = "{0}"
paramOne = "{1}"
unknownTranslation = ""
)
// Translator is universal-translator's translator instance: a thin wrapper
// around a locales.Translator instance providing some extra functionality.
type Translator interface {
locales.Translator
// adds a normal translation for a particular language/locale
// {#} is the only replacement type accepted; placeholders may be used ad infinitum
// eg. one: '{0} day left' other: '{0} days left'
Add(key interface{}, text string, override bool) error
// adds a cardinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error
// adds an ordinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring'
// - 1st, 2nd, 3rd...
AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error
// adds a range plural translation for a particular language/locale
// {0} and {1} are the only replacement types accepted and only these are accepted.
// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error
// creates the translation for the locale given the 'key' and params passed in
T(key interface{}, params ...string) (string, error)
// creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments
// and param passed in
C(key interface{}, num float64, digits uint64, param string) (string, error)
// creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments
// and param passed in
O(key interface{}, num float64, digits uint64, param string) (string, error)
// creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and
// 'digit2' arguments and 'param1' and 'param2' passed in
R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error)
// VerifyTranslations checks to ensure that no plural rules have been
// missed within the translations.
VerifyTranslations() error
}
var _ Translator = new(translator)
var _ locales.Translator = new(translator)
type translator struct {
locales.Translator
translations map[interface{}]*transText
cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown
ordinalTanslations map[interface{}][]*transText
rangeTanslations map[interface{}][]*transText
}
type transText struct {
text string
indexes []int
}
func newTranslator(trans locales.Translator) Translator {
return &translator{
Translator: trans,
translations: make(map[interface{}]*transText), // translation text broken up by byte index
cardinalTanslations: make(map[interface{}][]*transText),
ordinalTanslations: make(map[interface{}][]*transText),
rangeTanslations: make(map[interface{}][]*transText),
}
}
// Add adds a normal translation for a particular language/locale
// {#} is the only replacement type accepted; placeholders may be used ad infinitum
// eg. one: '{0} day left' other: '{0} days left'
func (t *translator) Add(key interface{}, text string, override bool) error {
if _, ok := t.translations[key]; ok && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text}
}
lb := strings.Count(text, "{")
rb := strings.Count(text, "}")
if lb != rb {
return &ErrMissingBracket{locale: t.Locale(), key: key, text: text}
}
trans := &transText{
text: text,
}
var idx int
for i := 0; i < lb; i++ {
s := "{" + strconv.Itoa(i) + "}"
idx = strings.Index(text, s)
if idx == -1 {
return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text}
}
trans.indexes = append(trans.indexes, idx)
trans.indexes = append(trans.indexes, idx+len(s))
}
t.translations[key] = trans
return nil
}
// AddCardinal adds a cardinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
var verified bool
// verify plural rule exists for locale
for _, pr := range t.PluralsCardinal() {
if pr == rule {
verified = true
break
}
}
if !verified {
return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
}
tarr, ok := t.cardinalTanslations[key]
if ok {
// verify not adding a conflicting record
if len(tarr) > 0 && tarr[rule] != nil && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
}
} else {
tarr = make([]*transText, 7)
t.cardinalTanslations[key] = tarr
}
trans := &transText{
text: text,
indexes: make([]int, 2),
}
tarr[rule] = trans
idx := strings.Index(text, paramZero)
if idx == -1 {
tarr[rule] = nil
return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
}
trans.indexes[0] = idx
trans.indexes[1] = idx + len(paramZero)
return nil
}
// AddOrdinal adds an ordinal plural translation for a particular language/locale
// {0} is the only replacement type accepted and only one variable is accepted as
// multiple cannot be used for a plural rule determination, unless it is a range;
// see AddRange below.
// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd...
func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
var verified bool
// verify plural rule exists for locale
for _, pr := range t.PluralsOrdinal() {
if pr == rule {
verified = true
break
}
}
if !verified {
return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
}
tarr, ok := t.ordinalTanslations[key]
if ok {
// verify not adding a conflicting record
if len(tarr) > 0 && tarr[rule] != nil && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
}
} else {
tarr = make([]*transText, 7)
t.ordinalTanslations[key] = tarr
}
trans := &transText{
text: text,
indexes: make([]int, 2),
}
tarr[rule] = trans
idx := strings.Index(text, paramZero)
if idx == -1 {
tarr[rule] = nil
return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
}
trans.indexes[0] = idx
trans.indexes[1] = idx + len(paramZero)
return nil
}
// AddRange adds a range plural translation for a particular language/locale
// {0} and {1} are the only replacement types accepted and only these are accepted.
// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error {
var verified bool
// verify plural rule exists for locale
for _, pr := range t.PluralsRange() {
if pr == rule {
verified = true
break
}
}
if !verified {
return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
}
tarr, ok := t.rangeTanslations[key]
if ok {
// verify not adding a conflicting record
if len(tarr) > 0 && tarr[rule] != nil && !override {
return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
}
} else {
tarr = make([]*transText, 7)
t.rangeTanslations[key] = tarr
}
trans := &transText{
text: text,
indexes: make([]int, 4),
}
tarr[rule] = trans
idx := strings.Index(text, paramZero)
if idx == -1 {
tarr[rule] = nil
return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
}
trans.indexes[0] = idx
trans.indexes[1] = idx + len(paramZero)
idx = strings.Index(text, paramOne)
if idx == -1 {
tarr[rule] = nil
return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)}
}
trans.indexes[2] = idx
trans.indexes[3] = idx + len(paramOne)
return nil
}
// T creates the translation for the locale given the 'key' and params passed in
func (t *translator) T(key interface{}, params ...string) (string, error) {
trans, ok := t.translations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
b := make([]byte, 0, 64)
var start, end, count int
for i := 0; i < len(trans.indexes); i++ {
end = trans.indexes[i]
b = append(b, trans.text[start:end]...)
b = append(b, params[count]...)
i++
start = trans.indexes[i]
count++
}
b = append(b, trans.text[start:]...)
return string(b), nil
}
// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) {
tarr, ok := t.cardinalTanslations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
rule := t.CardinalPluralRule(num, digits)
trans := tarr[rule]
b := make([]byte, 0, 64)
b = append(b, trans.text[:trans.indexes[0]]...)
b = append(b, param...)
b = append(b, trans.text[trans.indexes[1]:]...)
return string(b), nil
}
// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) {
tarr, ok := t.ordinalTanslations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
rule := t.OrdinalPluralRule(num, digits)
trans := tarr[rule]
b := make([]byte, 0, 64)
b = append(b, trans.text[:trans.indexes[0]]...)
b = append(b, param...)
b = append(b, trans.text[trans.indexes[1]:]...)
return string(b), nil
}
// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments
// and 'param1' and 'param2' passed in
func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) {
tarr, ok := t.rangeTanslations[key]
if !ok {
return unknownTranslation, ErrUnknowTranslation
}
rule := t.RangePluralRule(num1, digits1, num2, digits2)
trans := tarr[rule]
b := make([]byte, 0, 64)
b = append(b, trans.text[:trans.indexes[0]]...)
b = append(b, param1...)
b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...)
b = append(b, param2...)
b = append(b, trans.text[trans.indexes[3]:]...)
return string(b), nil
}
// VerifyTranslations checks to ensure that no plural rules have been
// missed within the translations.
func (t *translator) VerifyTranslations() error {
for k, v := range t.cardinalTanslations {
for _, rule := range t.PluralsCardinal() {
if v[rule] == nil {
return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k}
}
}
}
for k, v := range t.ordinalTanslations {
for _, rule := range t.PluralsOrdinal() {
if v[rule] == nil {
return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k}
}
}
}
for k, v := range t.rangeTanslations {
for _, rule := range t.PluralsRange() {
if v[rule] == nil {
return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k}
}
}
}
return nil
}
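A minimal sketch of the cardinal-plural API above, assuming the `en` locale package from github.com/go-playground/locales:

```go
package main

import (
	"fmt"

	"github.com/go-playground/locales"
	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	uni := ut.New(en.New(), en.New())
	trans, _ := uni.GetTranslator("en")

	// One translation per plural rule the locale actually defines.
	_ = trans.AddCardinal("days-left", "{0} day left", locales.PluralRuleOne, false)
	_ = trans.AddCardinal("days-left", "{0} days left", locales.PluralRuleOther, false)

	// C picks the plural rule from the number and digit count, then substitutes the param.
	one, _ := trans.C("days-left", 1, 0, "1")
	many, _ := trans.C("days-left", 5, 0, "5")
	fmt.Println(one)  // 1 day left
	fmt.Println(many) // 5 days left
}
```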

View File

@ -1,113 +0,0 @@
package ut
import (
"strings"
"github.com/go-playground/locales"
)
// UniversalTranslator holds all locale & translation data
type UniversalTranslator struct {
translators map[string]Translator
fallback Translator
}
// New returns a new UniversalTranslator instance set with
// the fallback locale and locales it should support
func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator {
t := &UniversalTranslator{
translators: make(map[string]Translator),
}
for _, v := range supportedLocales {
trans := newTranslator(v)
t.translators[strings.ToLower(trans.Locale())] = trans
if fallback.Locale() == v.Locale() {
t.fallback = trans
}
}
if t.fallback == nil && fallback != nil {
t.fallback = newTranslator(fallback)
}
return t
}
// FindTranslator tries to find a Translator based on an array of locales
// and returns the first one it can find, otherwise returns the
// fallback translator.
func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) {
for _, locale := range locales {
if trans, found = t.translators[strings.ToLower(locale)]; found {
return
}
}
return t.fallback, false
}
// GetTranslator returns the specified translator for the given locale,
// or fallback if not found
func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) {
if trans, found = t.translators[strings.ToLower(locale)]; found {
return
}
return t.fallback, false
}
// GetFallback returns the fallback locale
func (t *UniversalTranslator) GetFallback() Translator {
return t.fallback
}
// AddTranslator adds the supplied translator; if it already exists the override param
// is checked and, if false, an error is returned, otherwise the translator is
// overridden. If the fallback matches the supplied translator it will be overridden as well.
// NOTE: this is normally only used when a translator is embedded within a library.
func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error {
lc := strings.ToLower(translator.Locale())
_, ok := t.translators[lc]
if ok && !override {
return &ErrExistingTranslator{locale: translator.Locale()}
}
trans := newTranslator(translator)
if t.fallback.Locale() == translator.Locale() {
// because it's optional to have a fallback, I don't impose that limitation
// don't know why you wouldn't but...
if !override {
return &ErrExistingTranslator{locale: translator.Locale()}
}
t.fallback = trans
}
t.translators[lc] = trans
return nil
}
// VerifyTranslations runs through all locales and identifies any issues
// eg. missing plural rules for a locale
func (t *UniversalTranslator) VerifyTranslations() (err error) {
for _, trans := range t.translators {
err = trans.VerifyTranslations()
if err != nil {
return
}
}
return
}
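A minimal sketch of the UniversalTranslator API above, assuming the `en` and `fr` locale packages from github.com/go-playground/locales:

```go
package main

import (
	"fmt"

	"github.com/go-playground/locales/en"
	"github.com/go-playground/locales/fr"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	enT, frT := en.New(), fr.New()
	uni := ut.New(enT, enT, frT) // 'en' is the fallback

	// FindTranslator walks the candidates (eg. from an Accept-Language header)
	// and falls back when none of them is registered.
	trans, found := uni.FindTranslator("de", "fr")
	fmt.Println(found, trans.Locale()) // true fr

	_ = trans.Add("hello", "Bonjour {0}", false)
	msg, _ := trans.T("hello", "Jean")
	fmt.Println(msg) // Bonjour Jean
}
```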

View File

@ -1,31 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
bin
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
*.test
*.out
*.txt
cover.html
README.html
.idea

View File

@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Dean Karn
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,16 +0,0 @@
## Maintainers Guide
### Semantic Versioning
Semantic versioning as defined [here](https://semver.org) must be strictly adhered to.
### External Dependencies
Any new external dependencies MUST:
- Have a compatible LICENSE present.
- Be actively maintained.
- Be approved by @go-playground/admins
### PR Merge Requirements
- Up-to-date branch.
- Passing tests and linting.
- CODEOWNERS approval.
- Tests that cover both the Happy and Unhappy paths.

View File

@ -1,18 +0,0 @@
GOCMD=GO111MODULE=on go
linters-install:
@golangci-lint --version >/dev/null 2>&1 || { \
echo "installing linting tools..."; \
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.41.1; \
}
lint: linters-install
golangci-lint run
test:
$(GOCMD) test -cover -race ./...
bench:
$(GOCMD) test -bench=. -benchmem ./...
.PHONY: test lint linters-install

View File

@ -1,348 +0,0 @@
Package validator
=================
<img align="right" src="https://raw.githubusercontent.com/go-playground/validator/v10/logo.png">[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
![Project status](https://img.shields.io/badge/version-10.13.0-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://pkg.go.dev/github.com/go-playground/validator/v10)
![License](https://img.shields.io/dub/l/vibe-d.svg)
Package validator implements value validations for structs and individual fields based on tags.
It has the following **unique** features:
- Cross Field and Cross Struct validations by using validation tags or custom validators.
- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
- Ability to dive into both map keys and values for validation
- Handles type interface by determining its underlying type prior to validation.
- Handles custom field types such as the sql driver Valuer; see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29)
- Alias validation tags, which allow several validations to be mapped to a single tag for easier definition of validations on structs
- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError
- Customizable i18n aware error messages.
- Default validator for the [gin](https://github.com/gin-gonic/gin) web framework; upgrading from v8 to v9 in gin see [here](https://github.com/go-playground/validator/tree/master/_examples/gin-upgrading-overriding)
Installation
------------
Use go get.
go get github.com/go-playground/validator/v10
Then import the validator package into your own code.
import "github.com/go-playground/validator/v10"
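A minimal usage sketch (the struct, field names and tag choices are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type User struct {
	Name  string `validate:"required"`
	Email string `validate:"required,email"`
	Age   uint8  `validate:"gte=0,lte=130"`
}

func main() {
	validate := validator.New()

	err := validate.Struct(User{Name: "Joan", Email: "not-an-email", Age: 135})
	fmt.Println(err) // reports the failing 'email' and 'lte' tags
}
```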
Error Return Value
-------
Validation functions return type error
They return type error to avoid the issue discussed in the following, where err is always != nil:
* http://stackoverflow.com/a/29138676/3158232
* https://github.com/go-playground/validator/issues/134
Validator returns only InvalidValidationError for bad validation input, nil, or ValidationErrors, all as type error. So in your code all you need to do is check whether the returned error is non-nil; if it is, check whether it is an InvalidValidationError (only if necessary, most of the time it isn't) and, if not, type-assert it to ValidationErrors like so:
```go
err := validate.Struct(mystruct)
validationErrors := err.(validator.ValidationErrors)
```
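A slightly fuller sketch of that check, continuing the snippet above (it assumes the same validate instance and mystruct, plus fmt):

```go
err := validate.Struct(mystruct)
if err != nil {

	// only returned for invalid input such as a nil or non-struct value
	if _, ok := err.(*validator.InvalidValidationError); ok {
		fmt.Println(err)
		return
	}

	for _, fieldErr := range err.(validator.ValidationErrors) {
		fmt.Println(fieldErr.Namespace(), fieldErr.Tag(), fieldErr.Param())
	}
}
```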
Usage and documentation
------
Please see https://pkg.go.dev/github.com/go-playground/validator/v10 for detailed usage docs.
##### Examples:
- [Simple](https://github.com/go-playground/validator/blob/master/_examples/simple/main.go)
- [Custom Field Types](https://github.com/go-playground/validator/blob/master/_examples/custom/main.go)
- [Struct Level](https://github.com/go-playground/validator/blob/master/_examples/struct-level/main.go)
- [Translations & Custom Errors](https://github.com/go-playground/validator/blob/master/_examples/translations/main.go)
- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding)
- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash)
Baked-in Validations
------
### Fields:
| Tag | Description |
| - | - |
| eqcsfield | Field Equals Another Field (relative)|
| eqfield | Field Equals Another Field |
| fieldcontains | Check the indicated characters are present in the Field |
| fieldexcludes | Check the indicated characters are not present in the field |
| gtcsfield | Field Greater Than Another Relative Field |
| gtecsfield | Field Greater Than or Equal To Another Relative Field |
| gtefield | Field Greater Than or Equal To Another Field |
| gtfield | Field Greater Than Another Field |
| ltcsfield | Less Than Another Relative Field |
| ltecsfield | Less Than or Equal To Another Relative Field |
| ltefield | Less Than or Equal To Another Field |
| ltfield | Less Than Another Field |
| necsfield | Field Does Not Equal Another Field (relative) |
| nefield | Field Does Not Equal Another Field |
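For example, the cross-field tags above can tie two fields of the same struct together (a minimal sketch; struct and field names are illustrative):

```go
type ChangePassword struct {
	Password        string `validate:"required,min=8"`
	ConfirmPassword string `validate:"required,eqfield=Password"`
}

err := validator.New().Struct(ChangePassword{
	Password:        "s3cretpass",
	ConfirmPassword: "different",
}) // fails on the 'eqfield' tag of ConfirmPassword
```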
### Network:
| Tag | Description |
| - | - |
| cidr | Classless Inter-Domain Routing CIDR |
| cidrv4 | Classless Inter-Domain Routing CIDRv4 |
| cidrv6 | Classless Inter-Domain Routing CIDRv6 |
| datauri | Data URL |
| fqdn | Fully Qualified Domain Name (FQDN) |
| hostname | Hostname RFC 952 |
| hostname_port | HostPort |
| hostname_rfc1123 | Hostname RFC 1123 |
| ip | Internet Protocol Address IP |
| ip4_addr | Internet Protocol Address IPv4 |
| ip6_addr | Internet Protocol Address IPv6 |
| ip_addr | Internet Protocol Address IP |
| ipv4 | Internet Protocol Address IPv4 |
| ipv6 | Internet Protocol Address IPv6 |
| mac | Media Access Control Address MAC |
| tcp4_addr | Transmission Control Protocol Address TCPv4 |
| tcp6_addr | Transmission Control Protocol Address TCPv6 |
| tcp_addr | Transmission Control Protocol Address TCP |
| udp4_addr | User Datagram Protocol Address UDPv4 |
| udp6_addr | User Datagram Protocol Address UDPv6 |
| udp_addr | User Datagram Protocol Address UDP |
| unix_addr | Unix domain socket end point Address |
| uri | URI String |
| url | URL String |
| http_url | HTTP URL String |
| url_encoded | URL Encoded |
| urn_rfc2141 | Urn RFC 2141 String |
### Strings:
| Tag | Description |
| - | - |
| alpha | Alpha Only |
| alphanum | Alphanumeric |
| alphanumunicode | Alphanumeric Unicode |
| alphaunicode | Alpha Unicode |
| ascii | ASCII |
| boolean | Boolean |
| contains | Contains |
| containsany | Contains Any |
| containsrune | Contains Rune |
| endsnotwith | Ends Not With |
| endswith | Ends With |
| excludes | Excludes |
| excludesall | Excludes All |
| excludesrune | Excludes Rune |
| lowercase | Lowercase |
| multibyte | Multi-Byte Characters |
| number | Number |
| numeric | Numeric |
| printascii | Printable ASCII |
| startsnotwith | Starts Not With |
| startswith | Starts With |
| uppercase | Uppercase |
### Format:
| Tag | Description |
| - | - |
| base64 | Base64 String |
| base64url | Base64URL String |
| base64rawurl | Base64RawURL String |
| bic | Business Identifier Code (ISO 9362) |
| bcp47_language_tag | Language tag (BCP 47) |
| btc_addr | Bitcoin Address |
| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) |
| credit_card | Credit Card Number |
| mongodb | MongoDB ObjectID |
| cron | Cron |
| datetime | Datetime |
| e164 | e164 formatted phone number |
| email | E-mail String |
| eth_addr | Ethereum Address |
| hexadecimal | Hexadecimal String |
| hexcolor | Hexcolor String |
| hsl | HSL String |
| hsla | HSLA String |
| html | HTML Tags |
| html_encoded | HTML Encoded |
| isbn | International Standard Book Number |
| isbn10 | International Standard Book Number 10 |
| isbn13 | International Standard Book Number 13 |
| iso3166_1_alpha2 | Two-letter country code (ISO 3166-1 alpha-2) |
| iso3166_1_alpha3 | Three-letter country code (ISO 3166-1 alpha-3) |
| iso3166_1_alpha_numeric | Numeric country code (ISO 3166-1 numeric) |
| iso3166_2 | Country subdivision code (ISO 3166-2) |
| iso4217 | Currency code (ISO 4217) |
| json | JSON |
| jwt | JSON Web Token (JWT) |
| latitude | Latitude |
| longitude | Longitude |
| luhn_checksum | Luhn Algorithm Checksum (for strings and (u)int) |
| postcode_iso3166_alpha2 | Postcode |
| postcode_iso3166_alpha2_field | Postcode |
| rgb | RGB String |
| rgba | RGBA String |
| ssn | Social Security Number SSN |
| timezone | Timezone |
| uuid | Universally Unique Identifier UUID |
| uuid3 | Universally Unique Identifier UUID v3 |
| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 |
| uuid4 | Universally Unique Identifier UUID v4 |
| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 |
| uuid5 | Universally Unique Identifier UUID v5 |
| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 |
| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 |
| md4 | MD4 hash |
| md5 | MD5 hash |
| sha256 | SHA256 hash |
| sha384 | SHA384 hash |
| sha512 | SHA512 hash |
| ripemd128 | RIPEMD-128 hash |
| ripemd160 | RIPEMD-160 hash |
| tiger128 | TIGER128 hash |
| tiger160 | TIGER160 hash |
| tiger192 | TIGER192 hash |
| semver | Semantic Versioning 2.0.0 |
| ulid | Universally Unique Lexicographically Sortable Identifier ULID |
| cve | Common Vulnerabilities and Exposures Identifier (CVE id) |
### Comparisons:
| Tag | Description |
| - | - |
| eq | Equals |
| eq_ignore_case | Equals ignoring case |
| gt | Greater than|
| gte | Greater than or equal |
| lt | Less Than |
| lte | Less Than or Equal |
| ne | Not Equal |
| ne_ignore_case | Not Equal ignoring case |
### Other:
| Tag | Description |
| - | - |
| dir | Existing Directory |
| dirpath | Directory Path |
| file | Existing File |
| filepath | File Path |
| isdefault | Is Default |
| len | Length |
| max | Maximum |
| min | Minimum |
| oneof | One Of |
| required | Required |
| required_if | Required If |
| required_unless | Required Unless |
| required_with | Required With |
| required_with_all | Required With All |
| required_without | Required Without |
| required_without_all | Required Without All |
| excluded_if | Excluded If |
| excluded_unless | Excluded Unless |
| excluded_with | Excluded With |
| excluded_with_all | Excluded With All |
| excluded_without | Excluded Without |
| excluded_without_all | Excluded Without All |
| unique | Unique |
#### Aliases:
| Tag | Description |
| - | - |
| iscolor | hexcolor\|rgb\|rgba\|hsl\|hsla |
| country_code | iso3166_1_alpha2\|iso3166_1_alpha3\|iso3166_1_alpha_numeric |
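A minimal sketch combining a few of the tags and aliases above (the struct and values are illustrative):

```go
type Theme struct {
	Accent  string `validate:"iscolor"`      // alias for hexcolor|rgb|rgba|hsl|hsla
	Country string `validate:"country_code"` // alias for the iso3166_1_* tags
	Website string `validate:"omitempty,http_url"`
}

err := validator.New().Struct(Theme{Accent: "#fefefe", Country: "NL"})
// err == nil: every field satisfies its tags (Website is empty and omitempty)
```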
Benchmarks
------
###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64
```go
goos: darwin
goarch: amd64
pkg: github.com/go-playground/validator
BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op
BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op
BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op
BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op
BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op
BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op
BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op
BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op
BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op
BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op
BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op
BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op
BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op
BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op
BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op
BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op
BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op
BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op
BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op
BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op
BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op
BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op
BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op
BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op
BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op
BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op
BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op
BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op
BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op
BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op
BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op
BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op
BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op
BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op
BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op
BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op
BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op
BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op
BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op
BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op
BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op
BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op
BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op
BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op
BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op
BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op
BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op
BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op
BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op
BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op
BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op
BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op
BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op
BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op
BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op
BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op
BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op
BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op
BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op
```
Complementary Software
----------------------
Here is a list of software that complements using this library either pre or post validation.
* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support.
* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects
How to Contribute
------
Make a pull request...
License
-------
Distributed under MIT License, please see license file within the code for more details.
Maintainers
-----------
This project has grown large enough that more than one person is required to properly support the community.
If you are interested in becoming a maintainer please reach out to me https://github.com/deankarn

File diff suppressed because it is too large

View File

@ -1,327 +0,0 @@
package validator
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
type tagType uint8
const (
typeDefault tagType = iota
typeOmitEmpty
typeIsDefault
typeNoStructLevel
typeStructOnly
typeDive
typeOr
typeKeys
typeEndKeys
)
const (
invalidValidation = "Invalid validation tag on field '%s'"
undefinedValidation = "Undefined validation function '%s' on field '%s'"
keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag"
)
type structCache struct {
lock sync.Mutex
m atomic.Value // map[reflect.Type]*cStruct
}
func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) {
c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key]
return
}
func (sc *structCache) Set(key reflect.Type, value *cStruct) {
m := sc.m.Load().(map[reflect.Type]*cStruct)
nm := make(map[reflect.Type]*cStruct, len(m)+1)
for k, v := range m {
nm[k] = v
}
nm[key] = value
sc.m.Store(nm)
}
type tagCache struct {
lock sync.Mutex
m atomic.Value // map[string]*cTag
}
func (tc *tagCache) Get(key string) (c *cTag, found bool) {
c, found = tc.m.Load().(map[string]*cTag)[key]
return
}
func (tc *tagCache) Set(key string, value *cTag) {
m := tc.m.Load().(map[string]*cTag)
nm := make(map[string]*cTag, len(m)+1)
for k, v := range m {
nm[k] = v
}
nm[key] = value
tc.m.Store(nm)
}
type cStruct struct {
name string
fields []*cField
fn StructLevelFuncCtx
}
type cField struct {
idx int
name string
altName string
namesEqual bool
cTags *cTag
}
type cTag struct {
tag string
aliasTag string
actualAliasTag string
param string
keys *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation
next *cTag
fn FuncCtx
typeof tagType
hasTag bool
hasAlias bool
hasParam bool // true if parameter used eg. eq= where the equal sign has been set
isBlockEnd bool // indicates the current tag represents the last validation in the block
runValidationWhenNil bool
}
func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct {
v.structCache.lock.Lock()
defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise!
typ := current.Type()
// could have been multiple trying to access, but once first is done this ensures struct
// isn't parsed again.
cs, ok := v.structCache.Get(typ)
if ok {
return cs
}
cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]}
numFields := current.NumField()
rules := v.rules[typ]
var ctag *cTag
var fld reflect.StructField
var tag string
var customName string
for i := 0; i < numFields; i++ {
fld = typ.Field(i)
if !fld.Anonymous && len(fld.PkgPath) > 0 {
continue
}
if rtag, ok := rules[fld.Name]; ok {
tag = rtag
} else {
tag = fld.Tag.Get(v.tagName)
}
if tag == skipValidationTag {
continue
}
customName = fld.Name
if v.hasTagNameFunc {
name := v.tagNameFunc(fld)
if len(name) > 0 {
customName = name
}
}
// NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different
// and so only struct level caching can be used instead of combined with Field tag caching
if len(tag) > 0 {
ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false)
} else {
// even if field doesn't have validations need cTag for traversing to potential inner/nested
// elements of the field.
ctag = new(cTag)
}
cs.fields = append(cs.fields, &cField{
idx: i,
name: fld.Name,
altName: customName,
cTags: ctag,
namesEqual: fld.Name == customName,
})
}
v.structCache.Set(typ, cs)
return cs
}
func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) {
var t string
noAlias := len(alias) == 0
tags := strings.Split(tag, tagSeparator)
for i := 0; i < len(tags); i++ {
t = tags[i]
if noAlias {
alias = t
}
// check map for alias and process new tags, otherwise process as usual
if tagsVal, found := v.aliases[t]; found {
if i == 0 {
firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
} else {
next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
current.next, current = next, curr
}
continue
}
var prevTag tagType
if i == 0 {
current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true, typeof: typeDefault}
firstCtag = current
} else {
prevTag = current.typeof
current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true}
current = current.next
}
switch t {
case diveTag:
current.typeof = typeDive
continue
case keysTag:
current.typeof = typeKeys
if i == 0 || prevTag != typeDive {
panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag))
}
current.typeof = typeKeys
// need to pass along only keys tag
// need to increment i to skip over the keys tags
b := make([]byte, 0, 64)
i++
for ; i < len(tags); i++ {
b = append(b, tags[i]...)
b = append(b, ',')
if tags[i] == endKeysTag {
break
}
}
current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false)
continue
case endKeysTag:
current.typeof = typeEndKeys
// if there are more in tags then there was no keysTag defined
// and an error should be thrown
if i != len(tags)-1 {
panic(keysTagNotDefined)
}
return
case omitempty:
current.typeof = typeOmitEmpty
continue
case structOnlyTag:
current.typeof = typeStructOnly
continue
case noStructLevelTag:
current.typeof = typeNoStructLevel
continue
default:
if t == isdefault {
current.typeof = typeIsDefault
}
// if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C"
orVals := strings.Split(t, orSeparator)
for j := 0; j < len(orVals); j++ {
vals := strings.SplitN(orVals[j], tagKeySeparator, 2)
if noAlias {
alias = vals[0]
current.aliasTag = alias
} else {
current.actualAliasTag = t
}
if j > 0 {
current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true}
current = current.next
}
current.hasParam = len(vals) > 1
current.tag = vals[0]
if len(current.tag) == 0 {
panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName)))
}
if wrapper, ok := v.validations[current.tag]; ok {
current.fn = wrapper.fn
current.runValidationWhenNil = wrapper.runValidatinOnNil
} else {
panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName)))
}
if len(orVals) > 1 {
current.typeof = typeOr
}
if len(vals) > 1 {
current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1)
}
}
current.isBlockEnd = true
}
}
return
}
func (v *Validate) fetchCacheTag(tag string) *cTag {
// find cached tag
ctag, found := v.tagCache.Get(tag)
if !found {
v.tagCache.lock.Lock()
defer v.tagCache.lock.Unlock()
// could have been multiple trying to access, but once first is done this ensures tag
// isn't parsed again.
ctag, found = v.tagCache.Get(tag)
if !found {
ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false)
v.tagCache.Set(tag, ctag)
}
}
return ctag
}

File diff suppressed because it is too large

View File

@ -1,79 +0,0 @@
package validator
var iso4217 = map[string]bool{
"AFN": true, "EUR": true, "ALL": true, "DZD": true, "USD": true,
"AOA": true, "XCD": true, "ARS": true, "AMD": true, "AWG": true,
"AUD": true, "AZN": true, "BSD": true, "BHD": true, "BDT": true,
"BBD": true, "BYN": true, "BZD": true, "XOF": true, "BMD": true,
"INR": true, "BTN": true, "BOB": true, "BOV": true, "BAM": true,
"BWP": true, "NOK": true, "BRL": true, "BND": true, "BGN": true,
"BIF": true, "CVE": true, "KHR": true, "XAF": true, "CAD": true,
"KYD": true, "CLP": true, "CLF": true, "CNY": true, "COP": true,
"COU": true, "KMF": true, "CDF": true, "NZD": true, "CRC": true,
"HRK": true, "CUP": true, "CUC": true, "ANG": true, "CZK": true,
"DKK": true, "DJF": true, "DOP": true, "EGP": true, "SVC": true,
"ERN": true, "SZL": true, "ETB": true, "FKP": true, "FJD": true,
"XPF": true, "GMD": true, "GEL": true, "GHS": true, "GIP": true,
"GTQ": true, "GBP": true, "GNF": true, "GYD": true, "HTG": true,
"HNL": true, "HKD": true, "HUF": true, "ISK": true, "IDR": true,
"XDR": true, "IRR": true, "IQD": true, "ILS": true, "JMD": true,
"JPY": true, "JOD": true, "KZT": true, "KES": true, "KPW": true,
"KRW": true, "KWD": true, "KGS": true, "LAK": true, "LBP": true,
"LSL": true, "ZAR": true, "LRD": true, "LYD": true, "CHF": true,
"MOP": true, "MKD": true, "MGA": true, "MWK": true, "MYR": true,
"MVR": true, "MRU": true, "MUR": true, "XUA": true, "MXN": true,
"MXV": true, "MDL": true, "MNT": true, "MAD": true, "MZN": true,
"MMK": true, "NAD": true, "NPR": true, "NIO": true, "NGN": true,
"OMR": true, "PKR": true, "PAB": true, "PGK": true, "PYG": true,
"PEN": true, "PHP": true, "PLN": true, "QAR": true, "RON": true,
"RUB": true, "RWF": true, "SHP": true, "WST": true, "STN": true,
"SAR": true, "RSD": true, "SCR": true, "SLL": true, "SGD": true,
"XSU": true, "SBD": true, "SOS": true, "SSP": true, "LKR": true,
"SDG": true, "SRD": true, "SEK": true, "CHE": true, "CHW": true,
"SYP": true, "TWD": true, "TJS": true, "TZS": true, "THB": true,
"TOP": true, "TTD": true, "TND": true, "TRY": true, "TMT": true,
"UGX": true, "UAH": true, "AED": true, "USN": true, "UYU": true,
"UYI": true, "UYW": true, "UZS": true, "VUV": true, "VES": true,
"VND": true, "YER": true, "ZMW": true, "ZWL": true, "XBA": true,
"XBB": true, "XBC": true, "XBD": true, "XTS": true, "XXX": true,
"XAU": true, "XPD": true, "XPT": true, "XAG": true,
}
var iso4217_numeric = map[int]bool{
8: true, 12: true, 32: true, 36: true, 44: true,
48: true, 50: true, 51: true, 52: true, 60: true,
64: true, 68: true, 72: true, 84: true, 90: true,
96: true, 104: true, 108: true, 116: true, 124: true,
132: true, 136: true, 144: true, 152: true, 156: true,
170: true, 174: true, 188: true, 191: true, 192: true,
203: true, 208: true, 214: true, 222: true, 230: true,
232: true, 238: true, 242: true, 262: true, 270: true,
292: true, 320: true, 324: true, 328: true, 332: true,
340: true, 344: true, 348: true, 352: true, 356: true,
360: true, 364: true, 368: true, 376: true, 388: true,
392: true, 398: true, 400: true, 404: true, 408: true,
410: true, 414: true, 417: true, 418: true, 422: true,
426: true, 430: true, 434: true, 446: true, 454: true,
458: true, 462: true, 480: true, 484: true, 496: true,
498: true, 504: true, 512: true, 516: true, 524: true,
532: true, 533: true, 548: true, 554: true, 558: true,
566: true, 578: true, 586: true, 590: true, 598: true,
600: true, 604: true, 608: true, 634: true, 643: true,
646: true, 654: true, 682: true, 690: true, 694: true,
702: true, 704: true, 706: true, 710: true, 728: true,
748: true, 752: true, 756: true, 760: true, 764: true,
776: true, 780: true, 784: true, 788: true, 800: true,
807: true, 818: true, 826: true, 834: true, 840: true,
858: true, 860: true, 882: true, 886: true, 901: true,
927: true, 928: true, 929: true, 930: true, 931: true,
932: true, 933: true, 934: true, 936: true, 938: true,
940: true, 941: true, 943: true, 944: true, 946: true,
947: true, 948: true, 949: true, 950: true, 951: true,
952: true, 953: true, 955: true, 956: true, 957: true,
958: true, 959: true, 960: true, 961: true, 962: true,
963: true, 964: true, 965: true, 967: true, 968: true,
969: true, 970: true, 971: true, 972: true, 973: true,
975: true, 976: true, 977: true, 978: true, 979: true,
980: true, 981: true, 984: true, 985: true, 986: true,
990: true, 994: true, 997: true, 999: true,
}

File diff suppressed because it is too large

View File

@ -1,272 +0,0 @@
package validator
import (
"bytes"
"fmt"
"reflect"
"strings"
ut "github.com/go-playground/universal-translator"
)
const (
fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag"
)
// ValidationErrorsTranslations is the translation return type
type ValidationErrorsTranslations map[string]string
// InvalidValidationError describes an invalid argument passed to
// `Struct`, `StructExcept`, `StructPartial` or `Field`
type InvalidValidationError struct {
Type reflect.Type
}
// Error returns InvalidValidationError message
func (e *InvalidValidationError) Error() string {
if e.Type == nil {
return "validator: (nil)"
}
return "validator: (nil " + e.Type.String() + ")"
}
// ValidationErrors is an array of FieldError's
// for use in custom error messages post validation.
type ValidationErrors []FieldError
// Error is intended for use in development + debugging and not intended to be a production error message.
// It allows ValidationErrors to satisfy the error interface.
// All information to create an error message specific to your application is contained within
// the FieldError found within the ValidationErrors array
func (ve ValidationErrors) Error() string {
buff := bytes.NewBufferString("")
for i := 0; i < len(ve); i++ {
buff.WriteString(ve[i].Error())
buff.WriteString("\n")
}
return strings.TrimSpace(buff.String())
}
// Translate translates all of the ValidationErrors
func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations {
trans := make(ValidationErrorsTranslations)
var fe *fieldError
for i := 0; i < len(ve); i++ {
fe = ve[i].(*fieldError)
// // in case an Anonymous struct was used, ensure that the key
// // would be 'Username' instead of ".Username"
// if len(fe.ns) > 0 && fe.ns[:1] == "." {
// trans[fe.ns[1:]] = fe.Translate(ut)
// continue
// }
trans[fe.ns] = fe.Translate(ut)
}
return trans
}
// FieldError contains all functions to get error details
type FieldError interface {
// Tag returns the validation tag that failed. if the
// validation was an alias, this will return the
// alias name and not the underlying tag that failed.
//
// eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
// will return "iscolor"
Tag() string
// ActualTag returns the validation tag that failed, even if an
// alias the actual tag within the alias will be returned.
// If an 'or' validation fails the entire or will be returned.
//
// eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
// will return "hexcolor|rgb|rgba|hsl|hsla"
ActualTag() string
// Namespace returns the namespace for the field error, with the tag
// name taking precedence over the field's actual name.
//
// eg. JSON name "User.fname"
//
// See StructNamespace() for a version that returns actual names.
//
// NOTE: this field can be blank when validating a single primitive field
// using validate.Field(...) as there is no way to extract its name
Namespace() string
// StructNamespace returns the namespace for the field error, with the field's
// actual name.
//
// eg. "User.FirstName" see Namespace for comparison
//
// NOTE: this field can be blank when validating a single primitive field
// using validate.Field(...) as there is no way to extract its name
StructNamespace() string
// Field returns the fields name with the tag name taking precedence over the
// field's actual name.
//
// eg. JSON name "fname"
// see StructField for comparison
Field() string
// StructField returns the field's actual name from the struct, when able to determine.
//
// eg. "FirstName"
// see Field for comparison
StructField() string
// Value returns the actual field's value in case needed for creating the error
// message
Value() interface{}
// Param returns the param value, in string form for comparison; this will also
// help with generating an error message
Param() string
// Kind returns the Field's reflect Kind
//
// eg. time.Time's kind is a struct
Kind() reflect.Kind
// Type returns the Field's reflect Type
//
// eg. time.Time's type is time.Time
Type() reflect.Type
// Translate returns the FieldError's translated error
// from the provided 'ut.Translator' and registered 'TranslationFunc'
//
// NOTE: if no registered translator can be found it returns the same as
// calling fe.Error()
Translate(ut ut.Translator) string
// Error returns the FieldError's message
Error() string
}
// compile time interface checks
var _ FieldError = new(fieldError)
var _ error = new(fieldError)
// fieldError contains a single field's validation error along
// with other properties that may be needed for error message creation
// it complies with the FieldError interface
type fieldError struct {
v *Validate
tag string
actualTag string
ns string
structNs string
fieldLen uint8
structfieldLen uint8
value interface{}
param string
kind reflect.Kind
typ reflect.Type
}
// Tag returns the validation tag that failed.
func (fe *fieldError) Tag() string {
return fe.tag
}
// ActualTag returns the validation tag that failed, even if an
// alias the actual tag within the alias will be returned.
func (fe *fieldError) ActualTag() string {
return fe.actualTag
}
// Namespace returns the namespace for the field error, with the tag
// name taking precedence over the field's actual name.
func (fe *fieldError) Namespace() string {
return fe.ns
}
// StructNamespace returns the namespace for the field error, with the field's
// actual name.
func (fe *fieldError) StructNamespace() string {
return fe.structNs
}
// Field returns the field's name with the tag name taking precedence over the
// field's actual name.
func (fe *fieldError) Field() string {
return fe.ns[len(fe.ns)-int(fe.fieldLen):]
// // return fe.field
// fld := fe.ns[len(fe.ns)-int(fe.fieldLen):]
// log.Println("FLD:", fld)
// if len(fld) > 0 && fld[:1] == "." {
// return fld[1:]
// }
// return fld
}
// StructField returns the field's actual name from the struct, when able to determine.
func (fe *fieldError) StructField() string {
// return fe.structField
return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):]
}
// Value returns the actual field's value in case needed for creating the error
// message
func (fe *fieldError) Value() interface{} {
return fe.value
}
// Param returns the param value, in string form for comparison; this will
// also help with generating an error message
func (fe *fieldError) Param() string {
return fe.param
}
// Kind returns the Field's reflect Kind
func (fe *fieldError) Kind() reflect.Kind {
return fe.kind
}
// Type returns the Field's reflect Type
func (fe *fieldError) Type() reflect.Type {
return fe.typ
}
// Error returns the fieldError's error message
func (fe *fieldError) Error() string {
return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag)
}
// Translate returns the FieldError's translated error
// from the provided 'ut.Translator' and registered 'TranslationFunc'
//
// NOTE: if no registered translation can be found, it returns the original
// untranslated error message.
func (fe *fieldError) Translate(ut ut.Translator) string {
m, ok := fe.v.transTagFunc[ut]
if !ok {
return fe.Error()
}
fn, ok := m[fe.tag]
if !ok {
return fe.Error()
}
return fn(ut, fe)
}
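A minimal sketch of consuming the FieldError interface above (the struct and tags are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type User struct {
	Email string `validate:"required,email"`
	Age   uint8  `validate:"gte=18"`
}

func main() {
	err := validator.New().Struct(User{Email: "nope", Age: 16})
	if err != nil {
		for _, fe := range err.(validator.ValidationErrors) {
			fmt.Printf("namespace=%s field=%s tag=%s param=%q value=%v\n",
				fe.Namespace(), fe.Field(), fe.Tag(), fe.Param(), fe.Value())
		}
	}
}
```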

View File

@ -1,120 +0,0 @@
package validator
import "reflect"
// FieldLevel contains all the information and helper functions
// to validate a field
type FieldLevel interface {
// Top returns the top level struct, if any
Top() reflect.Value
// Parent returns the current fields parent struct, if any or
// the comparison value if called 'VarWithValue'
Parent() reflect.Value
// Field returns current field for validation
Field() reflect.Value
// FieldName returns the field's name with the tag
// name taking precedence over the fields actual name.
FieldName() string
// StructFieldName returns the struct field's name
StructFieldName() string
// Param returns param for validation against current field
Param() string
// GetTag returns the current validations tag name
GetTag() string
// ExtractType gets the actual underlying type of field value.
// It will dive into pointers, customTypes and return you the
// underlying value and it's kind.
ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
// GetStructFieldOK traverses the parent struct to retrieve a specific field denoted by the provided namespace
// in the param and returns the field, field kind and whether it was successful in retrieving
// the field at all.
//
// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
// could not be retrieved because it didn't exist.
//
// Deprecated: Use GetStructFieldOK2() instead, which also returns whether the value is nullable.
GetStructFieldOK() (reflect.Value, reflect.Kind, bool)
// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
// the field and namespace allowing more extensibility for validators.
//
// Deprecated: Use GetStructFieldOKAdvanced2() instead, which also returns whether the value is nullable.
GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool)
// GetStructFieldOK2 traverses the parent struct to retrieve a specific field denoted by the provided namespace
// in the param and returns the field, field kind, whether it is a nullable type and whether it was successful in retrieving
// the field at all.
//
// NOTE: when not successful, ok will be false; this can happen when a nested struct is nil and so the field
// could not be retrieved because it didn't exist.
GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool)
// GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
// the field and namespace allowing more extensibility for validators.
GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool)
}
var _ FieldLevel = new(validate)
// Field returns current field for validation
func (v *validate) Field() reflect.Value {
return v.flField
}
// FieldName returns the field's name with the tag
// name taking precedence over the field's actual name.
func (v *validate) FieldName() string {
return v.cf.altName
}
// GetTag returns the current validations tag name
func (v *validate) GetTag() string {
return v.ct.tag
}
// StructFieldName returns the struct field's name
func (v *validate) StructFieldName() string {
return v.cf.name
}
// Param returns param for validation against current field
func (v *validate) Param() string {
return v.ct.param
}
// GetStructFieldOK traverses the parent struct to retrieve a specific field denoted by the current
// validation's param and returns the field, field kind and whether it was successful in retrieving the field at all.
//
// Deprecated: Use GetStructFieldOK2() instead, which also returns whether the value is nullable.
func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) {
current, kind, _, found := v.getStructFieldOKInternal(v.slflParent, v.ct.param)
return current, kind, found
}
// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
// the field and namespace allowing more extensibility for validators.
//
// Deprecated: Use GetStructFieldOKAdvanced2() instead, which also returns whether the value is nullable.
func (v *validate) GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) {
current, kind, _, found := v.GetStructFieldOKAdvanced2(val, namespace)
return current, kind, found
}
// GetStructFieldOK2 traverses the parent struct to retrieve a specific field denoted by the current
// validation's param and returns the field, field kind, whether it is nullable and whether it was found.
func (v *validate) GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) {
return v.getStructFieldOKInternal(v.slflParent, v.ct.param)
}
// GetStructFieldOKAdvanced2 is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
// the field and namespace allowing more extensibility for validators.
func (v *validate) GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) {
return v.getStructFieldOKInternal(val, namespace)
}
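
The FieldLevel interface above is what custom validation functions receive. A minimal sketch of one that uses Field() and Param(); the "multiple-of" tag name is made up for illustration and assumes the v10 RegisterValidation API:

package main

import (
	"fmt"
	"strconv"

	"github.com/go-playground/validator/v10"
)

func main() {
	v := validator.New()

	// fl.Field() is the value under validation; fl.Param() is the text after '='.
	_ = v.RegisterValidation("multiple-of", func(fl validator.FieldLevel) bool {
		div, err := strconv.ParseInt(fl.Param(), 10, 64)
		if err != nil || div == 0 {
			return false
		}
		return fl.Field().Int()%div == 0
	})

	fmt.Println(v.Var(15, "multiple-of=5")) // <nil>
	fmt.Println(v.Var(16, "multiple-of=5")) // validation error
}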

Binary file not shown (image, 13 KiB)

View File

@ -1,173 +0,0 @@
package validator
import "regexp"
var postCodePatternDict = map[string]string{
"GB": `^GIR[ ]?0AA|((AB|AL|B|BA|BB|BD|BH|BL|BN|BR|BS|BT|CA|CB|CF|CH|CM|CO|CR|CT|CV|CW|DA|DD|DE|DG|DH|DL|DN|DT|DY|E|EC|EH|EN|EX|FK|FY|G|GL|GY|GU|HA|HD|HG|HP|HR|HS|HU|HX|IG|IM|IP|IV|JE|KA|KT|KW|KY|L|LA|LD|LE|LL|LN|LS|LU|M|ME|MK|ML|N|NE|NG|NN|NP|NR|NW|OL|OX|PA|PE|PH|PL|PO|PR|RG|RH|RM|S|SA|SE|SG|SK|SL|SM|SN|SO|SP|SR|SS|ST|SW|SY|TA|TD|TF|TN|TQ|TR|TS|TW|UB|W|WA|WC|WD|WF|WN|WR|WS|WV|YO|ZE)(\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}))|BFPO[ ]?\d{1,4}$`,
"JE": `^JE\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
"GG": `^GY\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
"IM": `^IM\d[\dA-Z]?[ ]?\d[ABD-HJLN-UW-Z]{2}$`,
"US": `^\d{5}([ \-]\d{4})?$`,
"CA": `^[ABCEGHJKLMNPRSTVXY]\d[ABCEGHJ-NPRSTV-Z][ ]?\d[ABCEGHJ-NPRSTV-Z]\d$`,
"DE": `^\d{5}$`,
"JP": `^\d{3}-\d{4}$`,
"FR": `^\d{2}[ ]?\d{3}$`,
"AU": `^\d{4}$`,
"IT": `^\d{5}$`,
"CH": `^\d{4}$`,
"AT": `^\d{4}$`,
"ES": `^\d{5}$`,
"NL": `^\d{4}[ ]?[A-Z]{2}$`,
"BE": `^\d{4}$`,
"DK": `^\d{4}$`,
"SE": `^\d{3}[ ]?\d{2}$`,
"NO": `^\d{4}$`,
"BR": `^\d{5}[\-]?\d{3}$`,
"PT": `^\d{4}([\-]\d{3})?$`,
"FI": `^\d{5}$`,
"AX": `^22\d{3}$`,
"KR": `^\d{3}[\-]\d{3}$`,
"CN": `^\d{6}$`,
"TW": `^\d{3}(\d{2})?$`,
"SG": `^\d{6}$`,
"DZ": `^\d{5}$`,
"AD": `^AD\d{3}$`,
"AR": `^([A-HJ-NP-Z])?\d{4}([A-Z]{3})?$`,
"AM": `^(37)?\d{4}$`,
"AZ": `^\d{4}$`,
"BH": `^((1[0-2]|[2-9])\d{2})?$`,
"BD": `^\d{4}$`,
"BB": `^(BB\d{5})?$`,
"BY": `^\d{6}$`,
"BM": `^[A-Z]{2}[ ]?[A-Z0-9]{2}$`,
"BA": `^\d{5}$`,
"IO": `^BBND 1ZZ$`,
"BN": `^[A-Z]{2}[ ]?\d{4}$`,
"BG": `^\d{4}$`,
"KH": `^\d{5}$`,
"CV": `^\d{4}$`,
"CL": `^\d{7}$`,
"CR": `^\d{4,5}|\d{3}-\d{4}$`,
"HR": `^\d{5}$`,
"CY": `^\d{4}$`,
"CZ": `^\d{3}[ ]?\d{2}$`,
"DO": `^\d{5}$`,
"EC": `^([A-Z]\d{4}[A-Z]|(?:[A-Z]{2})?\d{6})?$`,
"EG": `^\d{5}$`,
"EE": `^\d{5}$`,
"FO": `^\d{3}$`,
"GE": `^\d{4}$`,
"GR": `^\d{3}[ ]?\d{2}$`,
"GL": `^39\d{2}$`,
"GT": `^\d{5}$`,
"HT": `^\d{4}$`,
"HN": `^(?:\d{5})?$`,
"HU": `^\d{4}$`,
"IS": `^\d{3}$`,
"IN": `^\d{6}$`,
"ID": `^\d{5}$`,
"IL": `^\d{5}$`,
"JO": `^\d{5}$`,
"KZ": `^\d{6}$`,
"KE": `^\d{5}$`,
"KW": `^\d{5}$`,
"LA": `^\d{5}$`,
"LV": `^\d{4}$`,
"LB": `^(\d{4}([ ]?\d{4})?)?$`,
"LI": `^(948[5-9])|(949[0-7])$`,
"LT": `^\d{5}$`,
"LU": `^\d{4}$`,
"MK": `^\d{4}$`,
"MY": `^\d{5}$`,
"MV": `^\d{5}$`,
"MT": `^[A-Z]{3}[ ]?\d{2,4}$`,
"MU": `^(\d{3}[A-Z]{2}\d{3})?$`,
"MX": `^\d{5}$`,
"MD": `^\d{4}$`,
"MC": `^980\d{2}$`,
"MA": `^\d{5}$`,
"NP": `^\d{5}$`,
"NZ": `^\d{4}$`,
"NI": `^((\d{4}-)?\d{3}-\d{3}(-\d{1})?)?$`,
"NG": `^(\d{6})?$`,
"OM": `^(PC )?\d{3}$`,
"PK": `^\d{5}$`,
"PY": `^\d{4}$`,
"PH": `^\d{4}$`,
"PL": `^\d{2}-\d{3}$`,
"PR": `^00[679]\d{2}([ \-]\d{4})?$`,
"RO": `^\d{6}$`,
"RU": `^\d{6}$`,
"SM": `^4789\d$`,
"SA": `^\d{5}$`,
"SN": `^\d{5}$`,
"SK": `^\d{3}[ ]?\d{2}$`,
"SI": `^\d{4}$`,
"ZA": `^\d{4}$`,
"LK": `^\d{5}$`,
"TJ": `^\d{6}$`,
"TH": `^\d{5}$`,
"TN": `^\d{4}$`,
"TR": `^\d{5}$`,
"TM": `^\d{6}$`,
"UA": `^\d{5}$`,
"UY": `^\d{5}$`,
"UZ": `^\d{6}$`,
"VA": `^00120$`,
"VE": `^\d{4}$`,
"ZM": `^\d{5}$`,
"AS": `^96799$`,
"CC": `^6799$`,
"CK": `^\d{4}$`,
"RS": `^\d{6}$`,
"ME": `^8\d{4}$`,
"CS": `^\d{5}$`,
"YU": `^\d{5}$`,
"CX": `^6798$`,
"ET": `^\d{4}$`,
"FK": `^FIQQ 1ZZ$`,
"NF": `^2899$`,
"FM": `^(9694[1-4])([ \-]\d{4})?$`,
"GF": `^9[78]3\d{2}$`,
"GN": `^\d{3}$`,
"GP": `^9[78][01]\d{2}$`,
"GS": `^SIQQ 1ZZ$`,
"GU": `^969[123]\d([ \-]\d{4})?$`,
"GW": `^\d{4}$`,
"HM": `^\d{4}$`,
"IQ": `^\d{5}$`,
"KG": `^\d{6}$`,
"LR": `^\d{4}$`,
"LS": `^\d{3}$`,
"MG": `^\d{3}$`,
"MH": `^969[67]\d([ \-]\d{4})?$`,
"MN": `^\d{6}$`,
"MP": `^9695[012]([ \-]\d{4})?$`,
"MQ": `^9[78]2\d{2}$`,
"NC": `^988\d{2}$`,
"NE": `^\d{4}$`,
"VI": `^008(([0-4]\d)|(5[01]))([ \-]\d{4})?$`,
"VN": `^[0-9]{1,6}$`,
"PF": `^987\d{2}$`,
"PG": `^\d{3}$`,
"PM": `^9[78]5\d{2}$`,
"PN": `^PCRN 1ZZ$`,
"PW": `^96940$`,
"RE": `^9[78]4\d{2}$`,
"SH": `^(ASCN|STHL) 1ZZ$`,
"SJ": `^\d{4}$`,
"SO": `^\d{5}$`,
"SZ": `^[HLMS]\d{3}$`,
"TC": `^TKCA 1ZZ$`,
"WF": `^986\d{2}$`,
"XK": `^\d{5}$`,
"YT": `^976\d{2}$`,
}
var postCodeRegexDict = map[string]*regexp.Regexp{}
func init() {
for countryCode, pattern := range postCodePatternDict {
postCodeRegexDict[countryCode] = regexp.MustCompile(pattern)
}
}
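
The pattern table above backs the postcode validators. A usage sketch, assuming the upstream tag is named postcode_iso3166_alpha2 and takes the country code as its parameter:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	v := validator.New()

	// The parameter selects the country pattern compiled in init() above.
	fmt.Println(v.Var("SW1A 1AA", "postcode_iso3166_alpha2=GB")) // <nil>
	fmt.Println(v.Var("99999", "postcode_iso3166_alpha2=GB"))    // validation error
}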

View File

@ -1,137 +0,0 @@
package validator
import "regexp"
const (
alphaRegexString = "^[a-zA-Z]+$"
alphaNumericRegexString = "^[a-zA-Z0-9]+$"
alphaUnicodeRegexString = "^[\\p{L}]+$"
alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$"
numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
numberRegexString = "^[0-9]+$"
hexadecimalRegexString = "^(0[xX])?[0-9a-fA-F]+$"
hexColorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{4}|[0-9a-fA-F]{6}|[0-9a-fA-F]{8})$"
rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
e164RegexString = "^\\+[1-9]?[0-9]{7,14}$"
base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$"
base64RawURLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2,4})$"
iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$"
iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$"
uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
uUID3RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-3[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
uLIDRegexString = "^[A-HJKMNP-TV-Z0-9]{26}$"
md4RegexString = "^[0-9a-f]{32}$"
md5RegexString = "^[0-9a-f]{32}$"
sha256RegexString = "^[0-9a-f]{64}$"
sha384RegexString = "^[0-9a-f]{96}$"
sha512RegexString = "^[0-9a-f]{128}$"
ripemd128RegexString = "^[0-9a-f]{32}$"
ripemd160RegexString = "^[0-9a-f]{40}$"
tiger128RegexString = "^[0-9a-f]{32}$"
tiger160RegexString = "^[0-9a-f]{40}$"
tiger192RegexString = "^[0-9a-f]{48}$"
aSCIIRegexString = "^[\x00-\x7F]*$"
printableASCIIRegexString = "^[\x20-\x7E]*$"
multibyteRegexString = "[^\x00-\x7F]"
dataURIRegexString = `^data:((?:\w+\/(?:([^;]|;[^;]).)+)?)`
latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$`
hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952
hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})(\.[a-zA-Z0-9]{1}[a-zA-Z0-9-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.')
btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address
btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
ethAddressRegexString = `^0x[0-9a-fA-F]{40}$`
ethAddressUpperRegexString = `^0x[0-9A-F]{40}$`
ethAddressLowerRegexString = `^0x[0-9a-f]{40}$`
uRLEncodedRegexString = `^(?:[^%]|%[0-9A-Fa-f]{2})*$`
hTMLEncodedRegexString = `&#[x]?([0-9a-fA-F]{2})|(&gt)|(&lt)|(&quot)|(&amp)+[;]?`
hTMLRegexString = `<[/]?([a-zA-Z]+).*?>`
jWTRegexString = "^[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]+\\.[A-Za-z0-9-_]*$"
splitParamsRegexString = `'[^']*'|\S+`
bicRegexString = `^[A-Za-z]{6}[A-Za-z0-9]{2}([A-Za-z0-9]{3})?$`
semverRegexString = `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$` // numbered capture groups https://semver.org/
dnsRegexStringRFC1035Label = "^[a-z]([-a-z0-9]*[a-z0-9]){0,62}$"
cveRegexString = `^CVE-(1999|2\d{3})-(0[^0]\d{2}|0\d[^0]\d{1}|0\d{2}[^0]|[1-9]{1}\d{3,})$` // CVE Format Id https://cve.mitre.org/cve/identifiers/syntaxchange.html
mongodbRegexString = "^[a-f\\d]{24}$"
cronRegexString = `(@(annually|yearly|monthly|weekly|daily|hourly|reboot))|(@every (\d+(ns|us|µs|ms|s|m|h))+)|((((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|\*) ?){5,7})`
)
var (
alphaRegex = regexp.MustCompile(alphaRegexString)
alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString)
alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString)
alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString)
numericRegex = regexp.MustCompile(numericRegexString)
numberRegex = regexp.MustCompile(numberRegexString)
hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString)
hexColorRegex = regexp.MustCompile(hexColorRegexString)
rgbRegex = regexp.MustCompile(rgbRegexString)
rgbaRegex = regexp.MustCompile(rgbaRegexString)
hslRegex = regexp.MustCompile(hslRegexString)
hslaRegex = regexp.MustCompile(hslaRegexString)
e164Regex = regexp.MustCompile(e164RegexString)
emailRegex = regexp.MustCompile(emailRegexString)
base64Regex = regexp.MustCompile(base64RegexString)
base64URLRegex = regexp.MustCompile(base64URLRegexString)
base64RawURLRegex = regexp.MustCompile(base64RawURLRegexString)
iSBN10Regex = regexp.MustCompile(iSBN10RegexString)
iSBN13Regex = regexp.MustCompile(iSBN13RegexString)
uUID3Regex = regexp.MustCompile(uUID3RegexString)
uUID4Regex = regexp.MustCompile(uUID4RegexString)
uUID5Regex = regexp.MustCompile(uUID5RegexString)
uUIDRegex = regexp.MustCompile(uUIDRegexString)
uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString)
uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString)
uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString)
uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString)
uLIDRegex = regexp.MustCompile(uLIDRegexString)
md4Regex = regexp.MustCompile(md4RegexString)
md5Regex = regexp.MustCompile(md5RegexString)
sha256Regex = regexp.MustCompile(sha256RegexString)
sha384Regex = regexp.MustCompile(sha384RegexString)
sha512Regex = regexp.MustCompile(sha512RegexString)
ripemd128Regex = regexp.MustCompile(ripemd128RegexString)
ripemd160Regex = regexp.MustCompile(ripemd160RegexString)
tiger128Regex = regexp.MustCompile(tiger128RegexString)
tiger160Regex = regexp.MustCompile(tiger160RegexString)
tiger192Regex = regexp.MustCompile(tiger192RegexString)
aSCIIRegex = regexp.MustCompile(aSCIIRegexString)
printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString)
multibyteRegex = regexp.MustCompile(multibyteRegexString)
dataURIRegex = regexp.MustCompile(dataURIRegexString)
latitudeRegex = regexp.MustCompile(latitudeRegexString)
longitudeRegex = regexp.MustCompile(longitudeRegexString)
sSNRegex = regexp.MustCompile(sSNRegexString)
hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952)
hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123)
fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123)
btcAddressRegex = regexp.MustCompile(btcAddressRegexString)
btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32)
btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32)
ethAddressRegex = regexp.MustCompile(ethAddressRegexString)
uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString)
hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString)
hTMLRegex = regexp.MustCompile(hTMLRegexString)
jWTRegex = regexp.MustCompile(jWTRegexString)
splitParamsRegex = regexp.MustCompile(splitParamsRegexString)
bicRegex = regexp.MustCompile(bicRegexString)
semverRegex = regexp.MustCompile(semverRegexString)
dnsRegexRFC1035Label = regexp.MustCompile(dnsRegexStringRFC1035Label)
cveRegex = regexp.MustCompile(cveRegexString)
mongodbRegex = regexp.MustCompile(mongodbRegexString)
cronRegex = regexp.MustCompile(cronRegexString)
)
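
These precompiled expressions back the corresponding baked-in tags. A small sketch exercising a few of them through Var, assuming the standard v10 tag names:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	v := validator.New()

	fmt.Println(v.Var("dev@example.com", "email"))                      // <nil>
	fmt.Println(v.Var("550e8400-e29b-41d4-a716-446655440000", "uuid4")) // <nil>
	fmt.Println(v.Var("123.4", "latitude"))                             // error: out of range
}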

View File

@ -1,175 +0,0 @@
package validator
import (
"context"
"reflect"
)
// StructLevelFunc accepts all values needed for struct level validation
type StructLevelFunc func(sl StructLevel)
// StructLevelFuncCtx accepts all values needed for struct level validation
// but also allows passing of contextual validation information via context.Context.
type StructLevelFuncCtx func(ctx context.Context, sl StructLevel)
// wrapStructLevelFunc wraps a normal StructLevelFunc to make it compatible with StructLevelFuncCtx
func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx {
return func(ctx context.Context, sl StructLevel) {
fn(sl)
}
}
// StructLevel contains all the information and helper functions
// to validate a struct
type StructLevel interface {
// Validator returns the main validation object, in case one wants to call validations internally.
// this is so you don't have to use anonymous functions to get access to the validate
// instance.
Validator() *Validate
// Top returns the top level struct, if any
Top() reflect.Value
// Parent returns the current fields parent struct, if any
Parent() reflect.Value
// Current returns the current struct.
Current() reflect.Value
// ExtractType gets the actual underlying type of field value.
// It will dive into pointers, customTypes and return you the
// underlying value and its kind.
ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
// ReportError reports an error just by passing the field and tag information
//
// NOTES:
//
// fieldName and altName get appended to the existing namespace that
// validator is on. e.g. pass 'FirstName' or 'Names[0]' depending
// on the nesting
//
// tag can be an existing validation tag or just something you make up
// and process on the flip side; it's up to you.
ReportError(field interface{}, fieldName, structFieldName string, tag, param string)
// ReportValidationErrors reports an error just by passing ValidationErrors
//
// NOTES:
//
// relativeNamespace and relativeActualNamespace get appended to the
// existing namespace that validator is on.
// e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending
// on the nesting. most of the time they will be blank, unless you validate
// at a level lower than the current field depth
ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors)
}
var _ StructLevel = new(validate)
// Top returns the top level struct
//
// NOTE: this can be the same as the current struct being validated
// if it is not a nested struct.
//
// this is only called when within Struct and Field Level validation and
// should not be relied upon for an accurate value otherwise.
func (v *validate) Top() reflect.Value {
return v.top
}
// Parent returns the current structs parent
//
// NOTE: this can be the same as the current struct being validated
// if it is not a nested struct.
//
// this is only called when within Struct and Field Level validation and
// should not be relied upon for an accurate value otherwise.
func (v *validate) Parent() reflect.Value {
return v.slflParent
}
// Current returns the current struct.
func (v *validate) Current() reflect.Value {
return v.slCurrent
}
// Validator returns the main validation object, in case one wants to call validations internally.
func (v *validate) Validator() *Validate {
return v.v
}
// ExtractType gets the actual underlying type of field value.
func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) {
return v.extractTypeInternal(field, false)
}
// ReportError reports an error just by passing the field and tag information
func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) {
fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false)
if len(structFieldName) == 0 {
structFieldName = fieldName
}
v.str1 = string(append(v.ns, fieldName...))
if v.v.hasTagNameFunc || fieldName != structFieldName {
v.str2 = string(append(v.actualNs, structFieldName...))
} else {
v.str2 = v.str1
}
if kind == reflect.Invalid {
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: tag,
actualTag: tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(fieldName)),
structfieldLen: uint8(len(structFieldName)),
param: param,
kind: kind,
},
)
return
}
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: tag,
actualTag: tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(fieldName)),
structfieldLen: uint8(len(structFieldName)),
value: fv.Interface(),
param: param,
kind: kind,
typ: fv.Type(),
},
)
}
// ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation.
//
// NOTE: this function prepends the current namespace to the relative ones.
func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) {
var err *fieldError
for i := 0; i < len(errs); i++ {
err = errs[i].(*fieldError)
err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...))
err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...))
v.errs = append(v.errs, err)
}
}
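
The StructLevel helpers above are what a registered struct-level validator reports into. A minimal sketch, assuming the v10 RegisterStructValidation API (the User type and the "fnameorlname" tag are illustrative):

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type User struct {
	FirstName string
	LastName  string
}

func main() {
	v := validator.New()

	// Require at least one of the two names; ReportError appends fieldErrors
	// using the namespaces built in the method bodies above.
	v.RegisterStructValidation(func(sl validator.StructLevel) {
		u := sl.Current().Interface().(User)
		if u.FirstName == "" && u.LastName == "" {
			sl.ReportError(u.FirstName, "FirstName", "FirstName", "fnameorlname", "")
			sl.ReportError(u.LastName, "LastName", "LastName", "fnameorlname", "")
		}
	}, User{})

	fmt.Println(v.Struct(User{})) // two fnameorlname errors
}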

View File

@ -1,11 +0,0 @@
package validator
import ut "github.com/go-playground/universal-translator"
// TranslationFunc is the function type used to register or override
// custom translations
type TranslationFunc func(ut ut.Translator, fe FieldError) string
// RegisterTranslationsFunc allows for registering of translations
// for a 'ut.Translator' for use within the 'TranslationFunc'
type RegisterTranslationsFunc func(ut ut.Translator) error
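
These two function types are the hooks consumed by RegisterTranslation further down. A minimal sketch wiring an English translation for the "required" tag, assuming the companion go-playground/locales and universal-translator packages:

package main

import (
	"errors"
	"fmt"

	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
	"github.com/go-playground/validator/v10"
)

func main() {
	enLocale := en.New()
	uni := ut.New(enLocale, enLocale)
	trans, _ := uni.GetTranslator("en")

	v := validator.New()
	_ = v.RegisterTranslation("required", trans,
		// RegisterTranslationsFunc: add the message template to the translator.
		func(ut ut.Translator) error {
			return ut.Add("required", "{0} is a required field", true)
		},
		// TranslationFunc: render the template for a concrete FieldError.
		func(ut ut.Translator, fe validator.FieldError) string {
			t, _ := ut.T("required", fe.Field())
			return t
		})

	type Login struct {
		Username string `validate:"required"`
	}
	err := v.Struct(Login{})
	var verrs validator.ValidationErrors
	if errors.As(err, &verrs) {
		fmt.Println(verrs[0].Translate(trans)) // "Username is a required field"
	}
}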

View File

@ -1,288 +0,0 @@
package validator
import (
"reflect"
"strconv"
"strings"
"time"
)
// extractTypeInternal gets the actual underlying type of field value.
// It will dive into pointers, customTypes and return you the
// underlying value and its kind.
func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) {
BEGIN:
switch current.Kind() {
case reflect.Ptr:
nullable = true
if current.IsNil() {
return current, reflect.Ptr, nullable
}
current = current.Elem()
goto BEGIN
case reflect.Interface:
nullable = true
if current.IsNil() {
return current, reflect.Interface, nullable
}
current = current.Elem()
goto BEGIN
case reflect.Invalid:
return current, reflect.Invalid, nullable
default:
if v.v.hasCustomFuncs {
if fn, ok := v.v.customFuncs[current.Type()]; ok {
current = reflect.ValueOf(fn(current))
goto BEGIN
}
}
return current, current.Kind(), nullable
}
}
// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and
// returns the field, field kind and whether it was successful in retrieving the field at all.
//
// NOTE: when not successful, ok will be false; this can happen when a nested struct is nil and so the field
// could not be retrieved because it didn't exist.
func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, nullable bool, found bool) {
BEGIN:
current, kind, nullable = v.ExtractType(val)
if kind == reflect.Invalid {
return
}
if namespace == "" {
found = true
return
}
switch kind {
case reflect.Ptr, reflect.Interface:
return
case reflect.Struct:
typ := current.Type()
fld := namespace
var ns string
if !typ.ConvertibleTo(timeType) {
idx := strings.Index(namespace, namespaceSeparator)
if idx != -1 {
fld = namespace[:idx]
ns = namespace[idx+1:]
} else {
ns = ""
}
bracketIdx := strings.Index(fld, leftBracket)
if bracketIdx != -1 {
fld = fld[:bracketIdx]
ns = namespace[bracketIdx:]
}
val = current.FieldByName(fld)
namespace = ns
goto BEGIN
}
case reflect.Array, reflect.Slice:
idx := strings.Index(namespace, leftBracket)
idx2 := strings.Index(namespace, rightBracket)
arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2])
if arrIdx >= current.Len() {
return
}
startIdx := idx2 + 1
if startIdx < len(namespace) {
if namespace[startIdx:startIdx+1] == namespaceSeparator {
startIdx++
}
}
val = current.Index(arrIdx)
namespace = namespace[startIdx:]
goto BEGIN
case reflect.Map:
idx := strings.Index(namespace, leftBracket) + 1
idx2 := strings.Index(namespace, rightBracket)
endIdx := idx2
if endIdx+1 < len(namespace) {
if namespace[endIdx+1:endIdx+2] == namespaceSeparator {
endIdx++
}
}
key := namespace[idx:idx2]
switch current.Type().Key().Kind() {
case reflect.Int:
i, _ := strconv.Atoi(key)
val = current.MapIndex(reflect.ValueOf(i))
namespace = namespace[endIdx+1:]
case reflect.Int8:
i, _ := strconv.ParseInt(key, 10, 8)
val = current.MapIndex(reflect.ValueOf(int8(i)))
namespace = namespace[endIdx+1:]
case reflect.Int16:
i, _ := strconv.ParseInt(key, 10, 16)
val = current.MapIndex(reflect.ValueOf(int16(i)))
namespace = namespace[endIdx+1:]
case reflect.Int32:
i, _ := strconv.ParseInt(key, 10, 32)
val = current.MapIndex(reflect.ValueOf(int32(i)))
namespace = namespace[endIdx+1:]
case reflect.Int64:
i, _ := strconv.ParseInt(key, 10, 64)
val = current.MapIndex(reflect.ValueOf(i))
namespace = namespace[endIdx+1:]
case reflect.Uint:
i, _ := strconv.ParseUint(key, 10, 0)
val = current.MapIndex(reflect.ValueOf(uint(i)))
namespace = namespace[endIdx+1:]
case reflect.Uint8:
i, _ := strconv.ParseUint(key, 10, 8)
val = current.MapIndex(reflect.ValueOf(uint8(i)))
namespace = namespace[endIdx+1:]
case reflect.Uint16:
i, _ := strconv.ParseUint(key, 10, 16)
val = current.MapIndex(reflect.ValueOf(uint16(i)))
namespace = namespace[endIdx+1:]
case reflect.Uint32:
i, _ := strconv.ParseUint(key, 10, 32)
val = current.MapIndex(reflect.ValueOf(uint32(i)))
namespace = namespace[endIdx+1:]
case reflect.Uint64:
i, _ := strconv.ParseUint(key, 10, 64)
val = current.MapIndex(reflect.ValueOf(i))
namespace = namespace[endIdx+1:]
case reflect.Float32:
f, _ := strconv.ParseFloat(key, 32)
val = current.MapIndex(reflect.ValueOf(float32(f)))
namespace = namespace[endIdx+1:]
case reflect.Float64:
f, _ := strconv.ParseFloat(key, 64)
val = current.MapIndex(reflect.ValueOf(f))
namespace = namespace[endIdx+1:]
case reflect.Bool:
b, _ := strconv.ParseBool(key)
val = current.MapIndex(reflect.ValueOf(b))
namespace = namespace[endIdx+1:]
// reflect.Type = string
default:
val = current.MapIndex(reflect.ValueOf(key))
namespace = namespace[endIdx+1:]
}
goto BEGIN
}
// if we got here there was more namespace left, but we cannot go any deeper
panic("Invalid field namespace")
}
// asInt returns the parameter as a int64
// or panics if it can't convert
func asInt(param string) int64 {
i, err := strconv.ParseInt(param, 0, 64)
panicIf(err)
return i
}
// asIntFromTimeDuration parses param as time.Duration and returns it as int64
// or panics on error.
func asIntFromTimeDuration(param string) int64 {
d, err := time.ParseDuration(param)
if err != nil {
// attempt parsing as an integer assuming nanosecond precision
return asInt(param)
}
return int64(d)
}
// asIntFromType calls the proper function to parse param as int64,
// given a field's Type t.
func asIntFromType(t reflect.Type, param string) int64 {
switch t {
case timeDurationType:
return asIntFromTimeDuration(param)
default:
return asInt(param)
}
}
// asUint returns the parameter as a uint64
// or panics if it can't convert
func asUint(param string) uint64 {
i, err := strconv.ParseUint(param, 0, 64)
panicIf(err)
return i
}
// asFloat returns the parameter as a float64
// or panics if it can't convert
func asFloat(param string) float64 {
i, err := strconv.ParseFloat(param, 64)
panicIf(err)
return i
}
// asBool returns the parameter as a bool
// or panics if it can't convert
func asBool(param string) bool {
i, err := strconv.ParseBool(param)
panicIf(err)
return i
}
func panicIf(err error) {
if err != nil {
panic(err.Error())
}
}
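
asIntFromTimeDuration above is what lets numeric comparison params be written as Go duration strings when the field is a time.Duration. A small sketch, assuming the upstream gt/lte tags keep that behaviour:

package main

import (
	"fmt"
	"time"

	"github.com/go-playground/validator/v10"
)

type Job struct {
	// The params parse via time.ParseDuration because the field type is time.Duration.
	Timeout time.Duration `validate:"gt=0,lte=1h30m"`
}

func main() {
	v := validator.New()
	fmt.Println(v.Struct(Job{Timeout: 45 * time.Minute})) // <nil>
	fmt.Println(v.Struct(Job{Timeout: 2 * time.Hour}))    // fails lte
}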

View File

@ -1,485 +0,0 @@
package validator
import (
"context"
"fmt"
"reflect"
"strconv"
)
// per validate construct
type validate struct {
v *Validate
top reflect.Value
ns []byte
actualNs []byte
errs ValidationErrors
includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise
ffn FilterFunc
slflParent reflect.Value // StructLevel & FieldLevel
slCurrent reflect.Value // StructLevel & FieldLevel
flField reflect.Value // StructLevel & FieldLevel
cf *cField // StructLevel & FieldLevel
ct *cTag // StructLevel & FieldLevel
misc []byte // misc reusable
str1 string // misc reusable
str2 string // misc reusable
fldIsPointer bool // StructLevel & FieldLevel
isPartial bool
hasExcludes bool
}
// parent and current will be the same the first run of validateStruct
func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) {
cs, ok := v.v.structCache.Get(typ)
if !ok {
cs = v.v.extractStructCache(current, typ.Name())
}
if len(ns) == 0 && len(cs.name) != 0 {
ns = append(ns, cs.name...)
ns = append(ns, '.')
structNs = append(structNs, cs.name...)
structNs = append(structNs, '.')
}
// ct is nil on top level struct, and structs as fields that have no tag info
// so if nil or if not nil and the structonly tag isn't present
if ct == nil || ct.typeof != typeStructOnly {
var f *cField
for i := 0; i < len(cs.fields); i++ {
f = cs.fields[i]
if v.isPartial {
if v.ffn != nil {
// used with StructFiltered
if v.ffn(append(structNs, f.name...)) {
continue
}
} else {
// used with StructPartial & StructExcept
_, ok = v.includeExclude[string(append(structNs, f.name...))]
if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) {
continue
}
}
}
v.traverseField(ctx, current, current.Field(f.idx), ns, structNs, f, f.cTags)
}
}
// check if any struct level validations, after all field validations already checked.
// first iteration will have no info about nostructlevel tag, and is checked prior to
// calling the next iteration of validateStruct called from traverseField.
if cs.fn != nil {
v.slflParent = parent
v.slCurrent = current
v.ns = ns
v.actualNs = structNs
cs.fn(ctx, v)
}
}
// traverseField validates any field, be it a struct or single field, ensures its validity and passes it along to be validated via its tag options
func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) {
var typ reflect.Type
var kind reflect.Kind
current, kind, v.fldIsPointer = v.extractTypeInternal(current, false)
switch kind {
case reflect.Ptr, reflect.Interface, reflect.Invalid:
if ct == nil {
return
}
if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault {
return
}
if ct.hasTag {
if kind == reflect.Invalid {
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
v.str2 = string(append(structNs, cf.name...))
} else {
v.str2 = v.str1
}
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: ct.aliasTag,
actualTag: ct.tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
param: ct.param,
kind: kind,
},
)
return
}
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
v.str2 = string(append(structNs, cf.name...))
} else {
v.str2 = v.str1
}
if !ct.runValidationWhenNil {
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: ct.aliasTag,
actualTag: ct.tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
value: current.Interface(),
param: ct.param,
kind: kind,
typ: current.Type(),
},
)
return
}
}
case reflect.Struct:
typ = current.Type()
if !typ.ConvertibleTo(timeType) {
if ct != nil {
if ct.typeof == typeStructOnly {
goto CONTINUE
} else if ct.typeof == typeIsDefault {
// set Field Level fields
v.slflParent = parent
v.flField = current
v.cf = cf
v.ct = ct
if !ct.fn(ctx, v) {
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
v.str2 = string(append(structNs, cf.name...))
} else {
v.str2 = v.str1
}
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: ct.aliasTag,
actualTag: ct.tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
value: current.Interface(),
param: ct.param,
kind: kind,
typ: typ,
},
)
return
}
}
ct = ct.next
}
if ct != nil && ct.typeof == typeNoStructLevel {
return
}
CONTINUE:
// if len == 0 then validating using 'Var' or 'VarWithValue'
// Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...
// VarWithField - this allows for validating against each field within the struct against a specific value
// pretty handy in certain situations
if len(cf.name) > 0 {
ns = append(append(ns, cf.altName...), '.')
structNs = append(append(structNs, cf.name...), '.')
}
v.validateStruct(ctx, parent, current, typ, ns, structNs, ct)
return
}
}
if ct == nil || !ct.hasTag {
return
}
typ = current.Type()
OUTER:
for {
if ct == nil {
return
}
switch ct.typeof {
case typeOmitEmpty:
// set Field Level fields
v.slflParent = parent
v.flField = current
v.cf = cf
v.ct = ct
if !hasValue(v) {
return
}
ct = ct.next
continue
case typeEndKeys:
return
case typeDive:
ct = ct.next
// traverse slice or map here
// or panic ;)
switch kind {
case reflect.Slice, reflect.Array:
var i64 int64
reusableCF := &cField{}
for i := 0; i < current.Len(); i++ {
i64 = int64(i)
v.misc = append(v.misc[0:0], cf.name...)
v.misc = append(v.misc, '[')
v.misc = strconv.AppendInt(v.misc, i64, 10)
v.misc = append(v.misc, ']')
reusableCF.name = string(v.misc)
if cf.namesEqual {
reusableCF.altName = reusableCF.name
} else {
v.misc = append(v.misc[0:0], cf.altName...)
v.misc = append(v.misc, '[')
v.misc = strconv.AppendInt(v.misc, i64, 10)
v.misc = append(v.misc, ']')
reusableCF.altName = string(v.misc)
}
v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct)
}
case reflect.Map:
var pv string
reusableCF := &cField{}
for _, key := range current.MapKeys() {
pv = fmt.Sprintf("%v", key.Interface())
v.misc = append(v.misc[0:0], cf.name...)
v.misc = append(v.misc, '[')
v.misc = append(v.misc, pv...)
v.misc = append(v.misc, ']')
reusableCF.name = string(v.misc)
if cf.namesEqual {
reusableCF.altName = reusableCF.name
} else {
v.misc = append(v.misc[0:0], cf.altName...)
v.misc = append(v.misc, '[')
v.misc = append(v.misc, pv...)
v.misc = append(v.misc, ']')
reusableCF.altName = string(v.misc)
}
if ct != nil && ct.typeof == typeKeys && ct.keys != nil {
v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys)
// can be nil when just keys being validated
if ct.next != nil {
v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next)
}
} else {
v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct)
}
}
default:
// throw error, if not a slice or map then should not have gotten here
// bad dive tag
panic("dive error! can't dive on a non slice or map")
}
return
case typeOr:
v.misc = v.misc[0:0]
for {
// set Field Level fields
v.slflParent = parent
v.flField = current
v.cf = cf
v.ct = ct
if ct.fn(ctx, v) {
if ct.isBlockEnd {
ct = ct.next
continue OUTER
}
// drain rest of the 'or' values, then continue or leave
for {
ct = ct.next
if ct == nil {
return
}
if ct.typeof != typeOr {
continue OUTER
}
if ct.isBlockEnd {
ct = ct.next
continue OUTER
}
}
}
v.misc = append(v.misc, '|')
v.misc = append(v.misc, ct.tag...)
if ct.hasParam {
v.misc = append(v.misc, '=')
v.misc = append(v.misc, ct.param...)
}
if ct.isBlockEnd || ct.next == nil {
// if we get here, no valid 'or' value and no more tags
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
v.str2 = string(append(structNs, cf.name...))
} else {
v.str2 = v.str1
}
if ct.hasAlias {
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: ct.aliasTag,
actualTag: ct.actualAliasTag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
value: current.Interface(),
param: ct.param,
kind: kind,
typ: typ,
},
)
} else {
tVal := string(v.misc)[1:]
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: tVal,
actualTag: tVal,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
value: current.Interface(),
param: ct.param,
kind: kind,
typ: typ,
},
)
}
return
}
ct = ct.next
}
default:
// set Field Level fields
v.slflParent = parent
v.flField = current
v.cf = cf
v.ct = ct
if !ct.fn(ctx, v) {
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {
v.str2 = string(append(structNs, cf.name...))
} else {
v.str2 = v.str1
}
v.errs = append(v.errs,
&fieldError{
v: v.v,
tag: ct.aliasTag,
actualTag: ct.tag,
ns: v.str1,
structNs: v.str2,
fieldLen: uint8(len(cf.altName)),
structfieldLen: uint8(len(cf.name)),
value: current.Interface(),
param: ct.param,
kind: kind,
typ: typ,
},
)
return
}
ct = ct.next
}
}
}
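
traverseField above implements the dive / keys / endkeys traversal for slices and maps. A minimal sketch of the tag forms it handles, assuming the standard v10 tags (the Config type is illustrative):

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Config struct {
	// dive: validate each element; keys/endkeys: validate each map key as well.
	Hosts  []string          `validate:"required,dive,hostname_rfc1123"`
	Labels map[string]string `validate:"dive,keys,min=2,endkeys,required"`
}

func main() {
	v := validator.New()
	cfg := Config{
		Hosts:  []string{"db-1.local", "not a host!"},
		Labels: map[string]string{"a": ""},
	}
	// Expect errors for Hosts[1], the short key Labels[a] and its empty value.
	fmt.Println(v.Struct(cfg))
}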

View File

@ -1,702 +0,0 @@
package validator
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"sync"
"time"
ut "github.com/go-playground/universal-translator"
)
const (
defaultTagName = "validate"
utf8HexComma = "0x2C"
utf8Pipe = "0x7C"
tagSeparator = ","
orSeparator = "|"
tagKeySeparator = "="
structOnlyTag = "structonly"
noStructLevelTag = "nostructlevel"
omitempty = "omitempty"
isdefault = "isdefault"
requiredWithoutAllTag = "required_without_all"
requiredWithoutTag = "required_without"
requiredWithTag = "required_with"
requiredWithAllTag = "required_with_all"
requiredIfTag = "required_if"
requiredUnlessTag = "required_unless"
skipUnlessTag = "skip_unless"
excludedWithoutAllTag = "excluded_without_all"
excludedWithoutTag = "excluded_without"
excludedWithTag = "excluded_with"
excludedWithAllTag = "excluded_with_all"
excludedIfTag = "excluded_if"
excludedUnlessTag = "excluded_unless"
skipValidationTag = "-"
diveTag = "dive"
keysTag = "keys"
endKeysTag = "endkeys"
requiredTag = "required"
namespaceSeparator = "."
leftBracket = "["
rightBracket = "]"
restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}"
restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
)
var (
timeDurationType = reflect.TypeOf(time.Duration(0))
timeType = reflect.TypeOf(time.Time{})
defaultCField = &cField{namesEqual: true}
)
// FilterFunc is the type used to filter fields using
// StructFiltered(...) function.
// returning true results in the field being filtered/skipped from
// validation
type FilterFunc func(ns []byte) bool
// CustomTypeFunc allows for overriding or adding custom field type handler functions
// field = field value of the type to return a value to be validated
// example Valuer from sql drive see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29
type CustomTypeFunc func(field reflect.Value) interface{}
// TagNameFunc allows for adding of a custom tag name parser
type TagNameFunc func(field reflect.StructField) string
type internalValidationFuncWrapper struct {
fn FuncCtx
runValidatinOnNil bool
}
// Validate contains the validator settings and cache
type Validate struct {
tagName string
pool *sync.Pool
hasCustomFuncs bool
hasTagNameFunc bool
tagNameFunc TagNameFunc
structLevelFuncs map[reflect.Type]StructLevelFuncCtx
customFuncs map[reflect.Type]CustomTypeFunc
aliases map[string]string
validations map[string]internalValidationFuncWrapper
transTagFunc map[ut.Translator]map[string]TranslationFunc // map[<locale>]map[<tag>]TranslationFunc
rules map[reflect.Type]map[string]string
tagCache *tagCache
structCache *structCache
}
// New returns a new instance of 'validate' with sane defaults.
// Validate is designed to be thread-safe and used as a singleton instance.
// It caches information about your struct and validations,
// in essence only parsing your validation tags once per struct type.
// Using multiple instances negates the benefit of caching.
func New() *Validate {
tc := new(tagCache)
tc.m.Store(make(map[string]*cTag))
sc := new(structCache)
sc.m.Store(make(map[reflect.Type]*cStruct))
v := &Validate{
tagName: defaultTagName,
aliases: make(map[string]string, len(bakedInAliases)),
validations: make(map[string]internalValidationFuncWrapper, len(bakedInValidators)),
tagCache: tc,
structCache: sc,
}
// must copy alias validators for separate validations to be used in each validator instance
for k, val := range bakedInAliases {
v.RegisterAlias(k, val)
}
// must copy validators for separate validations to be used in each instance
for k, val := range bakedInValidators {
switch k {
// these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour
case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag,
excludedIfTag, excludedUnlessTag, excludedWithTag, excludedWithAllTag, excludedWithoutTag, excludedWithoutAllTag,
skipUnlessTag:
_ = v.registerValidation(k, wrapFunc(val), true, true)
default:
// no need to error check here, baked in will always be valid
_ = v.registerValidation(k, wrapFunc(val), true, false)
}
}
v.pool = &sync.Pool{
New: func() interface{} {
return &validate{
v: v,
ns: make([]byte, 0, 64),
actualNs: make([]byte, 0, 64),
misc: make([]byte, 32),
}
},
}
return v
}
// SetTagName allows for changing of the default tag name of 'validate'
func (v *Validate) SetTagName(name string) {
v.tagName = name
}
// ValidateMapCtx validates a map using a map of validation rules and allows passing of contextual
// validation information via context.Context.
func (v Validate) ValidateMapCtx(ctx context.Context, data map[string]interface{}, rules map[string]interface{}) map[string]interface{} {
errs := make(map[string]interface{})
for field, rule := range rules {
if ruleObj, ok := rule.(map[string]interface{}); ok {
if dataObj, ok := data[field].(map[string]interface{}); ok {
err := v.ValidateMapCtx(ctx, dataObj, ruleObj)
if len(err) > 0 {
errs[field] = err
}
} else if dataObjs, ok := data[field].([]map[string]interface{}); ok {
for _, obj := range dataObjs {
err := v.ValidateMapCtx(ctx, obj, ruleObj)
if len(err) > 0 {
errs[field] = err
}
}
} else {
errs[field] = errors.New("The field: '" + field + "' is not a map to dive")
}
} else if ruleStr, ok := rule.(string); ok {
err := v.VarCtx(ctx, data[field], ruleStr)
if err != nil {
errs[field] = err
}
}
}
return errs
}
// ValidateMap validates map data from a map of tags
func (v *Validate) ValidateMap(data map[string]interface{}, rules map[string]interface{}) map[string]interface{} {
return v.ValidateMapCtx(context.Background(), data, rules)
}
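
ValidateMapCtx above recurses into nested rule maps and pushes plain rule strings through VarCtx. A short usage sketch with hypothetical data and rules:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	v := validator.New()

	data := map[string]interface{}{
		"name":  "",
		"email": "not-an-email",
		"address": map[string]interface{}{
			"city": "x",
		},
	}
	rules := map[string]interface{}{
		"name":  "required,min=2",
		"email": "required,email",
		// A nested rule map dives into the nested data map.
		"address": map[string]interface{}{
			"city": "min=2",
		},
	}

	errs := v.ValidateMap(data, rules)
	fmt.Println(len(errs)) // 3: name, email and address
}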
// RegisterTagNameFunc registers a function to get alternate names for StructFields.
//
// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names:
//
// validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
// // skip if tag key says it should be ignored
// if name == "-" {
// return ""
// }
// return name
// })
func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) {
v.tagNameFunc = fn
v.hasTagNameFunc = true
}
// RegisterValidation adds a validation with the given tag
//
// NOTES:
// - if the key already exists, the previous validation function will be replaced.
// - this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterValidation(tag string, fn Func, callValidationEvenIfNull ...bool) error {
return v.RegisterValidationCtx(tag, wrapFunc(fn), callValidationEvenIfNull...)
}
// RegisterValidationCtx does the same as RegisterValidation but accepts a FuncCtx validation function,
// allowing context.Context validation support.
func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationEvenIfNull ...bool) error {
var nilCheckable bool
if len(callValidationEvenIfNull) > 0 {
nilCheckable = callValidationEvenIfNull[0]
}
return v.registerValidation(tag, fn, false, nilCheckable)
}
func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error {
if len(tag) == 0 {
return errors.New("function Key cannot be empty")
}
if fn == nil {
return errors.New("function cannot be empty")
}
_, ok := restrictedTags[tag]
if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) {
panic(fmt.Sprintf(restrictedTagErr, tag))
}
v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidatinOnNil: nilCheckable}
return nil
}
// RegisterAlias registers a mapping of a single validation tag that
// defines a common or complex set of validation(s) to simplify adding validation
// to structs.
//
// NOTE: this function is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterAlias(alias, tags string) {
_, ok := restrictedTags[alias]
if ok || strings.ContainsAny(alias, restrictedTagChars) {
panic(fmt.Sprintf(restrictedAliasErr, alias))
}
v.aliases[alias] = tags
}
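
RegisterAlias simply maps one tag name to a longer rule expression (the baked-in "iscolor" alias is defined this way). A sketch with a made-up "port" alias:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	v := validator.New()
	// Hypothetical alias: "port" expands to a full rule expression.
	v.RegisterAlias("port", "numeric,gte=1,lte=65535")

	fmt.Println(v.Var(8080, "port"))  // <nil>
	fmt.Println(v.Var(70000, "port")) // fails lte
}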
// RegisterStructValidation registers a StructLevelFunc against a number of types.
//
// NOTE:
// - this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) {
v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...)
}
// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing
// of contextual validation information via context.Context.
//
// NOTE:
// - this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) {
if v.structLevelFuncs == nil {
v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx)
}
for _, t := range types {
tv := reflect.ValueOf(t)
if tv.Kind() == reflect.Ptr {
t = reflect.Indirect(tv).Interface()
}
v.structLevelFuncs[reflect.TypeOf(t)] = fn
}
}
// RegisterStructValidationMapRules registers validate map rules.
// Be aware that map validation rules supersede those defined on a/the struct if present.
//
// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterStructValidationMapRules(rules map[string]string, types ...interface{}) {
if v.rules == nil {
v.rules = make(map[reflect.Type]map[string]string)
}
deepCopyRules := make(map[string]string)
for i, rule := range rules {
deepCopyRules[i] = rule
}
for _, t := range types {
typ := reflect.TypeOf(t)
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
if typ.Kind() != reflect.Struct {
continue
}
v.rules[typ] = deepCopyRules
}
}
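
RegisterStructValidationMapRules keeps rules outside struct tags; as noted above, map rules supersede tag rules. A short sketch with an illustrative Account type:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Account struct {
	Owner string
	Email string
}

func main() {
	v := validator.New()
	v.RegisterStructValidationMapRules(map[string]string{
		"Owner": "required",
		"Email": "required,email",
	}, Account{})

	fmt.Println(v.Struct(Account{Owner: "ci", Email: "bad"})) // Email fails "email"
}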
// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
//
// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) {
if v.customFuncs == nil {
v.customFuncs = make(map[reflect.Type]CustomTypeFunc)
}
for _, t := range types {
v.customFuncs[reflect.TypeOf(t)] = fn
}
v.hasCustomFuncs = true
}
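
RegisterCustomTypeFunc feeds the customFuncs map consulted by extractTypeInternal above. The classic use is unwrapping database/sql Valuer types; a sketch assuming that pattern:

package main

import (
	"database/sql"
	"database/sql/driver"
	"fmt"
	"reflect"

	"github.com/go-playground/validator/v10"
)

type Profile struct {
	Nickname sql.NullString `validate:"required,min=3"`
}

// validateValuer unwraps any driver.Valuer so the inner value is validated.
func validateValuer(field reflect.Value) interface{} {
	if valuer, ok := field.Interface().(driver.Valuer); ok {
		if val, err := valuer.Value(); err == nil {
			return val
		}
	}
	return nil
}

func main() {
	v := validator.New()
	v.RegisterCustomTypeFunc(validateValuer, sql.NullString{}, sql.NullInt64{})

	p := Profile{Nickname: sql.NullString{String: "ok", Valid: true}}
	fmt.Println(v.Struct(p)) // fails min=3 on the unwrapped string "ok"
}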
// RegisterTranslation registers translations against the provided tag.
func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) {
if v.transTagFunc == nil {
v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc)
}
if err = registerFn(trans); err != nil {
return
}
m, ok := v.transTagFunc[trans]
if !ok {
m = make(map[string]TranslationFunc)
v.transTagFunc[trans] = m
}
m[tag] = translationFn
return
}
// Struct validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified.
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) Struct(s interface{}) error {
return v.StructCtx(context.Background(), s)
}
// StructCtx validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified
// and also allows passing of context.Context for contextual validation information.
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) {
val := reflect.ValueOf(s)
top := val
if val.Kind() == reflect.Ptr && !val.IsNil() {
val = val.Elem()
}
if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
// good to validate
vd := v.pool.Get().(*validate)
vd.top = top
vd.isPartial = false
// vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
// StructFiltered validates a struct's exposed fields that pass the FilterFunc check, and automatically validates
// nested structs, unless otherwise specified.
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error {
return v.StructFilteredCtx(context.Background(), s, fn)
}
// StructFilteredCtx validates a struct's exposed fields that pass the FilterFunc check, and automatically validates
// nested structs, unless otherwise specified and also allows passing of contextual validation information via
// context.Context
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) {
val := reflect.ValueOf(s)
top := val
if val.Kind() == reflect.Ptr && !val.IsNil() {
val = val.Elem()
}
if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
// good to validate
vd := v.pool.Get().(*validate)
vd.top = top
vd.isPartial = true
vd.ffn = fn
// vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
// StructPartial validates the fields passed in only, ignoring all others.
// Fields may be provided in a namespaced fashion relative to the struct provided
// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructPartial(s interface{}, fields ...string) error {
return v.StructPartialCtx(context.Background(), s, fields...)
}
// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual
// validation information via context.Context
// Fields may be provided in a namespaced fashion relative to the struct provided
// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
val := reflect.ValueOf(s)
top := val
if val.Kind() == reflect.Ptr && !val.IsNil() {
val = val.Elem()
}
if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
// good to validate
vd := v.pool.Get().(*validate)
vd.top = top
vd.isPartial = true
vd.ffn = nil
vd.hasExcludes = false
vd.includeExclude = make(map[string]struct{})
typ := val.Type()
name := typ.Name()
for _, k := range fields {
flds := strings.Split(k, namespaceSeparator)
if len(flds) > 0 {
vd.misc = append(vd.misc[0:0], name...)
// Don't append empty name for unnamed structs
if len(vd.misc) != 0 {
vd.misc = append(vd.misc, '.')
}
for _, s := range flds {
idx := strings.Index(s, leftBracket)
if idx != -1 {
for idx != -1 {
vd.misc = append(vd.misc, s[:idx]...)
vd.includeExclude[string(vd.misc)] = struct{}{}
idx2 := strings.Index(s, rightBracket)
idx2++
vd.misc = append(vd.misc, s[idx:idx2]...)
vd.includeExclude[string(vd.misc)] = struct{}{}
s = s[idx2:]
idx = strings.Index(s, leftBracket)
}
} else {
vd.misc = append(vd.misc, s...)
vd.includeExclude[string(vd.misc)] = struct{}{}
}
vd.misc = append(vd.misc, '.')
}
}
}
vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
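
StructPartial and StructExcept filter fields through the includeExclude set built here. A short sketch of both calls, with an illustrative type and namespaced field names:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Address struct {
	Street string `validate:"required"`
	City   string `validate:"required"`
}

type Customer struct {
	Name    string  `validate:"required"`
	Address Address `validate:"required"`
}

func main() {
	v := validator.New()
	c := Customer{} // everything empty

	// Only Name is checked; the empty Address is ignored.
	fmt.Println(v.StructPartial(c, "Name"))

	// Everything except Address.Street is checked.
	fmt.Println(v.StructExcept(c, "Address.Street"))
}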
// StructExcept validates all fields except the ones passed in.
// Fields may be provided in a namespaced fashion relative to the struct provided
// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructExcept(s interface{}, fields ...string) error {
return v.StructExceptCtx(context.Background(), s, fields...)
}
// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual
// validation information via context.Context
// Fields may be provided in a namespaced fashion relative to the struct provided
// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
val := reflect.ValueOf(s)
top := val
if val.Kind() == reflect.Ptr && !val.IsNil() {
val = val.Elem()
}
if val.Kind() != reflect.Struct || val.Type().ConvertibleTo(timeType) {
return &InvalidValidationError{Type: reflect.TypeOf(s)}
}
// good to validate
vd := v.pool.Get().(*validate)
vd.top = top
vd.isPartial = true
vd.ffn = nil
vd.hasExcludes = true
vd.includeExclude = make(map[string]struct{})
typ := val.Type()
name := typ.Name()
for _, key := range fields {
vd.misc = vd.misc[0:0]
if len(name) > 0 {
vd.misc = append(vd.misc, name...)
vd.misc = append(vd.misc, '.')
}
vd.misc = append(vd.misc, key...)
vd.includeExclude[string(vd.misc)] = struct{}{}
}
vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
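As an aside, a minimal usage sketch for StructPartial and StructExcept; the User/Address types and field names below are illustrative, not taken from this codebase:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Address struct {
	City string `validate:"required"`
}

type User struct {
	Name    string `validate:"required"`
	Email   string `validate:"required,email"`
	Address Address
}

func main() {
	validate := validator.New()
	u := User{Name: "gopher"} // Email and Address.City left empty on purpose

	// Validate only Name and the nested Address.City field; Email is ignored,
	// but the empty City still fails its "required" rule.
	if err := validate.StructPartial(u, "Name", "Address.City"); err != nil {
		fmt.Println("partial:", err)
	}

	// Validate every field except Email; the empty City is reported again.
	if err := validate.StructExcept(u, "Email"); err != nil {
		fmt.Println("except:", err)
	}
}
```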
// Var validates a single variable using tag style validation.
// eg.
// var i int
// validate.Var(i, "gt=1,lt=10")
//
// WARNING: a struct can be passed for validation, e.g. time.Time is a struct, or
// you may have a custom type with a registered custom type handler, so structs must
// be allowed; however, unforeseen validations will occur if you try to validate a
// struct that is meant to be passed to 'validate.Struct'
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
// It validates Array, Slice and map fields, which may contain more than one error.
func (v *Validate) Var(field interface{}, tag string) error {
return v.VarCtx(context.Background(), field, tag)
}
// VarCtx validates a single variable using tag style validation and allows passing of contextual
// validation information via context.Context.
// eg.
// var i int
// validate.VarCtx(ctx, i, "gt=1,lt=10")
//
// WARNING: a struct can be passed for validation, e.g. time.Time is a struct, or
// you may have a custom type with a registered custom type handler, so structs must
// be allowed; however, unforeseen validations will occur if you try to validate a
// struct that is meant to be passed to 'validate.Struct'
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
// It validates Array, Slice and map fields, which may contain more than one error.
func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) {
if len(tag) == 0 || tag == skipValidationTag {
return nil
}
ctag := v.fetchCacheTag(tag)
val := reflect.ValueOf(field)
vd := v.pool.Get().(*validate)
vd.top = val
vd.isPartial = false
vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
// VarWithValue validates a single variable, against another variable/field's value using tag style validation
// eg.
// s1 := "abcd"
// s2 := "abcd"
// validate.VarWithValue(s1, s2, "eqcsfield") // returns nil (no error) because the values match
//
// WARNING: a struct can be passed for validation, e.g. time.Time is a struct, or
// you may have a custom type with a registered custom type handler, so structs must
// be allowed; however, unforeseen validations will occur if you try to validate a
// struct that is meant to be passed to 'validate.Struct'
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
// It validates Array, Slice and map fields, which may contain more than one error.
func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error {
return v.VarWithValueCtx(context.Background(), field, other, tag)
}
// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and
// allows passing of contextual validation information via context.Context.
// eg.
// s1 := "abcd"
// s2 := "abcd"
// validate.VarWithValue(s1, s2, "eqcsfield") // returns nil (no error) because the values match
//
// WARNING: a struct can be passed for validation, e.g. time.Time is a struct, or
// you may have a custom type with a registered custom type handler, so structs must
// be allowed; however, unforeseen validations will occur if you try to validate a
// struct that is meant to be passed to 'validate.Struct'
//
// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
// It validates Array, Slice and map fields, which may contain more than one error.
func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) {
if len(tag) == 0 || tag == skipValidationTag {
return nil
}
ctag := v.fetchCacheTag(tag)
otherVal := reflect.ValueOf(other)
vd := v.pool.Get().(*validate)
vd.top = otherVal
vd.isPartial = false
vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
if len(vd.errs) > 0 {
err = vd.errs
vd.errs = nil
}
v.pool.Put(vd)
return
}
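And a corresponding sketch for Var and VarWithValue; the values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	validate := validator.New()

	// Single-variable validation: the error is nil when the value satisfies the tag.
	if err := validate.Var("not-an-email", "required,email"); err != nil {
		fmt.Println(err) // ValidationErrors describing the failed "email" rule
	}

	// Cross-variable validation: compare one value against another.
	s1, s2 := "abcd", "abcd"
	fmt.Println(validate.VarWithValue(s1, s2, "eqcsfield")) // <nil>, the values match
}
```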


@ -1,124 +0,0 @@
// Copyright 2018 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"context"
"fmt"
"time"
"github.com/google/trillian"
"github.com/google/trillian/client/backoff"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog/v2"
)
// CreateAndInitTree uses the adminClient and logClient to create the tree
// described by req.
// If req describes a LOG tree, then this function will also call the InitLog
// function using logClient.
// Internally, the function will continue to retry failed requests until either
// the tree is created (and if necessary, initialised) successfully, or ctx is
// cancelled.
func CreateAndInitTree(
ctx context.Context,
req *trillian.CreateTreeRequest,
adminClient trillian.TrillianAdminClient,
logClient trillian.TrillianLogClient) (*trillian.Tree, error) {
b := &backoff.Backoff{
Min: 100 * time.Millisecond,
Max: 10 * time.Second,
Factor: 2,
Jitter: true,
}
var tree *trillian.Tree
err := b.Retry(ctx, func() error {
klog.Info("CreateTree...")
var err error
tree, err = adminClient.CreateTree(ctx, req)
switch code := status.Code(err); code {
case codes.Unavailable:
klog.Errorf("Admin server unavailable: %v", err)
return err
case codes.OK:
return nil
default:
klog.Errorf("failed to CreateTree(%+v): %T %v", req, err, err)
return err
}
})
if err != nil {
return nil, err
}
switch tree.TreeType {
case trillian.TreeType_LOG, trillian.TreeType_PREORDERED_LOG:
if err := InitLog(ctx, tree, logClient); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("don't know how or whether to initialise tree type %v", tree.TreeType)
}
return tree, nil
}
// InitLog initialises a freshly created Log tree.
func InitLog(ctx context.Context, tree *trillian.Tree, logClient trillian.TrillianLogClient) error {
if tree.TreeType != trillian.TreeType_LOG &&
tree.TreeType != trillian.TreeType_PREORDERED_LOG {
return fmt.Errorf("InitLog called with tree of type %v", tree.TreeType)
}
b := &backoff.Backoff{
Min: 100 * time.Millisecond,
Max: 10 * time.Second,
Factor: 2,
Jitter: true,
}
err := b.Retry(ctx, func() error {
klog.Infof("Initialising Log %v...", tree.TreeId)
req := &trillian.InitLogRequest{LogId: tree.TreeId}
resp, err := logClient.InitLog(ctx, req)
switch code := status.Code(err); code {
case codes.Unavailable:
klog.Errorf("Log server unavailable: %v", err)
return err
case codes.AlreadyExists:
klog.Warningf("Bizarrely, the just-created Log (%v) is already initialised!: %v", tree.TreeId, err)
return err
case codes.OK:
klog.Infof("Initialised Log (%v) with new SignedTreeHead:\n%+v",
tree.TreeId, resp.Created)
return nil
default:
klog.Errorf("failed to InitLog(%+v): %T %v", req, err, err)
return err
}
})
if err != nil {
return err
}
// Wait for log root to become available.
return b.Retry(ctx, func() error {
_, err := logClient.GetLatestSignedLogRoot(ctx,
&trillian.GetLatestSignedLogRootRequest{LogId: tree.TreeId})
return err
}, codes.FailedPrecondition)
}
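A hedged sketch of calling CreateAndInitTree against a Trillian log server; the localhost address is a placeholder, and the tree parameters mirror the request shape used elsewhere in this diff:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/google/trillian"
	"github.com/google/trillian/client"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// "localhost:8090" is a placeholder for a running Trillian log server.
	conn, err := grpc.Dial("localhost:8090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	req := &trillian.CreateTreeRequest{Tree: &trillian.Tree{
		TreeType:        trillian.TreeType_LOG,
		TreeState:       trillian.TreeState_ACTIVE,
		MaxRootDuration: durationpb.New(time.Hour),
	}}

	// Creates the tree and, because it is a LOG tree, also calls InitLog,
	// retrying with backoff until ctx expires.
	tree, err := client.CreateAndInitTree(ctx,
		req,
		trillian.NewTrillianAdminClient(conn),
		trillian.NewTrillianLogClient(conn))
	if err != nil {
		log.Fatalf("CreateAndInitTree: %v", err)
	}
	log.Printf("created tree %d", tree.TreeId)
}
```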


@ -1,130 +0,0 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package backoff allows retrying an operation with backoff.
package backoff
import (
"context"
"fmt"
"math/rand"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// RetriableError explicitly instructs Backoff to retry.
type RetriableError string
// Error returns string representation of the retriable error.
func (re RetriableError) Error() string {
return string(re)
}
// RetriableErrorf wraps a formatted string into a RetriableError.
func RetriableErrorf(format string, a ...interface{}) error {
return RetriableError(fmt.Sprintf(format, a...))
}
// Backoff specifies the parameters of the backoff algorithm. Works correctly
// if 0 < Min <= Max <= 2^62 (nanosec), and Factor >= 1.
type Backoff struct {
Min time.Duration // Duration of the first pause.
Max time.Duration // Max duration of a pause.
Factor float64 // The factor of duration increase between iterations.
Jitter bool // Add random noise to pauses.
delta time.Duration // Current pause duration relative to Min, no jitter.
}
// Duration returns the time to wait on current retry iteration. Every time
// Duration is called, the returned value will exponentially increase by Factor
// until Backoff.Max. If Jitter is enabled, will add an additional random value
// between 0 and the duration, so the result can at most double.
func (b *Backoff) Duration() time.Duration {
base := b.Min + b.delta
pause := base
if b.Jitter { // Add a number in the range [0, pause).
pause += time.Duration(rand.Int63n(int64(pause)))
}
nextPause := time.Duration(float64(base) * b.Factor)
if nextPause > b.Max || nextPause < b.Min { // Multiplication could overflow.
nextPause = b.Max
}
b.delta = nextPause - b.Min
return pause
}
// Reset sets the internal state back to first retry iteration.
func (b *Backoff) Reset() {
b.delta = 0
}
// Retry calls a function until it succeeds or the context is done.
// It will backoff if the function returns a retryable error.
// Once the context is done, retries will end and the most recent error will be returned.
// Backoff is not reset by this function.
func (b *Backoff) Retry(ctx context.Context, f func() error, retry ...codes.Code) error {
// If the context is already done, don't make any attempts to call f.
if ctx.Err() != nil {
return ctx.Err()
}
// Try calling f while the error is retryable and ctx is not done.
for {
if err := f(); !IsRetryable(err, retry...) {
return err
}
select {
case <-time.After(b.Duration()):
case <-ctx.Done():
return ctx.Err()
}
}
}
// IsRetryable returns false unless the error is explicitly retriable per
// https://godoc.org/google.golang.org/grpc/codes,
// or if the error codes is in retry. codes.OK is not retryable.
func IsRetryable(err error, retry ...codes.Code) bool {
code := status.Code(err)
switch code {
// Fast path.
case codes.OK:
return false
// Debatable cases:
case codes.DeadlineExceeded,
codes.ResourceExhausted: // Retry with backoff.
return true
// Errors that are explicitly retryable:
case codes.Unavailable, // Client can just retry the call.
codes.Aborted: // Client can retry the read-modify-write function.
return true
}
for _, c := range retry {
if code == c {
return true
}
}
// Don't retry for all other errors, unless it is a RetriableError.
_, ok := err.(RetriableError)
return ok
}
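A small sketch of Backoff.Retry, showing both an explicitly retriable error and an extra retryable gRPC code; the failure counter is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/google/trillian/client/backoff"
	"google.golang.org/grpc/codes"
)

func main() {
	b := &backoff.Backoff{
		Min:    100 * time.Millisecond,
		Max:    5 * time.Second,
		Factor: 2,
		Jitter: true,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	attempts := 0
	err := b.Retry(ctx, func() error {
		attempts++
		if attempts < 3 {
			// RetriableErrorf forces a retry even though the gRPC code is Unknown.
			return backoff.RetriableErrorf("transient failure on attempt %d", attempts)
		}
		return nil
	}, codes.NotFound) // additionally treat NotFound as retryable
	fmt.Println(attempts, err) // prints: 3 <nil>
}
```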


@ -1,343 +0,0 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package client verifies responses from the Trillian log.
package client
import (
"bytes"
"context"
"fmt"
"sort"
"sync"
"time"
"github.com/google/trillian"
"github.com/google/trillian/client/backoff"
"github.com/google/trillian/types"
"github.com/transparency-dev/merkle"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// LogClient represents a client for a given Trillian log instance.
type LogClient struct {
*LogVerifier
LogID int64
MinMergeDelay time.Duration
client trillian.TrillianLogClient
root types.LogRootV1
rootLock sync.Mutex
updateLock sync.Mutex
}
// New returns a new LogClient.
func New(logID int64, client trillian.TrillianLogClient, verifier *LogVerifier, root types.LogRootV1) *LogClient {
return &LogClient{
LogVerifier: verifier,
LogID: logID,
client: client,
root: root,
}
}
// NewFromTree creates a new LogClient given a tree config.
func NewFromTree(client trillian.TrillianLogClient, config *trillian.Tree, root types.LogRootV1) (*LogClient, error) {
verifier, err := NewLogVerifierFromTree(config)
if err != nil {
return nil, err
}
return New(config.GetTreeId(), client, verifier, root), nil
}
// AddLeaf adds leaf to the append only log.
// Blocks and continuously updates the trusted root until a successful inclusion proof
// can be retrieved.
func (c *LogClient) AddLeaf(ctx context.Context, data []byte) error {
if err := c.QueueLeaf(ctx, data); err != nil {
return fmt.Errorf("QueueLeaf(): %v", err)
}
if err := c.WaitForInclusion(ctx, data); err != nil {
return fmt.Errorf("WaitForInclusion(): %v", err)
}
return nil
}
// ListByIndex returns the requested leaves by index.
func (c *LogClient) ListByIndex(ctx context.Context, start, count int64) ([]*trillian.LogLeaf, error) {
resp, err := c.client.GetLeavesByRange(ctx,
&trillian.GetLeavesByRangeRequest{
LogId: c.LogID,
StartIndex: start,
Count: count,
})
if err != nil {
return nil, err
}
// Verify that we got back the requested leaves.
if len(resp.Leaves) < int(count) {
return nil, fmt.Errorf("len(Leaves)=%d, want %d", len(resp.Leaves), count)
}
for i, l := range resp.Leaves {
if want := start + int64(i); l.LeafIndex != want {
return nil, fmt.Errorf("Leaves[%d].LeafIndex=%d, want %d", i, l.LeafIndex, want)
}
}
return resp.Leaves, nil
}
// WaitForRootUpdate repeatedly fetches the latest root until there is an
// update, which it then applies, or until ctx times out.
func (c *LogClient) WaitForRootUpdate(ctx context.Context) (*types.LogRootV1, error) {
b := &backoff.Backoff{
Min: 100 * time.Millisecond,
Max: 10 * time.Second,
Factor: 2,
Jitter: true,
}
for {
newTrusted, err := c.UpdateRoot(ctx)
switch status.Code(err) {
case codes.OK:
if newTrusted != nil {
return newTrusted, nil
}
case codes.Unavailable, codes.NotFound, codes.FailedPrecondition:
// Retry.
default:
return nil, err
}
select {
case <-ctx.Done():
return nil, status.Errorf(codes.DeadlineExceeded, "%v", ctx.Err())
case <-time.After(b.Duration()):
}
}
}
// getAndVerifyLatestRoot fetches and verifies the latest root against a trusted root, seen in the past.
// Pass nil for trusted if this is the first time querying this log.
func (c *LogClient) getAndVerifyLatestRoot(ctx context.Context, trusted *types.LogRootV1) (*types.LogRootV1, error) {
resp, err := c.client.GetLatestSignedLogRoot(ctx,
&trillian.GetLatestSignedLogRootRequest{
LogId: c.LogID,
FirstTreeSize: int64(trusted.TreeSize),
})
if err != nil {
return nil, err
}
// TODO(gbelvin): Turn on root verification.
/*
logRoot, err := c.VerifyRoot(&types.LogRootV1{}, resp.GetSignedLogRoot(), nil)
if err != nil {
return nil, err
}
*/
// TODO(gbelvin): Remove this hack when all implementations store digital signatures.
var logRoot types.LogRootV1
if err := logRoot.UnmarshalBinary(resp.GetSignedLogRoot().LogRoot); err != nil {
return nil, err
}
if trusted.TreeSize > 0 &&
logRoot.TreeSize == trusted.TreeSize &&
bytes.Equal(logRoot.RootHash, trusted.RootHash) {
// Tree has not been updated.
return &logRoot, nil
}
// Verify root update if the tree / the latest signed log root isn't empty.
if logRoot.TreeSize > 0 {
if _, err := c.VerifyRoot(trusted, resp.GetSignedLogRoot(), resp.GetProof().GetHashes()); err != nil {
return nil, err
}
}
return &logRoot, nil
}
// GetRoot returns a copy of the latest trusted root.
func (c *LogClient) GetRoot() *types.LogRootV1 {
c.rootLock.Lock()
defer c.rootLock.Unlock()
// Copy the internal trusted root in order to prevent clients from modifying it.
ret := c.root
return &ret
}
// UpdateRoot retrieves the current SignedLogRoot, verifying it against roots this client has
// seen in the past, and updating the currently trusted root if the new root verifies, and is
// newer than the currently trusted root.
func (c *LogClient) UpdateRoot(ctx context.Context) (*types.LogRootV1, error) {
// Only one root update should be running at any point in time, because
// the update involves a consistency proof from the old value, and if the
// old value could change along the way (in another goroutine) then the
// result could be inconsistent.
//
// For example, if the current root is A and two root updates A->B and A->C
// happen in parallel, then we might end up with the transitions A->B->C:
// cur := A cur := A
// getRoot() => B getRoot() => C
// proof(A->B) ok proof(A->C) ok
// c.root = B
// c.root = C
// and the last step (B->C) has no proof and so could hide a forked tree.
c.updateLock.Lock()
defer c.updateLock.Unlock()
currentlyTrusted := c.GetRoot()
newTrusted, err := c.getAndVerifyLatestRoot(ctx, currentlyTrusted)
if err != nil {
return nil, err
}
// Lock "rootLock" for the "root" update.
c.rootLock.Lock()
defer c.rootLock.Unlock()
if newTrusted.TimestampNanos > currentlyTrusted.TimestampNanos &&
newTrusted.TreeSize >= currentlyTrusted.TreeSize {
// Take a copy of the new trusted root in order to prevent clients from modifying it.
c.root = *newTrusted
return newTrusted, nil
}
return nil, nil
}
// WaitForInclusion blocks until the requested data has been verified with an
// inclusion proof.
//
// It will continuously update the root to the latest one available until the
// data is found, or an error is returned.
//
// It is best to call this method with a context that will timeout to avoid
// waiting forever.
func (c *LogClient) WaitForInclusion(ctx context.Context, data []byte) error {
leaf := prepareLeaf(c.hasher, data)
// If a minimum merge delay has been configured, wait at least that long before
// starting to poll
if c.MinMergeDelay > 0 {
select {
case <-ctx.Done():
return status.Errorf(codes.DeadlineExceeded, "%v", ctx.Err())
case <-time.After(c.MinMergeDelay):
}
}
var root *types.LogRootV1
for {
root = c.GetRoot()
// It is illegal to ask for an inclusion proof with TreeSize = 0.
if root.TreeSize >= 1 {
ok, err := c.getAndVerifyInclusionProof(ctx, leaf.MerkleLeafHash, root)
if err != nil && status.Code(err) != codes.NotFound {
return err
} else if ok {
return nil
}
}
// If not found or tree is empty, wait for a root update before retrying again.
if _, err := c.WaitForRootUpdate(ctx); err != nil {
return err
}
// Retry
}
}
func (c *LogClient) getAndVerifyInclusionProof(ctx context.Context, leafHash []byte, sth *types.LogRootV1) (bool, error) {
resp, err := c.client.GetInclusionProofByHash(ctx,
&trillian.GetInclusionProofByHashRequest{
LogId: c.LogID,
LeafHash: leafHash,
TreeSize: int64(sth.TreeSize),
})
if err != nil {
return false, err
}
if len(resp.Proof) < 1 {
return false, nil
}
for _, proof := range resp.Proof {
if err := c.VerifyInclusionByHash(sth, leafHash, proof); err != nil {
return false, fmt.Errorf("VerifyInclusionByHash(): %v", err)
}
}
return true, nil
}
// AddSequencedLeaves adds any number of pre-sequenced leaves to the log.
// Indexes must be contiguous.
func (c *LogClient) AddSequencedLeaves(ctx context.Context, dataByIndex map[int64][]byte) error {
if len(dataByIndex) == 0 {
return nil
}
leaves := make([]*trillian.LogLeaf, 0, len(dataByIndex))
indexes := make([]int64, 0, len(dataByIndex))
for index := range dataByIndex {
indexes = append(indexes, index)
}
sort.Slice(indexes, func(a, b int) bool { return indexes[a] < indexes[b] })
for i, index := range indexes {
// Check index continuity.
if want := indexes[0] + int64(i); index != want {
return fmt.Errorf("missing index in contiugous index range. got: %v, want: %v", index, want)
}
leaf := prepareLeaf(c.hasher, dataByIndex[index])
leaf.LeafIndex = index
leaves = append(leaves, leaf)
}
resp, err := c.client.AddSequencedLeaves(ctx, &trillian.AddSequencedLeavesRequest{
LogId: c.LogID,
Leaves: leaves,
})
for _, leaf := range resp.GetResults() {
if s := status.FromProto(leaf.GetStatus()); s.Code() != codes.OK && s.Code() != codes.AlreadyExists {
return status.Errorf(s.Code(), "unexpected fail status in AddSequencedLeaves: %+v, err: %v", leaf, s.Message())
}
}
return err
}
// QueueLeaf adds a leaf to a Trillian log without blocking.
// AlreadyExists is considered a success case by this function.
func (c *LogClient) QueueLeaf(ctx context.Context, data []byte) error {
leaf := prepareLeaf(c.hasher, data)
_, err := c.client.QueueLeaf(ctx, &trillian.QueueLeafRequest{
LogId: c.LogID,
Leaf: leaf,
})
return err
}
// prepareLeaf returns a trillian.LogLeaf prepopulated with leaf data and hash.
func prepareLeaf(hasher merkle.LogHasher, data []byte) *trillian.LogLeaf {
leafHash := hasher.HashLeaf(data)
return &trillian.LogLeaf{
LeafValue: data,
MerkleLeafHash: leafHash,
}
}
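A sketch of how this LogClient might be driven, assuming a gRPC connection and an existing tree handle are obtained elsewhere; the package and helper names are hypothetical:

```go
package trillianexample

import (
	"context"
	"time"

	"github.com/google/trillian"
	"github.com/google/trillian/client"
	"github.com/google/trillian/types"
	"google.golang.org/grpc"
)

// addEntry queues data on the given tree and blocks until an inclusion proof
// for it verifies against an updated trusted root.
func addEntry(ctx context.Context, conn *grpc.ClientConn, tree *trillian.Tree, data []byte) error {
	// Start from an empty LogRootV1: the first root fetched is trusted implicitly.
	logClient, err := client.NewFromTree(trillian.NewTrillianLogClient(conn), tree, types.LogRootV1{})
	if err != nil {
		return err
	}
	// AddLeaf = QueueLeaf + WaitForInclusion, so bound it with a timeout.
	ctx, cancel := context.WithTimeout(ctx, 2*time.Minute)
	defer cancel()
	return logClient.AddLeaf(ctx, data)
}
```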


@ -1,91 +0,0 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"errors"
"fmt"
"github.com/google/trillian"
"github.com/google/trillian/types"
"github.com/transparency-dev/merkle"
"github.com/transparency-dev/merkle/proof"
"github.com/transparency-dev/merkle/rfc6962"
)
// LogVerifier allows verification of output from Trillian Logs, both regular
// and pre-ordered; it is safe for concurrent use (as its contents are fixed
// after construction).
type LogVerifier struct {
// hasher is the hash strategy used to compute nodes in the Merkle tree.
hasher merkle.LogHasher
}
// NewLogVerifier returns an object that can verify output from Trillian Logs.
func NewLogVerifier(hasher merkle.LogHasher) *LogVerifier {
return &LogVerifier{hasher: hasher}
}
// NewLogVerifierFromTree creates a new LogVerifier using the algorithms
// specified by a Trillian Tree object.
func NewLogVerifierFromTree(config *trillian.Tree) (*LogVerifier, error) {
if config == nil {
return nil, errors.New("client: NewLogVerifierFromTree(): nil config")
}
log, pLog := trillian.TreeType_LOG, trillian.TreeType_PREORDERED_LOG
if got := config.TreeType; got != log && got != pLog {
return nil, fmt.Errorf("client: NewLogVerifierFromTree(): TreeType: %v, want %v or %v", got, log, pLog)
}
return NewLogVerifier(rfc6962.DefaultHasher), nil
}
// VerifyRoot verifies that newRoot is a valid append-only operation from
// trusted. If trusted.TreeSize is zero, a consistency proof is not needed.
func (c *LogVerifier) VerifyRoot(trusted *types.LogRootV1, newRoot *trillian.SignedLogRoot, consistency [][]byte) (*types.LogRootV1, error) {
if trusted == nil {
return nil, fmt.Errorf("VerifyRoot() error: trusted == nil")
}
if newRoot == nil {
return nil, fmt.Errorf("VerifyRoot() error: newRoot == nil")
}
var r types.LogRootV1
if err := r.UnmarshalBinary(newRoot.LogRoot); err != nil {
return nil, err
}
// Implicitly trust the first root we get.
if trusted.TreeSize != 0 {
// Verify consistency proof.
if err := proof.VerifyConsistency(c.hasher, trusted.TreeSize, r.TreeSize, consistency, trusted.RootHash, r.RootHash); err != nil {
return nil, fmt.Errorf("failed to verify consistency proof from %d->%d %x->%x: %v", trusted.TreeSize, r.TreeSize, trusted.RootHash, r.RootHash, err)
}
}
return &r, nil
}
// VerifyInclusionByHash verifies that the inclusion proof for the given Merkle leafHash
// matches the given trusted root.
func (c *LogVerifier) VerifyInclusionByHash(trusted *types.LogRootV1, leafHash []byte, pf *trillian.Proof) error {
if trusted == nil {
return fmt.Errorf("VerifyInclusionByHash() error: trusted == nil")
}
if pf == nil {
return fmt.Errorf("VerifyInclusionByHash() error: proof == nil")
}
return proof.VerifyInclusion(c.hasher, uint64(pf.LeafIndex), trusted.TreeSize, leafHash, pf.Hashes, trusted.RootHash)
}


@ -1,12 +0,0 @@
*.exe
*.dll
*.so
*.dylib
*.test
*.out
*.txt
vendor/
/removecomments


@ -1,21 +0,0 @@
MIT License
Copyright (c) 2018 Leonardo Di Donato
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,81 +0,0 @@
[![Build](https://img.shields.io/circleci/build/github/leodido/go-urn?style=for-the-badge)](https://app.circleci.com/pipelines/github/leodido/go-urn) [![Coverage](https://img.shields.io/codecov/c/github/leodido/go-urn.svg?style=for-the-badge)](https://codecov.io/gh/leodido/go-urn) [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](https://godoc.org/github.com/leodido/go-urn)
**A parser for URNs**.
> As seen on [RFC 2141](https://tools.ietf.org/html/rfc2141#ref-1).
[API documentation](https://godoc.org/github.com/leodido/go-urn).
## Installation
```
go get github.com/leodido/go-urn
```
## Performances
This implementation is really fast, usually taking below ½ microsecond on my machine<sup>[1](#mymachine)</sup>.
While parsing, it also performs:
1. fine-grained and informative erroring
2. specific-string normalization
```
ok/00/urn:a:b______________________________________/-4 20000000 265 ns/op 182 B/op 6 allocs/op
ok/01/URN:foo:a123,456_____________________________/-4 30000000 296 ns/op 200 B/op 6 allocs/op
ok/02/urn:foo:a123%2c456___________________________/-4 20000000 331 ns/op 208 B/op 6 allocs/op
ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-4 20000000 430 ns/op 280 B/op 6 allocs/op
ok/04/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 411 ns/op 312 B/op 6 allocs/op
ok/05/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 472 ns/op 344 B/op 6 allocs/op
ok/06/urn:burnout:nss______________________________/-4 30000000 257 ns/op 192 B/op 6 allocs/op
ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-4 20000000 375 ns/op 213 B/op 6 allocs/op
ok/08/urn:urnurnurn:urn____________________________/-4 30000000 265 ns/op 197 B/op 6 allocs/op
ok/09/urn:ciao:@!=%2c(xyz)+a,b.*@g=$_'_____________/-4 20000000 307 ns/op 248 B/op 6 allocs/op
ok/10/URN:x:abc%1dz%2f%3az_________________________/-4 30000000 259 ns/op 212 B/op 6 allocs/op
no/11/URN:-xxx:x___________________________________/-4 20000000 445 ns/op 320 B/op 6 allocs/op
no/12/urn::colon:nss_______________________________/-4 20000000 461 ns/op 320 B/op 6 allocs/op
no/13/urn:abcdefghilmnopqrstuvzabcdefghilmn:specifi/-4 10000000 660 ns/op 320 B/op 6 allocs/op
no/14/URN:a!?:x____________________________________/-4 20000000 507 ns/op 320 B/op 6 allocs/op
no/15/urn:urn:NSS__________________________________/-4 20000000 429 ns/op 288 B/op 6 allocs/op
no/16/urn:white_space:NSS__________________________/-4 20000000 482 ns/op 320 B/op 6 allocs/op
no/17/urn:concat:no_spaces_________________________/-4 20000000 539 ns/op 328 B/op 7 allocs/op
no/18/urn:a:/______________________________________/-4 20000000 470 ns/op 320 B/op 7 allocs/op
no/19/urn:UrN:NSS__________________________________/-4 20000000 399 ns/op 288 B/op 6 allocs/op
```
---
* <a name="mymachine">[1]</a>: Intel Core i7-7600U CPU @ 2.80GHz
---
## Example
```go
package main
import (
"fmt"
"github.com/leodido/go-urn"
)
func main() {
var uid = "URN:foo:a123,456"
u, ok := urn.Parse([]byte(uid))
if !ok {
panic("error parsing urn")
}
fmt.Println(u.ID)
fmt.Println(u.SS)
// Output:
// foo
// a123,456
}
```
[![Analytics](https://ga-beacon.appspot.com/UA-49657176-1/go-urn?flat)](https://github.com/igrigorik/ga-beacon)

File diff suppressed because it is too large


@ -1,159 +0,0 @@
package urn
import (
"fmt"
)
var (
errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]"
errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]"
errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]"
errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]"
errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]"
errParse = "parsing error [col %d]"
)
%%{
machine urn;
# unsigned alphabet
alphtype uint8;
action mark {
m.pb = m.p
}
action tolower {
m.tolower = append(m.tolower, m.p - m.pb)
}
action set_pre {
output.prefix = string(m.text())
}
action set_nid {
output.ID = string(m.text())
}
action set_nss {
raw := m.text()
output.SS = string(raw)
// Iterate upper letters lowering them
for _, i := range m.tolower {
raw[i] = raw[i] + 32
}
output.norm = string(raw)
}
action err_pre {
m.err = fmt.Errorf(errPrefix, m.p)
fhold;
fgoto fail;
}
action err_nid {
m.err = fmt.Errorf(errIdentifier, m.p)
fhold;
fgoto fail;
}
action err_nss {
m.err = fmt.Errorf(errSpecificString, m.p)
fhold;
fgoto fail;
}
action err_urn {
m.err = fmt.Errorf(errNoUrnWithinID, m.p)
fhold;
fgoto fail;
}
action err_hex {
m.err = fmt.Errorf(errHex, m.p)
fhold;
fgoto fail;
}
action err_parse {
m.err = fmt.Errorf(errParse, m.p)
fhold;
fgoto fail;
}
pre = ([uU][rR][nN] @err(err_pre)) >mark %set_pre;
nid = (alnum >mark (alnum | '-'){0,31}) %set_nid;
hex = '%' (digit | lower | upper >tolower){2} $err(err_hex);
sss = (alnum | [()+,\-.:=@;$_!*']);
nss = (sss | hex)+ $err(err_nss);
fail := (any - [\n\r])* @err{ fgoto main; };
main := (pre ':' (nid - pre %err(err_urn)) $err(err_nid) ':' nss >mark %set_nss) $err(err_parse);
}%%
%% write data noerror noprefix;
// Machine is the interface representing the FSM
type Machine interface {
Error() error
Parse(input []byte) (*URN, error)
}
type machine struct {
data []byte
cs int
p, pe, eof, pb int
err error
tolower []int
}
// NewMachine creates a new FSM able to parse RFC 2141 strings.
func NewMachine() Machine {
m := &machine{}
%% access m.;
%% variable p m.p;
%% variable pe m.pe;
%% variable eof m.eof;
%% variable data m.data;
return m
}
// Err returns the error that occurred on the last call to Parse.
//
// If the result is nil, then the line was parsed successfully.
func (m *machine) Error() error {
return m.err
}
func (m *machine) text() []byte {
return m.data[m.pb:m.p]
}
// Parse parses the input byte array as a RFC 2141 string.
func (m *machine) Parse(input []byte) (*URN, error) {
m.data = input
m.p = 0
m.pb = 0
m.pe = len(input)
m.eof = len(input)
m.err = nil
m.tolower = []int{}
output := &URN{}
%% write init;
%% write exec;
if m.cs < first_final || m.cs == en_fail {
return nil, m.err
}
return output, nil
}
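For completeness, the generated Machine can also be used directly instead of the package-level Parse helper:

```go
package main

import (
	"fmt"

	urn "github.com/leodido/go-urn"
)

func main() {
	m := urn.NewMachine()

	u, err := m.Parse([]byte("urn:ietf:params:scim:schemas:core:2.0:User"))
	if err != nil {
		// Error() returns the error from the most recent Parse call.
		fmt.Println("parse failed:", m.Error())
		return
	}
	fmt.Println(u.ID) // ietf
	fmt.Println(u.SS) // params:scim:schemas:core:2.0:User
}
```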


@ -1,53 +0,0 @@
SHELL := /bin/bash
RAGEL := ragel
GOFMT := go fmt
export GO_TEST=env GOTRACEBACK=all go test $(GO_ARGS)
.PHONY: build
build: machine.go
.PHONY: clean
clean:
@rm -rf docs
@rm -f machine.go
.PHONY: images
images: docs/urn.png
.PHONY: removecomments
removecomments:
@cd ./tools/removecomments; go build -o ../../removecomments ./main.go
machine.go: machine.go.rl
machine.go: removecomments
machine.go:
$(RAGEL) -Z -G2 -e -o $@ $<
@./removecomments $@
$(MAKE) -s file=$@ snake2camel
$(GOFMT) $@
docs/urn.dot: machine.go.rl
@mkdir -p docs
$(RAGEL) -Z -e -Vp $< -o $@
docs/urn.png: docs/urn.dot
dot $< -Tpng -o $@
.PHONY: bench
bench: *_test.go machine.go
go test -bench=. -benchmem -benchtime=5s ./...
.PHONY: tests
tests: *_test.go
$(GO_TEST) ./...
.PHONY: snake2camel
snake2camel:
@awk -i inplace '{ \
while ( match($$0, /(.*)([a-z]+[0-9]*)_([a-zA-Z0-9])(.*)/, cap) ) \
$$0 = cap[1] cap[2] toupper(cap[3]) cap[4]; \
print \
}' $(file)


@ -1,86 +0,0 @@
package urn
import (
"encoding/json"
"fmt"
"strings"
)
const errInvalidURN = "invalid URN: %s"
// URN represents a Uniform Resource Name.
//
// The general form represented is:
//
// urn:<id>:<ss>
//
// Details at https://tools.ietf.org/html/rfc2141.
type URN struct {
prefix string // Static prefix. Equal to "urn" when empty.
ID string // Namespace identifier
SS string // Namespace specific string
norm string // Normalized namespace specific string
}
// Normalize turns the receiving URN into its norm version.
//
// Which means: lowercase prefix, lowercase namespace identifier, and unchanged namespace specific string chars (except <hex> tokens, which are lowercased).
func (u *URN) Normalize() *URN {
return &URN{
prefix: "urn",
ID: strings.ToLower(u.ID),
SS: u.norm,
}
}
// Equal checks the lexical equivalence of the current URN with another one.
func (u *URN) Equal(x *URN) bool {
return *u.Normalize() == *x.Normalize()
}
// String reassembles the URN into a valid URN string.
//
// This requires both ID and SS fields to be non-empty.
// Otherwise it returns an empty string.
//
// Default URN prefix is "urn".
func (u *URN) String() string {
var res string
if u.ID != "" && u.SS != "" {
if u.prefix == "" {
res += "urn"
}
res += u.prefix + ":" + u.ID + ":" + u.SS
}
return res
}
// Parse creates a URN instance from a byte array that matches the correct URN syntax.
func Parse(u []byte) (*URN, bool) {
urn, err := NewMachine().Parse(u)
if err != nil {
return nil, false
}
return urn, true
}
// MarshalJSON marshals the URN to JSON string form (e.g. `"urn:oid:1.2.3.4"`).
func (u URN) MarshalJSON() ([]byte, error) {
return json.Marshal(u.String())
}
// UnmarshalJSON unmarshals a URN from JSON string form (e.g. `"urn:oid:1.2.3.4"`).
func (u *URN) UnmarshalJSON(bytes []byte) error {
var str string
if err := json.Unmarshal(bytes, &str); err != nil {
return err
}
if value, ok := Parse([]byte(str)); !ok {
return fmt.Errorf(errInvalidURN, str)
} else {
*u = *value
}
return nil
}
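A short sketch of normalization, lexical equivalence and JSON marshalling with this type:

```go
package main

import (
	"encoding/json"
	"fmt"

	urn "github.com/leodido/go-urn"
)

func main() {
	a, ok1 := urn.Parse([]byte("URN:foo:a123%2C456"))
	b, ok2 := urn.Parse([]byte("urn:FOO:a123%2c456"))
	if !ok1 || !ok2 {
		panic("unexpected parse failure")
	}

	// Normalization lowercases the prefix, the namespace identifier and any
	// <hex> tokens, so the two URNs compare as lexically equivalent.
	fmt.Println(a.Normalize().String()) // urn:foo:a123%2c456
	fmt.Println(a.Equal(b))             // true

	out, err := json.Marshal(a.Normalize()) // uses URN.MarshalJSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // "urn:foo:a123%2c456"
}
```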


@ -1,115 +0,0 @@
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"context"
"log"
"github.com/go-chi/chi/middleware"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Logger set the default logger to development mode
var Logger *zap.SugaredLogger
func init() {
ConfigureLogger("dev")
}
func ConfigureLogger(logType string) {
var cfg zap.Config
if logType == "prod" {
cfg = zap.NewProductionConfig()
cfg.EncoderConfig.LevelKey = "severity"
cfg.EncoderConfig.MessageKey = "message"
cfg.EncoderConfig.TimeKey = "time"
cfg.EncoderConfig.EncodeLevel = encodeLevel()
cfg.EncoderConfig.EncodeTime = zapcore.RFC3339TimeEncoder
cfg.EncoderConfig.EncodeDuration = zapcore.SecondsDurationEncoder
cfg.EncoderConfig.EncodeCaller = zapcore.FullCallerEncoder
} else {
cfg = zap.NewDevelopmentConfig()
cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder
}
logger, err := cfg.Build()
if err != nil {
log.Fatalln("createLogger", err)
}
Logger = logger.Sugar()
}
func encodeLevel() zapcore.LevelEncoder {
return func(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
switch l {
case zapcore.DebugLevel:
enc.AppendString("DEBUG")
case zapcore.InfoLevel:
enc.AppendString("INFO")
case zapcore.WarnLevel:
enc.AppendString("WARNING")
case zapcore.ErrorLevel:
enc.AppendString("ERROR")
case zapcore.DPanicLevel:
enc.AppendString("CRITICAL")
case zapcore.PanicLevel:
enc.AppendString("ALERT")
case zapcore.FatalLevel:
enc.AppendString("EMERGENCY")
}
}
}
var CliLogger = createCliLogger()
func createCliLogger() *zap.SugaredLogger {
cfg := zap.NewDevelopmentConfig()
cfg.EncoderConfig.TimeKey = ""
cfg.EncoderConfig.LevelKey = ""
cfg.DisableCaller = true
cfg.DisableStacktrace = true
logger, err := cfg.Build()
if err != nil {
log.Fatalln("createLogger", err)
}
return logger.Sugar()
}
func WithRequestID(ctx context.Context, id string) context.Context {
return context.WithValue(ctx, middleware.RequestIDKey, id)
}
type operation struct {
id string
}
func (o operation) MarshalLogObject(enc zapcore.ObjectEncoder) error {
enc.AddString("id", o.id)
return nil
}
func ContextLogger(ctx context.Context) *zap.SugaredLogger {
proposedLogger := Logger
if ctx != nil {
if ctxRequestID, ok := ctx.Value(middleware.RequestIDKey).(string); ok {
requestID := operation{ctxRequestID}
proposedLogger = proposedLogger.With(zap.Object("operation", requestID))
}
}
return proposedLogger
}
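A minimal sketch of this logging package in use; the request ID value is illustrative:

```go
package main

import (
	"context"

	"github.com/sigstore/rekor/pkg/log"
)

func main() {
	// Switch the package-level logger to the production (JSON) configuration.
	log.ConfigureLogger("prod")

	// Attach a request ID so that log lines from one request can be correlated.
	ctx := log.WithRequestID(context.Background(), "req-1234")

	log.ContextLogger(ctx).Infow("processing entry", "artifact", "example.txt")
}
```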


@ -1,44 +0,0 @@
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"context"
"crypto/ecdsa"
"errors"
"github.com/sigstore/rekor/pkg/generated/client"
"github.com/sigstore/rekor/pkg/generated/client/pubkey"
"github.com/sigstore/sigstore/pkg/cryptoutils"
)
func PublicKey(ctx context.Context, c *client.Rekor) (*ecdsa.PublicKey, error) {
resp, err := c.Pubkey.GetPublicKey(&pubkey.GetPublicKeyParams{Context: ctx})
if err != nil {
return nil, err
}
// unmarshal the PEM-encoded public key
pubKey, err := cryptoutils.UnmarshalPEMToPublicKey([]byte(resp.GetPayload()))
if err != nil {
return nil, err
}
ed, ok := pubKey.(*ecdsa.PublicKey)
if !ok {
return nil, errors.New("public key retrieved from Rekor is not an ECDSA key")
}
return ed, nil
}
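A hedged sketch of calling this helper; the import path for the package above is assumed to be rekor's pkg/util, and constructing the *client.Rekor (for a given Rekor base URL) is left to the caller:

```go
package rekorexample

import (
	"context"
	"crypto/ecdsa"
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/client"
	"github.com/sigstore/rekor/pkg/util" // assumed import path for the package above
)

// logPublicKey fetches and reports the Rekor log's ECDSA public key.
func logPublicKey(ctx context.Context, rc *client.Rekor) (*ecdsa.PublicKey, error) {
	pub, err := util.PublicKey(ctx, rc)
	if err != nil {
		return nil, fmt.Errorf("fetching rekor public key: %w", err)
	}
	fmt.Println("rekor log key curve:", pub.Curve.Params().Name)
	return pub, nil
}
```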


@ -1,171 +0,0 @@
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// Signed note based timestamp responses
type TimestampNote struct {
// Origin is the unique identifier/version string
Origin string
// MessageImprint is the hash of the message to timestamp, of the form sha256:<sha>
MessageImprint string
// Nonce is a short sequence of random bytes used to prove response freshness
Nonce []byte
// Time is the timestamp to imprint on the message
Time time.Time
// Radius is the time in microseconds used to indicate certainty
Radius int64
// CertChainRef is a reference URL to the valid timestamping cert chain used to sign the response
CertChainRef *url.URL
// OtherContent is any additional data to be included in the signed payload; each element is assumed to be one line
OtherContent []string
}
// String returns the String representation of the TimestampNote
func (t TimestampNote) String() string {
var b strings.Builder
time, _ := t.Time.MarshalText()
fmt.Fprintf(&b, "%s\n%s\n%s\n%s\n%d\n%s\n", t.Origin, t.MessageImprint, base64.StdEncoding.EncodeToString(t.Nonce),
time, t.Radius, t.CertChainRef)
for _, line := range t.OtherContent {
fmt.Fprintf(&b, "%s\n", line)
}
return b.String()
}
// MarshalText returns the common format representation of this TimestampNote.
func (t TimestampNote) MarshalText() ([]byte, error) {
return []byte(t.String()), nil
}
// UnmarshalText parses the common formatted timestamp note data and stores the result
// in the TimestampNote.
//
// The supplied data is expected to begin with the following 6 lines of text,
// each followed by a newline:
// <ecosystem/version string>
// <message hash of the format sha256:$SHA>
// <base64 representation of the nonce>
// <RFC 3339 representation of the time>
// <decimal representation of radius>
// <cert chain URI>
// <optional non-empty line of other content>...
// <optional non-empty line of other content>...
//
// This will discard any content found after the timestamp note (including signatures)
func (t *TimestampNote) UnmarshalText(data []byte) error {
l := bytes.Split(data, []byte("\n"))
if len(l) < 7 {
return errors.New("invalid timestamp note - too few newlines")
}
origin := string(l[0])
if len(origin) == 0 {
return errors.New("invalid timestamp note - empty ecosystem")
}
h := string(l[1])
if err := ValidateSHA256Value(h); err != nil {
return fmt.Errorf("invalid timestamp note - invalid message hash: %w", err)
}
nonce, err := base64.StdEncoding.DecodeString(string(l[2]))
if err != nil {
return fmt.Errorf("invalid timestamp note - invalid nonce: %w", err)
}
var timestamp time.Time
if err := timestamp.UnmarshalText(l[3]); err != nil {
return fmt.Errorf("invalid timestamp note - invalid time: %w", err)
}
r, err := strconv.ParseInt(string(l[4]), 10, 64)
if err != nil {
return fmt.Errorf("invalid timestamp note - invalid radius: %w", err)
}
u, err := url.Parse(string(l[5]))
if err != nil {
return fmt.Errorf("invalid timestamp note - invalid URI: %w", err)
}
*t = TimestampNote{
Origin: origin,
MessageImprint: h,
Nonce: nonce,
Time: timestamp,
Radius: r,
CertChainRef: u,
}
if len(l) >= 8 {
for _, line := range l[6:] {
if len(line) == 0 {
break
}
t.OtherContent = append(t.OtherContent, string(line))
}
}
return nil
}
type SignedTimestampNote struct {
TimestampNote
SignedNote
}
func CreateSignedTimestampNote(t TimestampNote) (*SignedTimestampNote, error) {
text, err := t.MarshalText()
if err != nil {
return nil, err
}
return &SignedTimestampNote{
TimestampNote: t,
SignedNote: SignedNote{Note: string(text)},
}, nil
}
func SignedTimestampNoteValidator(strToValidate string) bool {
s := SignedNote{}
if err := s.UnmarshalText([]byte(strToValidate)); err != nil {
return false
}
c := &TimestampNote{}
return c.UnmarshalText([]byte(s.Note)) == nil
}
func TimestampNoteValidator(strToValidate string) bool {
c := &TimestampNote{}
return c.UnmarshalText([]byte(strToValidate)) == nil
}
func (r *SignedTimestampNote) UnmarshalText(data []byte) error {
s := SignedNote{}
if err := s.UnmarshalText([]byte(data)); err != nil {
return fmt.Errorf("unmarshalling signed note: %w", err)
}
t := TimestampNote{}
if err := t.UnmarshalText([]byte(s.Note)); err != nil {
return fmt.Errorf("unmarshalling timestamp note: %w", err)
}
*r = SignedTimestampNote{TimestampNote: t, SignedNote: s}
return nil
}
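A sketch of round-tripping a TimestampNote through the 6-line text format documented above; all field values are made up, and the import path is assumed to be rekor's pkg/util:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
	"time"

	"github.com/sigstore/rekor/pkg/util" // assumed import path for the package above
)

func main() {
	certURL, _ := url.Parse("https://example.com/timestamp/certchain")

	note := util.TimestampNote{
		Origin:         "Example TSA v0",
		MessageImprint: "sha256:" + strings.Repeat("ab", 32), // dummy 64-char digest
		Nonce:          []byte{0x01, 0x02, 0x03, 0x04},
		Time:           time.Now().UTC(),
		Radius:         500,
		CertChainRef:   certURL,
	}

	text, err := note.MarshalText()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", text)

	// Parse it back through the documented text format.
	var parsed util.TimestampNote
	if err := parsed.UnmarshalText(text); err != nil {
		panic(err)
	}
	fmt.Println(parsed.Origin, parsed.Radius)
}
```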


@ -1,380 +0,0 @@
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"context"
"encoding/hex"
"fmt"
"time"
"github.com/sigstore/rekor/pkg/log"
"github.com/transparency-dev/merkle/proof"
"github.com/transparency-dev/merkle/rfc6962"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/durationpb"
"github.com/google/trillian"
"github.com/google/trillian/client"
"github.com/google/trillian/types"
)
// TrillianClient provides a wrapper around the Trillian client
type TrillianClient struct {
client trillian.TrillianLogClient
logID int64
context context.Context
}
// NewTrillianClient creates a TrillianClient with the given Trillian client and log/tree ID.
func NewTrillianClient(ctx context.Context, logClient trillian.TrillianLogClient, logID int64) TrillianClient {
return TrillianClient{
client: logClient,
logID: logID,
context: ctx,
}
}
// Response includes a status code, an optional error message, and one of the results based on the API call
type Response struct {
// Status is the status code of the response
Status codes.Code
// Error contains an error on request or client failure
Err error
// GetAddResult contains the response from queueing a leaf in Trillian
GetAddResult *trillian.QueueLeafResponse
// GetLeafAndProofResult contains the response for fetching an inclusion proof and leaf
GetLeafAndProofResult *trillian.GetEntryAndProofResponse
// GetLatestResult contains the response for the latest checkpoint
GetLatestResult *trillian.GetLatestSignedLogRootResponse
// GetConsistencyProofResult contains the response for a consistency proof between two log sizes
GetConsistencyProofResult *trillian.GetConsistencyProofResponse
// getProofResult contains the response for an inclusion proof fetched by leaf hash
getProofResult *trillian.GetInclusionProofByHashResponse
}
func unmarshalLogRoot(logRoot []byte) (types.LogRootV1, error) {
var root types.LogRootV1
if err := root.UnmarshalBinary(logRoot); err != nil {
return types.LogRootV1{}, err
}
return root, nil
}
func (t *TrillianClient) root() (types.LogRootV1, error) {
rqst := &trillian.GetLatestSignedLogRootRequest{
LogId: t.logID,
}
resp, err := t.client.GetLatestSignedLogRoot(t.context, rqst)
if err != nil {
return types.LogRootV1{}, err
}
return unmarshalLogRoot(resp.SignedLogRoot.LogRoot)
}
func (t *TrillianClient) AddLeaf(byteValue []byte) *Response {
leaf := &trillian.LogLeaf{
LeafValue: byteValue,
}
rqst := &trillian.QueueLeafRequest{
LogId: t.logID,
Leaf: leaf,
}
resp, err := t.client.QueueLeaf(t.context, rqst)
// check for error
if err != nil || (resp.QueuedLeaf.Status != nil && resp.QueuedLeaf.Status.Code != int32(codes.OK)) {
return &Response{
Status: status.Code(err),
Err: err,
GetAddResult: resp,
}
}
root, err := t.root()
if err != nil {
return &Response{
Status: status.Code(err),
Err: err,
GetAddResult: resp,
}
}
v := client.NewLogVerifier(rfc6962.DefaultHasher)
logClient := client.New(t.logID, t.client, v, root)
waitForInclusion := func(ctx context.Context, leafHash []byte) *Response {
if logClient.MinMergeDelay > 0 {
select {
case <-ctx.Done():
return &Response{
Status: codes.DeadlineExceeded,
Err: ctx.Err(),
}
case <-time.After(logClient.MinMergeDelay):
}
}
for {
root = *logClient.GetRoot()
if root.TreeSize >= 1 {
proofResp := t.getProofByHash(resp.QueuedLeaf.Leaf.MerkleLeafHash)
// if this call succeeds or returns an error other than "not found", return
if proofResp.Err == nil || (proofResp.Err != nil && status.Code(proofResp.Err) != codes.NotFound) {
return proofResp
}
// otherwise wait for a root update before trying again
}
if _, err := logClient.WaitForRootUpdate(ctx); err != nil {
return &Response{
Status: codes.Unknown,
Err: err,
}
}
}
}
proofResp := waitForInclusion(t.context, resp.QueuedLeaf.Leaf.MerkleLeafHash)
if proofResp.Err != nil {
return &Response{
Status: status.Code(proofResp.Err),
Err: proofResp.Err,
GetAddResult: resp,
}
}
proofs := proofResp.getProofResult.Proof
if len(proofs) != 1 {
err := fmt.Errorf("expected 1 proof from getProofByHash for %v, found %v", hex.EncodeToString(resp.QueuedLeaf.Leaf.MerkleLeafHash), len(proofs))
return &Response{
Status: status.Code(err),
Err: err,
GetAddResult: resp,
}
}
leafIndex := proofs[0].LeafIndex
leafResp := t.GetLeafAndProofByIndex(leafIndex)
if leafResp.Err != nil {
return &Response{
Status: status.Code(leafResp.Err),
Err: leafResp.Err,
GetAddResult: resp,
}
}
// overwrite queued leaf that doesn't have index set
resp.QueuedLeaf.Leaf = leafResp.GetLeafAndProofResult.Leaf
return &Response{
Status: status.Code(err),
Err: err,
GetAddResult: resp,
// include getLeafAndProofResult for inclusion proof
GetLeafAndProofResult: leafResp.GetLeafAndProofResult,
}
}
func (t *TrillianClient) GetLeafAndProofByHash(hash []byte) *Response {
// get inclusion proof for hash, extract index, then fetch leaf using index
proofResp := t.getProofByHash(hash)
if proofResp.Err != nil {
return &Response{
Status: status.Code(proofResp.Err),
Err: proofResp.Err,
}
}
proofs := proofResp.getProofResult.Proof
if len(proofs) != 1 {
err := fmt.Errorf("expected 1 proof from getProofByHash for %v, found %v", hex.EncodeToString(hash), len(proofs))
return &Response{
Status: status.Code(err),
Err: err,
}
}
return t.GetLeafAndProofByIndex(proofs[0].LeafIndex)
}
func (t *TrillianClient) GetLeafAndProofByIndex(index int64) *Response {
ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
defer cancel()
rootResp := t.GetLatest(0)
if rootResp.Err != nil {
return &Response{
Status: status.Code(rootResp.Err),
Err: rootResp.Err,
}
}
root, err := unmarshalLogRoot(rootResp.GetLatestResult.SignedLogRoot.LogRoot)
if err != nil {
return &Response{
Status: status.Code(rootResp.Err),
Err: rootResp.Err,
}
}
resp, err := t.client.GetEntryAndProof(ctx,
&trillian.GetEntryAndProofRequest{
LogId: t.logID,
LeafIndex: index,
TreeSize: int64(root.TreeSize),
})
if resp != nil && resp.Proof != nil {
if err := proof.VerifyInclusion(rfc6962.DefaultHasher, uint64(index), root.TreeSize, resp.GetLeaf().MerkleLeafHash, resp.Proof.Hashes, root.RootHash); err != nil {
return &Response{
Status: status.Code(err),
Err: err,
}
}
return &Response{
Status: status.Code(err),
Err: err,
GetLeafAndProofResult: &trillian.GetEntryAndProofResponse{
Proof: resp.Proof,
Leaf: resp.Leaf,
SignedLogRoot: rootResp.GetLatestResult.SignedLogRoot,
},
}
}
return &Response{
Status: status.Code(err),
Err: err,
}
}
func (t *TrillianClient) GetLatest(leafSizeInt int64) *Response {
ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
defer cancel()
resp, err := t.client.GetLatestSignedLogRoot(ctx,
&trillian.GetLatestSignedLogRootRequest{
LogId: t.logID,
FirstTreeSize: leafSizeInt,
})
return &Response{
Status: status.Code(err),
Err: err,
GetLatestResult: resp,
}
}
func (t *TrillianClient) GetConsistencyProof(firstSize, lastSize int64) *Response {
ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
defer cancel()
resp, err := t.client.GetConsistencyProof(ctx,
&trillian.GetConsistencyProofRequest{
LogId: t.logID,
FirstTreeSize: firstSize,
SecondTreeSize: lastSize,
})
return &Response{
Status: status.Code(err),
Err: err,
GetConsistencyProofResult: resp,
}
}
func (t *TrillianClient) getProofByHash(hashValue []byte) *Response {
ctx, cancel := context.WithTimeout(t.context, 20*time.Second)
defer cancel()
rootResp := t.GetLatest(0)
if rootResp.Err != nil {
return &Response{
Status: status.Code(rootResp.Err),
Err: rootResp.Err,
}
}
root, err := unmarshalLogRoot(rootResp.GetLatestResult.SignedLogRoot.LogRoot)
if err != nil {
return &Response{
Status: status.Code(rootResp.Err),
Err: rootResp.Err,
}
}
// issue 1308: if the tree is empty, there's no way we can return a proof
if root.TreeSize == 0 {
return &Response{
Status: codes.NotFound,
Err: status.Error(codes.NotFound, "tree is empty"),
}
}
resp, err := t.client.GetInclusionProofByHash(ctx,
&trillian.GetInclusionProofByHashRequest{
LogId: t.logID,
LeafHash: hashValue,
TreeSize: int64(root.TreeSize),
})
if resp != nil {
v := client.NewLogVerifier(rfc6962.DefaultHasher)
for _, proof := range resp.Proof {
if err := v.VerifyInclusionByHash(&root, hashValue, proof); err != nil {
return &Response{
Status: status.Code(err),
Err: err,
}
}
}
// Return an inclusion proof response containing the verified proofs and the latest signed log root
return &Response{
Status: status.Code(err),
Err: err,
getProofResult: &trillian.GetInclusionProofByHashResponse{
Proof: resp.Proof,
SignedLogRoot: rootResp.GetLatestResult.SignedLogRoot,
},
}
}
return &Response{
Status: status.Code(err),
Err: err,
}
}
func CreateAndInitTree(ctx context.Context, adminClient trillian.TrillianAdminClient, logClient trillian.TrillianLogClient) (*trillian.Tree, error) {
t, err := adminClient.CreateTree(ctx, &trillian.CreateTreeRequest{
Tree: &trillian.Tree{
TreeType: trillian.TreeType_LOG,
TreeState: trillian.TreeState_ACTIVE,
MaxRootDuration: durationpb.New(time.Hour),
},
})
if err != nil {
return nil, fmt.Errorf("create tree: %w", err)
}
if err := client.InitLog(ctx, t, logClient); err != nil {
return nil, fmt.Errorf("init log: %w", err)
}
log.Logger.Infof("Created new tree with ID: %v", t.TreeId)
return t, nil
}
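A hedged sketch of driving this wrapper, assuming a gRPC connection to Trillian and an existing tree ID; the package name and the import path of the package above are assumptions:

```go
package rekorexample

import (
	"context"
	"fmt"

	"github.com/google/trillian"
	"github.com/sigstore/rekor/pkg/util" // assumed import path for the package above
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// appendLeaf queues a leaf on the given tree and waits until an inclusion
// proof for it has been fetched and verified.
func appendLeaf(ctx context.Context, conn *grpc.ClientConn, treeID int64, payload []byte) error {
	tc := util.NewTrillianClient(ctx, trillian.NewTrillianLogClient(conn), treeID)

	resp := tc.AddLeaf(payload)
	if resp.Status != codes.OK {
		return fmt.Errorf("AddLeaf: %v (status %v)", resp.Err, resp.Status)
	}
	leaf := resp.GetLeafAndProofResult.Leaf
	fmt.Printf("leaf %d included, Merkle hash %x\n", leaf.LeafIndex, leaf.MerkleLeafHash)
	return nil
}
```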


@ -32,8 +32,6 @@ import (
"time"
"golang.org/x/crypto/openpgp"
"github.com/sigstore/rekor/pkg/generated/models"
)
var (
@ -377,19 +375,6 @@ func CreateArtifact(t *testing.T, artifactPath string) string {
return artifact
}
func extractLogEntry(t *testing.T, le models.LogEntry) models.LogEntryAnon {
t.Helper()
if len(le) != 1 {
t.Fatal("expected length to be 1, is actually", len(le))
}
for _, v := range le {
return v
}
// this should never happen
return models.LogEntryAnon{}
}
func write(t *testing.T, data string, path string) {
t.Helper()
if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil {


@ -1,92 +0,0 @@
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package util

import (
	"strings"

	validator "github.com/go-playground/validator/v10"
)

// ValidateSHA512Value ensures that the supplied string matches the
// following format: [sha512:]<128 hexadecimal characters>
// where [sha512:] is optional
func ValidateSHA512Value(v string) error {
	var prefix, hash string

	split := strings.SplitN(v, ":", 2)
	switch len(split) {
	case 1:
		hash = split[0]
	case 2:
		prefix = split[0]
		hash = split[1]
	}

	s := struct {
		Prefix string `validate:"omitempty,oneof=sha512"`
		Hash   string `validate:"required,len=128,hexadecimal"`
	}{prefix, hash}

	validate := validator.New()
	return validate.Struct(s)
}

// ValidateSHA256Value ensures that the supplied string matches the following format:
// [sha256:]<64 hexadecimal characters>
// where [sha256:] is optional
func ValidateSHA256Value(v string) error {
	var prefix, hash string

	split := strings.SplitN(v, ":", 2)
	switch len(split) {
	case 1:
		hash = split[0]
	case 2:
		prefix = split[0]
		hash = split[1]
	}

	s := struct {
		Prefix string `validate:"omitempty,oneof=sha256"`
		Hash   string `validate:"required,len=64,hexadecimal"`
	}{prefix, hash}

	validate := validator.New()
	return validate.Struct(s)
}

// ValidateSHA1Value ensures that the supplied string matches the following format:
// [sha1:]<40 hexadecimal characters>
// where [sha1:] is optional
func ValidateSHA1Value(v string) error {
	var prefix, hash string

	split := strings.SplitN(v, ":", 2)
	switch len(split) {
	case 1:
		hash = split[0]
	case 2:
		prefix = split[0]
		hash = split[1]
	}

	s := struct {
		Prefix string `validate:"omitempty,oneof=sha1"`
		Hash   string `validate:"required,len=40,hexadecimal"`
	}{prefix, hash}

	validate := validator.New()
	return validate.Struct(s)
}
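
The three helpers above differ only in the accepted prefix and digest length. To make the format they enforce concrete without pulling in go-playground/validator (the dependency this update drops from the vendor tree), an equivalent check can be sketched with the standard library alone. The function and variable names below are hypothetical, and this is not the code rekor actually adopted.

package util

import (
	"fmt"
	"regexp"
	"strings"
)

var sha256HexRe = regexp.MustCompile(`^[0-9a-fA-F]{64}$`)

// validateSHA256ValueStdlib is a hypothetical standard-library-only version of
// ValidateSHA256Value above: an optional "sha256:" prefix followed by exactly
// 64 hexadecimal characters.
func validateSHA256ValueStdlib(v string) error {
	hash := v
	if prefix, rest, ok := strings.Cut(v, ":"); ok {
		if prefix != "" && prefix != "sha256" {
			return fmt.Errorf("unsupported prefix %q, expected \"sha256\"", prefix)
		}
		hash = rest
	}
	if !sha256HexRe.MatchString(hash) {
		return fmt.Errorf("expected 64 hexadecimal characters, got %q", hash)
	}
	return nil
}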

View File

@ -1,58 +0,0 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**, it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
Once your CLA is submitted (or if you already submitted one for
another Google project), make a commit adding yourself to the
[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
of your first [pull request][].
[AUTHORS]: AUTHORS
[CONTRIBUTORS]: CONTRIBUTORS
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and set up a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages are able to be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[pull request]: https://help.github.com/articles/creating-a-pull-request

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,25 +0,0 @@
# Merkle
[![Go Reference](https://pkg.go.dev/badge/github.com/transparency-dev/merkle.svg)](https://pkg.go.dev/github.com/transparency-dev/merkle)
[![Go Report Card](https://goreportcard.com/badge/github.com/transparency-dev/merkle)](https://goreportcard.com/report/github.com/transparency-dev/merkle)
[![codecov](https://codecov.io/gh/transparency-dev/merkle/branch/main/graph/badge.svg?token=BBCRAMOBY2)](https://codecov.io/gh/transparency-dev/merkle)
[![Slack Status](https://img.shields.io/badge/Slack-Chat-blue.svg)](https://gtrillian.slack.com/)
## Overview
This repository contains Go code to help create and manipulate Merkle trees, as
well as constructing and verifying various types of proof.
This is the data structure which is used by projects such as
[Trillian](https://github.com/google/trillian) to provide
[verifiable logs](https://transparency.dev/verifiable-data-structures/#verifiable-log).
## Support
* Mailing list: https://groups.google.com/forum/#!forum/trillian-transparency
* Slack: https://gtrillian.slack.com/ (invitation)

View File

@ -1,89 +0,0 @@
// Copyright 2019 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package compact

import "math/bits"

// NodeID identifies a node of a Merkle tree.
//
// The ID consists of a level and index within this level. Levels are numbered
// from 0, which corresponds to the tree leaves. Within each level, nodes are
// numbered with consecutive indices starting from 0.
//
//	L4: ┌───────0───────┐ ...
//	L3: ┌───0───┐ ┌───1───┐ ┌─── ...
//	L2: ┌─0─┐ ┌─1─┐ ┌─2─┐ ┌─3─┐ ┌─4─┐ ...
//	L1: ┌0┐ ┌1┐ ┌2┐ ┌3┐ ┌4┐ ┌5┐ ┌6┐ ┌7┐ ┌8┐ ┌9┐ ...
//	L0: 0 1 2 3 4 5 6 7 8 9 ... ... ... ... ... ...
//
// When the tree is not perfect, the nodes that would complement it to perfect
// are called ephemeral. Algorithms that operate with ephemeral nodes still map
// them to the same address space.
type NodeID struct {
	Level uint
	Index uint64
}

// NewNodeID returns a NodeID with the passed in node coordinates.
func NewNodeID(level uint, index uint64) NodeID {
	return NodeID{Level: level, Index: index}
}

// Parent returns the ID of the parent node.
func (id NodeID) Parent() NodeID {
	return NewNodeID(id.Level+1, id.Index>>1)
}

// Sibling returns the ID of the sibling node.
func (id NodeID) Sibling() NodeID {
	return NewNodeID(id.Level, id.Index^1)
}

// Coverage returns the [begin, end) range of leaves covered by the node.
func (id NodeID) Coverage() (uint64, uint64) {
	return id.Index << id.Level, (id.Index + 1) << id.Level
}

// RangeNodes appends the IDs of the nodes that comprise the [begin, end)
// compact range to the given slice, and returns the new slice. The caller may
// pre-allocate space with the help of the RangeSize function.
func RangeNodes(begin, end uint64, ids []NodeID) []NodeID {
	left, right := Decompose(begin, end)

	pos := begin
	// Iterate over perfect subtrees along the left border of the range, ordered
	// from lower to upper levels.
	for bit := uint64(0); left != 0; pos, left = pos+bit, left^bit {
		level := uint(bits.TrailingZeros64(left))
		bit = uint64(1) << level
		ids = append(ids, NewNodeID(level, pos>>level))
	}

	// Iterate over perfect subtrees along the right border of the range, ordered
	// from upper to lower levels.
	for bit := uint64(0); right != 0; pos, right = pos+bit, right^bit {
		level := uint(bits.Len64(right)) - 1
		bit = uint64(1) << level
		ids = append(ids, NewNodeID(level, pos>>level))
	}

	return ids
}

// RangeSize returns the number of nodes in the [begin, end) compact range.
func RangeSize(begin, end uint64) int {
	left, right := Decompose(begin, end)
	return bits.OnesCount64(left) + bits.OnesCount64(right)
}
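
As a worked example of the functions above (using the upstream import path of this removed vendored package): the range [2, 9) decomposes into the perfect sub-ranges [2, 4), [4, 8) and [8, 9), so Decompose returns the masks 0b110 and 0b1 and RangeNodes yields three node IDs.

package main

import (
	"fmt"

	"github.com/transparency-dev/merkle/compact"
)

func main() {
	begin, end := uint64(2), uint64(9)

	left, right := compact.Decompose(begin, end)
	fmt.Printf("left=%b right=%b\n", left, right) // left=110 right=1: sub-ranges of sizes 2, 4, 1

	ids := compact.RangeNodes(begin, end, nil)
	for _, id := range ids {
		b, e := id.Coverage()
		fmt.Printf("level=%d index=%d covers [%d, %d)\n", id.Level, id.Index, b, e)
	}
	// Expected, per the algorithm above:
	//   level=1 index=1 covers [2, 4)
	//   level=2 index=1 covers [4, 8)
	//   level=0 index=8 covers [8, 9)
}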

View File

@ -1,264 +0,0 @@
// Copyright 2019 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package compact provides compact Merkle tree data structures.
package compact
import (
"bytes"
"errors"
"fmt"
"math/bits"
)
// HashFn computes an internal node's hash using the hashes of its child nodes.
type HashFn func(left, right []byte) []byte
// VisitFn visits the node with the specified ID and hash.
type VisitFn func(id NodeID, hash []byte)
// RangeFactory allows creating compact ranges with the specified hash
// function, which must not be nil, and must not be changed.
type RangeFactory struct {
Hash HashFn
}
// NewRange creates a Range for [begin, end) with the given set of hashes. The
// hashes correspond to the roots of the minimal set of perfect sub-trees
// covering the [begin, end) leaves range, ordered left to right.
func (f *RangeFactory) NewRange(begin, end uint64, hashes [][]byte) (*Range, error) {
if end < begin {
return nil, fmt.Errorf("invalid range: end=%d, want >= %d", end, begin)
}
if got, want := len(hashes), RangeSize(begin, end); got != want {
return nil, fmt.Errorf("invalid hashes: got %d values, want %d", got, want)
}
return &Range{f: f, begin: begin, end: end, hashes: hashes}, nil
}
// NewEmptyRange returns a new Range for an empty [begin, begin) range. The
// value of begin defines where the range will start growing from when entries
// are appended to it.
func (f *RangeFactory) NewEmptyRange(begin uint64) *Range {
return &Range{f: f, begin: begin, end: begin}
}
// Range represents a compact Merkle tree range for leaf indices [begin, end).
//
// It contains the minimal set of perfect subtrees whose leaves comprise this
// range. The structure is efficiently mergeable with other compact ranges that
// share one of the endpoints with it.
//
// For more details, see
// https://github.com/transparency-dev/merkle/blob/main/docs/compact_ranges.md.
type Range struct {
f *RangeFactory
begin uint64
end uint64
hashes [][]byte
}
// Begin returns the first index covered by the range (inclusive).
func (r *Range) Begin() uint64 {
return r.begin
}
// End returns the last index covered by the range (exclusive).
func (r *Range) End() uint64 {
return r.end
}
// Hashes returns sub-tree hashes corresponding to the minimal set of perfect
// sub-trees covering the [begin, end) range, ordered left to right.
func (r *Range) Hashes() [][]byte {
return r.hashes
}
// Append extends the compact range by appending the passed in hash to it. It
// reports all the added nodes through the visitor function (if non-nil).
func (r *Range) Append(hash []byte, visitor VisitFn) error {
if visitor != nil {
visitor(NewNodeID(0, r.end), hash)
}
return r.appendImpl(r.end+1, hash, nil, visitor)
}
// AppendRange extends the compact range by merging in the other compact range
// from the right. It uses the tree hasher to calculate hashes of newly created
// nodes, and reports them through the visitor function (if non-nil).
func (r *Range) AppendRange(other *Range, visitor VisitFn) error {
if other.f != r.f {
return errors.New("incompatible ranges")
}
if got, want := other.begin, r.end; got != want {
return fmt.Errorf("ranges are disjoint: other.begin=%d, want %d", got, want)
}
if len(other.hashes) == 0 { // The other range is empty, merging is trivial.
return nil
}
return r.appendImpl(other.end, other.hashes[0], other.hashes[1:], visitor)
}
// GetRootHash returns the root hash of the Merkle tree represented by this
// compact range. Requires the range to start at index 0. If the range is
// empty, returns nil.
//
// If visitor is not nil, it is called with all "ephemeral" nodes (i.e. the
// ones rooting imperfect subtrees) along the right border of the tree.
func (r *Range) GetRootHash(visitor VisitFn) ([]byte, error) {
if r.begin != 0 {
return nil, fmt.Errorf("begin=%d, want 0", r.begin)
}
ln := len(r.hashes)
if ln == 0 {
return nil, nil
}
hash := r.hashes[ln-1]
// All non-perfect subtree hashes along the right border of the tree
// correspond to the parents of all perfect subtree nodes except the lowest
// one (therefore the loop skips it).
for i, size := ln-2, r.end; i >= 0; i-- {
hash = r.f.Hash(r.hashes[i], hash)
if visitor != nil {
size &= size - 1 // Delete the previous node.
level := uint(bits.TrailingZeros64(size)) + 1 // Compute the parent level.
index := size >> level // And its horizontal index.
visitor(NewNodeID(level, index), hash)
}
}
return hash, nil
}
// Equal compares two Ranges for equality.
func (r *Range) Equal(other *Range) bool {
if r.f != other.f || r.begin != other.begin || r.end != other.end {
return false
}
if len(r.hashes) != len(other.hashes) {
return false
}
for i := range r.hashes {
if !bytes.Equal(r.hashes[i], other.hashes[i]) {
return false
}
}
return true
}
// appendImpl extends the compact range by merging the [r.end, end) compact
// range into it. The other compact range is decomposed into a seed hash and
// all the other hashes (possibly none). The method uses the tree hasher to
// calculate hashes of newly created nodes, and reports them through the
// visitor function (if non-nil).
func (r *Range) appendImpl(end uint64, seed []byte, hashes [][]byte, visitor VisitFn) error {
// Bits [low, high) of r.end encode the merge path, i.e. the sequence of node
// merges that transforms the two compact ranges into one.
low, high := getMergePath(r.begin, r.end, end)
if high < low {
high = low
}
index := r.end >> low
// Now bits [0, high-low) of index encode the merge path.
// The number of one bits in index is the number of nodes from the left range
// that will be merged, and zero bits correspond to the nodes in the right
// range. Below we make sure that both ranges have enough hashes, which can
// be false only in case the data is corrupted in some way.
ones := bits.OnesCount64(index & (1<<(high-low) - 1))
if ln := len(r.hashes); ln < ones {
return fmt.Errorf("corrupted lhs range: got %d hashes, want >= %d", ln, ones)
}
if ln, zeros := len(hashes), int(high-low)-ones; ln < zeros {
return fmt.Errorf("corrupted rhs range: got %d hashes, want >= %d", ln+1, zeros+1)
}
// Some of the trailing nodes of the left compact range, and some of the
// leading nodes of the right range, are sequentially merged with the seed,
// according to the mask. All new nodes are reported through the visitor.
idx1, idx2 := len(r.hashes), 0
for h := low; h < high; h++ {
if index&1 == 0 {
seed = r.f.Hash(seed, hashes[idx2])
idx2++
} else {
idx1--
seed = r.f.Hash(r.hashes[idx1], seed)
}
index >>= 1
if visitor != nil {
visitor(NewNodeID(h+1, index), seed)
}
}
// All nodes from both ranges that have not been merged are bundled together
// with the "merged" seed node.
r.hashes = append(append(r.hashes[:idx1], seed), hashes[idx2:]...)
r.end = end
return nil
}
// getMergePath returns the merging path between the compact range [begin, mid)
// and [mid, end). The path is represented as a range of bits within mid, with
// bit indices [low, high). A bit value of 1 on level i of mid means that the
// node on this level merges with the corresponding node in the left compact
// range, whereas 0 represents merging with the right compact range. If the
// path is empty then high <= low.
//
// The output is not specified if begin <= mid <= end doesn't hold, but the
// function never panics.
func getMergePath(begin, mid, end uint64) (uint, uint) {
low := bits.TrailingZeros64(mid)
high := 64
if begin != 0 {
high = bits.Len64(mid ^ (begin - 1))
}
if high2 := bits.Len64((mid - 1) ^ end); high2 < high {
high = high2
}
return uint(low), uint(high - 1)
}
// Decompose splits the [begin, end) range into a minimal number of sub-ranges,
// each of which is of the form [m * 2^k, (m+1) * 2^k), i.e. of length 2^k, for
// some integers m, k >= 0.
//
// The sequence of sizes is returned encoded as bitmasks left and right, where:
// - a 1 bit in a bitmask denotes a sub-range of the corresponding size 2^k
// - left mask bits in LSB-to-MSB order encode the left part of the sequence
// - right mask bits in MSB-to-LSB order encode the right part
//
// The corresponding values of m are not returned (they can be calculated from
// begin and the sub-range sizes).
//
// For example, (begin, end) values of (0b110, 0b11101) would indicate a
// sequence of tree sizes: 2,8; 8,4,1.
//
// The output is not specified if begin > end, but the function never panics.
func Decompose(begin, end uint64) (uint64, uint64) {
// Special case, as the code below works only if begin != 0, or end < 2^63.
if begin == 0 {
return 0, end
}
xbegin := begin - 1
// Find where paths to leaves #begin-1 and #end diverge, and mask the upper
// bits away, as only the nodes strictly below this point are in the range.
d := bits.Len64(xbegin^end) - 1
mask := uint64(1)<<uint(d) - 1
// The left part of the compact range consists of all nodes strictly below
// and to the right from the path to leaf #begin-1, corresponding to zero
// bits in the masked part of begin-1. Likewise, the right part consists of
// nodes below and to the left from the path to leaf #end, corresponding to
// ones in the masked part of end.
return ^xbegin & mask, end & mask
}
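
A short, hedged sketch of typical usage, combining the Range type above with the rfc6962 hasher that appears later in this diff: hash each leaf, append it to an initially empty range starting at 0, then ask for the root. The leaf contents are arbitrary example data.

package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"github.com/transparency-dev/merkle/compact"
	"github.com/transparency-dev/merkle/rfc6962"
)

func main() {
	hasher := rfc6962.DefaultHasher
	rf := compact.RangeFactory{Hash: hasher.HashChildren}

	// Build a compact range for leaves [0, 3) by appending leaf hashes.
	cr := rf.NewEmptyRange(0)
	for _, leaf := range [][]byte{[]byte("a"), []byte("b"), []byte("c")} {
		if err := cr.Append(hasher.HashLeaf(leaf), nil); err != nil {
			log.Fatal(err)
		}
	}

	root, err := cr.GetRootHash(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("tree size:", cr.End(), "root:", hex.EncodeToString(root))
}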

View File

@ -1,32 +0,0 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package merkle provides Merkle tree interfaces and implementation.
package merkle

// TODO(pavelkalinnikov): Remove this root package. The only interface provided
// here does not have to exist, and can be [re-]defined on the user side, such
// as in compact or proof package.

// LogHasher provides the hash functions needed to compute dense merkle trees.
type LogHasher interface {
	// EmptyRoot supports returning a special case for the root of an empty tree.
	EmptyRoot() []byte
	// HashLeaf computes the hash of a leaf that exists.
	HashLeaf(leaf []byte) []byte
	// HashChildren computes interior nodes.
	HashChildren(l, r []byte) []byte
	// Size returns the number of bytes the Hash* functions will return.
	Size() int
}

View File

@ -1,191 +0,0 @@
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package proof contains helpers for constructing log Merkle tree proofs.
package proof
import (
"fmt"
"math/bits"
"github.com/transparency-dev/merkle/compact"
)
// Nodes contains information on how to construct a log Merkle tree proof. It
// supports any proof that has at most one ephemeral node, such as inclusion
// and consistency proofs defined in RFC 6962.
type Nodes struct {
// IDs contains the IDs of non-ephemeral nodes sufficient to build the proof.
// If an ephemeral node is needed for a proof, it can be recomputed based on
// a subset of nodes in this list.
IDs []compact.NodeID
// begin is the beginning index (inclusive) into the IDs[begin:end] subslice
// of the nodes which will be used to re-create the ephemeral node.
begin int
// end is the ending (exclusive) index into the IDs[begin:end] subslice of
// the nodes which will be used to re-create the ephemeral node.
end int
// ephem is the ID of the ephemeral node in the proof. This node is a common
// ancestor of all nodes in IDs[begin:end]. It is the node that otherwise
// would have been used in the proof if the tree was perfect.
ephem compact.NodeID
}
// Inclusion returns the information on how to fetch and construct an inclusion
// proof for the given leaf index in a log Merkle tree of the given size. It
// requires 0 <= index < size.
func Inclusion(index, size uint64) (Nodes, error) {
if index >= size {
return Nodes{}, fmt.Errorf("index %d out of bounds for tree size %d", index, size)
}
return nodes(index, 0, size).skipFirst(), nil
}
// Consistency returns the information on how to fetch and construct a
// consistency proof between the two given tree sizes of a log Merkle tree. It
// requires 0 <= size1 <= size2.
func Consistency(size1, size2 uint64) (Nodes, error) {
if size1 > size2 {
return Nodes{}, fmt.Errorf("tree size %d > %d", size1, size2)
}
if size1 == size2 || size1 == 0 {
return Nodes{IDs: []compact.NodeID{}}, nil
}
// Find the root of the biggest perfect subtree that ends at size1.
level := uint(bits.TrailingZeros64(size1))
index := (size1 - 1) >> level
// The consistency proof consists of this node (except if size1 is a power of
// two, in which case adding this node would be redundant because the client
// is assumed to know it from a checkpoint), and nodes of the inclusion proof
// into this node in the tree of size2.
p := nodes(index, level, size2)
// Handle the case when size1 is a power of 2.
if index == 0 {
return p.skipFirst(), nil
}
return p, nil
}
// nodes returns the node IDs necessary to prove that the (level, index) node
// is included in the Merkle tree of the given size.
func nodes(index uint64, level uint, size uint64) Nodes {
// Compute the `fork` node, where the path from root to (level, index) node
// diverges from the path to (0, size).
//
// The sibling of this node is the ephemeral node which represents a subtree
// that is not complete in the tree of the given size. To compute the hash
// of the ephemeral node, we need all the non-ephemeral nodes that cover the
// same range of leaves.
//
// The `inner` variable is how many layers up from (level, index) the `fork`
// and the ephemeral nodes are.
inner := bits.Len64(index^(size>>level)) - 1
fork := compact.NewNodeID(level+uint(inner), index>>inner)
begin, end := fork.Coverage()
left := compact.RangeSize(0, begin)
right := compact.RangeSize(end, size)
node := compact.NewNodeID(level, index)
// Pre-allocate the exact number of nodes for the proof, in order:
// - The seed node for which we are building the proof.
// - The `inner` nodes at each level up to the fork node.
// - The `right` nodes, comprising the ephemeral node.
// - The `left` nodes, completing the coverage of the whole [0, size) range.
nodes := append(make([]compact.NodeID, 0, 1+inner+right+left), node)
// The first portion of the proof consists of the siblings for nodes of the
// path going up to the level at which the ephemeral node appears.
for ; node.Level < fork.Level; node = node.Parent() {
nodes = append(nodes, node.Sibling())
}
// This portion of the proof covers the range [begin, end) under it. The
// ranges to the left and to the right from it remain to be covered.
// Add all the nodes (potentially none) that cover the right range, and
// represent the ephemeral node. Reverse them so that the Rehash method can
// process hashes in the convenient order, from lower to upper levels.
len1 := len(nodes)
nodes = compact.RangeNodes(end, size, nodes)
reverse(nodes[len(nodes)-right:])
len2 := len(nodes)
// Add the nodes that cover the left range, ordered increasingly by level.
nodes = compact.RangeNodes(0, begin, nodes)
reverse(nodes[len(nodes)-left:])
// nodes[len1:len2] contains the nodes representing the ephemeral node. If
// it's empty, make it zero. Note that it can also contain a single node.
// Depending on the preference of the layer above, it may or may not be
// considered ephemeral.
if len1 >= len2 {
len1, len2 = 0, 0
}
return Nodes{IDs: nodes, begin: len1, end: len2, ephem: fork.Sibling()}
}
// Ephem returns the ephemeral node, and indices begin and end, such that
// IDs[begin:end] slice contains the child nodes of the ephemeral node.
//
// The list is empty iff there are no ephemeral nodes in the proof. Some
// examples of when this can happen: a proof in a perfect tree; an inclusion
// proof for a leaf in a perfect subtree at the right edge of the tree.
func (n Nodes) Ephem() (compact.NodeID, int, int) {
return n.ephem, n.begin, n.end
}
// Rehash computes the proof based on the slice of node hashes corresponding to
// their IDs in the n.IDs field. The slices must be of the same length. The hc
// parameter computes a node's hash based on hashes of its children.
//
// Warning: The passed-in slice of hashes can be modified in-place.
func (n Nodes) Rehash(h [][]byte, hc func(left, right []byte) []byte) ([][]byte, error) {
if got, want := len(h), len(n.IDs); got != want {
return nil, fmt.Errorf("got %d hashes but expected %d", got, want)
}
cursor := 0
// Scan the list of node hashes, and store the rehashed list in-place.
// Invariant: cursor <= i, and h[:cursor] contains all the hashes of the
// rehashed list after scanning h up to index i-1.
for i, ln := 0, len(h); i < ln; i, cursor = i+1, cursor+1 {
hash := h[i]
if i >= n.begin && i < n.end {
// Scan the block of node hashes that need rehashing.
for i++; i < n.end; i++ {
hash = hc(h[i], hash)
}
i--
}
h[cursor] = hash
}
return h[:cursor], nil
}
func (n Nodes) skipFirst() Nodes {
n.IDs = n.IDs[1:]
// Fixup the indices into the IDs slice.
if n.begin < n.end {
n.begin--
n.end--
}
return n
}
func reverse(ids []compact.NodeID) {
for i, j := 0, len(ids)-1; i < j; i, j = i+1, j-1 {
ids[i], ids[j] = ids[j], ids[i]
}
}
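
As a hedged illustration of Inclusion above: for leaf index 2 in a tree of size 5, the proof needs leaf 3, the node covering leaves 0 and 1, and leaf 4, where leaf 4 also stands in for the ephemeral node at level 2. The expected values in the comments were worked out from the algorithm above.

package main

import (
	"fmt"
	"log"

	"github.com/transparency-dev/merkle/proof"
)

func main() {
	// Inclusion proof for leaf index 2 in a tree of size 5.
	n, err := proof.Inclusion(2, 5)
	if err != nil {
		log.Fatal(err)
	}
	for _, id := range n.IDs {
		fmt.Printf("need node level=%d index=%d\n", id.Level, id.Index)
	}
	ephem, begin, end := n.Ephem()
	fmt.Printf("ephemeral node level=%d index=%d rebuilt from IDs[%d:%d]\n",
		ephem.Level, ephem.Index, begin, end)
	// Expected: (0,3), (1,0), (0,4); ephemeral node (2,1) rebuilt from IDs[2:3].
}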

View File

@ -1,176 +0,0 @@
// Copyright 2017 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proof
import (
"bytes"
"errors"
"fmt"
"math/bits"
"github.com/transparency-dev/merkle"
)
// RootMismatchError occurs when an inclusion proof fails.
type RootMismatchError struct {
ExpectedRoot []byte
CalculatedRoot []byte
}
func (e RootMismatchError) Error() string {
return fmt.Sprintf("calculated root:\n%v\n does not match expected root:\n%v", e.CalculatedRoot, e.ExpectedRoot)
}
func verifyMatch(calculated, expected []byte) error {
if !bytes.Equal(calculated, expected) {
return RootMismatchError{ExpectedRoot: expected, CalculatedRoot: calculated}
}
return nil
}
// VerifyInclusion verifies the correctness of the inclusion proof for the leaf
// with the specified hash and index, relatively to the tree of the given size
// and root hash. Requires 0 <= index < size.
func VerifyInclusion(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte, root []byte) error {
calcRoot, err := RootFromInclusionProof(hasher, index, size, leafHash, proof)
if err != nil {
return err
}
return verifyMatch(calcRoot, root)
}
// RootFromInclusionProof calculates the expected root hash for a tree of the
// given size, provided a leaf index and hash with the corresponding inclusion
// proof. Requires 0 <= index < size.
func RootFromInclusionProof(hasher merkle.LogHasher, index, size uint64, leafHash []byte, proof [][]byte) ([]byte, error) {
if index >= size {
return nil, fmt.Errorf("index is beyond size: %d >= %d", index, size)
}
if got, want := len(leafHash), hasher.Size(); got != want {
return nil, fmt.Errorf("leafHash has unexpected size %d, want %d", got, want)
}
inner, border := decompInclProof(index, size)
if got, want := len(proof), inner+border; got != want {
return nil, fmt.Errorf("wrong proof size %d, want %d", got, want)
}
res := chainInner(hasher, leafHash, proof[:inner], index)
res = chainBorderRight(hasher, res, proof[inner:])
return res, nil
}
// VerifyConsistency checks that the passed-in consistency proof is valid
// between the passed in tree sizes, with respect to the corresponding root
// hashes. Requires 0 <= size1 <= size2.
func VerifyConsistency(hasher merkle.LogHasher, size1, size2 uint64, proof [][]byte, root1, root2 []byte) error {
switch {
case size2 < size1:
return fmt.Errorf("size2 (%d) < size1 (%d)", size1, size2)
case size1 == size2:
if len(proof) > 0 {
return errors.New("size1=size2, but proof is not empty")
}
return verifyMatch(root1, root2)
case size1 == 0:
// Any size greater than 0 is consistent with size 0.
if len(proof) > 0 {
return fmt.Errorf("expected empty proof, but got %d components", len(proof))
}
return nil // Proof OK.
case len(proof) == 0:
return errors.New("empty proof")
}
inner, border := decompInclProof(size1-1, size2)
shift := bits.TrailingZeros64(size1)
inner -= shift // Note: shift < inner if size1 < size2.
// The proof includes the root hash for the sub-tree of size 2^shift.
seed, start := proof[0], 1
if size1 == 1<<uint(shift) { // Unless size1 is that very 2^shift.
seed, start = root1, 0
}
if got, want := len(proof), start+inner+border; got != want {
return fmt.Errorf("wrong proof size %d, want %d", got, want)
}
proof = proof[start:]
// Now len(proof) == inner+border, and proof is effectively a suffix of
// inclusion proof for entry |size1-1| in a tree of size |size2|.
// Verify the first root.
mask := (size1 - 1) >> uint(shift) // Start chaining from level |shift|.
hash1 := chainInnerRight(hasher, seed, proof[:inner], mask)
hash1 = chainBorderRight(hasher, hash1, proof[inner:])
if err := verifyMatch(hash1, root1); err != nil {
return err
}
// Verify the second root.
hash2 := chainInner(hasher, seed, proof[:inner], mask)
hash2 = chainBorderRight(hasher, hash2, proof[inner:])
return verifyMatch(hash2, root2)
}
// decompInclProof breaks down inclusion proof for a leaf at the specified
// |index| in a tree of the specified |size| into 2 components. The splitting
// point between them is where paths to leaves |index| and |size-1| diverge.
// Returns lengths of the bottom and upper proof parts correspondingly. The sum
// of the two determines the correct length of the inclusion proof.
func decompInclProof(index, size uint64) (int, int) {
inner := innerProofSize(index, size)
border := bits.OnesCount64(index >> uint(inner))
return inner, border
}
func innerProofSize(index, size uint64) int {
return bits.Len64(index ^ (size - 1))
}
// chainInner computes a subtree hash for a node on or below the tree's right
// border. Assumes |proof| hashes are ordered from lower levels to upper, and
// |seed| is the initial subtree/leaf hash on the path located at the specified
// |index| on its level.
func chainInner(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte {
for i, h := range proof {
if (index>>uint(i))&1 == 0 {
seed = hasher.HashChildren(seed, h)
} else {
seed = hasher.HashChildren(h, seed)
}
}
return seed
}
// chainInnerRight computes a subtree hash like chainInner, but only takes
// hashes to the left from the path into consideration, which effectively means
// the result is a hash of the corresponding earlier version of this subtree.
func chainInnerRight(hasher merkle.LogHasher, seed []byte, proof [][]byte, index uint64) []byte {
for i, h := range proof {
if (index>>uint(i))&1 == 1 {
seed = hasher.HashChildren(h, seed)
}
}
return seed
}
// chainBorderRight chains proof hashes along tree borders. This differs from
// inner chaining because |proof| contains only left-side subtree hashes.
func chainBorderRight(hasher merkle.LogHasher, seed []byte, proof [][]byte) []byte {
for _, h := range proof {
seed = hasher.HashChildren(h, seed)
}
return seed
}
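
A minimal end-to-end check of VerifyInclusion and VerifyConsistency, assuming the rfc6962 hasher defined just below in this diff: in a two-leaf tree, the inclusion proof for leaf 0 is simply leaf 1's hash, and that same single hash is the consistency proof from size 1 to size 2.

package main

import (
	"fmt"
	"log"

	"github.com/transparency-dev/merkle/proof"
	"github.com/transparency-dev/merkle/rfc6962"
)

func main() {
	hasher := rfc6962.DefaultHasher

	leaf0 := hasher.HashLeaf([]byte("hello"))
	leaf1 := hasher.HashLeaf([]byte("world"))
	root := hasher.HashChildren(leaf0, leaf1)

	// Leaf 0 is included in the size-2 tree; the proof is the sibling leaf hash.
	if err := proof.VerifyInclusion(hasher, 0, 2, leaf0, [][]byte{leaf1}, root); err != nil {
		log.Fatal(err)
	}

	// The size-1 tree (whose root is leaf0) is consistent with the size-2 tree,
	// using the same single hash as the proof.
	if err := proof.VerifyConsistency(hasher, 1, 2, [][]byte{leaf1}, leaf0, root); err != nil {
		log.Fatal(err)
	}
	fmt.Println("inclusion and consistency proofs verified")
}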

View File

@ -1,68 +0,0 @@
// Copyright 2016 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package rfc6962 provides hashing functionality according to RFC6962.
package rfc6962

import (
	"crypto"
	_ "crypto/sha256" // SHA256 is the default algorithm.
)

// Domain separation prefixes
const (
	RFC6962LeafHashPrefix = 0
	RFC6962NodeHashPrefix = 1
)

// DefaultHasher is a SHA256 based LogHasher.
var DefaultHasher = New(crypto.SHA256)

// Hasher implements the RFC6962 tree hashing algorithm.
type Hasher struct {
	crypto.Hash
}

// New creates a new Hashers.LogHasher on the passed in hash function.
func New(h crypto.Hash) *Hasher {
	return &Hasher{Hash: h}
}

// EmptyRoot returns a special case for an empty tree.
func (t *Hasher) EmptyRoot() []byte {
	return t.New().Sum(nil)
}

// HashLeaf returns the Merkle tree leaf hash of the data passed in through leaf.
// The data in leaf is prefixed by the LeafHashPrefix.
func (t *Hasher) HashLeaf(leaf []byte) []byte {
	h := t.New()
	h.Write([]byte{RFC6962LeafHashPrefix})
	h.Write(leaf)
	return h.Sum(nil)
}

// HashChildren returns the inner Merkle tree node hash of the two child nodes l and r.
// The hashed structure is NodeHashPrefix||l||r.
func (t *Hasher) HashChildren(l, r []byte) []byte {
	h := t.New()
	b := append(append(append(
		make([]byte, 0, 1+len(l)+len(r)),
		RFC6962NodeHashPrefix),
		l...),
		r...)
	h.Write(b)
	return h.Sum(nil)
}
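
A small demonstration of the domain separation above, assuming the package's upstream import path: HashLeaf computes SHA-256 over 0x00 followed by the leaf bytes, so the manual construction below produces the same digest. The leaf content is arbitrary example data.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"github.com/transparency-dev/merkle/rfc6962"
)

func main() {
	h := rfc6962.DefaultHasher
	leaf := []byte("L123456")

	// HashLeaf computes SHA-256(0x00 || leaf), per the RFC 6962 leaf prefix.
	fmt.Println("HashLeaf:", hex.EncodeToString(h.HashLeaf(leaf)))

	// The manual construction with the 0x00 prefix yields the same digest.
	manual := sha256.Sum256(append([]byte{0x00}, leaf...))
	fmt.Println("manual:  ", hex.EncodeToString(manual[:]))
}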

View File

@ -1,19 +0,0 @@
coverage:
  range: 80..100
  round: down
  precision: 2

  status:
    project:                   # measuring the overall project coverage
      default:                 # context, you can create multiple ones with custom titles
        enabled: yes           # must be yes|true to enable this status
        target: 100            # specify the target coverage for each commit status
                               # option: "auto" (must increase from parent commit or pull request base)
                               # option: "X%" a static target percentage to hit
        if_not_found: success  # if parent is not found report status as success, error, or failure
        if_ci_failed: error    # if ci fails report status as success, error, or failure

# Also update COVER_IGNORE_PKGS in the Makefile.
ignore:
  - /internal/gen-atomicint/
  - /internal/gen-valuewrapper/

15
vendor/go.uber.org/atomic/.gitignore generated vendored
View File

@ -1,15 +0,0 @@
/bin
.DS_Store
/vendor
cover.html
cover.out
lint.log
# Binaries
*.test
# Profiling output
*.prof
# Output of fossa analyzer
/fossa

Some files were not shown because too many files have changed in this diff.