Update vendor containers/(common, buildah, image, storage)

Signed-off-by: Daniel J Walsh <dwalsh@redhat.com>
Daniel J Walsh 2023-07-14 07:19:56 -04:00
parent bb72016f58
commit 13a9500166
GPG Key ID: A2DF901DABE2C028
118 changed files with 5804 additions and 1146 deletions

go.mod

@ -12,14 +12,14 @@ require (
github.com/container-orchestrated-devices/container-device-interface v0.6.0
github.com/containernetworking/cni v1.1.2
github.com/containernetworking/plugins v1.3.0
github.com/containers/buildah v1.31.0
github.com/containers/common v0.55.1
github.com/containers/buildah v1.31.1-0.20230710135949-9c9a344b9874
github.com/containers/common v0.55.1-0.20230713173316-9e5d4a690901
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/image/v5 v5.26.1
github.com/containers/libhvee v0.3.0
github.com/containers/ocicrypt v1.1.7
github.com/containers/psgo v1.8.0
github.com/containers/storage v1.48.0
github.com/containers/storage v1.48.1-0.20230707125135-6dc2de36ca86
github.com/coreos/go-systemd/v22 v22.5.0
github.com/coreos/stream-metadata-go v0.4.3
github.com/crc-org/vfkit v0.1.1
@ -62,7 +62,7 @@ require (
github.com/vbauerster/mpb/v8 v8.4.0
github.com/vishvananda/netlink v1.2.1-beta.2
go.etcd.io/bbolt v1.3.7
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df
golang.org/x/net v0.12.0
golang.org/x/sync v0.3.0
golang.org/x/sys v0.10.0
@ -90,7 +90,7 @@ require (
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/coreos/go-oidc/v3 v3.6.0 // indirect
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20230701045847-91eb5f1b7744 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/digitalocean/go-libvirt v0.0.0-20220804181439-8648fbde413e // indirect
github.com/disiqueira/gotree/v3 v3.0.2 // indirect
@ -106,9 +106,9 @@ require (
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/errors v0.20.4 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/loads v0.21.2 // indirect
github.com/go-openapi/runtime v0.26.0 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
@ -117,7 +117,7 @@ require (
github.com/go-openapi/validate v0.22.1 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.0 // indirect
github.com/go-playground/validator/v10 v10.14.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@ -133,7 +133,7 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jinzhu/copier v0.3.5 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/compress v1.16.6 // indirect
github.com/klauspost/compress v1.16.7 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/kr/fs v0.1.0 // indirect
@ -163,8 +163,8 @@ require (
github.com/rivo/uniseg v0.4.4 // indirect
github.com/seccomp/libseccomp-golang v0.10.0 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sigstore/fulcio v1.3.1 // indirect
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 // indirect
github.com/sigstore/fulcio v1.3.2 // indirect
github.com/sigstore/rekor v1.2.2 // indirect
github.com/sigstore/sigstore v1.7.1 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
@ -179,16 +179,17 @@ require (
go.mongodb.org/mongo-driver v1.11.3 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel v1.15.0 // indirect
go.opentelemetry.io/otel/trace v1.15.0 // indirect
go.opentelemetry.io/otel v1.16.0 // indirect
go.opentelemetry.io/otel/metric v1.16.0 // indirect
go.opentelemetry.io/otel/trace v1.16.0 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/oauth2 v0.9.0 // indirect
golang.org/x/tools v0.9.3 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
google.golang.org/grpc v1.55.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
google.golang.org/grpc v1.56.1 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
@ -196,5 +197,3 @@ require (
)
replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
replace github.com/containers/common => github.com/containers/common v0.55.1-0.20230703090011-0ab70cf5664d

go.sum

@ -245,10 +245,10 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
github.com/containers/buildah v1.31.0 h1:NgVtEyTsR7e/XLTSJElbInnGPjdDGNHqLKADPHzaUGg=
github.com/containers/buildah v1.31.0/go.mod h1:tcgXcGhqw3kw49RapUS7tskEhxKLG4eVFJKA/QzgwNU=
github.com/containers/common v0.55.1-0.20230703090011-0ab70cf5664d h1:qtrq0ZsAAd+lI9eYBpq/1Fz9cxE1D95AvoB6g4ES0wg=
github.com/containers/common v0.55.1-0.20230703090011-0ab70cf5664d/go.mod h1:c+q6NRSLy4pKMYzb7Hsu6NPy0LnIjH2CJnplR5w3Bk8=
github.com/containers/buildah v1.31.1-0.20230710135949-9c9a344b9874 h1:w6tQ7mQ9aQ5hguCo8wECUiIBbLFfSClmSpjyPiU7N9E=
github.com/containers/buildah v1.31.1-0.20230710135949-9c9a344b9874/go.mod h1:KOi66P8FQvReqloGYaLBVPeRDm/3yx9d2/5zvR7Je+8=
github.com/containers/common v0.55.1-0.20230713173316-9e5d4a690901 h1:S1tzzO9XFnnmjyKEOdj1Ey4dzRn9Y7LyZC19Vyv7bxw=
github.com/containers/common v0.55.1-0.20230713173316-9e5d4a690901/go.mod h1:fJVsIJZze4e7pFO2IYRKfRCF9qlnenEPDRmi8IQTINQ=
github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/image/v5 v5.26.1 h1:8y3xq8GO/6y8FR+nAedHPsAFiAtOrab9qHTBpbqaX8g=
@ -265,8 +265,8 @@ github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8Ns
github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
github.com/containers/storage v1.48.0 h1:wiPs8J2xiFoOEAhxHDRtP6A90Jzj57VqzLRXOqeizns=
github.com/containers/storage v1.48.0/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCmU5a5GhTBae0=
github.com/containers/storage v1.48.1-0.20230707125135-6dc2de36ca86 h1:MZM2+8TDTvuN7erRxEoTBfxFpkfEd8fUOzygK9CNeVU=
github.com/containers/storage v1.48.1-0.20230707125135-6dc2de36ca86/go.mod h1:URATDSDqWwSRt7YsTNwYFu7zrrpJ8fKrmWTB1/R8apw=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@ -298,8 +298,8 @@ github.com/crc-org/vfkit v0.1.1/go.mod h1:vjZiHDacUi0iLosvwyLvqJvJXQhByzlLQbMkdI
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 h1:8Pq5UNTC+/UfvcOPKQGZoKCkeF+ZaKa4wJ9OS2gsQQM=
github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyberphone/json-canonicalization v0.0.0-20230701045847-91eb5f1b7744 h1:MqMnhqqfDsYF2bjxndKIqvISTIRBb1KCzrIwVzKJHe0=
github.com/cyberphone/json-canonicalization v0.0.0-20230701045847-91eb5f1b7744/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
@ -410,19 +410,21 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy
github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc=
github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
@ -444,6 +446,7 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
@ -453,8 +456,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js=
github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-rod/rod v0.113.3 h1:oLiKZW721CCMwA5g7977cWfcAKQ+FuosP47Zf1QiDrA=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@ -665,8 +668,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
@ -858,7 +861,7 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -871,7 +874,7 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@ -883,7 +886,7 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
@ -909,10 +912,10 @@ github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y=
github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI=
github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12/go.mod h1:8c+a8Yo7r8gKuYbIaz+c3oOdw9iMXx+tMdOg2+b+2jQ=
github.com/sigstore/fulcio v1.3.2 h1:92ubG3JPgpjeKKFWROoWj095SvVKBdbolq22t95qBUg=
github.com/sigstore/fulcio v1.3.2/go.mod h1:RCktAN81mgf6fz7ydiT6X2mSjhMGxaRXfCCa7srztTo=
github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks=
github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@ -1070,11 +1073,13 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/otel v1.15.0 h1:NIl24d4eiLJPM0vKn4HjLYM+UZf6gSfi9Z+NmCxkWbk=
go.opentelemetry.io/otel v1.15.0/go.mod h1:qfwLEbWhLPk5gyWrne4XnF0lC8wtywbuJbgfAE3zbek=
go.opentelemetry.io/otel/sdk v1.15.0 h1:jZTCkRRd08nxD6w7rIaZeDNGZGGQstH3SfLQ3ZsKICk=
go.opentelemetry.io/otel/trace v1.15.0 h1:5Fwje4O2ooOxkfyqI/kJwxWotggDLix4BSAvpE1wlpo=
go.opentelemetry.io/otel/trace v1.15.0/go.mod h1:CUsmE2Ht1CRkvE8OsMESvraoZrrcgD1J2W8GV1ev0Y4=
go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE=
go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@ -1114,8 +1119,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME=
golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1137,8 +1142,8 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1415,8 +1420,8 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@ -1435,8 +1440,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ=
google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -1463,6 +1468,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=


@ -63,9 +63,17 @@ var _ = Describe("Podman secret", func() {
err := os.WriteFile(secretFilePath, []byte("mysecret"), 0755)
Expect(err).ToNot(HaveOccurred())
session := podmanTest.Podman([]string{"secret", "create", "?!", secretFilePath})
badName := "foo/bar"
session := podmanTest.Podman([]string{"secret", "create", badName, secretFilePath})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
Expect(session.ErrorToString()).To(Equal(fmt.Sprintf("Error: secret name %q can not include '=', '/', ',', or the '\\0' (NULL) and be between 1 and 253 characters: invalid secret name", badName)))
badName = "foo=bar"
session = podmanTest.Podman([]string{"secret", "create", badName, secretFilePath})
session.WaitWithDefaultTimeout()
Expect(session).To(ExitWithError())
Expect(session.ErrorToString()).To(Equal(fmt.Sprintf("Error: secret name %q can not include '=', '/', ',', or the '\\0' (NULL) and be between 1 and 253 characters: invalid secret name", badName)))
})
It("podman secret inspect", func() {


@ -29,7 +29,7 @@ const (
// identify working containers.
Package = "buildah"
// Version for the Package. Also used by .packit.sh for Packit builds.
Version = "1.31.0"
Version = "1.32.0-dev"
// DefaultRuntime if containers.conf fails.
DefaultRuntime = "runc"


@ -246,43 +246,25 @@ On openSUSE Tumbleweed, install go via `zypper in go`, then run this command:
The build steps for Buildah on SUSE / openSUSE are the same as for Fedora, above.
### Ubuntu
### Ubuntu/Debian
In Ubuntu jammy you can use these commands:
In Ubuntu 22.10 (Kinetic) or Debian 12 (Bookworm) you can use these commands:
```
sudo apt-get -y -qq update
sudo apt-get -y install bats btrfs-progs git libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo go-md2man make
sudo apt-get -y install golang-1.18
sudo apt-get -y install bats btrfs-progs git go-md2man golang libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev make skopeo
```
Then to install Buildah on Ubuntu follow the steps in this example:
Then to install Buildah follow the steps in this example:
```
mkdir ~/buildah
cd ~/buildah
export GOPATH=`pwd`
git clone https://github.com/containers/buildah ./src/github.com/containers/buildah
cd ./src/github.com/containers/buildah
PATH=/usr/lib/go-1.18/bin:$PATH make runc all SECURITYTAGS="apparmor seccomp"
git clone https://github.com/containers/buildah
cd buildah
make runc all SECURITYTAGS="apparmor seccomp"
sudo make install install.runc
buildah --help
```
### Debian
To install the required dependencies, you can use those commands, tested under Debian GNU/Linux amd64 9.3 (stretch):
```
gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D
sudo gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyrings/projectatomic-ppa.gpg
sudo echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list
sudo apt update
sudo apt -y install -t stretch-backports golang
sudo apt -y install bats btrfs-tools git libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
```
The build steps on Debian are otherwise the same as Ubuntu, above.
## Vendoring - Dependency Management
This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor-in-container` to synchronize the code with the go module and repopulate the `./vendor` directory.
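As a concrete sketch of that workflow (the module path and revision below are placeholders; only `make vendor-in-container` is taken from the paragraph above), a dependency bump looks roughly like this:
```
# point go.mod at the desired upstream revision (placeholder module/revision)
go get github.com/containers/common@<commit-or-tag>
go mod tidy

# repopulate ./vendor so CI does not flag an unclean state
make vendor-in-container

# commit go.mod, go.sum and the regenerated vendor tree together
git add go.mod go.sum vendor/
```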


@ -6,7 +6,6 @@ import (
"os/exec"
"path/filepath"
"strings"
"syscall"
"errors"
@ -146,74 +145,6 @@ func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error
return nil
}
// MountWithOptions creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
// But allows api to set custom workdir, upperdir and other overlay options
// Following API is being used by podman at the moment
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
mergeDir := filepath.Join(contentDir, "merge")
// Create overlay mount options for rw/ro.
var overlayOptions string
if opts.ReadOnly {
// Read-only overlay mounts require two lower layer.
lowerTwo := filepath.Join(contentDir, "lower")
if err := os.Mkdir(lowerTwo, 0755); err != nil {
return mount, err
}
overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo)
} else {
// Read-write overlay mounts want a lower, upper and a work layer.
workDir := filepath.Join(contentDir, "work")
upperDir := filepath.Join(contentDir, "upper")
if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" {
workDir = opts.WorkDirOptionFragment
upperDir = opts.UpperDirOptionFragment
}
st, err := os.Stat(source)
if err != nil {
return mount, err
}
if err := os.Chmod(upperDir, st.Mode()); err != nil {
return mount, err
}
if stat, ok := st.Sys().(*syscall.Stat_t); ok {
if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil {
return mount, err
}
}
overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir)
}
mountProgram := findMountProgram(opts.GraphOpts)
if mountProgram != "" {
if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil {
return mount, err
}
mount.Source = mergeDir
mount.Destination = dest
mount.Type = "bind"
mount.Options = []string{"bind", "slave"}
return mount, nil
}
if unshare.IsRootless() {
/* If a mount_program is not specified, fallback to try mounting native overlay. */
overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions)
}
mount.Source = mergeDir
mount.Destination = dest
mount.Type = "overlay"
mount.Options = strings.Split(overlayOptions, ",")
return mount, nil
}
// Convert ":" to "\:", the path which will be overlay mounted need to be escaped
func escapeColon(source string) string {
return strings.ReplaceAll(source, ":", "\\:")


@ -0,0 +1,31 @@
package overlay
import (
//"fmt"
//"os"
//"path/filepath"
//"strings"
//"syscall"
"errors"
//"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
)
// MountWithOptions creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
// But allows api to set custom workdir, upperdir and other overlay options
// Following API is being used by podman at the moment
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
if opts.ReadOnly {
// Read-only overlay mounts can be simulated with nullfs
mount.Source = source
mount.Destination = dest
mount.Type = "nullfs"
mount.Options = []string{"ro"}
return mount, nil
} else {
return mount, errors.New("read/write overlay mounts not supported on freebsd")
}
}
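For orientation only (not part of the diff): in the read-only case the returned `specs.Mount` amounts to a plain read-only nullfs mount of the source onto the destination, roughly:
```
# approximate shell equivalent of the read-only branch above (illustrative paths)
mount -t nullfs -o ro /path/to/source /path/to/container/dest
```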


@ -0,0 +1,80 @@
package overlay
import (
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
)
// MountWithOptions creates a subdir of the contentDir based on the source directory
// from the source system. It then mounts up the source directory on to the
// generated mount point and returns the mount point to the caller.
// But allows api to set custom workdir, upperdir and other overlay options
// Following API is being used by podman at the moment
func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) {
mergeDir := filepath.Join(contentDir, "merge")
// Create overlay mount options for rw/ro.
var overlayOptions string
if opts.ReadOnly {
// Read-only overlay mounts require two lower layer.
lowerTwo := filepath.Join(contentDir, "lower")
if err := os.Mkdir(lowerTwo, 0755); err != nil {
return mount, err
}
overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo)
} else {
// Read-write overlay mounts want a lower, upper and a work layer.
workDir := filepath.Join(contentDir, "work")
upperDir := filepath.Join(contentDir, "upper")
if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" {
workDir = opts.WorkDirOptionFragment
upperDir = opts.UpperDirOptionFragment
}
st, err := os.Stat(source)
if err != nil {
return mount, err
}
if err := os.Chmod(upperDir, st.Mode()); err != nil {
return mount, err
}
if stat, ok := st.Sys().(*syscall.Stat_t); ok {
if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil {
return mount, err
}
}
overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir)
}
mountProgram := findMountProgram(opts.GraphOpts)
if mountProgram != "" {
if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil {
return mount, err
}
mount.Source = mergeDir
mount.Destination = dest
mount.Type = "bind"
mount.Options = []string{"bind", "slave"}
return mount, nil
}
if unshare.IsRootless() {
/* If a mount_program is not specified, fallback to try mounting native overlay. */
overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions)
}
mount.Source = mergeDir
mount.Destination = dest
mount.Type = "overlay"
mount.Options = strings.Split(overlayOptions, ",")
return mount, nil
}


@ -17,6 +17,7 @@ import (
"github.com/containers/buildah/define"
"github.com/containers/buildah/internal"
"github.com/containers/buildah/pkg/jail"
"github.com/containers/buildah/pkg/overlay"
"github.com/containers/buildah/pkg/parse"
"github.com/containers/buildah/util"
"github.com/containers/common/libnetwork/resolvconf"
@ -322,13 +323,22 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
}
parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) {
var foundrw, foundro bool
var foundrw, foundro, foundO bool
var upperDir string
for _, opt := range options {
switch opt {
case "rw":
foundrw = true
case "ro":
foundro = true
case "O":
foundO = true
}
if strings.HasPrefix(opt, "upperdir") {
splitOpt := strings.SplitN(opt, "=", 2)
if len(splitOpt) > 1 {
upperDir = splitOpt[1]
}
}
}
if !foundrw && !foundro {
@ -337,6 +347,30 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string,
if mountType == "bind" || mountType == "rbind" {
mountType = "nullfs"
}
if foundO {
containerDir, err := b.store.ContainerDirectory(b.ContainerID)
if err != nil {
return specs.Mount{}, err
}
contentDir, err := overlay.TempDir(containerDir, idMaps.rootUID, idMaps.rootGID)
if err != nil {
return specs.Mount{}, fmt.Errorf("failed to create TempDir in the %s directory: %w", containerDir, err)
}
overlayOpts := overlay.Options{
RootUID: idMaps.rootUID,
RootGID: idMaps.rootGID,
UpperDirOptionFragment: upperDir,
GraphOpts: b.store.GraphOptions(),
}
overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts)
if err == nil {
b.TempVolumes[contentDir] = true
}
return overlayMount, err
}
return specs.Mount{
Destination: container,
Type: mountType,


@ -394,10 +394,12 @@ func filterID(value string) filterFunc {
}
}
// filterDigest creates an digest filter for matching the specified value.
// filterDigest creates a digest filter for matching the specified value.
func filterDigest(value string) filterFunc {
// TODO: return an error if value is not a digest
// if _, err := digest.Parse(value); err != nil {...}
return func(img *Image) (bool, error) {
return string(img.Digest()) == value, nil
return img.hasDigest(value), nil
}
}


@ -144,6 +144,9 @@ func (i *Image) ID() string {
// possibly many digests that we have stored for the image, so many
// applications are better off using the entire list returned by Digests().
func (i *Image) Digest() digest.Digest {
// TODO: we return the image digest or the one of the manifest list
// which can lead to issues depending on the callers' assumptions.
// Hence, deprecate in favor of Digest_s_.
return i.storageImage.Digest
}
@ -154,6 +157,18 @@ func (i *Image) Digests() []digest.Digest {
return i.storageImage.Digests
}
// hasDigest returns whether the specified value matches any digest of the
// image.
func (i *Image) hasDigest(value string) bool {
// TODO: change the argument to a typed digest.Digest
for _, d := range i.Digests() {
if string(d) == value {
return true
}
}
return false
}
// IsReadOnly returns whether the image is set read only.
func (i *Image) IsReadOnly() bool {
return i.storageImage.ReadOnly
@ -656,6 +671,8 @@ func (i *Image) NamedTaggedRepoTags() ([]reference.NamedTagged, error) {
// NamedRepoTags returns the repotags associated with the image as a
// slice of reference.Named.
func (i *Image) NamedRepoTags() ([]reference.Named, error) {
// FIXME: the NamedRepoTags name is a bit misleading as it can return
// repo@digest values if thats how an image was pulled.
var repoTags []reference.Named
for _, name := range i.Names() {
parsed, err := reference.Parse(name)
@ -669,32 +686,37 @@ func (i *Image) NamedRepoTags() ([]reference.Named, error) {
return repoTags, nil
}
// inRepoTags looks for the specified name/tag pair in the image's repo tags.
func (i *Image) inRepoTags(namedTagged reference.NamedTagged) (reference.Named, error) {
// inRepoTags looks for the specified name/tag in the image's repo tags. If
// `ignoreTag` is set, only the repo must match and the tag is ignored.
func (i *Image) inRepoTags(namedTagged reference.NamedTagged, ignoreTag bool) (reference.Named, error) {
repoTags, err := i.NamedRepoTags()
if err != nil {
return nil, err
}
pairs, err := ToNameTagPairs(repoTags)
if err != nil {
return nil, err
}
name := namedTagged.Name()
tag := namedTagged.Tag()
for _, pair := range pairs {
if tag != pair.Tag {
for _, r := range repoTags {
if !ignoreTag {
var repoTag string
tagged, isTagged := r.(reference.NamedTagged)
if isTagged {
repoTag = tagged.Tag()
}
if !isTagged || tag != repoTag {
continue
}
}
repoName := r.Name()
if !strings.HasSuffix(repoName, name) {
continue
}
if !strings.HasSuffix(pair.Name, name) {
continue
if len(repoName) == len(name) { // full match
return r, nil
}
if len(pair.Name) == len(name) { // full match
return pair.named, nil
}
if pair.Name[len(pair.Name)-len(name)-1] == '/' { // matches at repo
return pair.named, nil
if repoName[len(repoName)-len(name)-1] == '/' { // matches at repo
return r, nil
}
}


@ -217,6 +217,11 @@ func (i *Image) getManifestList() (manifests.List, error) {
// image index (OCI). This information may be critical to make certain
// execution paths more robust (e.g., suppress certain errors).
func (i *Image) IsManifestList(ctx context.Context) (bool, error) {
// FIXME: due to `ImageDigestBigDataKey` we'll always check the
// _last-written_ manifest which is causing issues for multi-arch image
// pulls.
//
// See https://github.com/containers/common/pull/1505#discussion_r1242677279.
ref, err := i.StorageReference()
if err != nil {
return false, err


@ -100,22 +100,22 @@ func ToNameTagPairs(repoTags []reference.Named) ([]NameTagPair, error) {
// normalizeTaggedDigestedString strips the tag off the specified string iff it
// is tagged and digested. Note that the tag is entirely ignored to match
// Docker behavior.
func normalizeTaggedDigestedString(s string) (string, error) {
func normalizeTaggedDigestedString(s string) (string, reference.Named, error) {
// Note that the input string is not expected to be parseable, so we
// return it verbatim in error cases.
ref, err := reference.Parse(s)
if err != nil {
return "", err
return "", nil, err
}
named, ok := ref.(reference.Named)
if !ok {
return s, nil
return s, nil, nil
}
named, err = normalizeTaggedDigestedNamed(named)
if err != nil {
return "", err
return "", nil, err
}
return named.String(), nil
return named.String(), named, nil
}
// normalizeTaggedDigestedNamed strips the tag off the specified named


@ -86,7 +86,7 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP
// Docker compat: strip off the tag iff name is tagged and digested
// (e.g., fedora:latest@sha256...). In that case, the tag is stripped
// off and entirely ignored. The digest is the sole source of truth.
normalizedName, normalizeError := normalizeTaggedDigestedString(name)
normalizedName, _, normalizeError := normalizeTaggedDigestedString(name)
if normalizeError != nil {
return nil, normalizeError
}


@ -16,6 +16,7 @@ import (
"github.com/containers/storage"
deepcopy "github.com/jinzhu/copier"
jsoniter "github.com/json-iterator/go"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
@ -239,7 +240,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
// Docker compat: strip off the tag iff name is tagged and digested
// (e.g., fedora:latest@sha256...). In that case, the tag is stripped
// off and entirely ignored. The digest is the sole source of truth.
normalizedName, err := normalizeTaggedDigestedString(name)
normalizedName, possiblyUnqualifiedNamedReference, err := normalizeTaggedDigestedString(name)
if err != nil {
return nil, "", err
}
@ -259,7 +260,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
// If the name clearly refers to a local image, try to look it up.
if byFullID || byDigest {
img, err := r.lookupImageInLocalStorage(originalName, name, options)
img, err := r.lookupImageInLocalStorage(originalName, name, nil, options)
if err != nil {
return nil, "", err
}
@ -297,7 +298,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
}
for _, candidate := range candidates {
img, err := r.lookupImageInLocalStorage(name, candidate.String(), options)
img, err := r.lookupImageInLocalStorage(name, candidate.String(), candidate, options)
if err != nil {
return nil, "", err
}
@ -308,7 +309,7 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
// The specified name may refer to a short ID. Note that this *must*
// happen after the short-name expansion as done above.
img, err := r.lookupImageInLocalStorage(name, name, options)
img, err := r.lookupImageInLocalStorage(name, name, nil, options)
if err != nil {
return nil, "", err
}
@ -316,21 +317,51 @@ func (r *Runtime) LookupImage(name string, options *LookupImageOptions) (*Image,
return img, name, err
}
return r.lookupImageInDigestsAndRepoTags(name, options)
return r.lookupImageInDigestsAndRepoTags(name, possiblyUnqualifiedNamedReference, options)
}
// lookupImageInLocalStorage looks up the specified candidate for name in the
// storage and checks whether it's matching the system context.
func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *LookupImageOptions) (*Image, error) {
func (r *Runtime) lookupImageInLocalStorage(name, candidate string, namedCandidate reference.Named, options *LookupImageOptions) (*Image, error) {
logrus.Debugf("Trying %q ...", candidate)
img, err := r.store.Image(candidate)
if err != nil && !errors.Is(err, storage.ErrImageUnknown) {
return nil, err
var err error
var img *storage.Image
var ref types.ImageReference
// FIXME: the lookup logic for manifest lists needs improvement.
// See https://github.com/containers/common/pull/1505#discussion_r1242677279
// for details.
// For images pulled by tag, Image.Names does not currently contain a
// repo@digest value, so such an input would not match directly in
// c/storage.
if namedCandidate != nil {
namedCandidate = reference.TagNameOnly(namedCandidate)
ref, err = storageTransport.Transport.NewStoreReference(r.store, namedCandidate, "")
if err != nil {
return nil, err
}
img, err = storageTransport.Transport.GetStoreImage(r.store, ref)
if err != nil {
if errors.Is(err, storage.ErrImageUnknown) {
return nil, nil
}
return nil, err
}
// NOTE: we must reparse the reference another time below since
// an ordinary image may have resolved into a per-platform image
// without any regard to options.{Architecture,OS,Variant}.
} else {
img, err = r.store.Image(candidate)
if err != nil {
if errors.Is(err, storage.ErrImageUnknown) {
return nil, nil
}
return nil, err
}
}
if img == nil {
return nil, nil
}
ref, err := storageTransport.Transport.ParseStoreReference(r.store, img.ID)
ref, err = storageTransport.Transport.ParseStoreReference(r.store, img.ID)
if err != nil {
return nil, err
}
@ -417,76 +448,71 @@ func (r *Runtime) lookupImageInLocalStorage(name, candidate string, options *Loo
// lookupImageInDigestsAndRepoTags attempts to match name against any image in
// the local containers storage. If name is digested, it will be compared
// against image digests. Otherwise, it will be looked up in the repo tags.
func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, options *LookupImageOptions) (*Image, string, error) {
// Until now, we've tried very hard to find an image but now it is time
// for limbo. If the image includes a digest that we couldn't detect
// verbatim in the storage, we must have a look at all digests of all
// images. Those may change over time (e.g., via manifest lists).
// Both Podman and Buildah want us to do that dance.
func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, possiblyUnqualifiedNamedReference reference.Named, options *LookupImageOptions) (*Image, string, error) {
originalName := name // we may change name below
if possiblyUnqualifiedNamedReference == nil {
return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown)
}
// In case of a digested reference, we strip off the digest and require
// any image matching the repo/tag to also match the specified digest.
var requiredDigest digest.Digest
digested, isDigested := possiblyUnqualifiedNamedReference.(reference.Digested)
if isDigested {
requiredDigest = digested.Digest()
possiblyUnqualifiedNamedReference = reference.TrimNamed(possiblyUnqualifiedNamedReference)
name = possiblyUnqualifiedNamedReference.String()
}
if !shortnames.IsShortName(name) {
return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown)
}
// Docker compat: make sure to add the "latest" tag if needed. The tag
// will be ignored if we're looking for a digest match.
possiblyUnqualifiedNamedReference = reference.TagNameOnly(possiblyUnqualifiedNamedReference)
namedTagged, isNamedTagged := possiblyUnqualifiedNamedReference.(reference.NamedTagged)
if !isNamedTagged {
// NOTE: this should never happen since we already stripped off
// the digest.
return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", originalName, storage.ErrImageUnknown)
}
allImages, err := r.ListImages(context.Background(), nil, nil)
if err != nil {
return nil, "", err
}
ref, err := reference.Parse(name) // Warning! This is not ParseNormalizedNamed
if err != nil {
return nil, "", err
}
named, isNamed := ref.(reference.Named)
if !isNamed {
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
digested, isDigested := named.(reference.Digested)
if isDigested {
logrus.Debug("Looking for image with matching recorded digests")
digest := digested.Digest()
for _, image := range allImages {
for _, d := range image.Digests() {
if d != digest {
continue
}
// Also make sure that the matching image fits all criteria (e.g., manifest list).
if _, err := r.lookupImageInLocalStorage(name, image.ID(), options); err != nil {
return nil, "", err
}
return image, name, nil
}
}
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
if !shortnames.IsShortName(name) {
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
}
named = reference.TagNameOnly(named) // Make sure to add ":latest" if needed
namedTagged, isNammedTagged := named.(reference.NamedTagged)
if !isNammedTagged {
// NOTE: this should never happen since we already know it's
// not a digested reference.
return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", name, storage.ErrImageUnknown)
}
for _, image := range allImages {
named, err := image.inRepoTags(namedTagged)
named, err := image.inRepoTags(namedTagged, isDigested)
if err != nil {
return nil, "", err
}
if named == nil {
continue
}
img, err := r.lookupImageInLocalStorage(name, named.String(), options)
img, err := r.lookupImageInLocalStorage(name, named.String(), named, options)
if err != nil {
return nil, "", err
}
if img != nil {
return img, named.String(), err
if isDigested {
if !img.hasDigest(requiredDigest.String()) {
continue
}
named = reference.TrimNamed(named)
canonical, err := reference.WithDigest(named, requiredDigest)
if err != nil {
return nil, "", fmt.Errorf("building canonical reference with digest %q and matched %q: %w", requiredDigest.String(), named.String(), err)
}
return img, canonical.String(), nil
}
return img, named.String(), nil
}
}
return nil, "", fmt.Errorf("%s: %w", name, storage.ErrImageUnknown)
return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown)
}
// ResolveName resolves the specified name. If the name resolves to a local


@ -28,9 +28,7 @@ func GetBridgeInterfaceNames(n NetUtil) []string {
func GetUsedNetworkNames(n NetUtil) []string {
names := make([]string, 0, n.Len())
n.ForEach(func(net types.Network) {
if net.Driver == types.BridgeNetworkDriver {
names = append(names, net.NetworkInterface)
}
names = append(names, net.Name)
})
return names
}


@ -5,7 +5,6 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/containers/common/pkg/secrets/filedriver"
@ -50,8 +49,8 @@ var errDataSize = errors.New("secret data must be larger than 0 and less than 51
var secretsFile = "secrets.json"
// secretNameRegexp matches valid secret names
// Allowed: 253 [a-zA-Z0-9-_.] characters, and the start and end character must be [a-zA-Z0-9]
var secretNameRegexp = regexp.Delayed(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`)
// Allowed: 253 characters, excluding ,/=\0
var secretNameRegexp = regexp.Delayed("^[^,/=\000]+$")
// SecretsManager holds information on handling secrets
//
@ -247,11 +246,6 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, opti
// Delete removes all secret metadata and secret data associated with the specified secret.
// Delete takes a name, ID, or partial ID.
func (s *SecretsManager) Delete(nameOrID string) (string, error) {
err := validateSecretName(nameOrID)
if err != nil {
return "", err
}
s.lockfile.Lock()
defer s.lockfile.Unlock()
@ -325,8 +319,10 @@ func (s *SecretsManager) LookupSecretData(nameOrID string) (*Secret, []byte, err
// validateSecretName checks if the secret name is valid.
func validateSecretName(name string) error {
if !secretNameRegexp.MatchString(name) || len(name) > 253 || strings.HasSuffix(name, "-") || strings.HasSuffix(name, ".") {
return fmt.Errorf("only 253 [a-zA-Z0-9-_.] characters allowed, and the start and end character must be [a-zA-Z0-9]: %s: %w", name, errInvalidSecretName)
if len(name) == 0 ||
len(name) > 253 ||
!secretNameRegexp.MatchString(name) {
return fmt.Errorf("secret name %q can not include '=', '/', ',', or the '\\0' (NULL) and be between 1 and 253 characters: %w", name, errInvalidSecretName)
}
return nil
}


@ -16,10 +16,6 @@ import (
)
func Validate(user *url.Userinfo, path string, port int, identity string) (*config.Destination, *url.URL, error) {
sock := ""
if strings.Contains(path, "/run") {
sock = strings.Split(path, "/run")[1]
}
// url.Parse NEEDS ssh://, if this ever fails or returns some nonsense, that is why.
uri, err := url.Parse(path)
if err != nil {
@ -43,15 +39,8 @@ func Validate(user *url.Userinfo, path string, port int, identity string) (*conf
uri.User = user
}
uriStr := ""
if len(sock) > 0 {
uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host + "/run" + sock
} else {
uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host
}
dst := config.Destination{
URI: uriStr,
URI: uri.String(),
}
if len(identity) > 0 {


@ -1 +1 @@
1.48.0
1.49.0-dev


@ -191,10 +191,27 @@ type DriverWithDifferOutput struct {
TOCDigest digest.Digest
}
type DifferOutputFormat int
const (
// DifferOutputFormatDir means the output is a directory and it will
// keep the original layout.
DifferOutputFormatDir = iota
// DifferOutputFormatFlat will store the files by their checksum, in the form
// checksum[0:2]/checksum[2:]
DifferOutputFormatFlat
)
// DifferOptions overrides how the differ work
type DifferOptions struct {
// Format defines the destination directory layout format
Format DifferOutputFormat
}
// Differ defines the interface for using a custom differ.
// This API is experimental and can be changed without bumping the major version number.
type Differ interface {
ApplyDiff(dest string, options *archive.TarOptions) (DriverWithDifferOutput, error)
ApplyDiff(dest string, options *archive.TarOptions, differOpts *DifferOptions) (DriverWithDifferOutput, error)
}
// DriverWithDiffer is the interface for direct diff access.


@ -0,0 +1,24 @@
//go:build !linux || !composefs || !cgo
// +build !linux !composefs !cgo
package overlay
import (
"fmt"
)
func composeFsSupported() bool {
return false
}
func generateComposeFsBlob(toc []byte, composefsDir string) error {
return fmt.Errorf("composefs is not supported")
}
func mountComposefsBlob(dataDir, mountPoint string) error {
return fmt.Errorf("composefs is not supported")
}
func enableVerityRecursive(path string) error {
return fmt.Errorf("composefs is not supported")
}


@ -0,0 +1,185 @@
//go:build linux && composefs && cgo
// +build linux,composefs,cgo
package overlay
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
"sync"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/loopback"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
var (
composeFsHelperOnce sync.Once
composeFsHelperPath string
composeFsHelperErr error
)
func getComposeFsHelper() (string, error) {
composeFsHelperOnce.Do(func() {
composeFsHelperPath, composeFsHelperErr = exec.LookPath("composefs-from-json")
})
return composeFsHelperPath, composeFsHelperErr
}
func composeFsSupported() bool {
_, err := getComposeFsHelper()
return err == nil
}
func enableVerity(description string, fd int) error {
enableArg := unix.FsverityEnableArg{
Version: 1,
Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256,
Block_size: 4096,
}
_, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg)))
if e1 != 0 && !errors.Is(e1, unix.EEXIST) {
return fmt.Errorf("failed to enable verity for %q: %w", description, e1)
}
return nil
}
func enableVerityRecursive(path string) error {
walkFn := func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.Type().IsRegular() {
return nil
}
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
if err := enableVerity(path, int(f.Fd())); err != nil {
return err
}
return nil
}
return filepath.WalkDir(path, walkFn)
}
func getComposefsBlob(dataDir string) string {
return filepath.Join(dataDir, "composefs.blob")
}
func generateComposeFsBlob(toc []byte, composefsDir string) error {
if err := os.MkdirAll(composefsDir, 0o700); err != nil {
return err
}
destFile := getComposefsBlob(composefsDir)
writerJson, err := getComposeFsHelper()
if err != nil {
return fmt.Errorf("failed to find composefs-from-json: %w", err)
}
fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644)
if err != nil {
return fmt.Errorf("failed to open output file: %w", err)
}
outFd := os.NewFile(uintptr(fd), "outFd")
fd, err = unix.Open(fmt.Sprintf("/proc/self/fd/%d", outFd.Fd()), unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
outFd.Close()
return fmt.Errorf("failed to dup output file: %w", err)
}
newFd := os.NewFile(uintptr(fd), "newFd")
defer newFd.Close()
err = func() error {
// a scope to close outFd before setting fsverity on the read-only fd.
defer outFd.Close()
cmd := exec.Command(writerJson, "--format=erofs", "--out=/proc/self/fd/3", "/proc/self/fd/0")
cmd.ExtraFiles = []*os.File{outFd}
cmd.Stderr = os.Stderr
cmd.Stdin = bytes.NewReader(toc)
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to convert json to erofs: %w", err)
}
return nil
}()
if err != nil {
return err
}
if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
logrus.Warningf("%s", err)
}
return nil
}
/*
typedef enum {
LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0),
} lcfs_erofs_flag_t;
struct lcfs_erofs_header_s {
uint32_t magic;
uint32_t version;
uint32_t flags;
uint32_t unused[5];
} __attribute__((__packed__));
*/
// hasACL returns true if the erofs blob has ACLs enabled
func hasACL(path string) (bool, error) {
const LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0)
fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
if err != nil {
return false, err
}
defer unix.Close(fd)
// do not worry about checking the magic number; if the file is invalid
// we will fail to mount it anyway
flags := make([]byte, 4)
nread, err := unix.Pread(fd, flags, 8)
if err != nil {
return false, err
}
if nread != 4 {
return false, fmt.Errorf("failed to read flags from %q", path)
}
return binary.LittleEndian.Uint32(flags)&LCFS_EROFS_FLAGS_HAS_ACL != 0, nil
}
func mountComposefsBlob(dataDir, mountPoint string) error {
blobFile := getComposefsBlob(dataDir)
loop, err := loopback.AttachLoopDevice(blobFile)
if err != nil {
return err
}
defer loop.Close()
hasACL, err := hasACL(blobFile)
if err != nil {
return err
}
mountOpts := "ro"
if !hasACL {
mountOpts += ",noacl"
}
return unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts)
}

View File

@ -82,6 +82,8 @@ const (
lowerFile = "lower"
maxDepth = 500
zstdChunkedManifest = "zstd-chunked-manifest"
// idLength represents the number of random characters
// which can be used to create the unique link identifier
// for every layer. If this value is too long then the
@ -780,6 +782,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
}
func (d *Driver) useNaiveDiff() bool {
if d.useComposeFs() {
return true
}
useNaiveDiffLock.Do(func() {
if d.options.mountProgram != "" {
useNaiveDiffOnly = true
@ -1431,6 +1437,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
logLevel = logrus.DebugLevel
}
optsList := options.Options
needsIDMapping := !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == ""
if len(optsList) == 0 {
optsList = strings.Split(d.options.mountOptions, ",")
} else {
@ -1499,12 +1508,76 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
idmappedMountProcessPid := -1
if needsIDMapping {
pid, cleanupFunc, err := idmap.CreateUsernsProcess(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
idmappedMountProcessPid = int(pid)
defer cleanupFunc()
}
composefsLayers := filepath.Join(workDirBase, "composefs-layers")
if err := os.MkdirAll(composefsLayers, 0o700); err != nil {
return "", err
}
skipIDMappingLayers := make(map[string]string)
composeFsLayers := []string{}
composefsMounts := []string{}
defer func() {
for _, m := range composefsMounts {
defer unix.Unmount(m, unix.MNT_DETACH)
}
}()
maybeAddComposefsMount := func(lowerID string, i int) (string, error) {
composefsBlob := d.getComposefsData(lowerID)
_, err = os.Stat(composefsBlob)
if err != nil {
if os.IsNotExist(err) {
return "", nil
}
return "", err
}
logrus.Debugf("overlay: using composefs blob %s for lower %s", composefsBlob, lowerID)
dest := filepath.Join(composefsLayers, fmt.Sprintf("%d", i))
if err := os.MkdirAll(dest, 0o700); err != nil {
return "", err
}
if err := mountComposefsBlob(composefsBlob, dest); err != nil {
return "", err
}
composefsMounts = append(composefsMounts, dest)
composeFsPath, err := d.getDiffPath(lowerID)
if err != nil {
return "", err
}
composeFsLayers = append(composeFsLayers, composeFsPath)
skipIDMappingLayers[composeFsPath] = composeFsPath
return dest, nil
}
diffDir := path.Join(workDirBase, "diff")
if dest, err := maybeAddComposefsMount(id, 0); err != nil {
return "", err
} else if dest != "" {
diffDir = dest
}
// For each lower, resolve its path, and append it and any additional diffN
// directories to the lowers list.
for _, l := range splitLowers {
for i, l := range splitLowers {
if l == "" {
continue
}
lower := ""
newpath := path.Join(d.home, l)
if st, err := os.Stat(newpath); err != nil {
@ -1538,6 +1611,30 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
lower = newpath
}
linkContent, err := os.Readlink(lower)
if err != nil {
return "", err
}
lowerID := filepath.Base(filepath.Dir(linkContent))
composefsMount, err := maybeAddComposefsMount(lowerID, i+1)
if err != nil {
return "", err
}
if composefsMount != "" {
if needsIDMapping {
if err := idmap.CreateIDMappedMount(composefsMount, composefsMount, idmappedMountProcessPid); err != nil {
return "", fmt.Errorf("create mapped mount for %q: %w", composefsMount, err)
}
skipIDMappingLayers[composefsMount] = composefsMount
// overlay takes a reference on the mount, so it is safe to unmount
// the mapped idmounts as soon as the final overlay file system is mounted.
defer unix.Unmount(composefsMount, unix.MNT_DETACH)
}
absLowers = append(absLowers, composefsMount)
continue
}
absLowers = append(absLowers, lower)
diffN = 1
_, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN)))
@ -1548,15 +1645,22 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
if len(composeFsLayers) > 0 {
optsList = append(optsList, "metacopy=on", "redirect_dir=on")
}
absLowers = append(absLowers, composeFsLayers...)
if len(absLowers) == 0 {
absLowers = append(absLowers, path.Join(dir, "empty"))
}
// user namespace requires this to move a directory from lower to upper.
rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
diffDir := path.Join(workDirBase, "diff")
if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
return "", err
}
@ -1596,31 +1700,30 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
if !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" {
if needsIDMapping {
var newAbsDir []string
idMappedMounts := make(map[string]string)
mappedRoot := filepath.Join(d.home, id, "mapped")
if err := os.MkdirAll(mappedRoot, 0o700); err != nil {
return "", err
}
pid, cleanupFunc, err := idmap.CreateUsernsProcess(options.UidMaps, options.GidMaps)
if err != nil {
return "", err
}
defer cleanupFunc()
idMappedMounts := make(map[string]string)
// rewrite the lower dirs to their idmapped mount.
c := 0
for _, absLower := range absLowers {
mappedMountSrc := getMappedMountRoot(absLower)
if _, ok := skipIDMappingLayers[absLower]; ok {
newAbsDir = append(newAbsDir, absLower)
continue
}
root, found := idMappedMounts[mappedMountSrc]
if !found {
root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c))
c++
if err := idmap.CreateIDMappedMount(mappedMountSrc, root, int(pid)); err != nil {
if err := idmap.CreateIDMappedMount(mappedMountSrc, root, idmappedMountProcessPid); err != nil {
return "", fmt.Errorf("create mapped mount for %q on %q: %w", mappedMountSrc, root, err)
}
idMappedMounts[mappedMountSrc] = root
@ -1896,6 +1999,13 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
return os.RemoveAll(stagingDirectory)
}
func (d *Driver) useComposeFs() bool {
if !composeFsSupported() || unshare.IsRootless() {
return false
}
return true
}
// ApplyDiff applies the changes in the new layer using the specified function
func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
var idMappings *idtools.IDMappings
@ -1928,14 +2038,22 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
logrus.Debugf("Applying differ in %s", applyDir)
differOptions := graphdriver.DifferOptions{
Format: graphdriver.DifferOutputFormatDir,
}
if d.useComposeFs() {
differOptions.Format = graphdriver.DifferOutputFormatFlat
}
out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
IgnoreChownErrors: d.options.ignoreChownErrors,
WhiteoutFormat: d.getWhiteoutFormat(),
InUserNS: unshare.IsRootless(),
})
}, &differOptions)
out.Target = applyDir
return out, err
}
@ -1945,17 +2063,28 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
}
diff, err := d.getDiffPath(id)
if d.useComposeFs() {
// FIXME: move this logic into the differ so we don't have to open
// the file twice.
if err := enableVerityRecursive(stagingDirectory); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
logrus.Warningf("%s", err)
}
toc := diffOutput.BigData[zstdChunkedManifest]
if err := generateComposeFsBlob(toc, d.getComposefsData(id)); err != nil {
return err
}
}
diffPath, err := d.getDiffPath(id)
if err != nil {
return err
}
if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) {
if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) {
return err
}
diffOutput.UncompressedDigest = diffOutput.TOCDigest
return os.Rename(stagingDirectory, diff)
return os.Rename(stagingDirectory, diffPath)
}
// DifferTarget gets the location where files are stored for the layer.
@ -2001,6 +2130,11 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
return directory.Size(applyDir)
}
func (d *Driver) getComposefsData(id string) string {
dir := d.dir(id)
return path.Join(dir, "composefs-data")
}
func (d *Driver) getDiffPath(id string) (string, error) {
dir, imagestore, _ := d.dir2(id)
base := dir

View File

@ -92,7 +92,10 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf
return err
}
if s.Dev() != sourceStat.Dev() {
// Don't cross mount points. This ignores file mounts to avoid
// generating a diff which deletes all files following the
// mount.
if s.Dev() != sourceStat.Dev() && s.IsDir() {
return filepath.SkipDir
}

View File

@ -15,6 +15,7 @@ import (
"unsafe"
storage "github.com/containers/storage"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chunked/internal"
"github.com/containers/storage/pkg/ioutils"
jsoniter "github.com/json-iterator/go"
@ -109,7 +110,7 @@ func (c *layersCache) load() error {
}
bigData, err := c.store.LayerBigData(r.ID, cacheKey)
// if the cache areadly exists, read and use it
// if the cache already exists, read and use it
if err == nil {
defer bigData.Close()
metadata, err := readMetadataFromCache(bigData)
@ -122,6 +123,23 @@ func (c *layersCache) load() error {
return err
}
var lcd chunkedLayerData
clFile, err := c.store.LayerBigData(r.ID, chunkedLayerDataKey)
if err != nil && !errors.Is(err, os.ErrNotExist) {
return err
}
if clFile != nil {
cl, err := io.ReadAll(clFile)
if err != nil {
return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
}
json := jsoniter.ConfigCompatibleWithStandardLibrary
if err := json.Unmarshal(cl, &lcd); err != nil {
return err
}
}
// otherwise create it from the layer TOC.
manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey)
if err != nil {
@ -134,7 +152,7 @@ func (c *layersCache) load() error {
return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err)
}
metadata, err := writeCache(manifest, r.ID, c.store)
metadata, err := writeCache(manifest, lcd.Format, r.ID, c.store)
if err == nil {
c.addLayer(r.ID, metadata)
}
@ -211,13 +229,13 @@ type setBigData interface {
// - digest(file.payload))
// - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs)
// - digest(i) for each i in chunks(file payload)
func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) {
func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*metadata, error) {
var vdata bytes.Buffer
tagLen := 0
digestLen := 0
var tagsBuffer bytes.Buffer
toc, err := prepareMetadata(manifest)
toc, err := prepareMetadata(manifest, format)
if err != nil {
return nil, err
}
@ -396,7 +414,7 @@ func readMetadataFromCache(bigData io.Reader) (*metadata, error) {
}, nil
}
func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([]*internal.FileMetadata, error) {
toc, err := unmarshalToc(manifest)
if err != nil {
// ignore errors here. They might be caused by a different manifest format.
@ -404,6 +422,17 @@ func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
return nil, nil //nolint: nilnil
}
switch format {
case graphdriver.DifferOutputFormatDir:
case graphdriver.DifferOutputFormatFlat:
toc.Entries, err = makeEntriesFlat(toc.Entries)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unknown format %q", format)
}
var r []*internal.FileMetadata
chunkSeen := make(map[string]bool)
for i := range toc.Entries {
@ -420,6 +449,7 @@ func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) {
chunkSeen[cd] = true
}
}
return r, nil
}

View File

@ -28,6 +28,7 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/types"
securejoin "github.com/cyphar/filepath-securejoin"
jsoniter "github.com/json-iterator/go"
"github.com/klauspost/compress/zstd"
"github.com/klauspost/pgzip"
digest "github.com/opencontainers/go-digest"
@ -41,6 +42,8 @@ const (
newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY)
containersOverrideXattr = "user.containers.override_stat"
bigDataKey = "zstd-chunked-manifest"
chunkedData = "zstd-chunked-data"
chunkedLayerDataKey = "zstd-chunked-layer-data"
fileTypeZstdChunked = iota
fileTypeEstargz
@ -73,6 +76,11 @@ var xattrsToIgnore = map[string]interface{}{
"security.selinux": true,
}
// chunkedLayerData is used to store additional information about the layer
type chunkedLayerData struct {
Format graphdriver.DifferOutputFormat `json:"format"`
}
func timeToTimespec(time *time.Time) (ts unix.Timespec) {
if time == nil || time.IsZero() {
// Return UTIME_OMIT special value
@ -241,7 +249,7 @@ func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name str
srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0)
if err != nil {
return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err)
return false, nil, 0, fmt.Errorf("open source file under target rootfs (%s): %w", name, err)
}
defer srcFile.Close()
@ -844,7 +852,14 @@ func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *ar
}, nil
}
func (d *destinationFile) Close() error {
func (d *destinationFile) Close() (Err error) {
defer func() {
err := d.file.Close()
if Err == nil {
Err = err
}
}()
manifestChecksum, err := digest.Parse(d.metadata.Digest)
if err != nil {
return err
@ -1317,7 +1332,39 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, cop
return false, nil
}
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) {
func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMetadata, error) {
var new []internal.FileMetadata
hashes := make(map[string]string)
for i := range mergedEntries {
if mergedEntries[i].Type != TypeReg {
continue
}
if mergedEntries[i].Digest == "" {
if mergedEntries[i].Size != 0 {
return nil, fmt.Errorf("missing digest for %q", mergedEntries[i].Name)
}
continue
}
digest, err := digest.Parse(mergedEntries[i].Digest)
if err != nil {
return nil, err
}
d := digest.Encoded()
if hashes[d] != "" {
continue
}
hashes[d] = d
mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:])
new = append(new, mergedEntries[i])
}
return new, nil
}
func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
defer c.layersCache.release()
defer func() {
if c.zstdReader != nil {
@ -1325,11 +1372,21 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
}()
lcd := chunkedLayerData{
Format: differOpts.Format,
}
json := jsoniter.ConfigCompatibleWithStandardLibrary
lcdBigData, err := json.Marshal(lcd)
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
output := graphdriver.DriverWithDifferOutput{
Differ: c,
TarSplit: c.tarSplit,
BigData: map[string][]byte{
bigDataKey: c.manifest,
bigDataKey: c.manifest,
chunkedLayerDataKey: lcdBigData,
},
TOCDigest: c.tocDigest,
}
@ -1389,6 +1446,21 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra
}
defer unix.Close(dirfd)
if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat {
mergedEntries, err = makeEntriesFlat(mergedEntries)
if err != nil {
return output, err
}
createdDirs := make(map[string]struct{})
for _, e := range mergedEntries {
d := e.Name[0:2]
if _, found := createdDirs[d]; !found {
unix.Mkdirat(dirfd, d, 0o755)
createdDirs[d] = struct{}{}
}
}
}
// hardlinks can point to missing files. So create them after all files
// are retrieved
var hardLinks []hardLinkToCreate
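The chunkedLayerData record marshalled above is what ApplyDiff stores under chunkedLayerDataKey and what the layers cache later unmarshals to recover the layer's on-disk format. A standalone sketch of that JSON round trip, using local simplified copies of the types purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the vendored types, reduced to what the round trip needs.
type differOutputFormat int

const (
	differOutputFormatDir differOutputFormat = iota
	differOutputFormatFlat
)

type chunkedLayerData struct {
	Format differOutputFormat `json:"format"`
}

func main() {
	// What ApplyDiff would store for a flat-format layer.
	b, err := json.Marshal(chunkedLayerData{Format: differOutputFormatFlat})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"format":1}

	// What the layers cache would read back when rebuilding its metadata.
	var lcd chunkedLayerData
	if err := json.Unmarshal(b, &lcd); err != nil {
		panic(err)
	}
	fmt.Println(lcd.Format == differOutputFormatFlat) // true
}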

View File

@ -8,75 +8,11 @@ import (
"os"
"runtime"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/idtools"
"golang.org/x/sys/unix"
)
type attr struct {
attrSet uint64
attrClr uint64
propagation uint64
userNs uint64
}
// openTree is a wrapper for the open_tree syscall
func openTree(path string, flags int) (fd int, err error) {
var _p0 *byte
if _p0, err = syscall.BytePtrFromString(path); err != nil {
return 0, err
}
r, _, e1 := syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0), uintptr(unsafe.Pointer(_p0)),
uintptr(flags), 0, 0, 0)
if e1 != 0 {
err = e1
}
return int(r), err
}
// moveMount is a wrapper for the move_mount syscall.
func moveMount(fdTree int, target string) (err error) {
var _p0, _p1 *byte
empty := ""
if _p0, err = syscall.BytePtrFromString(target); err != nil {
return err
}
if _p1, err = syscall.BytePtrFromString(empty); err != nil {
return err
}
flags := unix.MOVE_MOUNT_F_EMPTY_PATH
_, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT),
uintptr(fdTree), uintptr(unsafe.Pointer(_p1)),
0, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = e1
}
return
}
// mountSetAttr is a wrapper for the mount_setattr syscall
func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) {
var _p0 *byte
if _p0, err = syscall.BytePtrFromString(path); err != nil {
return err
}
_, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR), uintptr(dfd), uintptr(unsafe.Pointer(_p0)),
uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0)
if e1 != 0 {
err = e1
}
return
}
// CreateIDMappedMount creates a IDMapped bind mount from SOURCE to TARGET using the user namespace
// for the PID process.
func CreateIDMappedMount(source, target string, pid int) error {
@ -85,29 +21,26 @@ func CreateIDMappedMount(source, target string, pid int) error {
if err != nil {
return fmt.Errorf("unable to get user ns file descriptor for %q: %w", path, err)
}
var attr attr
attr.attrSet = unix.MOUNT_ATTR_IDMAP
attr.attrClr = 0
attr.propagation = 0
attr.userNs = uint64(userNsFile.Fd())
defer userNsFile.Close()
targetDirFd, err := openTree(source, unix.OPEN_TREE_CLONE)
targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE)
if err != nil {
return err
}
defer unix.Close(targetDirFd)
if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
&attr, uint(unsafe.Sizeof(attr))); err != nil {
if err := unix.MountSetattr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE,
&unix.MountAttr{
Attr_set: unix.MOUNT_ATTR_IDMAP,
Userns_fd: uint64(userNsFile.Fd()),
}); err != nil {
return err
}
if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) {
return err
}
return moveMount(targetDirFd, target)
return unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH)
}
// CreateUsernsProcess forks the current process and creates a user namespace using the specified

View File

@ -7,6 +7,8 @@ import (
"os"
"strconv"
"syscall"
"golang.org/x/sys/unix"
)
// StatT type contains status of a file. It contains metadata
@ -57,6 +59,10 @@ func (s StatT) Dev() uint64 {
return s.dev
}
func (s StatT) IsDir() bool {
return (s.mode & unix.S_IFDIR) != 0
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//

View File

@ -48,6 +48,10 @@ func (s StatT) Dev() uint64 {
return 0
}
func (s StatT) IsDir() bool {
return s.Mode().IsDir()
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//

View File

@ -112,7 +112,7 @@ func flattenComposite(errs *CompositeError) *CompositeError {
for _, er := range errs.Errors {
switch e := er.(type) {
case *CompositeError:
if len(e.Errors) > 0 {
if e != nil && len(e.Errors) > 0 {
flat := flattenComposite(e)
if len(flat.Errors) > 0 {
res = append(res, flat.Errors...)

View File

@ -1,15 +0,0 @@
after_success:
- bash <(curl -s https://codecov.io/bash)
go:
- 1.14.x
- 1.15.x
install:
- GO111MODULE=off go get -u gotest.tools/gotestsum
env:
- GO111MODULE=on
language: go
notifications:
slack:
secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw=
script:
- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...

View File

@ -1,8 +1,6 @@
linters-settings:
govet:
check-shadowing: true
golint:
min-confidence: 0
gocyclo:
min-complexity: 30
maligned:
@ -12,6 +10,8 @@ linters-settings:
goconst:
min-len: 2
min-occurrences: 4
paralleltest:
ignore-missing: true
linters:
enable-all: true
disable:
@ -39,3 +39,12 @@ linters:
- nestif
- godot
- errorlint
- varcheck
- interfacer
- deadcode
- golint
- ifshort
- structcheck
- nosnakecase
- varnamelen
- exhaustruct

View File

@ -1,24 +0,0 @@
after_success:
- bash <(curl -s https://codecov.io/bash)
go:
- 1.14.x
- 1.x
install:
- go get gotest.tools/gotestsum
jobs:
include:
# include linting job, but only for latest go version and amd64 arch
- go: 1.x
arch: amd64
install:
go get github.com/golangci/golangci-lint/cmd/golangci-lint
script:
- golangci-lint run --new-from-rev master
env:
- GO111MODULE=on
language: go
notifications:
slack:
secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ=
script:
- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./...

View File

@ -7,8 +7,8 @@ import (
)
const (
defaultHttpPort = ":80"
defaultHttpsPort = ":443"
defaultHTTPPort = ":80"
defaultHTTPSPort = ":443"
)
// Regular expressions used by the normalizations
@ -18,18 +18,24 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`)
// NormalizeURL will normalize the specified URL
// This was added to replace a previous call to the no longer maintained purell library:
// The call that was used looked like the following:
// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
//
// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
//
// To explain all that was included in the call above, purell.FlagsSafe was really just the following:
// - FlagLowercaseScheme
// - FlagLowercaseHost
// - FlagRemoveDefaultPort
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
// - FlagLowercaseScheme
// - FlagLowercaseHost
// - FlagRemoveDefaultPort
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
//
// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment.
func NormalizeURL(u *url.URL) {
lowercaseScheme(u)
lowercaseHost(u)
removeDefaultPort(u)
removeDuplicateSlashes(u)
u.RawPath = ""
u.RawFragment = ""
}
func lowercaseScheme(u *url.URL) {
@ -48,7 +54,7 @@ func removeDefaultPort(u *url.URL) {
if len(u.Host) > 0 {
scheme := strings.ToLower(u.Scheme)
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) {
return ""
}
return val
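As a rough standalone illustration of the normalization documented above (this reimplements the behaviour with the standard library only; normalize is a hypothetical helper, not the vendored function):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// normalize mimics the documented behaviour: lowercase scheme and host,
// drop the scheme's default port, collapse duplicate slashes, and clear
// RawPath/RawFragment.
func normalize(raw string) (string, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", err
	}
	u.Scheme = strings.ToLower(u.Scheme)
	u.Host = strings.ToLower(u.Host)
	switch u.Scheme {
	case "http":
		u.Host = strings.TrimSuffix(u.Host, ":80")
	case "https":
		u.Host = strings.TrimSuffix(u.Host, ":443")
	}
	for strings.Contains(u.Path, "//") {
		u.Path = strings.ReplaceAll(u.Path, "//", "/")
	}
	u.RawPath, u.RawFragment = "", ""
	return u.String(), nil
}

func main() {
	s, err := normalize("HTTPS://Example.COM:443//a//b")
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // https://example.com/a/b
}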

View File

@ -1,7 +1,7 @@
Package validator
=================
<img align="right" src="https://raw.githubusercontent.com/go-playground/validator/v10/logo.png">[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
![Project status](https://img.shields.io/badge/version-10.14.0-green.svg)
![Project status](https://img.shields.io/badge/version-10.14.1-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)

View File

@ -1414,25 +1414,21 @@ func isURL(fl FieldLevel) bool {
switch field.Kind() {
case reflect.String:
var i int
s := field.String()
// checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
// emulate browser and strip the '#' suffix prior to validation. see issue-#237
if i = strings.Index(s, "#"); i > -1 {
s = s[:i]
}
if len(s) == 0 {
return false
}
url, err := url.ParseRequestURI(s)
url, err := url.Parse(s)
if err != nil || url.Scheme == "" {
return false
}
if url.Host == "" && url.Fragment == "" && url.Opaque == "" {
return false
}
return true
}
@ -1450,7 +1446,13 @@ func isHttpURL(fl FieldLevel) bool {
case reflect.String:
s := strings.ToLower(field.String())
return strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://")
url, err := url.Parse(s)
if err != nil || url.Host == "" {
return false
}
return url.Scheme == "http" || url.Scheme == "https"
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
@ -2568,9 +2570,17 @@ func isDirPath(fl FieldLevel) bool {
func isJSON(fl FieldLevel) bool {
field := fl.Field()
if field.Kind() == reflect.String {
switch field.Kind() {
case reflect.String:
val := field.String()
return json.Valid([]byte(val))
case reflect.Slice:
fieldType := field.Type()
if fieldType.ConvertibleTo(byteSliceType) {
b := field.Convert(byteSliceType).Interface().([]byte)
return json.Valid(b)
}
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
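The new []byte branch above delegates to encoding/json's Valid, just as the string case does; a minimal standalone illustration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The isJSON check boils down to json.Valid on the raw bytes,
	// whether the field is a string or a convertible byte slice.
	fmt.Println(json.Valid([]byte(`{"a": 1}`))) // true
	fmt.Println(json.Valid([]byte(`{"a": `)))   // false
}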

View File

@ -53,6 +53,8 @@ var (
timeDurationType = reflect.TypeOf(time.Duration(0))
timeType = reflect.TypeOf(time.Time{})
byteSliceType = reflect.TypeOf([]byte{})
defaultCField = &cField{namesEqual: true}
)

View File

@ -16,6 +16,12 @@ This package provides various compression algorithms.
# changelog
* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
* gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815
* s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663
* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)
* zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802
* gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804

View File

@ -20,6 +20,6 @@ Vulnerabilities resulting from compiler/assembler errors should be reported upst
If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
Please disclose it at [security advisory](https://github.com/klaupost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.

View File

@ -144,6 +144,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
} else {
e.crc.Reset()
}
e.blk.dictLitEnc = nil
if d != nil {
low := e.lowMem
if singleBlock {

View File

@ -1084,7 +1084,7 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
}
}
e.lastDictID = d.id
e.allDirty = true
allDirty = true
}
// Reset table to initial state
e.cur = e.maxMatchOff

View File

@ -829,13 +829,12 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
}
if true {
end := e.maxMatchOff + int32(len(d.content)) - 8
for i := e.maxMatchOff; i < end; i += 3 {
for i := e.maxMatchOff; i < end; i += 2 {
const hashLog = tableBits
cv := load6432(d.content, i-e.maxMatchOff)
nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5
nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6
nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6
nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7
e.dictTable[nextHash] = tableEntry{
val: uint32(cv),
offset: i,
@ -844,10 +843,6 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
val: uint32(cv >> 8),
offset: i + 1,
}
e.dictTable[nextHash2] = tableEntry{
val: uint32(cv >> 16),
offset: i + 2,
}
}
}
e.lastDictID = d.id

View File

@ -126,6 +126,7 @@ func (m *AlpineV001Schema) ContextValidate(ctx context.Context, formats strfmt.R
func (m *AlpineV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error {
if m.Package != nil {
if err := m.Package.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("package")
@ -142,6 +143,7 @@ func (m *AlpineV001Schema) contextValidatePackage(ctx context.Context, formats s
func (m *AlpineV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
if m.PublicKey != nil {
if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("publicKey")
@ -244,6 +246,11 @@ func (m *AlpineV001SchemaPackage) ContextValidate(ctx context.Context, formats s
func (m *AlpineV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
if m.Hash != nil {
if swag.IsZero(m.Hash) { // not required
return nil
}
if err := m.Hash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("package" + "." + "hash")

View File

@ -116,6 +116,7 @@ func (m *CoseV001Schema) ContextValidate(ctx context.Context, formats strfmt.Reg
func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error {
if m.Data != nil {
if err := m.Data.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("data")
@ -240,6 +241,11 @@ func (m *CoseV001SchemaData) ContextValidate(ctx context.Context, formats strfmt
func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error {
if m.EnvelopeHash != nil {
if swag.IsZero(m.EnvelopeHash) { // not required
return nil
}
if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("data" + "." + "envelopeHash")
@ -256,6 +262,11 @@ func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, fo
func (m *CoseV001SchemaData) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
if m.PayloadHash != nil {
if swag.IsZero(m.PayloadHash) { // not required
return nil
}
if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("data" + "." + "payloadHash")

View File

@ -198,6 +198,11 @@ func (m *DSSEV001Schema) ContextValidate(ctx context.Context, formats strfmt.Reg
func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error {
if m.EnvelopeHash != nil {
if swag.IsZero(m.EnvelopeHash) { // not required
return nil
}
if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("envelopeHash")
@ -214,6 +219,11 @@ func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, format
func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
if m.PayloadHash != nil {
if swag.IsZero(m.PayloadHash) { // not required
return nil
}
if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("payloadHash")
@ -230,6 +240,11 @@ func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats
func (m *DSSEV001Schema) contextValidateProposedContent(ctx context.Context, formats strfmt.Registry) error {
if m.ProposedContent != nil {
if swag.IsZero(m.ProposedContent) { // not required
return nil
}
if err := m.ProposedContent.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("proposedContent")
@ -252,6 +267,11 @@ func (m *DSSEV001Schema) contextValidateSignatures(ctx context.Context, formats
for i := 0; i < len(m.Signatures); i++ {
if m.Signatures[i] != nil {
if swag.IsZero(m.Signatures[i]) { // not required
return nil
}
if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("signatures" + "." + strconv.Itoa(i))

View File

@ -126,6 +126,7 @@ func (m *HashedrekordV001Schema) ContextValidate(ctx context.Context, formats st
func (m *HashedrekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error {
if m.Data != nil {
if err := m.Data.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("data")
@ -142,6 +143,7 @@ func (m *HashedrekordV001Schema) contextValidateData(ctx context.Context, format
func (m *HashedrekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
if m.Signature != nil {
if err := m.Signature.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("signature")
@ -232,6 +234,11 @@ func (m *HashedrekordV001SchemaData) ContextValidate(ctx context.Context, format
func (m *HashedrekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
if m.Hash != nil {
if swag.IsZero(m.Hash) { // not required
return nil
}
if err := m.Hash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("data" + "." + "hash")
@ -431,6 +438,11 @@ func (m *HashedrekordV001SchemaSignature) ContextValidate(ctx context.Context, f
func (m *HashedrekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
if m.PublicKey != nil {
if swag.IsZero(m.PublicKey) { // not required
return nil
}
if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("signature" + "." + "publicKey")

View File

@ -126,6 +126,7 @@ func (m *HelmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Reg
func (m *HelmV001Schema) contextValidateChart(ctx context.Context, formats strfmt.Registry) error {
if m.Chart != nil {
if err := m.Chart.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("chart")
@ -142,6 +143,7 @@ func (m *HelmV001Schema) contextValidateChart(ctx context.Context, formats strfm
func (m *HelmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
if m.PublicKey != nil {
if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("publicKey")
@ -264,6 +266,11 @@ func (m *HelmV001SchemaChart) ContextValidate(ctx context.Context, formats strfm
func (m *HelmV001SchemaChart) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
if m.Hash != nil {
if swag.IsZero(m.Hash) { // not required
return nil
}
if err := m.Hash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("chart" + "." + "hash")
@ -280,6 +287,7 @@ func (m *HelmV001SchemaChart) contextValidateHash(ctx context.Context, formats s
func (m *HelmV001SchemaChart) contextValidateProvenance(ctx context.Context, formats strfmt.Registry) error {
if m.Provenance != nil {
if err := m.Provenance.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("chart" + "." + "provenance")
@ -484,6 +492,11 @@ func (m *HelmV001SchemaChartProvenance) ContextValidate(ctx context.Context, for
func (m *HelmV001SchemaChartProvenance) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
if m.Signature != nil {
if swag.IsZero(m.Signature) { // not required
return nil
}
if err := m.Signature.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("chart" + "." + "provenance" + "." + "signature")

View File

@ -112,6 +112,7 @@ func (m *IntotoV001Schema) ContextValidate(ctx context.Context, formats strfmt.R
func (m *IntotoV001Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error {
if m.Content != nil {
if err := m.Content.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("content")
@ -235,6 +236,11 @@ func (m *IntotoV001SchemaContent) ContextValidate(ctx context.Context, formats s
func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
if m.Hash != nil {
if swag.IsZero(m.Hash) { // not required
return nil
}
if err := m.Hash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("content" + "." + "hash")
@ -251,6 +257,11 @@ func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, forma
func (m *IntotoV001SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
if m.PayloadHash != nil {
if swag.IsZero(m.PayloadHash) { // not required
return nil
}
if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("content" + "." + "payloadHash")

View File

@ -95,6 +95,7 @@ func (m *IntotoV002Schema) ContextValidate(ctx context.Context, formats strfmt.R
func (m *IntotoV002Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error {
if m.Content != nil {
if err := m.Content.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("content")
@ -247,6 +248,7 @@ func (m *IntotoV002SchemaContent) ContextValidate(ctx context.Context, formats s
func (m *IntotoV002SchemaContent) contextValidateEnvelope(ctx context.Context, formats strfmt.Registry) error {
if m.Envelope != nil {
if err := m.Envelope.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("content" + "." + "envelope")
@ -263,6 +265,11 @@ func (m *IntotoV002SchemaContent) contextValidateEnvelope(ctx context.Context, f
func (m *IntotoV002SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
if m.Hash != nil {
if swag.IsZero(m.Hash) { // not required
return nil
}
if err := m.Hash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("content" + "." + "hash")
@ -279,6 +286,11 @@ func (m *IntotoV002SchemaContent) contextValidateHash(ctx context.Context, forma
func (m *IntotoV002SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error {
if m.PayloadHash != nil {
if swag.IsZero(m.PayloadHash) { // not required
return nil
}
if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("content" + "." + "payloadHash")
@ -408,6 +420,11 @@ func (m *IntotoV002SchemaContentEnvelope) contextValidateSignatures(ctx context.
for i := 0; i < len(m.Signatures); i++ {
if m.Signatures[i] != nil {
if swag.IsZero(m.Signatures[i]) { // not required
return nil
}
if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i))

View File

@ -124,6 +124,7 @@ func (m *JarV001Schema) ContextValidate(ctx context.Context, formats strfmt.Regi
func (m *JarV001Schema) contextValidateArchive(ctx context.Context, formats strfmt.Registry) error {
if m.Archive != nil {
if err := m.Archive.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("archive")
@ -140,6 +141,11 @@ func (m *JarV001Schema) contextValidateArchive(ctx context.Context, formats strf
func (m *JarV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
if m.Signature != nil {
if swag.IsZero(m.Signature) { // not required
return nil
}
if err := m.Signature.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("signature")
@ -234,6 +240,11 @@ func (m *JarV001SchemaArchive) ContextValidate(ctx context.Context, formats strf
func (m *JarV001SchemaArchive) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
if m.Hash != nil {
if swag.IsZero(m.Hash) { // not required
return nil
}
if err := m.Hash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("archive" + "." + "hash")
@ -463,6 +474,7 @@ func (m *JarV001SchemaSignature) contextValidateContent(ctx context.Context, for
func (m *JarV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
if m.PublicKey != nil {
if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("signature" + "." + "publicKey")

View File

@ -250,6 +250,11 @@ func (m *LogEntryAnon) ContextValidate(ctx context.Context, formats strfmt.Regis
func (m *LogEntryAnon) contextValidateAttestation(ctx context.Context, formats strfmt.Registry) error {
if m.Attestation != nil {
if swag.IsZero(m.Attestation) { // not required
return nil
}
if err := m.Attestation.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("attestation")
@ -266,6 +271,11 @@ func (m *LogEntryAnon) contextValidateAttestation(ctx context.Context, formats s
func (m *LogEntryAnon) contextValidateVerification(ctx context.Context, formats strfmt.Registry) error {
if m.Verification != nil {
if swag.IsZero(m.Verification) { // not required
return nil
}
if err := m.Verification.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("verification")
@ -398,6 +408,11 @@ func (m *LogEntryAnonVerification) ContextValidate(ctx context.Context, formats
func (m *LogEntryAnonVerification) contextValidateInclusionProof(ctx context.Context, formats strfmt.Registry) error {
if m.InclusionProof != nil {
if swag.IsZero(m.InclusionProof) { // not required
return nil
}
if err := m.InclusionProof.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("verification" + "." + "inclusionProof")

View File

@ -182,6 +182,11 @@ func (m *LogInfo) contextValidateInactiveShards(ctx context.Context, formats str
for i := 0; i < len(m.InactiveShards); i++ {
if m.InactiveShards[i] != nil {
if swag.IsZero(m.InactiveShards[i]) { // not required
return nil
}
if err := m.InactiveShards[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("inactiveShards" + "." + strconv.Itoa(i))

View File

@ -126,6 +126,7 @@ func (m *RekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.R
func (m *RekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error {
if m.Data != nil {
if err := m.Data.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("data")
@ -142,6 +143,7 @@ func (m *RekordV001Schema) contextValidateData(ctx context.Context, formats strf
func (m *RekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error {
if m.Signature != nil {
if err := m.Signature.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("signature")
@ -236,6 +238,11 @@ func (m *RekordV001SchemaData) ContextValidate(ctx context.Context, formats strf
func (m *RekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
if m.Hash != nil {
if swag.IsZero(m.Hash) { // not required
return nil
}
if err := m.Hash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("data" + "." + "hash")
@ -514,6 +521,7 @@ func (m *RekordV001SchemaSignature) ContextValidate(ctx context.Context, formats
func (m *RekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
if m.PublicKey != nil {
if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("signature" + "." + "publicKey")

View File

@ -93,6 +93,7 @@ func (m *Rfc3161V001Schema) ContextValidate(ctx context.Context, formats strfmt.
func (m *Rfc3161V001Schema) contextValidateTsr(ctx context.Context, formats strfmt.Registry) error {
if m.Tsr != nil {
if err := m.Tsr.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("tsr")

View File

@ -126,6 +126,7 @@ func (m *RpmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Regi
func (m *RpmV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error {
if m.Package != nil {
if err := m.Package.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("package")
@ -142,6 +143,7 @@ func (m *RpmV001Schema) contextValidatePackage(ctx context.Context, formats strf
func (m *RpmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
if m.PublicKey != nil {
if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("publicKey")
@ -244,6 +246,11 @@ func (m *RpmV001SchemaPackage) ContextValidate(ctx context.Context, formats strf
func (m *RpmV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error {
if m.Hash != nil {
if swag.IsZero(m.Hash) { // not required
return nil
}
if err := m.Hash.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("package" + "." + "hash")

View File

@ -180,6 +180,11 @@ func (m *SearchIndex) ContextValidate(ctx context.Context, formats strfmt.Regist
func (m *SearchIndex) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error {
if m.PublicKey != nil {
if swag.IsZero(m.PublicKey) { // not required
return nil
}
if err := m.PublicKey.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("publicKey")

View File

@ -260,6 +260,10 @@ func (m *SearchLogQuery) contextValidateEntries(ctx context.Context, formats str
for i := 0; i < len(m.Entries()); i++ {
if swag.IsZero(m.entriesField[i]) { // not required
return nil
}
if err := m.entriesField[i].ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("entries" + "." + strconv.Itoa(i))

View File

@ -133,6 +133,7 @@ func (m *TUFV001Schema) ContextValidate(ctx context.Context, formats strfmt.Regi
func (m *TUFV001Schema) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error {
if m.Metadata != nil {
if err := m.Metadata.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("metadata")
@ -149,6 +150,7 @@ func (m *TUFV001Schema) contextValidateMetadata(ctx context.Context, formats str
func (m *TUFV001Schema) contextValidateRoot(ctx context.Context, formats strfmt.Registry) error {
if m.Root != nil {
if err := m.Root.ContextValidate(ctx, formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("root")

5
vendor/go.opentelemetry.io/otel/.codespellignore generated vendored Normal file
View File

@ -0,0 +1,5 @@
ot
fo
te
collison
consequentially

10
vendor/go.opentelemetry.io/otel/.codespellrc generated vendored Normal file
View File

@ -0,0 +1,10 @@
# https://github.com/codespell-project/codespell
[codespell]
builtin = clear,rare,informal
check-filenames =
check-hidden =
ignore-words = .codespellignore
interactive = 1
skip = .git,go.mod,go.sum,semconv,venv,.tools
uri-ignore-words-list = *
write =

View File

@ -2,6 +2,7 @@
Thumbs.db
.tools/
venv/
.idea/
.vscode/
*.iml

View File

@ -113,7 +113,7 @@ linters-settings:
- name: constant-logical-expr
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
# TODO (#3372) reenable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
# TODO (#3372) re-enable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
- name: context-as-argument
disabled: true
arguments:

View File

@ -8,6 +8,61 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
## [1.16.0/0.39.0] 2023-05-18
This release contains the first stable release of the OpenTelemetry Go [metric API].
Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
### Added
- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
### Changed
- Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049)
### Removed
- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
Use `go.opentelemetry.io/otel/metric` instead. (#4055)
### Fixed
- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
This is a release candidate for the v1.16.0/v0.39.0 release.
That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
### Added
- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
- Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
- Use `GetMeterProvider` for a global `metric.MeterProvider`.
- Use `SetMeterProvider` to set the global `metric.MeterProvider`.
### Changed
- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
This stages the metric API to be released as a stable module. (#4038)
### Removed
- The `go.opentelemetry.io/otel/metric/global` package is removed.
Use `go.opentelemetry.io/otel` instead. (#4039)
## [1.15.1/0.38.1] 2023-05-02
### Fixed
- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
## [1.15.0/0.38.0] 2023-04-27
### Added
@ -2437,7 +2492,10 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.15.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.16.0...HEAD
[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0
[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2
[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1
@ -2505,3 +2563,5 @@ It contains api and sdk for trace and meter.
[Go 1.20]: https://go.dev/doc/go1.20
[Go 1.19]: https://go.dev/doc/go1.19
[Go 1.18]: https://go.dev/doc/go1.18
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric

View File

@ -28,6 +28,11 @@ precommit` - the `precommit` target is the default).
The `precommit` target also fixes the formatting of the code and
checks the status of the go module files.
Additionally, there is a `codespell` target that checks for common
typos in the code. It is not run by default, but you can run it
manually with `make codespell`. It will set up a virtual environment
in `venv` and install `codespell` there.
If after running `make precommit` the output of `git status` contains
`nothing to commit, working tree clean` then it means that everything
is up-to-date and properly formatted.
@ -150,10 +155,10 @@ Any [Maintainer] can merge the PR once the above criteria have been met.
## Design Choices
As with other OpenTelemetry clients, opentelemetry-go follows the
[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification).
[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
It's especially valuable to read through the [library
guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md).
guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
### Focus on Capabilities, Not Structure Compliance

View File

@ -25,8 +25,8 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default
ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
precommit: generate dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default
ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
# Tools
@ -74,9 +74,42 @@ $(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
.PHONY: tools
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT)
# Build
# Virtualized python tools via docker
.PHONY: generate build
# The directory where the virtual environment is created.
VENVDIR := venv
# The directory where the python tools are installed.
PYTOOLS := $(VENVDIR)/bin
# The pip executable in the virtual environment.
PIP := $(PYTOOLS)/pip
# The directory in the docker image where the current directory is mounted.
WORKDIR := /workdir
# The python image to use for the virtual environment.
PYTHONIMAGE := python:3.11.3-slim-bullseye
# Run the python image with the current directory mounted.
DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
# Create a virtual environment for Python tools.
$(PYTOOLS):
# The `--upgrade` flag is needed to ensure that the virtual environment is
# created with the latest pip version.
@$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
# Install python packages into the virtual environment.
$(PYTOOLS)/%: | $(PYTOOLS)
@$(DOCKERPY) $(PIP) install -r requirements.txt
CODESPELL = $(PYTOOLS)/codespell
$(CODESPELL): PACKAGE=codespell
# Generate
.PHONY: generate
generate: $(OTEL_GO_MOD_DIRS:%=generate/%)
generate/%: DIR=$*
@ -85,7 +118,11 @@ generate/%: | $(STRINGER) $(PORTO)
&& cd $(DIR) \
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w .
build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
# Build
.PHONY: build
build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
build/%: DIR=$*
build/%:
@echo "$(GO) build $(DIR)/..." \
@ -176,6 +213,10 @@ vanity-import-fix: | $(PORTO)
misspell: | $(MISSPELL)
@$(MISSPELL) -w $(ALL_DOCS)
.PHONY: codespell
codespell: | $(CODESPELL)
@$(DOCKERPY) $(CODESPELL)
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
@ -211,6 +252,7 @@ semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"

View File

@ -2,10 +2,10 @@
## Semantic Convention Generation
New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated.
New versions of the [OpenTelemetry Specification] mean new versions of the `semconv` package need to be generated.
The `semconv-generate` make target is used for this.
1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag.
1. Checkout a local copy of the [OpenTelemetry Specification] to the desired release tag.
2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
3. Run the `make semconv-generate ...` target from this repository.
@ -14,7 +14,6 @@ For example,
```sh
export TAG="v1.13.0" # Change to the release version you are generating.
export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification"
git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG"
docker pull otel/semconvgen:latest
make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO.
```
@ -124,4 +123,4 @@ Once verified be sure to [make a release for the `contrib` repository](https://g
Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification
[OpenTelemetry Specification]: https://github.com/open-telemetry/opentelemetry-specification

View File

@ -289,45 +289,37 @@ func parseMember(member string) (Member, error) {
props properties
)
parts := strings.SplitN(member, propertyDelimiter, 2)
switch len(parts) {
case 2:
keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found {
// Parse the member properties.
for _, pStr := range strings.Split(parts[1], propertyDelimiter) {
for _, pStr := range strings.Split(properties, propertyDelimiter) {
p, err := parseProperty(pStr)
if err != nil {
return newInvalidMember(), err
}
props = append(props, p)
}
fallthrough
case 1:
// Parse the member key/value pair.
}
// Parse the member key/value pair.
// Take into account a value can contain equal signs (=).
kv := strings.SplitN(parts[0], keyValueDelimiter, 2)
if len(kv) != 2 {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
}
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
key = strings.TrimSpace(kv[0])
var err error
value, err = url.QueryUnescape(strings.TrimSpace(kv[1]))
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
if !keyRe.MatchString(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
default:
// This should never happen unless a developer has changed the string
// splitting somehow. Panic instead of failing silently and allowing
// the bug to slip past the CI checks.
panic("failed to parse baggage member")
// Take into account a value can contain equal signs (=).
k, v, found := strings.Cut(keyValue, keyValueDelimiter)
if !found {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
}
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
key = strings.TrimSpace(k)
var err error
value, err = url.QueryUnescape(strings.TrimSpace(v))
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
if !keyRe.MatchString(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
return Member{key: key, value: value, properties: props, hasData: true}, nil

View File

@ -16,6 +16,6 @@
Package codes defines the canonical error codes used by OpenTelemetry.
It conforms to [the OpenTelemetry
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode).
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status).
*/
package codes // import "go.opentelemetry.io/otel/codes"

View File

@ -0,0 +1,359 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"context"
"sync/atomic"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded"
)
// unwrapper unwraps to return the underlying instrument implementation.
type unwrapper interface {
Unwrap() metric.Observable
}
type afCounter struct {
embedded.Float64ObservableCounter
metric.Float64Observable
name string
opts []metric.Float64ObservableCounterOption
delegate atomic.Value //metric.Float64ObservableCounter
}
var _ unwrapper = (*afCounter)(nil)
var _ metric.Float64ObservableCounter = (*afCounter)(nil)
func (i *afCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *afCounter) Unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64ObservableCounter)
}
return nil
}
type afUpDownCounter struct {
embedded.Float64ObservableUpDownCounter
metric.Float64Observable
name string
opts []metric.Float64ObservableUpDownCounterOption
delegate atomic.Value //metric.Float64ObservableUpDownCounter
}
var _ unwrapper = (*afUpDownCounter)(nil)
var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
func (i *afUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *afUpDownCounter) Unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64ObservableUpDownCounter)
}
return nil
}
type afGauge struct {
embedded.Float64ObservableGauge
metric.Float64Observable
name string
opts []metric.Float64ObservableGaugeOption
delegate atomic.Value //metric.Float64ObservableGauge
}
var _ unwrapper = (*afGauge)(nil)
var _ metric.Float64ObservableGauge = (*afGauge)(nil)
func (i *afGauge) setDelegate(m metric.Meter) {
ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *afGauge) Unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64ObservableGauge)
}
return nil
}
type aiCounter struct {
embedded.Int64ObservableCounter
metric.Int64Observable
name string
opts []metric.Int64ObservableCounterOption
delegate atomic.Value //metric.Int64ObservableCounter
}
var _ unwrapper = (*aiCounter)(nil)
var _ metric.Int64ObservableCounter = (*aiCounter)(nil)
func (i *aiCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *aiCounter) Unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64ObservableCounter)
}
return nil
}
type aiUpDownCounter struct {
embedded.Int64ObservableUpDownCounter
metric.Int64Observable
name string
opts []metric.Int64ObservableUpDownCounterOption
delegate atomic.Value //metric.Int64ObservableUpDownCounter
}
var _ unwrapper = (*aiUpDownCounter)(nil)
var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *aiUpDownCounter) Unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64ObservableUpDownCounter)
}
return nil
}
type aiGauge struct {
embedded.Int64ObservableGauge
metric.Int64Observable
name string
opts []metric.Int64ObservableGaugeOption
delegate atomic.Value //metric.Int64ObservableGauge
}
var _ unwrapper = (*aiGauge)(nil)
var _ metric.Int64ObservableGauge = (*aiGauge)(nil)
func (i *aiGauge) setDelegate(m metric.Meter) {
ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *aiGauge) Unwrap() metric.Observable {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64ObservableGauge)
}
return nil
}
// Sync Instruments.
type sfCounter struct {
embedded.Float64Counter
name string
opts []metric.Float64CounterOption
delegate atomic.Value //metric.Float64Counter
}
var _ metric.Float64Counter = (*sfCounter)(nil)
func (i *sfCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64Counter(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(metric.Float64Counter).Add(ctx, incr, opts...)
}
}
type sfUpDownCounter struct {
embedded.Float64UpDownCounter
name string
opts []metric.Float64UpDownCounterOption
delegate atomic.Value //metric.Float64UpDownCounter
}
var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Float64UpDownCounter(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...)
}
}
type sfHistogram struct {
embedded.Float64Histogram
name string
opts []metric.Float64HistogramOption
delegate atomic.Value //metric.Float64Histogram
}
var _ metric.Float64Histogram = (*sfHistogram)(nil)
func (i *sfHistogram) setDelegate(m metric.Meter) {
ctr, err := m.Float64Histogram(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(metric.Float64Histogram).Record(ctx, x, opts...)
}
}
type siCounter struct {
embedded.Int64Counter
name string
opts []metric.Int64CounterOption
delegate atomic.Value //metric.Int64Counter
}
var _ metric.Int64Counter = (*siCounter)(nil)
func (i *siCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64Counter(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(metric.Int64Counter).Add(ctx, x, opts...)
}
}
type siUpDownCounter struct {
embedded.Int64UpDownCounter
name string
opts []metric.Int64UpDownCounterOption
delegate atomic.Value //metric.Int64UpDownCounter
}
var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
func (i *siUpDownCounter) setDelegate(m metric.Meter) {
ctr, err := m.Int64UpDownCounter(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...)
}
}
type siHistogram struct {
embedded.Int64Histogram
name string
opts []metric.Int64HistogramOption
delegate atomic.Value //metric.Int64Histogram
}
var _ metric.Int64Histogram = (*siHistogram)(nil)
func (i *siHistogram) setDelegate(m metric.Meter) {
ctr, err := m.Int64Histogram(i.name, i.opts...)
if err != nil {
GetErrorHandler().Handle(err)
return
}
i.delegate.Store(ctr)
}
func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
if ctr := i.delegate.Load(); ctr != nil {
ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
}
}

View File

@ -0,0 +1,354 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/internal/global"
import (
"container/list"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded"
)
// meterProvider is a placeholder for a configured SDK MeterProvider.
//
// All MeterProvider functionality is forwarded to a delegate once
// configured.
type meterProvider struct {
embedded.MeterProvider
mtx sync.Mutex
meters map[il]*meter
delegate metric.MeterProvider
}
// setDelegate configures p to delegate all MeterProvider functionality to
// provider.
//
// All Meters provided prior to this function call are switched out to be
// Meters provided by provider. All instruments and callbacks are recreated and
// delegated.
//
// It is guaranteed by the caller that this happens only once.
func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
p.mtx.Lock()
defer p.mtx.Unlock()
p.delegate = provider
if len(p.meters) == 0 {
return
}
for _, meter := range p.meters {
meter.setDelegate(provider)
}
p.meters = nil
}
// Meter implements MeterProvider.
func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
p.mtx.Lock()
defer p.mtx.Unlock()
if p.delegate != nil {
return p.delegate.Meter(name, opts...)
}
// At this moment it is guaranteed that no SDK is installed; save the meter in the meters map.
c := metric.NewMeterConfig(opts...)
key := il{
name: name,
version: c.InstrumentationVersion(),
}
if p.meters == nil {
p.meters = make(map[il]*meter)
}
if val, ok := p.meters[key]; ok {
return val
}
t := &meter{name: name, opts: opts}
p.meters[key] = t
return t
}
// meter is a placeholder for a metric.Meter.
//
// All Meter functionality is forwarded to a delegate once configured.
// Otherwise, all functionality is forwarded to a NoopMeter.
type meter struct {
embedded.Meter
name string
opts []metric.MeterOption
mtx sync.Mutex
instruments []delegatedInstrument
registry list.List
delegate atomic.Value // metric.Meter
}
type delegatedInstrument interface {
setDelegate(metric.Meter)
}
// setDelegate configures m to delegate all Meter functionality to Meters
// created by provider.
//
// All subsequent calls to the Meter methods will be passed to the delegate.
//
// It is guaranteed by the caller that this happens only once.
func (m *meter) setDelegate(provider metric.MeterProvider) {
meter := provider.Meter(m.name, m.opts...)
m.delegate.Store(meter)
m.mtx.Lock()
defer m.mtx.Unlock()
for _, inst := range m.instruments {
inst.setDelegate(meter)
}
for e := m.registry.Front(); e != nil; e = e.Next() {
r := e.Value.(*registration)
r.setDelegate(meter)
m.registry.Remove(e)
}
m.instruments = nil
m.registry.Init()
}
func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64Counter(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &siCounter{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64UpDownCounter(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &siUpDownCounter{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64Histogram(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &siHistogram{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64ObservableCounter(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &aiCounter{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64ObservableUpDownCounter(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &aiUpDownCounter{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Int64ObservableGauge(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &aiGauge{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64Counter(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &sfCounter{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64UpDownCounter(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &sfUpDownCounter{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64Histogram(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &sfHistogram{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64ObservableCounter(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &afCounter{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64ObservableUpDownCounter(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &afUpDownCounter{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
return del.Float64ObservableGauge(name, options...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
i := &afGauge{name: name, opts: options}
m.instruments = append(m.instruments, i)
return i, nil
}
// RegisterCallback captures the function that will be called during Collect.
func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
if del, ok := m.delegate.Load().(metric.Meter); ok {
insts = unwrapInstruments(insts)
return del.RegisterCallback(f, insts...)
}
m.mtx.Lock()
defer m.mtx.Unlock()
reg := &registration{instruments: insts, function: f}
e := m.registry.PushBack(reg)
reg.unreg = func() error {
m.mtx.Lock()
_ = m.registry.Remove(e)
m.mtx.Unlock()
return nil
}
return reg, nil
}
type wrapped interface {
unwrap() metric.Observable
}
func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
out := make([]metric.Observable, 0, len(instruments))
for _, inst := range instruments {
if in, ok := inst.(wrapped); ok {
out = append(out, in.unwrap())
} else {
out = append(out, inst)
}
}
return out
}
type registration struct {
embedded.Registration
instruments []metric.Observable
function metric.Callback
unreg func() error
unregMu sync.Mutex
}
func (c *registration) setDelegate(m metric.Meter) {
insts := unwrapInstruments(c.instruments)
c.unregMu.Lock()
defer c.unregMu.Unlock()
if c.unreg == nil {
// Unregister already called.
return
}
reg, err := m.RegisterCallback(c.function, insts...)
if err != nil {
GetErrorHandler().Handle(err)
}
c.unreg = reg.Unregister
}
func (c *registration) Unregister() error {
c.unregMu.Lock()
defer c.unregMu.Unlock()
if c.unreg == nil {
// Unregister already called.
return nil
}
var err error
err, c.unreg = c.unreg(), nil
return err
}
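// Example sketch (not part of this vendored file): how the delegating meter
// above is typically reached through the public API. Names are illustrative.
// Until a real MeterProvider is set, the registration below is held by the
// placeholder meter and is re-registered with the delegate on SetMeterProvider.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example.io/instrumentation")
	gauge, _ := meter.Int64ObservableGauge("queue.length")

	reg, _ := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(gauge, 42)
		return nil
	}, gauge)
	defer func() { _ = reg.Unregister() }()
}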

View File

@ -19,6 +19,7 @@ import (
"sync"
"sync/atomic"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/trace"
)
@ -31,14 +32,20 @@ type (
propagatorsHolder struct {
tm propagation.TextMapPropagator
}
meterProviderHolder struct {
mp metric.MeterProvider
}
)
var (
globalTracer = defaultTracerValue()
globalPropagators = defaultPropagatorsValue()
globalTracer = defaultTracerValue()
globalPropagators = defaultPropagatorsValue()
globalMeterProvider = defaultMeterProvider()
delegateTraceOnce sync.Once
delegateTextMapPropagatorOnce sync.Once
delegateMeterOnce sync.Once
)
// TracerProvider is the internal implementation for global.TracerProvider.
@ -102,6 +109,34 @@ func SetTextMapPropagator(p propagation.TextMapPropagator) {
globalPropagators.Store(propagatorsHolder{tm: p})
}
// MeterProvider is the internal implementation for global.MeterProvider.
func MeterProvider() metric.MeterProvider {
return globalMeterProvider.Load().(meterProviderHolder).mp
}
// SetMeterProvider is the internal implementation for global.SetMeterProvider.
func SetMeterProvider(mp metric.MeterProvider) {
current := MeterProvider()
if _, cOk := current.(*meterProvider); cOk {
if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
// Do not assign the default delegating MeterProvider to delegate
// to itself.
Error(
errors.New("no delegate configured in meter provider"),
"Setting meter provider to it's current value. No delegate will be configured",
)
return
}
}
delegateMeterOnce.Do(func() {
if def, ok := current.(*meterProvider); ok {
def.setDelegate(mp)
}
})
globalMeterProvider.Store(meterProviderHolder{mp: mp})
}
func defaultTracerValue() *atomic.Value {
v := &atomic.Value{}
v.Store(tracerProviderHolder{tp: &tracerProvider{}})
@ -113,3 +148,9 @@ func defaultPropagatorsValue() *atomic.Value {
v.Store(propagatorsHolder{tm: newTextMapPropagator()})
return v
}
func defaultMeterProvider() *atomic.Value {
v := &atomic.Value{}
v.Store(meterProviderHolder{mp: &meterProvider{}})
return v
}
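// Example sketch (not part of this vendored file) of the self-delegation guard
// in SetMeterProvider above: before any SDK is registered, GetMeterProvider
// returns the internal delegating placeholder, and handing it back only reports
// an error through the global error handler instead of creating a cycle.
package main

import "go.opentelemetry.io/otel"

func main() {
	otel.SetMeterProvider(otel.GetMeterProvider())
}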

53
vendor/go.opentelemetry.io/otel/metric.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otel // import "go.opentelemetry.io/otel"
import (
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/metric"
)
// Meter returns a Meter from the global MeterProvider. The name must be the
// name of the library providing instrumentation. This name may be the same as
// the instrumented code only if that code provides built-in instrumentation.
// If the name is empty, then an implementation-defined default name will be
// used instead.
//
// If this is called before a global MeterProvider is registered the returned
// Meter will be a No-op implementation of a Meter. When a global MeterProvider
// is registered for the first time, the returned Meter, and all the
// instruments it has created or will create, are recreated automatically from
// the new MeterProvider.
//
// This is short for GetMeterProvider().Meter(name).
func Meter(name string, opts ...metric.MeterOption) metric.Meter {
return GetMeterProvider().Meter(name, opts...)
}
// GetMeterProvider returns the registered global meter provider.
//
// If no global MeterProvider has been registered, a No-op MeterProvider
// implementation is returned. When a global MeterProvider is registered for
// the first time, the returned MeterProvider, and all the Meters it has
// created or will create, are recreated automatically from the new
// MeterProvider.
func GetMeterProvider() metric.MeterProvider {
return global.MeterProvider()
}
// SetMeterProvider registers mp as the global MeterProvider.
func SetMeterProvider(mp metric.MeterProvider) {
global.SetMeterProvider(mp)
}

201
vendor/go.opentelemetry.io/otel/metric/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

271
vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go generated vendored Normal file
View File

@ -0,0 +1,271 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"go.opentelemetry.io/otel/metric/embedded"
)
// Float64Observable describes a set of instruments used asynchronously to
// record float64 measurements once per collection cycle. Observations of
// these instruments are only made within a callback.
//
// Warning: Methods may be added to this interface in minor releases.
type Float64Observable interface {
Observable
float64Observable()
}
// Float64ObservableCounter is an instrument used to asynchronously record
// increasing float64 measurements once per collection cycle. Observations are
// only made within a callback for this instrument. The value observed is
// assumed to be the cumulative sum of the count.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64ObservableCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64ObservableCounter
Float64Observable
}
// Float64ObservableCounterConfig contains options for asynchronous counter
// instruments that record float64 values.
type Float64ObservableCounterConfig struct {
description string
unit string
callbacks []Float64Callback
}
// NewFloat64ObservableCounterConfig returns a new
// [Float64ObservableCounterConfig] with all opts applied.
func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
var config Float64ObservableCounterConfig
for _, o := range opts {
config = o.applyFloat64ObservableCounter(config)
}
return config
}
// Description returns the configured description.
func (c Float64ObservableCounterConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Float64ObservableCounterConfig) Unit() string {
return c.unit
}
// Callbacks returns the configured callbacks.
func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
return c.callbacks
}
// Float64ObservableCounterOption applies options to a
// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
// [InstrumentOption] for other options that can be used as a
// Float64ObservableCounterOption.
type Float64ObservableCounterOption interface {
applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
}
// Float64ObservableUpDownCounter is an instrument used to asynchronously
// record float64 measurements once per collection cycle. Observations are only
// made within a callback for this instrument. The value observed is assumed
// to be the cumulative sum of the count.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64ObservableUpDownCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64ObservableUpDownCounter
Float64Observable
}
// Float64ObservableUpDownCounterConfig contains options for asynchronous
// counter instruments that record float64 values.
type Float64ObservableUpDownCounterConfig struct {
description string
unit string
callbacks []Float64Callback
}
// NewFloat64ObservableUpDownCounterConfig returns a new
// [Float64ObservableUpDownCounterConfig] with all opts applied.
func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig {
var config Float64ObservableUpDownCounterConfig
for _, o := range opts {
config = o.applyFloat64ObservableUpDownCounter(config)
}
return config
}
// Description returns the configured description.
func (c Float64ObservableUpDownCounterConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Float64ObservableUpDownCounterConfig) Unit() string {
return c.unit
}
// Callbacks returns the configured callbacks.
func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
return c.callbacks
}
// Float64ObservableUpDownCounterOption applies options to a
// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
// [InstrumentOption] for other options that can be used as a
// Float64ObservableUpDownCounterOption.
type Float64ObservableUpDownCounterOption interface {
applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
}
// Float64ObservableGauge is an instrument used to asynchronously record
// instantaneous float64 measurements once per collection cycle. Observations
// are only made within a callback for this instrument.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64ObservableGauge interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64ObservableGauge
Float64Observable
}
// Float64ObservableGaugeConfig contains options for asynchronous gauge
// instruments that record float64 values.
type Float64ObservableGaugeConfig struct {
description string
unit string
callbacks []Float64Callback
}
// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
// with all opts applied.
func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
var config Float64ObservableGaugeConfig
for _, o := range opts {
config = o.applyFloat64ObservableGauge(config)
}
return config
}
// Description returns the configured description.
func (c Float64ObservableGaugeConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Float64ObservableGaugeConfig) Unit() string {
return c.unit
}
// Callbacks returns the configured callbacks.
func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
return c.callbacks
}
// Float64ObservableGaugeOption applies options to a
// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
// [InstrumentOption] for other options that can be used as a
// Float64ObservableGaugeOption.
type Float64ObservableGaugeOption interface {
applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
}
// Float64Observer is a recorder of float64 measurements.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64Observer interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64Observer
// Observe records the float64 value.
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Observe(value float64, options ...ObserveOption)
}
// Float64Callback is a function registered with a Meter that makes
// observations for a Float64Observable instrument it is registered with.
// Calls to the Float64Observer record measurement values for the
// Float64Observable.
//
// The function needs to complete in a finite amount of time and the deadline
// of the passed context is expected to be honored.
//
// The function needs to make unique observations across all registered
// Float64Callbacks. That is, it should not report measurements with the same
// attributes as another Float64Callback registered for the same
// instrument.
//
// The function needs to be concurrent safe.
type Float64Callback func(context.Context, Float64Observer) error
// Float64ObservableOption applies options to float64 Observer instruments.
type Float64ObservableOption interface {
Float64ObservableCounterOption
Float64ObservableUpDownCounterOption
Float64ObservableGaugeOption
}
type float64CallbackOpt struct {
cback Float64Callback
}
func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
cfg.callbacks = append(cfg.callbacks, o.cback)
return cfg
}
func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
cfg.callbacks = append(cfg.callbacks, o.cback)
return cfg
}
func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
cfg.callbacks = append(cfg.callbacks, o.cback)
return cfg
}
// WithFloat64Callback adds callback to be called for an instrument.
func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
return float64CallbackOpt{callback}
}
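// Example sketch (not part of this vendored file) of wiring a Float64Callback
// through the WithFloat64Callback option above. The instrument name and unit
// are illustrative; an SDK invokes the callback once per collection cycle.
package main

import (
	"context"
	"math/rand"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example.io/instrumentation")

	_, err := meter.Float64ObservableGauge(
		"cpu.temperature",
		metric.WithUnit("Cel"),
		metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
			o.Observe(rand.Float64() * 100) // placeholder reading
			return nil
		}),
	)
	if err != nil {
		panic(err)
	}
}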

269
vendor/go.opentelemetry.io/otel/metric/asyncint64.go generated vendored Normal file
View File

@ -0,0 +1,269 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"go.opentelemetry.io/otel/metric/embedded"
)
// Int64Observable describes a set of instruments used asynchronously to record
// int64 measurements once per collection cycle. Observations of these
// instruments are only made within a callback.
//
// Warning: Methods may be added to this interface in minor releases.
type Int64Observable interface {
Observable
int64Observable()
}
// Int64ObservableCounter is an instrument used to asynchronously record
// increasing int64 measurements once per collection cycle. Observations are
// only made within a callback for this instrument. The value observed is
// assumed to be the cumulative sum of the count.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64ObservableCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64ObservableCounter
Int64Observable
}
// Int64ObservableCounterConfig contains options for asynchronous counter
// instruments that record int64 values.
type Int64ObservableCounterConfig struct {
description string
unit string
callbacks []Int64Callback
}
// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
// with all opts applied.
func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
var config Int64ObservableCounterConfig
for _, o := range opts {
config = o.applyInt64ObservableCounter(config)
}
return config
}
// Description returns the configured description.
func (c Int64ObservableCounterConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Int64ObservableCounterConfig) Unit() string {
return c.unit
}
// Callbacks returns the configured callbacks.
func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
return c.callbacks
}
// Int64ObservableCounterOption applies options to a
// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
// [InstrumentOption] for other options that can be used as an
// Int64ObservableCounterOption.
type Int64ObservableCounterOption interface {
applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
}
// Int64ObservableUpDownCounter is an instrument used to asynchronously record
// int64 measurements once per collection cycle. Observations are only made
// within a callback for this instrument. The value observed is assumed to
// be the cumulative sum of the count.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64ObservableUpDownCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64ObservableUpDownCounter
Int64Observable
}
// Int64ObservableUpDownCounterConfig contains options for asynchronous counter
// instruments that record int64 values.
type Int64ObservableUpDownCounterConfig struct {
description string
unit string
callbacks []Int64Callback
}
// NewInt64ObservableUpDownCounterConfig returns a new
// [Int64ObservableUpDownCounterConfig] with all opts applied.
func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
var config Int64ObservableUpDownCounterConfig
for _, o := range opts {
config = o.applyInt64ObservableUpDownCounter(config)
}
return config
}
// Description returns the configured description.
func (c Int64ObservableUpDownCounterConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Int64ObservableUpDownCounterConfig) Unit() string {
return c.unit
}
// Callbacks returns the configured callbacks.
func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
return c.callbacks
}
// Int64ObservableUpDownCounterOption applies options to a
// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
// [InstrumentOption] for other options that can be used as an
// Int64ObservableUpDownCounterOption.
type Int64ObservableUpDownCounterOption interface {
applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
}
// Int64ObservableGauge is an instrument used to asynchronously record
// instantaneous int64 measurements once per collection cycle. Observations are
// only made within a callback for this instrument.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64ObservableGauge interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64ObservableGauge
Int64Observable
}
// Int64ObservableGaugeConfig contains options for asynchronous gauge
// instruments that record int64 values.
type Int64ObservableGaugeConfig struct {
description string
unit string
callbacks []Int64Callback
}
// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
// with all opts applied.
func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
var config Int64ObservableGaugeConfig
for _, o := range opts {
config = o.applyInt64ObservableGauge(config)
}
return config
}
// Description returns the configured description.
func (c Int64ObservableGaugeConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Int64ObservableGaugeConfig) Unit() string {
return c.unit
}
// Callbacks returns the configured callbacks.
func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
return c.callbacks
}
// Int64ObservableGaugeOption applies options to a
// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
// [InstrumentOption] for other options that can be used as an
// Int64ObservableGaugeOption.
type Int64ObservableGaugeOption interface {
applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
}
// Int64Observer is a recorder of int64 measurements.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64Observer interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64Observer
// Observe records the int64 value.
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Observe(value int64, options ...ObserveOption)
}
// Int64Callback is a function registered with a Meter that makes observations
// for an Int64Observable instrument it is registered with. Calls to the
// Int64Observer record measurement values for the Int64Observable.
//
// The function needs to complete in a finite amount of time and the deadline
// of the passed context is expected to be honored.
//
// The function needs to make unique observations across all registered
// Int64Callbacks. Meaning, it should not report measurements with the same
// attributes as another Int64Callback also registered for the same
// instrument.
//
// The function needs to be concurrent safe.
type Int64Callback func(context.Context, Int64Observer) error
// Int64ObservableOption applies options to int64 Observer instruments.
type Int64ObservableOption interface {
Int64ObservableCounterOption
Int64ObservableUpDownCounterOption
Int64ObservableGaugeOption
}
type int64CallbackOpt struct {
cback Int64Callback
}
func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
cfg.callbacks = append(cfg.callbacks, o.cback)
return cfg
}
func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
cfg.callbacks = append(cfg.callbacks, o.cback)
return cfg
}
func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
cfg.callbacks = append(cfg.callbacks, o.cback)
return cfg
}
// WithInt64Callback adds a callback to be called for an instrument.
func WithInt64Callback(callback Int64Callback) Int64ObservableOption {
return int64CallbackOpt{callback}
}
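For context, a minimal sketch of how the pieces in this file fit together, assuming a previously obtained metric.Meter; the package name, instrument name, attribute, and observed value are placeholders, not part of the vendored code:

package example

import (
    "context"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/metric"
)

// observeProcessed attaches an Int64Callback at instrument creation time via
// WithInt64Callback. The callback's Int64Observer records the value once per
// collection cycle.
func observeProcessed(meter metric.Meter) error {
    _, err := meter.Int64ObservableCounter(
        "hypothetical.items.processed",
        metric.WithDescription("Total items processed."),
        metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
            // Placeholder value observed once per collection cycle.
            o.Observe(42, metric.WithAttributes(attribute.String("worker", "a")))
            return nil
        }),
    )
    return err
}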

92
vendor/go.opentelemetry.io/otel/metric/config.go generated vendored Normal file
View File

@ -0,0 +1,92 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import "go.opentelemetry.io/otel/attribute"
// MeterConfig contains options for Meters.
type MeterConfig struct {
instrumentationVersion string
schemaURL string
attrs attribute.Set
// Ensure forward compatibility by explicitly making this not comparable.
noCmp [0]func() //nolint: unused // This is indeed used.
}
// InstrumentationVersion returns the version of the library providing
// instrumentation.
func (cfg MeterConfig) InstrumentationVersion() string {
return cfg.instrumentationVersion
}
// InstrumentationAttributes returns the attributes associated with the library
// providing instrumentation.
func (cfg MeterConfig) InstrumentationAttributes() attribute.Set {
return cfg.attrs
}
// SchemaURL is the schema_url of the library providing instrumentation.
func (cfg MeterConfig) SchemaURL() string {
return cfg.schemaURL
}
// MeterOption is an interface for applying Meter options.
type MeterOption interface {
// applyMeter is used to set a MeterOption value of a MeterConfig.
applyMeter(MeterConfig) MeterConfig
}
// NewMeterConfig creates a new MeterConfig and applies
// all the given options.
func NewMeterConfig(opts ...MeterOption) MeterConfig {
var config MeterConfig
for _, o := range opts {
config = o.applyMeter(config)
}
return config
}
type meterOptionFunc func(MeterConfig) MeterConfig
func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig {
return fn(cfg)
}
// WithInstrumentationVersion sets the instrumentation version.
func WithInstrumentationVersion(version string) MeterOption {
return meterOptionFunc(func(config MeterConfig) MeterConfig {
config.instrumentationVersion = version
return config
})
}
// WithInstrumentationAttributes sets the instrumentation attributes.
//
// The passed attributes will be de-duplicated.
func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
return meterOptionFunc(func(config MeterConfig) MeterConfig {
config.attrs = attribute.NewSet(attr...)
return config
})
}
// WithSchemaURL sets the schema URL.
func WithSchemaURL(schemaURL string) MeterOption {
return meterOptionFunc(func(config MeterConfig) MeterConfig {
config.schemaURL = schemaURL
return config
})
}
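A short sketch, assuming placeholder version and schema URL values, of how an SDK or test might fold MeterOptions into a MeterConfig with the constructors above:

package example

import "go.opentelemetry.io/otel/metric"

// meterConfigExample shows MeterOptions accumulating into a MeterConfig.
// The version string and schema URL are placeholders.
func meterConfigExample() metric.MeterConfig {
    return metric.NewMeterConfig(
        metric.WithInstrumentationVersion("v1.2.3"),
        metric.WithSchemaURL("https://opentelemetry.io/schemas/1.20.0"),
    )
}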

170
vendor/go.opentelemetry.io/otel/metric/doc.go generated vendored Normal file
View File

@ -0,0 +1,170 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package metric provides the OpenTelemetry API used to measure metrics about
source code operation.
This API is separate from its implementation so the instrumentation built from
it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official
OpenTelemetry implementation of this API.
All measurements made with this package are made via instruments. These
instruments are created by a [Meter] which itself is created by a
[MeterProvider]. Applications need to accept a [MeterProvider] implementation
as a starting point when instrumenting. This can be done directly, or by using
the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an
appropriately named [Meter] from the accepted [MeterProvider], instrumentation
can then be built from the [Meter]'s instruments.
# Instruments
Each instrument is designed to make measurements of a particular type. Broadly,
all instruments fall into two overlapping logical categories: asynchronous or
synchronous, and int64 or float64.
All synchronous instruments ([Int64Counter], [Int64UpDownCounter],
[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
[Float64Histogram]) are used to measure the operation and performance of source
code during the source code execution. These instruments only make measurements
when the source code they instrument is run.
All asynchronous instruments ([Int64ObservableCounter],
[Int64ObservableUpDownCounter], [Int64ObservableGauge],
[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
[Float64ObservableGauge]) are used to measure metrics outside of the execution
of source code. They are said to make "observations" via a callback function
called once every measurement collection cycle.
Each instrument is also grouped by the value type it measures. Either int64 or
float64. The value being measured will dictate which instrument in these
categories to use.
Outside of these two broad categories, instruments are described by the
function they are designed to serve. All Counters ([Int64Counter],
[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are
designed to measure values that never decrease in value, but instead only
incrementally increase in value. UpDownCounters ([Int64UpDownCounter],
[Float64UpDownCounter], [Int64ObservableUpDownCounter], and
[Float64ObservableUpDownCounter]) on the other hand, are designed to measure
values that can increase and decrease. When more information needs to be
conveyed about all the synchronous measurements made during a collection cycle,
a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally,
when just the most recent measurement needs to be conveyed about an
asynchronous measurement, a Gauge ([Int64ObservableGauge] and
[Float64ObservableGauge]) should be used.
See the [OpenTelemetry documentation] for more information about instruments
and their intended use.
# Measurements
Measurements are made by recording values and information about the values with
an instrument. How these measurements are recorded depends on the instrument.
Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
[Float64Histogram]) are recorded using the instrument methods directly. All
counter instruments have an Add method that is used to measure an increment
value, and all histogram instruments have a Record method to measure a data
point.
Asynchronous instruments ([Int64ObservableCounter],
[Int64ObservableUpDownCounter], [Int64ObservableGauge],
[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
[Float64ObservableGauge]) record measurements within a callback function. The
callback is registered with the Meter which ensures the callback is called once
per collection cycle. A callback can be registered two ways: during the
instrument's creation using an option, or later using the RegisterCallback
method of the [Meter] that created the instrument.
If the following criteria are met, an option ([WithInt64Callback] or
[WithFloat64Callback]) can be used during the asynchronous instrument's
creation to register a callback ([Int64Callback] or [Float64Callback],
respectively):
- The measurement process is known when the instrument is created
- Only that instrument will make a measurement within the callback
- The callback never needs to be unregistered
If the criteria are not met, use the RegisterCallback method of the [Meter] that
created the instrument to register a [Callback].
# API Implementations
This package does not conform to the standard Go versioning policy; all of its
interfaces may have methods added to them without a package major version bump.
This non-standard API evolution could surprise an uninformed implementation
author. They could unknowingly build their implementation in a way that would
result in a runtime panic for their users that update to the new API.
The API is designed to help inform an instrumentation author about this
non-standard API evolution. It requires them to choose a default behavior for
unimplemented interface methods. There are three behavior choices they can
make:
- Compilation failure
- Panic
- Default to another implementation
All interfaces in this API embed a corresponding interface from
[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
behavior of their implementations to be a compilation failure, signaling to
their users they need to update to the latest version of that implementation,
they need to embed the corresponding interface from
[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
example,
import "go.opentelemetry.io/otel/metric/embedded"
type MeterProvider struct {
embedded.MeterProvider
// ...
}
If an author wants the default behavior of their implementations to be a panic,
they need to embed the API interface directly.
import "go.opentelemetry.io/otel/metric"
type MeterProvider struct {
metric.MeterProvider
// ...
}
This is not a recommended behavior as it could lead to publishing packages that
contain runtime panics when users update other packages that use newer versions
of [go.opentelemetry.io/otel/metric].
Finally, an author can embed another implementation in theirs. The embedded
implementation will be used for methods not defined by the author. For example,
an author who wants to default to silently dropping the call can use
[go.opentelemetry.io/otel/metric/noop]:
import "go.opentelemetry.io/otel/metric/noop"
type MeterProvider struct {
noop.MeterProvider
// ...
}
It is strongly recommended that authors only embed
[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
That implementation is the only one OpenTelemetry authors can guarantee will
fully implement all the API interfaces when a user updates their API.
[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
*/
package metric // import "go.opentelemetry.io/otel/metric"
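To make the Measurements discussion above concrete, a minimal sketch contrasting the two measurement styles, assuming a previously obtained metric.Meter; the instrument names and values are placeholders:

package example

import (
    "context"

    "go.opentelemetry.io/otel/metric"
)

// instrumentExample records a synchronous counter directly with Add, and an
// asynchronous gauge from a callback registered with RegisterCallback.
func instrumentExample(ctx context.Context, meter metric.Meter) error {
    requests, err := meter.Int64Counter("hypothetical.requests")
    if err != nil {
        return err
    }
    requests.Add(ctx, 1) // synchronous: measured where the instrumented code runs

    queueDepth, err := meter.Int64ObservableGauge("hypothetical.queue.depth")
    if err != nil {
        return err
    }
    // asynchronous: observed once per collection cycle
    _, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
        o.ObserveInt64(queueDepth, 7) // placeholder value
        return nil
    }, queueDepth)
    return err
}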

234
vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go generated vendored Normal file
View File

@ -0,0 +1,234 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package embedded provides interfaces embedded within the [OpenTelemetry
// metric API].
//
// Implementers of the [OpenTelemetry metric API] can embed the relevant type
// from this package into their implementation directly. Doing so will result
// in a compilation error for users when the [OpenTelemetry metric API] is
// extended (which is something that can happen without a major version bump of
// the API package).
//
// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
package embedded // import "go.opentelemetry.io/otel/metric/embedded"
// MeterProvider is embedded in
// [go.opentelemetry.io/otel/metric.MeterProvider].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to
// experience a compilation error, signaling they need to update to your latest
// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
type MeterProvider interface{ meterProvider() }
// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a
// compilation error, signaling they need to update to your latest
// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface
// is extended (which is something that can happen without a major version bump
// of the API package).
type Meter interface{ meter() }
// Float64Observer is embedded in
// [go.opentelemetry.io/otel/metric.Float64Observer].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Float64Observer] if you want
// users to experience a compilation error, signaling they need to update to
// your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Float64Observer] interface is
// extended (which is something that can happen without a major version bump of
// the API package).
type Float64Observer interface{ float64Observer() }
// Int64Observer is embedded in
// [go.opentelemetry.io/otel/metric.Int64Observer].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users
// to experience a compilation error, signaling they need to update to your
// latest implementation, when the
// [go.opentelemetry.io/otel/metric.Int64Observer] interface is
// extended (which is something that can happen without a major version bump of
// the API package).
type Int64Observer interface{ int64Observer() }
// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a
// compilation error, signaling they need to update to your latest
// implementation, when the [go.opentelemetry.io/otel/metric.Observer]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
type Observer interface{ observer() }
// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Registration] if you want users to
// experience a compilation error, signaling they need to update to your latest
// implementation, when the [go.opentelemetry.io/otel/metric.Registration]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
type Registration interface{ registration() }
// Float64Counter is embedded in
// [go.opentelemetry.io/otel/metric.Float64Counter].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Float64Counter] if you want
// users to experience a compilation error, signaling they need to update to
// your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Float64Counter] interface is
// extended (which is something that can happen without a major version bump of
// the API package).
type Float64Counter interface{ float64Counter() }
// Float64Histogram is embedded in
// [go.opentelemetry.io/otel/metric.Float64Histogram].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want
// users to experience a compilation error, signaling they need to update to
// your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is
// extended (which is something that can happen without a major version bump of
// the API package).
type Float64Histogram interface{ float64Histogram() }
// Float64ObservableCounter is embedded in
// [go.opentelemetry.io/otel/metric.Float64ObservableCounter].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you
// want users to experience a compilation error, signaling they need to update
// to your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
type Float64ObservableCounter interface{ float64ObservableCounter() }
// Float64ObservableGauge is embedded in
// [go.opentelemetry.io/otel/metric.Float64ObservableGauge].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you
// want users to experience a compilation error, signaling they need to update
// to your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
type Float64ObservableGauge interface{ float64ObservableGauge() }
// Float64ObservableUpDownCounter is embedded in
// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
// if you want users to experience a compilation error, signaling they need to
// update to your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() }
// Float64UpDownCounter is embedded in
// [go.opentelemetry.io/otel/metric.Float64UpDownCounter].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you
// want users to experience a compilation error, signaling they need to update
// to your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface
// is extended (which is something that can happen without a major version bump
// of the API package).
type Float64UpDownCounter interface{ float64UpDownCounter() }
// Int64Counter is embedded in
// [go.opentelemetry.io/otel/metric.Int64Counter].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users
// to experience a compilation error, signaling they need to update to your
// latest implementation, when the
// [go.opentelemetry.io/otel/metric.Int64Counter] interface is
// extended (which is something that can happen without a major version bump of
// the API package).
type Int64Counter interface{ int64Counter() }
// Int64Histogram is embedded in
// [go.opentelemetry.io/otel/metric.Int64Histogram].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want
// users to experience a compilation error, signaling they need to update to
// your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is
// extended (which is something that can happen without a major version bump of
// the API package).
type Int64Histogram interface{ int64Histogram() }
// Int64ObservableCounter is embedded in
// [go.opentelemetry.io/otel/metric.Int64ObservableCounter].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you
// want users to experience a compilation error, signaling they need to update
// to your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
type Int64ObservableCounter interface{ int64ObservableCounter() }
// Int64ObservableGauge is embedded in
// [go.opentelemetry.io/otel/metric.Int64ObservableGauge].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you
// want users to experience a compilation error, signaling they need to update
// to your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface
// is extended (which is something that can happen without a major version bump
// of the API package).
type Int64ObservableGauge interface{ int64ObservableGauge() }
// Int64ObservableUpDownCounter is embedded in
// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if
// you want users to experience a compilation error, signaling they need to
// update to your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]
// interface is extended (which is something that can happen without a major
// version bump of the API package).
type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() }
// Int64UpDownCounter is embedded in
// [go.opentelemetry.io/otel/metric.Int64UpDownCounter].
//
// Embed this interface in your implementation of the
// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want
// users to experience a compilation error, signaling they need to update to
// your latest implementation, when the
// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is
// extended (which is something that can happen without a major version bump of
// the API package).
type Int64UpDownCounter interface{ int64UpDownCounter() }
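A minimal sketch of the embedding pattern this package enables, assuming a hypothetical SDK package; only the compile-error default behavior described above is illustrated:

package example

import (
    "context"

    "go.opentelemetry.io/otel/metric"
    "go.opentelemetry.io/otel/metric/embedded"
)

// counter embeds embedded.Int64Counter so that, if the metric API later adds
// a method to metric.Int64Counter, this type stops compiling instead of
// panicking at runtime.
type counter struct {
    embedded.Int64Counter
}

// Add is the only method the current API requires.
func (counter) Add(context.Context, int64, ...metric.AddOption) {}

// Compile-time check that counter implements the API interface.
var _ metric.Int64Counter = counter{}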

332
vendor/go.opentelemetry.io/otel/metric/instrument.go generated vendored Normal file
View File

@ -0,0 +1,332 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import "go.opentelemetry.io/otel/attribute"
// Observable is used as a grouping mechanism for all instruments that are
// updated within a Callback.
type Observable interface {
observable()
}
// InstrumentOption applies options to all instruments.
type InstrumentOption interface {
Int64CounterOption
Int64UpDownCounterOption
Int64HistogramOption
Int64ObservableCounterOption
Int64ObservableUpDownCounterOption
Int64ObservableGaugeOption
Float64CounterOption
Float64UpDownCounterOption
Float64HistogramOption
Float64ObservableCounterOption
Float64ObservableUpDownCounterOption
Float64ObservableGaugeOption
}
type descOpt string
func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
c.description = string(o)
return c
}
func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
c.description = string(o)
return c
}
func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
c.description = string(o)
return c
}
func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
c.description = string(o)
return c
}
func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
c.description = string(o)
return c
}
func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
c.description = string(o)
return c
}
func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
c.description = string(o)
return c
}
func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
c.description = string(o)
return c
}
func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
c.description = string(o)
return c
}
func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
c.description = string(o)
return c
}
func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
c.description = string(o)
return c
}
func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
c.description = string(o)
return c
}
// WithDescription sets the instrument description.
func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
type unitOpt string
func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
c.unit = string(o)
return c
}
func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
c.unit = string(o)
return c
}
// WithUnit sets the instrument unit.
func WithUnit(u string) InstrumentOption { return unitOpt(u) }
// AddOption applies options to an addition measurement. See
// [MeasurementOption] for other options that can be used as an AddOption.
type AddOption interface {
applyAdd(AddConfig) AddConfig
}
// AddConfig contains options for an addition measurement.
type AddConfig struct {
attrs attribute.Set
}
// NewAddConfig returns a new [AddConfig] with all opts applied.
func NewAddConfig(opts []AddOption) AddConfig {
config := AddConfig{attrs: *attribute.EmptySet()}
for _, o := range opts {
config = o.applyAdd(config)
}
return config
}
// Attributes returns the configured attribute set.
func (c AddConfig) Attributes() attribute.Set {
return c.attrs
}
// RecordOption applies options to a recorded measurement. See
// [MeasurementOption] for other options that can be used as a RecordOption.
type RecordOption interface {
applyRecord(RecordConfig) RecordConfig
}
// RecordConfig contains options for a recorded measurement.
type RecordConfig struct {
attrs attribute.Set
}
// NewRecordConfig returns a new [RecordConfig] with all opts applied.
func NewRecordConfig(opts []RecordOption) RecordConfig {
config := RecordConfig{attrs: *attribute.EmptySet()}
for _, o := range opts {
config = o.applyRecord(config)
}
return config
}
// Attributes returns the configured attribute set.
func (c RecordConfig) Attributes() attribute.Set {
return c.attrs
}
// ObserveOption applies options to an observed measurement. See
// [MeasurementOption] for other options that can be used as an ObserveOption.
type ObserveOption interface {
applyObserve(ObserveConfig) ObserveConfig
}
// ObserveConfig contains options for an observed measurement.
type ObserveConfig struct {
attrs attribute.Set
}
// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
func NewObserveConfig(opts []ObserveOption) ObserveConfig {
config := ObserveConfig{attrs: *attribute.EmptySet()}
for _, o := range opts {
config = o.applyObserve(config)
}
return config
}
// Attributes returns the configured attribute set.
func (c ObserveConfig) Attributes() attribute.Set {
return c.attrs
}
// MeasurementOption applies options to all instrument measurements.
type MeasurementOption interface {
AddOption
RecordOption
ObserveOption
}
type attrOpt struct {
set attribute.Set
}
// mergeSets returns the union of keys between a and b. Any duplicate keys will
// use the value associated with b.
func mergeSets(a, b attribute.Set) attribute.Set {
// NewMergeIterator uses the first value for any duplicates.
iter := attribute.NewMergeIterator(&b, &a)
merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
for iter.Next() {
merged = append(merged, iter.Attribute())
}
return attribute.NewSet(merged...)
}
func (o attrOpt) applyAdd(c AddConfig) AddConfig {
switch {
case o.set.Len() == 0:
case c.attrs.Len() == 0:
c.attrs = o.set
default:
c.attrs = mergeSets(c.attrs, o.set)
}
return c
}
func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
switch {
case o.set.Len() == 0:
case c.attrs.Len() == 0:
c.attrs = o.set
default:
c.attrs = mergeSets(c.attrs, o.set)
}
return c
}
func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
switch {
case o.set.Len() == 0:
case c.attrs.Len() == 0:
c.attrs = o.set
default:
c.attrs = mergeSets(c.attrs, o.set)
}
return c
}
// WithAttributeSet sets the attribute Set with which a measurement is made.
//
// If multiple WithAttributeSet or WithAttributes options are passed the
// attributes will be merged together in the order they are passed. Attributes
// with duplicate keys will use the last value passed.
func WithAttributeSet(attributes attribute.Set) MeasurementOption {
return attrOpt{set: attributes}
}
// WithAttributes converts attributes into an attribute Set and sets the Set to
// be associated with a measurement. This is shorthand for:
//
// cp := make([]attribute.KeyValue, len(attributes))
// copy(cp, attributes)
// WithAttributeSet(attribute.NewSet(cp...))
//
// [attribute.NewSet] may modify the passed attributes so this will make a copy
// of attributes before creating a set in order to ensure this function is
// concurrent safe. This makes this option function less optimized in
// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
// preferred for performance sensitive code.
//
// See [WithAttributeSet] for information about how multiple WithAttributes are
// merged.
func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
cp := make([]attribute.KeyValue, len(attributes))
copy(cp, attributes)
return attrOpt{set: attribute.NewSet(cp...)}
}
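A short sketch of the two attribute options described above, assuming an already created counter; the attribute key and value are placeholders:

package example

import (
    "context"

    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/metric"
)

// addWithAttributes attaches attributes to a measurement two ways.
func addWithAttributes(ctx context.Context, counter metric.Int64Counter) {
    // Convenient, but copies the attributes on every call.
    counter.Add(ctx, 1, metric.WithAttributes(attribute.String("route", "/home")))

    // Preferred in hot paths: build the set once and reuse it.
    set := attribute.NewSet(attribute.String("route", "/home"))
    counter.Add(ctx, 1, metric.WithAttributeSet(set))
}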

210
vendor/go.opentelemetry.io/otel/metric/meter.go generated vendored Normal file
View File

@ -0,0 +1,210 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"go.opentelemetry.io/otel/metric/embedded"
)
// MeterProvider provides access to named Meter instances, for instrumenting
// an application or package.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type MeterProvider interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.MeterProvider
// Meter returns a new Meter with the provided name and configuration.
//
// A Meter should be scoped at most to a single package. The name needs to
// be unique so it does not collide with other names used by
// an application, nor other applications. To achieve this, the import path
// of the instrumentation package is recommended to be used as name.
//
// If the name is empty, then an implementation defined default name will
// be used instead.
Meter(name string, opts ...MeterOption) Meter
}
// Meter provides access to instrument instances for recording metrics.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Meter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Meter
// Int64Counter returns a new Int64Counter instrument identified by name
// and configured with options. The instrument is used to synchronously
// record increasing int64 measurements during a computational operation.
Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
// Int64UpDownCounter returns a new Int64UpDownCounter instrument
// identified by name and configured with options. The instrument is used
// to synchronously record int64 measurements during a computational
// operation.
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
// Int64Histogram returns a new Int64Histogram instrument identified by
// name and configured with options. The instrument is used to
// synchronously record the distribution of int64 measurements during a
// computational operation.
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
// Int64ObservableCounter returns a new Int64ObservableCounter identified
// by name and configured with options. The instrument is used to
// asynchronously record increasing int64 measurements once per a
// measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
// instrument identified by name and configured with options. The
// instrument is used to asynchronously record int64 measurements once per
// a measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
// Int64ObservableGauge returns a new Int64ObservableGauge instrument
// identified by name and configured with options. The instrument is used
// to asynchronously record instantaneous int64 measurements once per a
// measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithInt64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
// Float64Counter returns a new Float64Counter instrument identified by
// name and configured with options. The instrument is used to
// synchronously record increasing float64 measurements during a
// computational operation.
Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
// Float64UpDownCounter returns a new Float64UpDownCounter instrument
// identified by name and configured with options. The instrument is used
// to synchronously record float64 measurements during a computational
// operation.
Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
// Float64Histogram returns a new Float64Histogram instrument identified by
// name and configured with options. The instrument is used to
// synchronously record the distribution of float64 measurements during a
// computational operation.
Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
// Float64ObservableCounter returns a new Float64ObservableCounter
// instrument identified by name and configured with options. The
// instrument is used to asynchronously record increasing float64
// measurements once per a measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
// Float64ObservableUpDownCounter returns a new
// Float64ObservableUpDownCounter instrument identified by name and
// configured with options. The instrument is used to asynchronously record
// float64 measurements once per a measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
// Float64ObservableGauge returns a new Float64ObservableGauge instrument
// identified by name and configured with options. The instrument is used
// to asynchronously record instantaneous float64 measurements once per a
// measurement collection cycle.
//
// Measurements for the returned instrument are made via a callback. Use
// the WithFloat64Callback option to register the callback here, or use the
// RegisterCallback method of this Meter to register one later. See the
// Measurements section of the package documentation for more information.
Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
// RegisterCallback registers f to be called during the collection of a
// measurement cycle.
//
// If Unregister of the returned Registration is called, f needs to be
// unregistered and not called during collection.
//
// The instruments f is registered with are the only instruments that f may
// observe values for.
//
// If no instruments are passed, f should not be registered nor called
// during collection.
RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
}
// Callback is a function registered with a Meter that makes observations for
// the set of instruments it is registered with. The Observer parameter is used
// to record measurement observations for these instruments.
//
// The function needs to complete in a finite amount of time and the deadline
// of the passed context is expected to be honored.
//
// The function needs to make unique observations across all registered
// Callbacks. Meaning, it should not report measurements for an instrument with
// the same attributes as another Callback will report.
//
// The function needs to be concurrent safe.
type Callback func(context.Context, Observer) error
// Observer records measurements for multiple instruments in a Callback.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Observer interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Observer
// ObserveFloat64 records the float64 value for obsrv.
ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
// ObserveInt64 records the int64 value for obsrv.
ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
}
// Registration is a token representing the unique registration of a callback
// for a set of instruments with a Meter.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Registration interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Registration
// Unregister removes the callback registration from a Meter.
//
// This method needs to be idempotent and concurrent safe.
Unregister() error
}
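A minimal sketch of the Registration lifecycle described above, assuming a meter and an already created observable gauge; the observed value is a placeholder:

package example

import (
    "context"

    "go.opentelemetry.io/otel/metric"
)

// observeUntilStopped registers a callback for gauge and returns the
// Unregister function so the caller can stop further observations.
func observeUntilStopped(meter metric.Meter, gauge metric.Int64ObservableGauge) (func() error, error) {
    reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
        o.ObserveInt64(gauge, 7) // placeholder value
        return nil
    }, gauge)
    if err != nil {
        return nil, err
    }
    // Unregister is idempotent; once called, the callback is no longer invoked.
    return reg.Unregister, nil
}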

179
vendor/go.opentelemetry.io/otel/metric/syncfloat64.go generated vendored Normal file
View File

@ -0,0 +1,179 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"go.opentelemetry.io/otel/metric/embedded"
)
// Float64Counter is an instrument that records increasing float64 values.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64Counter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64Counter
// Add records a change to the counter.
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr float64, options ...AddOption)
}
// Float64CounterConfig contains options for synchronous counter instruments that
// record float64 values.
type Float64CounterConfig struct {
description string
unit string
}
// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
// applied.
func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
var config Float64CounterConfig
for _, o := range opts {
config = o.applyFloat64Counter(config)
}
return config
}
// Description returns the configured description.
func (c Float64CounterConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Float64CounterConfig) Unit() string {
return c.unit
}
// Float64CounterOption applies options to a [Float64CounterConfig]. See
// [InstrumentOption] for other options that can be used as a
// Float64CounterOption.
type Float64CounterOption interface {
applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
}
// Float64UpDownCounter is an instrument that records increasing or decreasing
// float64 values.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64UpDownCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64UpDownCounter
// Add records a change to the counter.
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr float64, options ...AddOption)
}
// Float64UpDownCounterConfig contains options for synchronous counter
// instruments that record float64 values.
type Float64UpDownCounterConfig struct {
description string
unit string
}
// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
// with all opts applied.
func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
var config Float64UpDownCounterConfig
for _, o := range opts {
config = o.applyFloat64UpDownCounter(config)
}
return config
}
// Description returns the configured description.
func (c Float64UpDownCounterConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Float64UpDownCounterConfig) Unit() string {
return c.unit
}
// Float64UpDownCounterOption applies options to a
// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
// can be used as a Float64UpDownCounterOption.
type Float64UpDownCounterOption interface {
applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
}
// Float64Histogram is an instrument that records a distribution of float64
// values.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Float64Histogram interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Float64Histogram
// Record adds an additional value to the distribution.
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, incr float64, options ...RecordOption)
}
// Float64HistogramConfig contains options for synchronous histogram
// instruments that record float64 values.
type Float64HistogramConfig struct {
description string
unit string
}
// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
// opts applied.
func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
var config Float64HistogramConfig
for _, o := range opts {
config = o.applyFloat64Histogram(config)
}
return config
}
// Description returns the configured description.
func (c Float64HistogramConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Float64HistogramConfig) Unit() string {
return c.unit
}
// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
// [InstrumentOption] for other options that can be used as a
// Float64HistogramOption.
type Float64HistogramOption interface {
applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
}
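A short sketch of the synchronous histogram described above, assuming a previously obtained meter; the instrument name and unit choice are placeholders:

package example

import (
    "context"
    "time"

    "go.opentelemetry.io/otel/metric"
)

// recordDuration records an elapsed duration, in seconds, into a Float64Histogram.
func recordDuration(ctx context.Context, meter metric.Meter, start time.Time) error {
    hist, err := meter.Float64Histogram(
        "hypothetical.request.duration",
        metric.WithDescription("Request duration."),
        metric.WithUnit("s"),
    )
    if err != nil {
        return err
    }
    hist.Record(ctx, time.Since(start).Seconds())
    return nil
}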

179
vendor/go.opentelemetry.io/otel/metric/syncint64.go generated vendored Normal file
View File

@ -0,0 +1,179 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metric // import "go.opentelemetry.io/otel/metric"
import (
"context"
"go.opentelemetry.io/otel/metric/embedded"
)
// Int64Counter is an instrument that records increasing int64 values.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64Counter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64Counter
// Add records a change to the counter.
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr int64, options ...AddOption)
}
// Int64CounterConfig contains options for synchronous counter instruments that
// record int64 values.
type Int64CounterConfig struct {
description string
unit string
}
// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
// applied.
func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
var config Int64CounterConfig
for _, o := range opts {
config = o.applyInt64Counter(config)
}
return config
}
// Description returns the configured description.
func (c Int64CounterConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Int64CounterConfig) Unit() string {
return c.unit
}
// Int64CounterOption applies options to a [Int64CounterConfig]. See
// [InstrumentOption] for other options that can be used as an
// Int64CounterOption.
type Int64CounterOption interface {
applyInt64Counter(Int64CounterConfig) Int64CounterConfig
}
// Int64UpDownCounter is an instrument that records increasing or decreasing
// int64 values.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64UpDownCounter interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64UpDownCounter
// Add records a change to the counter.
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr int64, options ...AddOption)
}
// Int64UpDownCounterConfig contains options for synchronous counter
// instruments that record int64 values.
type Int64UpDownCounterConfig struct {
description string
unit string
}
// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
// all opts applied.
func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
var config Int64UpDownCounterConfig
for _, o := range opts {
config = o.applyInt64UpDownCounter(config)
}
return config
}
// Description returns the configured description.
func (c Int64UpDownCounterConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Int64UpDownCounterConfig) Unit() string {
return c.unit
}
// Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig].
// See [InstrumentOption] for other options that can be used as an
// Int64UpDownCounterOption.
type Int64UpDownCounterOption interface {
applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
}
// Int64Histogram is an instrument that records a distribution of int64
// values.
//
// Warning: Methods may be added to this interface in minor releases. See
// package documentation on API implementation for information on how to set
// default behavior for unimplemented methods.
type Int64Histogram interface {
// Users of the interface can ignore this. This embedded type is only used
// by implementations of this interface. See the "API Implementations"
// section of the package documentation for more information.
embedded.Int64Histogram
// Record adds an additional value to the distribution.
//
// Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, incr int64, options ...RecordOption)
}
// Int64HistogramConfig contains options for synchronous histogram instruments
// that record int64 values.
type Int64HistogramConfig struct {
description string
unit string
}
// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
// applied.
func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
var config Int64HistogramConfig
for _, o := range opts {
config = o.applyInt64Histogram(config)
}
return config
}
// Description returns the configured description.
func (c Int64HistogramConfig) Description() string {
return c.description
}
// Unit returns the configured unit.
func (c Int64HistogramConfig) Unit() string {
return c.unit
}
// Int64HistogramOption applies options to a [Int64HistogramConfig]. See
// [InstrumentOption] for other options that can be used as an
// Int64HistogramOption.
type Int64HistogramOption interface {
applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
}
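A minimal usage sketch of the synchronous int64 instruments defined above, assuming the public otel, metric and attribute packages; the meter scope, instrument name and attribute key are illustrative, not taken from this diff.
package main
import (
	"context"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)
func main() {
	ctx := context.Background()
	// Obtain a Meter from the global MeterProvider; "example/http" is a made-up scope name.
	meter := otel.Meter("example/http")
	// WithDescription and WithUnit end up as the description/unit fields of Int64CounterConfig.
	requests, err := meter.Int64Counter("http.server.requests",
		metric.WithDescription("Number of HTTP requests received"),
		metric.WithUnit("{request}"))
	if err != nil {
		panic(err)
	}
	// Add records an increment; WithAttributes is the convenience form of WithAttributeSet.
	requests.Add(ctx, 1, metric.WithAttributes(attribute.String("http.route", "/items")))
}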

1
vendor/go.opentelemetry.io/otel/requirements.txt generated vendored Normal file
View File

@ -0,0 +1 @@
codespell==2.2.4

View File

@ -232,10 +232,12 @@ func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, r
if route != "" {
attrs = append(attrs, sc.HTTPRouteKey.String(route))
}
if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 {
if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 {
attrs = append(attrs, sc.HTTPClientIPKey.String(addresses[0]))
if values := request.Header["X-Forwarded-For"]; len(values) > 0 {
addr := values[0]
if i := strings.Index(addr, ","); i > 0 {
addr = addr[:i]
}
attrs = append(attrs, sc.HTTPClientIPKey.String(addr))
}
return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...)
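The new branch above keeps only the first hop of X-Forwarded-For rather than using SplitN. A standalone sketch of that parsing behavior (the helper name is illustrative):
package main
import (
	"fmt"
	"strings"
)
// firstForwardedFor mirrors the logic above: keep only the first (client)
// address from an X-Forwarded-For header value.
func firstForwardedFor(values []string) string {
	if len(values) == 0 {
		return ""
	}
	addr := values[0]
	if i := strings.Index(addr, ","); i > 0 {
		addr = addr[:i]
	}
	return addr
}
func main() {
	fmt.Println(firstForwardedFor([]string{"203.0.113.7, 10.0.0.1, 10.0.0.2"}))
	// Output: 203.0.113.7
}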

View File

@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
return "1.15.0"
return "1.16.0"
}

View File

@ -14,7 +14,7 @@
module-sets:
stable-v1:
version: v1.15.0
version: v1.16.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
@ -32,10 +32,11 @@ module-sets:
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
- go.opentelemetry.io/otel/exporters/stdout/stdouttrace
- go.opentelemetry.io/otel/exporters/zipkin
- go.opentelemetry.io/otel/metric
- go.opentelemetry.io/otel/sdk
- go.opentelemetry.io/otel/trace
experimental-metrics:
version: v0.38.0
version: v0.39.0
modules:
- go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
@ -44,7 +45,6 @@ module-sets:
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/prometheus
- go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- go.opentelemetry.io/otel/metric
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test

View File

@ -124,7 +124,7 @@
// not contain spaces or newlines).
//
// If Open is given access to a Verifiers including the
// Verifier for this key, then it will succeed at verifiying
// Verifier for this key, then it will succeed at verifying
// the encoded message and returning the parsed Note:
//
// vkey := "PeterNeumann+c74f20a3+ARpc2QcUPDhMQegwxbzhKqiBfsVkmqq/LDE4izWy10TW"

View File

@ -25,6 +25,11 @@
// later release.
package attributes
import (
"fmt"
"strings"
)
// Attributes is an immutable struct for storing and retrieving generic
// key/value pairs. Keys must be hashable, and users should define their own
// types for keys. Values should not be modified after they are added to an
@ -99,3 +104,27 @@ func (a *Attributes) Equal(o *Attributes) bool {
}
return true
}
// String prints the attribute map. If any key or values throughout the map
// implement fmt.Stringer, it calls that method and appends.
func (a *Attributes) String() string {
var sb strings.Builder
sb.WriteString("{")
first := true
for k, v := range a.m {
var key, val string
if str, ok := k.(interface{ String() string }); ok {
key = str.String()
}
if str, ok := v.(interface{ String() string }); ok {
val = str.String()
}
if !first {
sb.WriteString(", ")
}
sb.WriteString(fmt.Sprintf("%q: %q, ", key, val))
first = false
}
sb.WriteString("}")
return sb.String()
}

View File

@ -286,7 +286,7 @@ type PickResult struct {
//
// LB policies with child policies are responsible for propagating metadata
// injected by their children to the ClientConn, as part of Pick().
Metatada metadata.MD
Metadata metadata.MD
}
// TransientFailureError returns e. It exists for backward compatibility and

View File

@ -25,14 +25,20 @@ import (
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/buffer"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/status"
)
type ccbMode int
const (
ccbModeActive = iota
ccbModeIdle
ccbModeClosed
ccbModeExitingIdle
)
// ccBalancerWrapper sits between the ClientConn and the Balancer.
@ -49,192 +55,101 @@ import (
// It uses the gracefulswitch.Balancer internally to ensure that balancer
// switches happen in a graceful manner.
type ccBalancerWrapper struct {
cc *ClientConn
// The following fields are initialized when the wrapper is created and are
// read-only afterwards, and therefore can be accessed without a mutex.
cc *ClientConn
opts balancer.BuildOptions
// Since these fields are accessed only from handleXxx() methods which are
// synchronized by the watcher goroutine, we do not need a mutex to protect
// these fields.
// Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
// mutually exclusive manner as they are scheduled in the serializer. Fields
// accessed *only* in these serializer callbacks, can therefore be accessed
// without a mutex.
balancer *gracefulswitch.Balancer
curBalancerName string
updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher().
resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here.
closed *grpcsync.Event // Indicates if close has been called.
done *grpcsync.Event // Indicates if close has completed its work.
// mu guards access to the below fields. Access to the serializer and its
// cancel function needs to be mutex protected because they are overwritten
// when the wrapper exits idle mode.
mu sync.Mutex
serializer *grpcsync.CallbackSerializer // To serialize all outgoing calls.
serializerCancel context.CancelFunc // To close the serializer at close/enterIdle time.
mode ccbMode // Tracks the current mode of the wrapper.
}
// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
// is not created until the switchTo() method is invoked.
func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
ctx, cancel := context.WithCancel(context.Background())
ccb := &ccBalancerWrapper{
cc: cc,
updateCh: buffer.NewUnbounded(),
resultCh: buffer.NewUnbounded(),
closed: grpcsync.NewEvent(),
done: grpcsync.NewEvent(),
cc: cc,
opts: bopts,
serializer: grpcsync.NewCallbackSerializer(ctx),
serializerCancel: cancel,
}
go ccb.watcher()
ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
return ccb
}
// The following xxxUpdate structs wrap the arguments received as part of the
// corresponding update. The watcher goroutine uses the 'type' of the update to
// invoke the appropriate handler routine to handle the update.
type ccStateUpdate struct {
ccs *balancer.ClientConnState
}
type scStateUpdate struct {
sc balancer.SubConn
state connectivity.State
err error
}
type exitIdleUpdate struct{}
type resolverErrorUpdate struct {
err error
}
type switchToUpdate struct {
name string
}
type subConnUpdate struct {
acbw *acBalancerWrapper
}
// watcher is a long-running goroutine which reads updates from a channel and
// invokes corresponding methods on the underlying balancer. It ensures that
// these methods are invoked in a synchronous fashion. It also ensures that
// these methods are invoked in the order in which the updates were received.
func (ccb *ccBalancerWrapper) watcher() {
for {
select {
case u := <-ccb.updateCh.Get():
ccb.updateCh.Load()
if ccb.closed.HasFired() {
break
}
switch update := u.(type) {
case *ccStateUpdate:
ccb.handleClientConnStateChange(update.ccs)
case *scStateUpdate:
ccb.handleSubConnStateChange(update)
case *exitIdleUpdate:
ccb.handleExitIdle()
case *resolverErrorUpdate:
ccb.handleResolverError(update.err)
case *switchToUpdate:
ccb.handleSwitchTo(update.name)
case *subConnUpdate:
ccb.handleRemoveSubConn(update.acbw)
default:
logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update)
}
case <-ccb.closed.Done():
}
if ccb.closed.HasFired() {
ccb.handleClose()
return
}
}
}
// updateClientConnState is invoked by grpc to push a ClientConnState update to
// the underlying balancer.
//
// Unlike other methods invoked by grpc to push updates to the underlying
// balancer, this method cannot simply push the update onto the update channel
// and return. It needs to return the error returned by the underlying balancer
// back to grpc which propagates that to the resolver.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
ccb.updateCh.Put(&ccStateUpdate{ccs: ccs})
var res interface{}
select {
case res = <-ccb.resultCh.Get():
ccb.resultCh.Load()
case <-ccb.closed.Done():
// Return early if the balancer wrapper is closed while we are waiting for
// the underlying balancer to process a ClientConnState update.
return nil
}
// If the returned error is nil, attempting to type assert to error leads to
// panic. So, this needs to be handled separately.
if res == nil {
return nil
}
return res.(error)
}
// handleClientConnStateChange handles a ClientConnState update from the update
// channel and invokes the appropriate method on the underlying balancer.
//
// If the addresses specified in the update contain addresses of type "grpclb"
// and the selected LB policy is not "grpclb", these addresses will be filtered
// out and ccs will be modified with the updated address list.
func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) {
if ccb.curBalancerName != grpclbName {
// Filter any grpclb addresses since we don't have the grpclb balancer.
var addrs []resolver.Address
for _, addr := range ccs.ResolverState.Addresses {
if addr.Type == resolver.GRPCLB {
continue
ccb.mu.Lock()
errCh := make(chan error, 1)
// Here and everywhere else where Schedule() is called, it is done with the
// lock held. But the lock guards only the scheduling part. The actual
// callback is called asynchronously without the lock being held.
ok := ccb.serializer.Schedule(func(_ context.Context) {
// If the addresses specified in the update contain addresses of type
// "grpclb" and the selected LB policy is not "grpclb", these addresses
// will be filtered out and ccs will be modified with the updated
// address list.
if ccb.curBalancerName != grpclbName {
var addrs []resolver.Address
for _, addr := range ccs.ResolverState.Addresses {
if addr.Type == resolver.GRPCLB {
continue
}
addrs = append(addrs, addr)
}
addrs = append(addrs, addr)
ccs.ResolverState.Addresses = addrs
}
ccs.ResolverState.Addresses = addrs
errCh <- ccb.balancer.UpdateClientConnState(*ccs)
})
if !ok {
// If we are unable to schedule a function with the serializer, it
// indicates that it has been closed. A serializer is only closed when
// the wrapper is closed or is in idle.
ccb.mu.Unlock()
return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer")
}
ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs))
ccb.mu.Unlock()
// We get here only if the above call to Schedule succeeds, in which case it
// is guaranteed that the scheduled function will run. Therefore it is safe
// to block on this channel.
err := <-errCh
if logger.V(2) && err != nil {
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
}
return err
}
// updateSubConnState is invoked by grpc to push a subConn state update to the
// underlying balancer.
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
// When updating addresses for a SubConn, if the address in use is not in
// the new addresses, the old ac will be tearDown() and a new ac will be
// created. tearDown() generates a state change with Shutdown state, we
// don't want the balancer to receive this state change. So before
// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
// this function will be called with (nil, Shutdown). We don't need to call
// balancer method in this case.
if sc == nil {
return
}
ccb.updateCh.Put(&scStateUpdate{
sc: sc,
state: s,
err: err,
ccb.mu.Lock()
ccb.serializer.Schedule(func(_ context.Context) {
ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
})
}
// handleSubConnStateChange handles a SubConnState update from the update
// channel and invokes the appropriate method on the underlying balancer.
func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) {
ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err})
}
func (ccb *ccBalancerWrapper) exitIdle() {
ccb.updateCh.Put(&exitIdleUpdate{})
}
func (ccb *ccBalancerWrapper) handleExitIdle() {
if ccb.cc.GetState() != connectivity.Idle {
return
}
ccb.balancer.ExitIdle()
ccb.mu.Unlock()
}
func (ccb *ccBalancerWrapper) resolverError(err error) {
ccb.updateCh.Put(&resolverErrorUpdate{err: err})
}
func (ccb *ccBalancerWrapper) handleResolverError(err error) {
ccb.balancer.ResolverError(err)
ccb.mu.Lock()
ccb.serializer.Schedule(func(_ context.Context) {
ccb.balancer.ResolverError(err)
})
ccb.mu.Unlock()
}
// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
@ -248,24 +163,27 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) {
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
// the graceful balancer switching process if the name does not change.
func (ccb *ccBalancerWrapper) switchTo(name string) {
ccb.updateCh.Put(&switchToUpdate{name: name})
ccb.mu.Lock()
ccb.serializer.Schedule(func(_ context.Context) {
// TODO: Other languages use case-sensitive balancer registries. We should
// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
if strings.EqualFold(ccb.curBalancerName, name) {
return
}
ccb.buildLoadBalancingPolicy(name)
})
ccb.mu.Unlock()
}
// handleSwitchTo handles a balancer switch update from the update channel. It
// calls the SwitchTo() method on the gracefulswitch.Balancer with a
// balancer.Builder corresponding to name. If no balancer.Builder is registered
// for the given name, it uses the default LB policy which is "pick_first".
func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
// TODO: Other languages use case-insensitive balancer registries. We should
// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
if strings.EqualFold(ccb.curBalancerName, name) {
return
}
// TODO: Ensure that name is a registered LB policy when we get here.
// We currently only validate the `loadBalancingConfig` field. We need to do
// the same for the `loadBalancingPolicy` field and reject the service config
// if the specified policy is not registered.
// buildLoadBalancingPolicy performs the following:
// - retrieve a balancer builder for the given name. Use the default LB
// policy, pick_first, if no LB policy with name is found in the registry.
// - instruct the gracefulswitch balancer to switch to the above builder. This
// will actually build the new balancer.
// - update the `curBalancerName` field
//
// Must be called from a serializer callback.
func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
builder := balancer.Get(name)
if builder == nil {
channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
@ -281,26 +199,114 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) {
ccb.curBalancerName = builder.Name()
}
// handleRemoveSucConn handles a request from the underlying balancer to remove
// a subConn.
//
// See comments in RemoveSubConn() for more details.
func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) {
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}
func (ccb *ccBalancerWrapper) close() {
ccb.closed.Fire()
<-ccb.done.Done()
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
ccb.closeBalancer(ccbModeClosed)
}
func (ccb *ccBalancerWrapper) handleClose() {
ccb.balancer.Close()
ccb.done.Fire()
// enterIdleMode is invoked by grpc when the channel enters idle mode upon
// expiry of idle_timeout. This call blocks until the balancer is closed.
func (ccb *ccBalancerWrapper) enterIdleMode() {
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
ccb.closeBalancer(ccbModeIdle)
}
// closeBalancer is invoked when the channel is being closed or when it enters
// idle mode upon expiry of idle_timeout.
func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
ccb.mu.Lock()
if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
ccb.mu.Unlock()
return
}
ccb.mode = m
done := ccb.serializer.Done
b := ccb.balancer
ok := ccb.serializer.Schedule(func(_ context.Context) {
// Close the serializer to ensure that no more calls from gRPC are sent
// to the balancer.
ccb.serializerCancel()
// Empty the current balancer name because we don't have a balancer
// anymore and also so that we act on the next call to switchTo by
// creating a new balancer specified by the new resolver.
ccb.curBalancerName = ""
})
if !ok {
ccb.mu.Unlock()
return
}
ccb.mu.Unlock()
// Give enqueued callbacks a chance to finish.
<-done
// Spawn a goroutine to close the balancer (since it may block trying to
// cleanup all allocated resources) and return early.
go b.Close()
}
// exitIdleMode is invoked by grpc when the channel exits idle mode either
// because of an RPC or because of an invocation of the Connect() API. This
// recreates the balancer that was closed previously when entering idle mode.
//
// If the channel is not in idle mode, we know for a fact that we are here as a
// result of the user calling the Connect() method on the ClientConn. In this
// case, we can simply forward the call to the underlying balancer, instructing
// it to reconnect to the backends.
func (ccb *ccBalancerWrapper) exitIdleMode() {
ccb.mu.Lock()
if ccb.mode == ccbModeClosed {
// Request to exit idle is a no-op when wrapper is already closed.
ccb.mu.Unlock()
return
}
if ccb.mode == ccbModeIdle {
// Recreate the serializer which was closed when we entered idle.
ctx, cancel := context.WithCancel(context.Background())
ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
ccb.serializerCancel = cancel
}
// The ClientConn guarantees mutual exclusion between close() and
// exitIdleMode(), and since we just created a new serializer, we can be
// sure that the below function will be scheduled.
done := make(chan struct{})
ccb.serializer.Schedule(func(_ context.Context) {
defer close(done)
ccb.mu.Lock()
defer ccb.mu.Unlock()
if ccb.mode != ccbModeIdle {
ccb.balancer.ExitIdle()
return
}
// Gracefulswitch balancer does not support a switchTo operation after
// being closed. Hence we need to create a new one here.
ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
ccb.mode = ccbModeActive
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
})
ccb.mu.Unlock()
<-done
}
func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
ccb.mu.Lock()
defer ccb.mu.Unlock()
return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
}
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
if len(addrs) <= 0 {
if ccb.isIdleOrClosed() {
return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
}
if len(addrs) == 0 {
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
}
ac, err := ccb.cc.newAddrConn(addrs, opts)
@ -309,31 +315,35 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
return nil, err
}
acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
acbw.ac.mu.Lock()
ac.acbw = acbw
acbw.ac.mu.Unlock()
return acbw, nil
}
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
// Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it
// was required to handle the RemoveSubConn() method asynchronously by pushing
// the update onto the update channel. This was done to avoid a deadlock as
// switchBalancer() was holding cc.mu when calling Close() on the old
// balancer, which would in turn call RemoveSubConn().
//
// With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this
// asynchronously is probably not required anymore since the switchTo() method
// handles the balancer switch by pushing the update onto the channel.
// TODO(easwars): Handle this inline.
if ccb.isIdleOrClosed() {
// It is safe to ignore this call when the balancer is closed or in idle
// because the ClientConn takes care of closing the connections.
//
// Not returning early from here when the balancer is closed or in idle
// leads to a deadlock though, because of the following sequence of
// calls when holding cc.mu:
// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
// ccb.RemoveAddrConn --> cc.removeAddrConn
return
}
acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
}
ccb.updateCh.Put(&subConnUpdate{acbw: acbw})
ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
}
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
if ccb.isIdleOrClosed() {
return
}
acbw, ok := sc.(*acBalancerWrapper)
if !ok {
return
@ -342,6 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
}
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
if ccb.isIdleOrClosed() {
return
}
// Update picker before updating state. Even though the ordering here does
// not matter, it can lead to multiple calls of Pick in the common start-up
// case where we wait for ready and then perform an RPC. If the picker is
@ -352,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
}
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
if ccb.isIdleOrClosed() {
return
}
ccb.cc.resolveNow(o)
}
@ -362,71 +380,31 @@ func (ccb *ccBalancerWrapper) Target() string {
// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct {
ac *addrConn // read-only
mu sync.Mutex
ac *addrConn
producers map[balancer.ProducerBuilder]*refCountedProducer
}
func (acbw *acBalancerWrapper) String() string {
return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
}
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
acbw.mu.Lock()
defer acbw.mu.Unlock()
if len(addrs) <= 0 {
acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
return
}
if !acbw.ac.tryUpdateAddrs(addrs) {
cc := acbw.ac.cc
opts := acbw.ac.scopts
acbw.ac.mu.Lock()
// Set old ac.acbw to nil so the Shutdown state update will be ignored
// by balancer.
//
// TODO(bar) the state transition could be wrong when tearDown() old ac
// and creating new ac, fix the transition.
acbw.ac.acbw = nil
acbw.ac.mu.Unlock()
acState := acbw.ac.getState()
acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
if acState == connectivity.Shutdown {
return
}
newAC, err := cc.newAddrConn(addrs, opts)
if err != nil {
channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
return
}
acbw.ac = newAC
newAC.mu.Lock()
newAC.acbw = acbw
newAC.mu.Unlock()
if acState != connectivity.Idle {
go newAC.connect()
}
}
acbw.ac.updateAddrs(addrs)
}
func (acbw *acBalancerWrapper) Connect() {
acbw.mu.Lock()
defer acbw.mu.Unlock()
go acbw.ac.connect()
}
func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
acbw.mu.Lock()
defer acbw.mu.Unlock()
return acbw.ac
}
var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected")
// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
// ready, returns errSubConnNotReady.
// ready, blocks until it is or ctx expires. Returns an error when the context
// expires or the addrConn is shut down.
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
transport := acbw.ac.getReadyTransport()
if transport == nil {
return nil, errSubConnNotReady
transport, err := acbw.ac.getTransport(ctx)
if err != nil {
return nil, err
}
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
}
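The rewritten wrapper above serializes all gRPC-to-balancer calls by scheduling them on an internal callback serializer (grpcsync.CallbackSerializer) instead of the old update channel plus watcher goroutine. A minimal, self-contained sketch of that pattern, with a simplified API that is not gRPC's internal one:
package main
import (
	"context"
	"fmt"
)
// serializer runs scheduled callbacks one at a time, in order, on a single
// goroutine. It mirrors the idea behind the callback serializer used by
// ccBalancerWrapper above; it is a sketch, not the gRPC implementation.
type serializer struct {
	callbacks chan func(context.Context)
	done      chan struct{}
}
func newSerializer(ctx context.Context) *serializer {
	s := &serializer{
		callbacks: make(chan func(context.Context), 16),
		done:      make(chan struct{}),
	}
	go func() {
		defer close(s.done)
		for {
			select {
			case cb := <-s.callbacks:
				cb(ctx)
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}
// schedule enqueues cb for execution; it reports false once the serializer
// has been stopped via its context, mirroring Schedule() returning false for
// a closed or idle wrapper.
func (s *serializer) schedule(cb func(context.Context)) bool {
	select {
	case s.callbacks <- cb:
		return true
	case <-s.done:
		return false
	}
}
func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s := newSerializer(ctx)
	for i := 0; i < 3; i++ {
		i := i
		s.schedule(func(context.Context) { fmt.Println("callback", i) })
	}
	// Wait for the queued callbacks to run before exiting.
	wait := make(chan struct{})
	s.schedule(func(context.Context) { close(wait) })
	<-wait
}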

View File

@ -27,6 +27,11 @@ import (
//
// All errors returned by Invoke are compatible with the status package.
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
if err := cc.idlenessMgr.onCallBegin(); err != nil {
return err
}
defer cc.idlenessMgr.onCallEnd()
// allow interceptor to see all applicable call options, which means those
// configured as defaults from dial option as well as per-call options
opts = combine(cc.dopts.callOptions, opts)
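The onCallBegin/onCallEnd calls added above are how the channel tracks RPC activity for idleness. A sketch of that bookkeeping, using a hypothetical tracker rather than gRPC's internal idlenessManager: count in-flight calls and arm the idle timer only while the count is zero.
package main
import (
	"fmt"
	"sync"
	"time"
)
// idleTracker is an illustrative stand-in for the idleness bookkeeping above:
// it counts in-flight calls and arms an idle timer only while none are active.
type idleTracker struct {
	mu      sync.Mutex
	active  int
	timeout time.Duration
	timer   *time.Timer
}
func newIdleTracker(timeout time.Duration, onIdle func()) *idleTracker {
	return &idleTracker{
		timeout: timeout,
		timer:   time.AfterFunc(timeout, onIdle),
	}
}
func (t *idleTracker) onCallBegin() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.active++
	// Any RPC activity cancels a pending idleness transition.
	t.timer.Stop()
}
func (t *idleTracker) onCallEnd() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.active--
	if t.active == 0 {
		// Re-arm the timer once the last in-flight call finishes.
		t.timer.Reset(t.timeout)
	}
}
func main() {
	t := newIdleTracker(50*time.Millisecond, func() { fmt.Println("channel would enter idle mode") })
	t.onCallBegin()
	t.onCallEnd()
	time.Sleep(100 * time.Millisecond) // allow the idle timer to fire
}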

View File

@ -24,7 +24,6 @@ import (
"fmt"
"math"
"net/url"
"reflect"
"strings"
"sync"
"sync/atomic"
@ -69,6 +68,9 @@ var (
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
// errConnIdling indicates that the connection is being closed as the channel
// is moving to an idle mode due to inactivity.
errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
@ -134,17 +136,29 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
target: target,
csMgr: &connectivityStateManager{},
conns: make(map[*addrConn]struct{}),
dopts: defaultDialOptions(),
blockingpicker: newPickerWrapper(),
czData: new(channelzData),
firstResolveEvent: grpcsync.NewEvent(),
target: target,
csMgr: &connectivityStateManager{},
conns: make(map[*addrConn]struct{}),
dopts: defaultDialOptions(),
czData: new(channelzData),
}
// We start the channel off in idle mode, but kick it out of idle at the end
// of this method, instead of waiting for the first RPC. Other gRPC
// implementations do wait for the first RPC to kick the channel out of
// idle. But doing so would be a major behavior change for our users who are
// used to seeing the channel active after Dial.
//
// Taking this approach of kicking it out of idle at the end of this method
// allows us to share the code between channel creation and exiting idle
// mode. This will also make it easy for us to switch to starting the
// channel off in idle, if at all we ever get to do that.
cc.idlenessState = ccIdlenessStateIdle
cc.retryThrottler.Store((*retryThrottler)(nil))
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
cc.ctx, cc.cancel = context.WithCancel(context.Background())
cc.exitIdleCond = sync.NewCond(&cc.mu)
disableGlobalOpts := false
for _, opt := range opts {
@ -173,40 +187,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
pid := cc.dopts.channelzParentID
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target)
ted := &channelz.TraceEventDesc{
Desc: "Channel created",
Severity: channelz.CtInfo,
}
if cc.dopts.channelzParentID != nil {
ted.Parent = &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()),
Severity: channelz.CtInfo,
}
}
channelz.AddTraceEvent(logger, cc.channelzID, 1, ted)
cc.csMgr.channelzID = cc.channelzID
// Register ClientConn with channelz.
cc.channelzRegistration(target)
if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
return nil, errNoTransportSecurity
}
if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
return nil, errTransportCredsAndBundle
}
if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
return nil, errNoTransportCredsInBundle
}
transportCreds := cc.dopts.copts.TransportCredentials
if transportCreds == nil {
transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
}
if transportCreds.Info().SecurityProtocol == "insecure" {
for _, cd := range cc.dopts.copts.PerRPCCredentials {
if cd.RequireTransportSecurity() {
return nil, errTransportCredentialsMissing
}
}
if err := cc.validateTransportCredentials(); err != nil {
return nil, err
}
if cc.dopts.defaultServiceConfigRawJSON != nil {
@ -249,15 +234,12 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
// Determine the resolver to use.
resolverBuilder, err := cc.parseTargetAndFindResolver()
if err != nil {
if err := cc.parseTargetAndFindResolver(); err != nil {
return nil, err
}
cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts)
if err != nil {
if err = cc.determineAuthority(); err != nil {
return nil, err
}
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
if cc.dopts.scChan != nil {
// Blocking wait for the initial service config.
@ -275,57 +257,224 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
go cc.scWatcher()
}
// This creates the name resolver, load balancer, blocking picker etc.
if err := cc.exitIdleMode(); err != nil {
return nil, err
}
// Configure idleness support with configured idle timeout or default idle
// timeout duration. Idleness can be explicitly disabled by the user, by
// setting the dial option to 0.
cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout)
// Return early for non-blocking dials.
if !cc.dopts.block {
return cc, nil
}
// A blocking dial blocks until the clientConn is ready.
for {
s := cc.GetState()
if s == connectivity.Idle {
cc.Connect()
}
if s == connectivity.Ready {
return cc, nil
} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
if err = cc.connectionError(); err != nil {
terr, ok := err.(interface {
Temporary() bool
})
if ok && !terr.Temporary() {
return nil, err
}
}
}
if !cc.WaitForStateChange(ctx, s) {
// ctx got timeout or canceled.
if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
return nil, err
}
return nil, ctx.Err()
}
}
}
// addTraceEvent is a helper method to add a trace event on the channel. If the
// channel is a nested one, the same event is also added on the parent channel.
func (cc *ClientConn) addTraceEvent(msg string) {
ted := &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Channel %s", msg),
Severity: channelz.CtInfo,
}
if cc.dopts.channelzParentID != nil {
ted.Parent = &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg),
Severity: channelz.CtInfo,
}
}
channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
}
// exitIdleMode moves the channel out of idle mode by recreating the name
// resolver and load balancer.
func (cc *ClientConn) exitIdleMode() error {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return errConnClosing
}
if cc.idlenessState != ccIdlenessStateIdle {
cc.mu.Unlock()
logger.Info("ClientConn asked to exit idle mode when not in idle mode")
return nil
}
defer func() {
// When Close() and exitIdleMode() race against each other, one of the
// following two can happen:
// - Close() wins the race and runs first. exitIdleMode() runs after, and
// sees that the ClientConn is already closed and hence returns early.
// - exitIdleMode() wins the race and runs first and recreates the balancer
// and releases the lock before recreating the resolver. If Close() runs
// in this window, it will wait for exitIdleMode to complete.
//
// We achieve this synchronization using the below condition variable.
cc.mu.Lock()
cc.idlenessState = ccIdlenessStateActive
cc.exitIdleCond.Signal()
cc.mu.Unlock()
}()
cc.idlenessState = ccIdlenessStateExitingIdle
exitedIdle := false
if cc.blockingpicker == nil {
cc.blockingpicker = newPickerWrapper()
} else {
cc.blockingpicker.exitIdleMode()
exitedIdle = true
}
var credsClone credentials.TransportCredentials
if creds := cc.dopts.copts.TransportCredentials; creds != nil {
credsClone = creds.Clone()
}
cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
DialCreds: credsClone,
CredsBundle: cc.dopts.copts.CredsBundle,
Dialer: cc.dopts.copts.Dialer,
Authority: cc.authority,
CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParentID: cc.channelzID,
Target: cc.parsedTarget,
})
// Build the resolver.
rWrapper, err := newCCResolverWrapper(cc, resolverBuilder)
if err != nil {
return nil, fmt.Errorf("failed to build resolver: %v", err)
if cc.balancerWrapper == nil {
cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
DialCreds: credsClone,
CredsBundle: cc.dopts.copts.CredsBundle,
Dialer: cc.dopts.copts.Dialer,
Authority: cc.authority,
CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParentID: cc.channelzID,
Target: cc.parsedTarget,
})
} else {
cc.balancerWrapper.exitIdleMode()
}
cc.mu.Lock()
cc.resolverWrapper = rWrapper
cc.firstResolveEvent = grpcsync.NewEvent()
cc.mu.Unlock()
// A blocking dial blocks until the clientConn is ready.
if cc.dopts.block {
for {
cc.Connect()
s := cc.GetState()
if s == connectivity.Ready {
break
} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
if err = cc.connectionError(); err != nil {
terr, ok := err.(interface {
Temporary() bool
})
if ok && !terr.Temporary() {
return nil, err
}
}
}
if !cc.WaitForStateChange(ctx, s) {
// ctx got timeout or canceled.
if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
return nil, err
}
return nil, ctx.Err()
// This needs to be called without cc.mu because this builds a new resolver
// which might update state or report error inline which needs to be handled
// by cc.updateResolverState() which also grabs cc.mu.
if err := cc.initResolverWrapper(credsClone); err != nil {
return err
}
if exitedIdle {
cc.addTraceEvent("exiting idle mode")
}
return nil
}
// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
// name resolver, load balancer and any subchannels.
func (cc *ClientConn) enterIdleMode() error {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return ErrClientConnClosing
}
if cc.idlenessState != ccIdlenessStateActive {
logger.Error("ClientConn asked to enter idle mode when not active")
return nil
}
// cc.conns == nil is a proxy for the ClientConn being closed. So, instead
// of setting it to nil here, we recreate the map. This also means that we
// don't have to do this when exiting idle mode.
conns := cc.conns
cc.conns = make(map[*addrConn]struct{})
// TODO: Currently, we close the resolver wrapper upon entering idle mode
// and create a new one upon exiting idle mode. This means that the
// `cc.resolverWrapper` field would be overwritten every time we exit idle
// mode. While this means that we need to hold `cc.mu` when accessing
// `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should
// try to do the same for the balancer and picker wrappers too.
cc.resolverWrapper.close()
cc.blockingpicker.enterIdleMode()
cc.balancerWrapper.enterIdleMode()
cc.csMgr.updateState(connectivity.Idle)
cc.idlenessState = ccIdlenessStateIdle
cc.mu.Unlock()
go func() {
cc.addTraceEvent("entering idle mode")
for ac := range conns {
ac.tearDown(errConnIdling)
}
}()
return nil
}
// validateTransportCredentials performs a series of checks on the configured
// transport credentials. It returns a non-nil error if any of these conditions
// are met:
// - no transport creds and no creds bundle is configured
// - both transport creds and creds bundle are configured
// - creds bundle is configured, but it lacks a transport credentials
// - insecure transport creds configured alongside call creds that require
// transport level security
//
// If none of the above conditions are met, the configured credentials are
// deemed valid and a nil error is returned.
func (cc *ClientConn) validateTransportCredentials() error {
if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
return errNoTransportSecurity
}
if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
return errTransportCredsAndBundle
}
if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
return errNoTransportCredsInBundle
}
transportCreds := cc.dopts.copts.TransportCredentials
if transportCreds == nil {
transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
}
if transportCreds.Info().SecurityProtocol == "insecure" {
for _, cd := range cc.dopts.copts.PerRPCCredentials {
if cd.RequireTransportSecurity() {
return errTransportCredentialsMissing
}
}
}
return nil
}
return cc, nil
// channelzRegistration registers the newly created ClientConn with channelz and
// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`.
// A channelz trace event is emitted for ClientConn creation. If the newly
// created ClientConn is a nested one, i.e a valid parent ClientConn ID is
// specified via a dial option, the trace event is also added to the parent.
//
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
func (cc *ClientConn) channelzRegistration(target string) {
cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
cc.addTraceEvent("created")
cc.csMgr.channelzID = cc.channelzID
}
// chainUnaryClientInterceptors chains all unary client interceptors into one.
@ -471,7 +620,9 @@ type ClientConn struct {
authority string // See determineAuthority().
dopts dialOptions // Default and user specified dial options.
channelzID *channelz.Identifier // Channelz identifier for the channel.
resolverBuilder resolver.Builder // See parseTargetAndFindResolver().
balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath.
idlenessMgr idlenessManager
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
@ -492,11 +643,31 @@ type ClientConn struct {
sc *ServiceConfig // Latest service config received from the resolver.
conns map[*addrConn]struct{} // Set to nil on close.
mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
idlenessState ccIdlenessState // Tracks idleness state of the channel.
exitIdleCond *sync.Cond // Signalled when channel exits idle.
lceMu sync.Mutex // protects lastConnectionError
lastConnectionError error
}
// ccIdlenessState tracks the idleness state of the channel.
//
// Channels start off in `active` and move to `idle` after a period of
// inactivity. When moving back to `active` upon an incoming RPC, they
// transition through `exiting_idle`. This state is useful for synchronization
// with Close().
//
// This state tracking is mostly for self-protection. The idlenessManager is
// expected to keep track of the state as well, and is expected not to call into
// the ClientConn unnecessarily.
type ccIdlenessState int8
const (
ccIdlenessStateActive ccIdlenessState = iota
ccIdlenessStateIdle
ccIdlenessStateExitingIdle
)
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
//
@ -536,7 +707,10 @@ func (cc *ClientConn) GetState() connectivity.State {
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
cc.balancerWrapper.exitIdle()
cc.exitIdleMode()
// If the ClientConn was not in idle mode, we need to call ExitIdle on the
// LB policy so that connections can be created.
cc.balancerWrapper.exitIdleMode()
}
func (cc *ClientConn) scWatcher() {
@ -705,6 +879,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
dopts: cc.dopts,
czData: new(channelzData),
resetBackoff: make(chan struct{}),
stateChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Track ac in cc. This needs to be done before any getTransport(...) is called.
@ -798,9 +973,6 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
// Update connectivity state within the lock to prevent subsequent or
// concurrent calls from resetting the transport more than once.
ac.updateConnectivityState(connectivity.Connecting, nil)
ac.mu.Unlock()
ac.resetTransport()
@ -819,58 +991,62 @@ func equalAddresses(a, b []resolver.Address) bool {
return true
}
// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
//
// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
// addresses will be picked up by retry in the next iteration after backoff.
//
// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
//
// If the addresses is the same as the old list, it does nothing and returns
// true.
//
// If ac is Connecting, it returns false. The caller should tear down the ac and
// create a new one. Note that the backoff will be reset when this happens.
//
// If ac is Ready, it checks whether current connected address of ac is in the
// new addrs list.
// - If true, it updates ac.addrs and returns true. The ac will keep using
// the existing connection.
// - If false, it does nothing and returns false.
func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
// updateAddrs updates ac.addrs with the new addresses list and handles active
// connections or connection attempts.
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
ac.mu.Lock()
defer ac.mu.Unlock()
channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
if equalAddresses(ac.addrs, addrs) {
ac.mu.Unlock()
return
}
ac.addrs = addrs
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
ac.addrs = addrs
return true
// We were not connecting, so do nothing but update the addresses.
ac.mu.Unlock()
return
}
if equalAddresses(ac.addrs, addrs) {
return true
}
if ac.state == connectivity.Connecting {
return false
}
// ac.state is Ready, try to find the connected address.
var curAddrFound bool
for _, a := range addrs {
a.ServerName = ac.cc.getServerName(a)
if reflect.DeepEqual(ac.curAddr, a) {
curAddrFound = true
break
if ac.state == connectivity.Ready {
// Try to find the connected address.
for _, a := range addrs {
a.ServerName = ac.cc.getServerName(a)
if a.Equal(ac.curAddr) {
// We are connected to a valid address, so do nothing but
// update the addresses.
ac.mu.Unlock()
return
}
}
}
channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
if curAddrFound {
ac.addrs = addrs
// We are either connected to the wrong address or currently connecting.
// Stop the current iteration and restart.
ac.cancel()
ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
// We have to defer here because GracefulClose => Close => onClose, which
// requires locking ac.mu.
if ac.transport != nil {
defer ac.transport.GracefulClose()
ac.transport = nil
}
return curAddrFound
if len(addrs) == 0 {
ac.updateConnectivityState(connectivity.Idle, nil)
}
ac.mu.Unlock()
// Since we were connecting/connected, we should start a new connection
// attempt.
go ac.resetTransport()
}
// getServerName determines the serverName to be used in the connection
@ -1023,39 +1199,40 @@ func (cc *ClientConn) Close() error {
cc.mu.Unlock()
return ErrClientConnClosing
}
for cc.idlenessState == ccIdlenessStateExitingIdle {
cc.exitIdleCond.Wait()
}
conns := cc.conns
cc.conns = nil
cc.csMgr.updateState(connectivity.Shutdown)
pWrapper := cc.blockingpicker
rWrapper := cc.resolverWrapper
cc.resolverWrapper = nil
bWrapper := cc.balancerWrapper
idlenessMgr := cc.idlenessMgr
cc.mu.Unlock()
// The order of closing matters here since the balancer wrapper assumes the
// picker is closed before it is closed.
cc.blockingpicker.close()
if pWrapper != nil {
pWrapper.close()
}
if bWrapper != nil {
bWrapper.close()
}
if rWrapper != nil {
rWrapper.close()
}
if idlenessMgr != nil {
idlenessMgr.close()
}
for ac := range conns {
ac.tearDown(ErrClientConnClosing)
}
ted := &channelz.TraceEventDesc{
Desc: "Channel deleted",
Severity: channelz.CtInfo,
}
if cc.dopts.channelzParentID != nil {
ted.Parent = &channelz.TraceEventDesc{
Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()),
Severity: channelz.CtInfo,
}
}
channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
cc.addTraceEvent("deleted")
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
// trace reference to the entity being deleted, and thus prevent it from being
// deleted right away.
@ -1085,7 +1262,8 @@ type addrConn struct {
addrs []resolver.Address // All addresses that the resolver resolved to.
// Use updateConnectivityState for updating addrConn's connectivity state.
state connectivity.State
state connectivity.State
stateChan chan struct{} // closed and recreated on every state change.
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
@ -1099,6 +1277,9 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
// When changing states, reset the state change channel.
close(ac.stateChan)
ac.stateChan = make(chan struct{})
ac.state = s
if lastErr == nil {
channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
@ -1124,7 +1305,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
func (ac *addrConn) resetTransport() {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
acCtx := ac.ctx
if acCtx.Err() != nil {
ac.mu.Unlock()
return
}
@ -1152,15 +1334,14 @@ func (ac *addrConn) resetTransport() {
ac.updateConnectivityState(connectivity.Connecting, nil)
ac.mu.Unlock()
if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
ac.cc.resolveNow(resolver.ResolveNowOptions{})
// After exhausting all addresses, the addrConn enters
// TRANSIENT_FAILURE.
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
if acCtx.Err() != nil {
return
}
ac.mu.Lock()
ac.updateConnectivityState(connectivity.TransientFailure, err)
// Backoff.
@ -1175,13 +1356,13 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
case <-b:
timer.Stop()
case <-ac.ctx.Done():
case <-acCtx.Done():
timer.Stop()
return
}
ac.mu.Lock()
if ac.state != connectivity.Shutdown {
if acCtx.Err() == nil {
ac.updateConnectivityState(connectivity.Idle, err)
}
ac.mu.Unlock()
@ -1196,14 +1377,13 @@ func (ac *addrConn) resetTransport() {
// tryAllAddrs tries to create a connection to the addresses, and stops at
// the first successful one. It returns an error if no address was successfully
// connected, or updates ac appropriately with the new transport.
func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
var firstConnErr error
for _, addr := range addrs {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
if ctx.Err() != nil {
return errConnClosing
}
ac.mu.Lock()
ac.cc.mu.RLock()
ac.dopts.copts.KeepaliveParams = ac.cc.mkp
@ -1217,7 +1397,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
err := ac.createTransport(addr, copts, connectDeadline)
err := ac.createTransport(ctx, addr, copts, connectDeadline)
if err == nil {
return nil
}
@ -1234,19 +1414,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
// createTransport creates a connection to addr. It returns an error if the
// address was not successfully connected, or updates ac appropriately with the
// new transport.
func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
addr.ServerName = ac.cc.getServerName(addr)
hctx, hcancel := context.WithCancel(ac.ctx)
hctx, hcancel := context.WithCancel(ctx)
onClose := func(r transport.GoAwayReason) {
ac.mu.Lock()
defer ac.mu.Unlock()
// adjust params based on GoAwayReason
ac.adjustParams(r)
if ac.state == connectivity.Shutdown {
// Already shut down. tearDown() already cleared the transport and
// canceled hctx via ac.ctx, and we expected this connection to be
// closed, so do nothing here.
if ctx.Err() != nil {
// Already shut down or connection attempt canceled. tearDown() or
// updateAddrs() already cleared the transport and canceled hctx
// via ac.ctx, and we expected this connection to be closed, so do
// nothing here.
return
}
hcancel()
@ -1265,7 +1446,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
ac.updateConnectivityState(connectivity.Idle, nil)
}
connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
defer cancel()
copts.ChannelzParentID = ac.channelzID
@ -1282,7 +1463,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
ac.mu.Lock()
defer ac.mu.Unlock()
if ac.state == connectivity.Shutdown {
if ctx.Err() != nil {
// This can happen if the subConn was removed while in `Connecting`
// state. tearDown() would have set the state to `Shutdown`, but
// would not have closed the transport since ac.transport would not
@ -1294,6 +1475,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
// The error we pass to Close() is immaterial since there are no open
// streams at this point, so no trailers with error details will be sent
// out. We just need to pass a non-nil error.
//
// This can also happen when updateAddrs is called during a connection
// attempt.
go newTr.Close(transport.ErrConnClosing)
return nil
}
@ -1401,6 +1585,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
return nil
}
// getTransport waits until the addrconn is ready and returns the transport.
// If the context expires first, returns an appropriate status. If the
// addrConn is stopped first, returns an Unavailable status error.
func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
for ctx.Err() == nil {
ac.mu.Lock()
t, state, sc := ac.transport, ac.state, ac.stateChan
ac.mu.Unlock()
if state == connectivity.Ready {
return t, nil
}
if state == connectivity.Shutdown {
return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
}
select {
case <-ctx.Done():
case <-sc:
}
}
return nil, status.FromContextError(ctx.Err()).Err()
}
// tearDown starts to tear down the addrConn.
//
// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
@ -1552,7 +1759,14 @@ func (cc *ClientConn) connectionError() error {
return cc.lastConnectionError
}
func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
// parseTargetAndFindResolver parses the user's dial target and stores the
// parsed target in `cc.parsedTarget`.
//
// The resolver to use is determined based on the scheme in the parsed target
// and the same is stored in `cc.resolverBuilder`.
//
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
func (cc *ClientConn) parseTargetAndFindResolver() error {
channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target)
var rb resolver.Builder
@ -1564,7 +1778,8 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb != nil {
cc.parsedTarget = parsedTarget
return rb, nil
cc.resolverBuilder = rb
return nil
}
}
@ -1579,15 +1794,16 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) {
parsedTarget, err = parseTarget(canonicalTarget)
if err != nil {
channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err)
return nil, err
return err
}
channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb == nil {
return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
}
cc.parsedTarget = parsedTarget
return rb, nil
cc.resolverBuilder = rb
return nil
}
// parseTarget uses RFC 3986 semantics to parse the given target into a
@ -1610,7 +1826,15 @@ func parseTarget(target string) (resolver.Target, error) {
// - user specified authority override using `WithAuthority` dial option
// - creds' notion of server name for the authentication handshake
// - endpoint from dial target of the form "scheme://[authority]/endpoint"
func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) {
//
// Stores the determined authority in `cc.authority`.
//
// Returns a non-nil error if the authority returned by the transport
// credentials does not match the authority configured through the dial option.
//
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
func (cc *ClientConn) determineAuthority() error {
dopts := cc.dopts
// Historically, we had two options for users to specify the serverName or
// authority for a channel. One was through the transport credentials
// (either in its constructor, or through the OverrideServerName() method).
@ -1627,25 +1851,58 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err
}
authorityFromDialOption := dopts.authority
if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
}
endpoint := cc.parsedTarget.Endpoint()
target := cc.target
switch {
case authorityFromDialOption != "":
return authorityFromDialOption, nil
cc.authority = authorityFromDialOption
case authorityFromCreds != "":
return authorityFromCreds, nil
cc.authority = authorityFromCreds
case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
// TODO: remove when the unix resolver implements optional interface to
// return channel authority.
return "localhost", nil
cc.authority = "localhost"
case strings.HasPrefix(endpoint, ":"):
return "localhost" + endpoint, nil
cc.authority = "localhost" + endpoint
default:
// TODO: Define an optional interface on the resolver builder to return
// the channel authority given the user's dial target. For resolvers
// which don't implement this interface, we will use the endpoint from
// "scheme://authority/endpoint" as the default authority.
return endpoint, nil
cc.authority = endpoint
}
channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
return nil
}
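A hedged sketch of the precedence just described, using placeholder names (this is not code from the diff): the WithAuthority dial option takes priority over the endpoint derived from the target, so the channel below would use "internal.example.com" as its :authority.

// Assumes: import "google.golang.org/grpc" and "google.golang.org/grpc/credentials/insecure".
func dialWithExplicitAuthority() (*grpc.ClientConn, error) {
	// authorityFromDialOption wins over the "10.0.0.1:443" endpoint.
	return grpc.Dial("dns:///10.0.0.1:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithAuthority("internal.example.com"),
	)
}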
// initResolverWrapper creates a ccResolverWrapper, which builds the name
// resolver. This method grabs the lock to assign the newly built resolver
// wrapper to the cc.resolverWrapper field.
func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error {
rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{
target: cc.parsedTarget,
builder: cc.resolverBuilder,
bOpts: resolver.BuildOptions{
DisableServiceConfig: cc.dopts.disableServiceConfig,
DialCreds: creds,
CredsBundle: cc.dopts.copts.CredsBundle,
Dialer: cc.dopts.copts.Dialer,
},
channelzID: cc.channelzID,
})
if err != nil {
return fmt.Errorf("failed to build resolver: %v", err)
}
// Resolver implementations may report a state update or an error inline when
// built (or right after), and this is handled in cc.updateResolverState.
// Also, an error from the resolver might lead to a re-resolution request
// from the balancer, which is handled in resolveNow() where
// `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here.
cc.mu.Lock()
cc.resolverWrapper = rw
cc.mu.Unlock()
return nil
}

View File

@ -77,6 +77,7 @@ type dialOptions struct {
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
defaultServiceConfigRawJSON *string
resolvers []resolver.Builder
idleTimeout time.Duration
}
// DialOption configures how we set up the connection.
@ -655,3 +656,23 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
o.resolvers = append(o.resolvers, rs...)
})
}
// WithIdleTimeout returns a DialOption that configures an idle timeout for the
// channel. If the channel is idle for the configured timeout, i.e. there are no
// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
// and as a result the name resolver and load balancer will be shut down. The
// channel will exit idle mode when the Connect() method is called or when an
// RPC is initiated.
//
// By default this feature is disabled, which can also be explicitly configured
// by passing zero to this function.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithIdleTimeout(d time.Duration) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.idleTimeout = d
})
}
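A minimal usage sketch for the new option, assuming a placeholder target and timeout value (the assumed imports are google.golang.org/grpc, google.golang.org/grpc/credentials/insecure, and time):

func dialWithIdleTimeout() (*grpc.ClientConn, error) {
	// After 30 minutes with no RPC activity the channel enters idle mode and
	// shuts down its resolver and balancer; the next RPC, or Connect(),
	// brings it back out of idle.
	return grpc.Dial("dns:///idle-demo.example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(30*time.Minute),
	)
}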

287
vendor/google.golang.org/grpc/idle.go generated vendored Normal file
View File

@ -0,0 +1,287 @@
/*
*
* Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import (
"fmt"
"math"
"sync"
"sync/atomic"
"time"
)
// For overriding in unit tests.
var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
return time.AfterFunc(d, f)
}
// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter
// and exit from idle mode.
type idlenessEnforcer interface {
exitIdleMode() error
enterIdleMode() error
}
// idlenessManager defines the functionality required to track RPC activity on a
// channel.
type idlenessManager interface {
onCallBegin() error
onCallEnd()
close()
}
type noopIdlenessManager struct{}
func (noopIdlenessManager) onCallBegin() error { return nil }
func (noopIdlenessManager) onCallEnd() {}
func (noopIdlenessManager) close() {}
// idlenessManagerImpl implements the idlenessManager interface. It uses atomic
// operations to synchronize access to shared state and a mutex to guarantee
// mutual exclusion in a critical section.
type idlenessManagerImpl struct {
// State accessed atomically.
lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback.
closed int32 // Boolean; True when the manager is closed.
// Can be accessed without atomics or mutex since these are set at creation
// time and read-only after that.
enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn.
timeout int64 // Idle timeout duration nanos stored as an int64.
// idleMu is used to guarantee mutual exclusion in two scenarios:
// - Opposing intentions:
// - a: Idle timeout has fired and handleIdleTimeout() is trying to put
// the channel in idle mode because the channel has been inactive.
// - b: At the same time an RPC is made on the channel, and onCallBegin()
// is trying to prevent the channel from going idle.
// - Competing intentions:
// - The channel is in idle mode and there are multiple RPCs starting at
// the same time, all trying to move the channel out of idle. Only one
// of them should succeed in doing so, while the other RPCs should
// piggyback on the first one and be successfully handled.
idleMu sync.RWMutex
actuallyIdle bool
timer *time.Timer
}
// newIdlenessManager creates a new idleness manager implementation for the
// given idle timeout.
func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager {
if idleTimeout == 0 {
return noopIdlenessManager{}
}
i := &idlenessManagerImpl{
enforcer: enforcer,
timeout: int64(idleTimeout),
}
i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout)
return i
}
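The wiring below is a hypothetical, same-package sketch of how a caller that satisfies idlenessEnforcer (in practice grpc.ClientConn) is expected to drive the manager; demoEnforcer and runOneCall are assumptions made for illustration only.

type demoEnforcer struct{}

func (demoEnforcer) enterIdleMode() error { return nil }
func (demoEnforcer) exitIdleMode() error  { return nil }

func runOneCall() error {
	mgr := newIdlenessManager(demoEnforcer{}, 30*time.Minute)
	defer mgr.close()

	if err := mgr.onCallBegin(); err != nil { // keeps the channel out of idle
		return err
	}
	defer mgr.onCallEnd() // records activity so the idle timer is pushed out
	// ... issue the RPC here ...
	return nil
}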
// resetIdleTimer resets the idle timer to the given duration. This method
// should only be called from the timer callback.
func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) {
i.idleMu.Lock()
defer i.idleMu.Unlock()
if i.timer == nil {
// Only close sets timer to nil. We are done.
return
}
// It is safe to ignore the return value from Reset() because this method is
// only ever called from the timer callback, which means the timer has
// already fired.
i.timer.Reset(d)
}
// handleIdleTimeout is the timer callback that is invoked upon expiry of the
// configured idle timeout. The channel is considered inactive if there are no
// ongoing calls and no RPC activity since the last time the timer fired.
func (i *idlenessManagerImpl) handleIdleTimeout() {
if i.isClosed() {
return
}
if atomic.LoadInt32(&i.activeCallsCount) > 0 {
i.resetIdleTimer(time.Duration(i.timeout))
return
}
// There has been activity on the channel since we last got here. Reset the
// timer and return.
if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
// Set the timer to fire after a duration of idle timeout, calculated
// from the time the most recent RPC completed.
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0)
i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano()))
return
}
// This CAS operation is extremely likely to succeed given that there has
// been no activity since the last time we were here. Setting the
// activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the
// channel is either in idle mode or is trying to get there.
if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) {
// This CAS operation can fail if an RPC started after we checked for
// activity at the top of this method, or one was ongoing from before
// the last time we were here. In both cases, reset the timer and return.
i.resetIdleTimer(time.Duration(i.timeout))
return
}
// Now that we've set the active calls count to -math.MaxInt32, it's time to
// actually move to idle mode.
if i.tryEnterIdleMode() {
// Successfully entered idle mode. No timer needed until we exit idle.
return
}
// Failed to enter idle mode due to a concurrent RPC that kept the channel
// active, or because of an error from the channel. Undo the attempt to
// enter idle, and reset the timer to try again later.
atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
i.resetIdleTimer(time.Duration(i.timeout))
}
// tryEnterIdleMode instructs the channel to enter idle mode. But before
// that, it performs a last-minute check to ensure that no new RPC has come in,
// making the channel active.
//
// Return value indicates whether or not the channel moved to idle mode.
//
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
func (i *idlenessManagerImpl) tryEnterIdleMode() bool {
i.idleMu.Lock()
defer i.idleMu.Unlock()
if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 {
// We raced and lost to a new RPC. Very rare, but stop entering idle.
return false
}
if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
// A very short RPC could have come in (and also finished) after we
// checked for calls count and activity in handleIdleTimeout(), but
// before the CAS operation. So, we need to check for activity again.
return false
}
// No new RPCs have come in since we last set the active calls count to
// -math.MaxInt32 in the timer callback. And since we have the lock, it is
// safe to enter idle mode now.
if err := i.enforcer.enterIdleMode(); err != nil {
logger.Errorf("Failed to enter idle mode: %v", err)
return false
}
// Successfully entered idle mode.
i.actuallyIdle = true
return true
}
// onCallBegin is invoked at the start of every RPC.
func (i *idlenessManagerImpl) onCallBegin() error {
if i.isClosed() {
return nil
}
if atomic.AddInt32(&i.activeCallsCount, 1) > 0 {
// Channel is not idle now. Set the activity bit and allow the call.
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
return nil
}
// Channel is either in idle mode or is in the process of moving to idle
// mode. Attempt to exit idle mode to allow this RPC.
if err := i.exitIdleMode(); err != nil {
// Undo the increment to calls count, and return an error causing the
// RPC to fail.
atomic.AddInt32(&i.activeCallsCount, -1)
return err
}
atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
return nil
}
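A worked example of the sentinel arithmetic used above (illustrative only): once the timer callback parks activeCallsCount at -math.MaxInt32, a handful of concurrent onCallBegin() increments cannot push the count above zero, so each of those RPCs still sees a non-positive value and takes the exit-idle path.

func sentinelDemo() {
	count := int32(-math.MaxInt32) // value handleIdleTimeout() parks the counter at
	for i := 0; i < 3; i++ {
		count++ // what each onCallBegin() increment would do
	}
	fmt.Println(count > 0) // false: the channel is still treated as idle
}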
// exitIdleMode instructs the channel to exit idle mode.
//
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
func (i *idlenessManagerImpl) exitIdleMode() error {
i.idleMu.Lock()
defer i.idleMu.Unlock()
if !i.actuallyIdle {
// This can happen in two scenarios:
// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
// tryEnterIdleMode(). But before the latter could grab the lock, an RPC
// came in and onCallBegin() noticed that the calls count is negative.
// - Channel is in idle mode, and multiple new RPCs come in at the same
// time, all of them notice a negative calls count in onCallBegin and get
// here. The first one to get the lock would get the channel to exit idle.
//
// Either way, nothing to do here.
return nil
}
if err := i.enforcer.exitIdleMode(); err != nil {
return fmt.Errorf("channel failed to exit idle mode: %v", err)
}
// Undo the idle entry process. This also respects any new RPC attempts.
atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
i.actuallyIdle = false
// Start a new timer to fire after the configured idle timeout.
i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout)
return nil
}
// onCallEnd is invoked at the end of every RPC.
func (i *idlenessManagerImpl) onCallEnd() {
if i.isClosed() {
return
}
// Record the time at which the most recent call finished.
atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano())
// Decrement the active calls count. This count can temporarily go negative
// when the timer callback is in the process of moving the channel to idle
// mode, but one or more RPCs come in and complete before the timer callback
// finishes moving the channel to idle mode.
atomic.AddInt32(&i.activeCallsCount, -1)
}
func (i *idlenessManagerImpl) isClosed() bool {
return atomic.LoadInt32(&i.closed) == 1
}
func (i *idlenessManagerImpl) close() {
atomic.StoreInt32(&i.closed, 1)
i.idleMu.Lock()
i.timer.Stop()
i.timer = nil
i.idleMu.Unlock()
}

View File

@ -32,6 +32,9 @@ var grpclogLogger = grpclog.Component("binarylog")
// Logger specifies MethodLoggers for method names with a Log call that
// takes a context.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
type Logger interface {
GetMethodLogger(methodName string) MethodLogger
}

View File

@ -49,6 +49,9 @@ func (g *callIDGenerator) reset() {
var idGen callIDGenerator
// MethodLogger is the sub-logger for each method.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
type MethodLogger interface {
Log(context.Context, LogEntryConfig)
}
@ -65,6 +68,9 @@ type TruncatingMethodLogger struct {
}
// NewTruncatingMethodLogger returns a new truncating method logger.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
return &TruncatingMethodLogger{
headerMaxLen: h,
@ -145,6 +151,9 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (trun
}
// LogEntryConfig represents the configuration for binary log entry.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
type LogEntryConfig interface {
toProto() *binlogpb.GrpcLogEntry
}

View File

@ -35,6 +35,7 @@ import "sync"
// internal/transport/transport.go for an example of this.
type Unbounded struct {
c chan interface{}
closed bool
mu sync.Mutex
backlog []interface{}
}
@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded {
// Put adds t to the unbounded buffer.
func (b *Unbounded) Put(t interface{}) {
b.mu.Lock()
defer b.mu.Unlock()
if b.closed {
return
}
if len(b.backlog) == 0 {
select {
case b.c <- t:
b.mu.Unlock()
return
default:
}
}
b.backlog = append(b.backlog, t)
b.mu.Unlock()
}
// Load sends the earliest buffered data, if any, onto the read channel
@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) {
// value from the read channel.
func (b *Unbounded) Load() {
b.mu.Lock()
defer b.mu.Unlock()
if b.closed {
return
}
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:
@ -72,7 +79,6 @@ func (b *Unbounded) Load() {
default:
}
}
b.mu.Unlock()
}
// Get returns a read channel on which values added to the buffer, via Put(),
@ -80,6 +86,20 @@ func (b *Unbounded) Load() {
//
// Upon reading a value from this channel, users are expected to call Load() to
// send the next buffered value onto the channel if there is any.
//
// If the unbounded buffer is closed, the read channel returned by this method
// is closed.
func (b *Unbounded) Get() <-chan interface{} {
return b.c
}
// Close closes the unbounded buffer.
func (b *Unbounded) Close() {
b.mu.Lock()
defer b.mu.Unlock()
if b.closed {
return
}
b.closed = true
close(b.c)
}
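A hedged sketch of the Put/Get/Load/Close contract from a caller's point of view; because the package is internal, this only compiles inside the grpc module, and the string items are placeholders (assumes imports of fmt and google.golang.org/grpc/internal/buffer):

func unboundedDemo() {
	b := buffer.NewUnbounded()
	b.Put("item-1") // staged directly on the channel
	b.Put("item-2") // channel already occupied, so this lands in the backlog
	for i := 0; i < 2; i++ {
		v := <-b.Get() // receive the value currently staged on the channel
		fmt.Println(v)
		b.Load() // stage the next backlogged value, if any
	}
	b.Close() // the channel returned by Get() is now closed
}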

View File

@ -36,6 +36,10 @@ var (
// "GRPC_RING_HASH_CAP". This does not override the default bounds
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
// PickFirstLBConfig is set if we should support configuration of the
// pick_first LB policy, which can be enabled by setting the environment
// variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true".
PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false)
)
func boolFromEnv(envVar string, def bool) bool {

View File

@ -28,9 +28,15 @@ const (
var (
// ObservabilityConfig is the json configuration for the gcp/observability
// package specified directly in the envObservabilityConfig env var.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ObservabilityConfig = os.Getenv(envObservabilityConfig)
// ObservabilityConfigFile is the json configuration for the
// gcp/observability specified in a file with the location specified in
// envObservabilityConfigFile env var.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
)

Some files were not shown because too many files have changed in this diff