mirror of https://github.com/knative/client.git
Vendor eventing v0.11.0 and run tests against it (#546)
- Pin contrib.go.opencensus.io/exporter/stackdriver@59d068f8d8ff5b653916aa30cdc4e13c7f15d56e
- Pin knative.dev/pkg@release-0.11
- Move `Destination` from (knative.dev/pkg) v1alpha1 to duckv1beta1
- Run tests against Eventing v0.11.0

parent 4f0eb99f8e
commit f0a40d1b4b
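
For context, a minimal illustrative sketch (not part of this commit) of what the `Destination` move means for callers such as SinkFlags.ResolveSink: the resolved sink is now a knative.dev/pkg duck/v1beta1 Destination instead of an apis/v1alpha1 one. It uses only the fields visible in the hunks below; the helper name buildSink and the name/namespace values are hypothetical.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1"
)

// buildSink mirrors the shape ResolveSink returns after this change:
// a duck-typed v1beta1 Destination whose Ref points at the addressable
// object (here a Knative Service) that should receive events.
func buildSink(name, namespace string) *duckv1beta1.Destination {
	return &duckv1beta1.Destination{
		Ref: &v1.ObjectReference{
			Kind:       "Service",
			APIVersion: "serving.knative.dev/v1alpha1",
			Name:       name,      // hypothetical example value
			Namespace:  namespace, // hypothetical example value
		},
	}
}

func main() {
	sink := buildSink("mysvc", "default")
	fmt.Printf("sink ref: %s/%s (%s)\n", sink.Ref.Namespace, sink.Ref.Name, sink.Ref.Kind)
}
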
go.mod: 8 changed lines
@@ -2,10 +2,9 @@ module knative.dev/client
require (
contrib.go.opencensus.io/exporter/prometheus v0.1.0 // indirect
contrib.go.opencensus.io/exporter/stackdriver v0.12.5 // indirect; indirect needed by knative serving
contrib.go.opencensus.io/exporter/stackdriver v0.12.9-0.20191108183826-59d068f8d8ff // indirect
github.com/google/go-containerregistry v0.0.0-20191029173801-50b26ee28691 // indirect
github.com/magiconair/properties v1.8.0
github.com/markbates/inflect v1.0.4 // indirect
github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a // indirect
github.com/mitchellh/go-homedir v1.1.0
github.com/pkg/errors v0.8.1
@@ -14,13 +13,14 @@ require (
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.4.0
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8
google.golang.org/api v0.14.0 // indirect
gotest.tools v2.2.0+incompatible
k8s.io/api v0.0.0-20191016110246-af539daaa43a
k8s.io/apimachinery v0.0.0-20191004115701-31ade1b30762
k8s.io/cli-runtime v0.0.0-20191016113937-7693ce2cae74
k8s.io/client-go v0.0.0-20191016110837-54936ba21026
knative.dev/eventing v0.10.0
knative.dev/pkg v0.0.0-20191107185656-884d50f09454
knative.dev/eventing v0.11.0
knative.dev/pkg v0.0.0-20191203221237-94a34e416c44
knative.dev/serving v0.11.0
knative.dev/test-infra v0.0.0-20191205012837-30f1a1f7b77e
sigs.k8s.io/yaml v1.1.0

go.sum: 59 changed lines
@ -2,12 +2,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
|
|||
cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w=
|
||||
cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg=
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A=
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.12.5 h1:roYxbw//uT5VGVKwTutsI/RShEO11XY8nBOetZEzihI=
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.12.5/go.mod h1:8x999/OcIPy5ivx/wDiV7Gx4D+VUPODf0mWRGRc5kSk=
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.12.9-0.20191108183826-59d068f8d8ff h1:g4QkFNN0ak+sCs/jqbhYLNkQaF1NVaKVoQ4Xm1RV3wM=
|
||||
contrib.go.opencensus.io/exporter/stackdriver v0.12.9-0.20191108183826-59d068f8d8ff/go.mod h1:XyyafDnFOsqoxHJgTFycKZMrRUrPThLh2iYTJF6uoO0=
|
||||
github.com/Azure/azure-sdk-for-go v19.1.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
|
|
@ -30,8 +34,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
|
|||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/aws/aws-sdk-go v1.15.90/go.mod h1:es1KtYUFs7le0xQ3rOihkuoVD90z7D0fR2Qm4S00/gU=
|
||||
github.com/aws/aws-sdk-go v1.22.1 h1://WJvJi9iq/i5TWHuK3hIC23xCZYH7Qv7SIN2vZVqxY=
|
||||
github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.23.20 h1:2CBuL21P0yKdZN5urf2NxKa1ha8fhnY+A3pBCHFeZoA=
|
||||
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
|
|
@ -104,8 +108,6 @@ github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
|
|||
github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE=
|
||||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gobuffalo/envy v1.6.5 h1:X3is06x7v0nW2xiy2yFbbIjwHz57CD6z6MkvqULTCm8=
|
||||
github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ=
|
||||
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
|
|
@ -117,6 +119,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
|
|||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
|
|
@ -170,8 +174,6 @@ github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCO
|
|||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
|
||||
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
|
|
@ -184,8 +186,6 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht
|
|||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
|
||||
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
|
||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE=
|
||||
|
|
@ -213,8 +213,6 @@ github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3
|
|||
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g=
|
||||
github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs=
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a h1:+J2gw7Bw77w/fbK7wnNJJDKmw1IbWft2Ul5BzrG1Qm8=
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
|
|
@ -322,9 +320,12 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr
|
|||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.1 h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50=
|
||||
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
|
|
@ -371,11 +372,12 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR
|
|||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68=
|
||||
golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2 h1:4dVFTC832rPn4pomLSz1vA+are2+dU19w1H8OngV7nc=
|
||||
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
|
||||
|
|
@ -389,6 +391,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
|
@ -407,8 +411,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M=
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
|
@ -452,6 +455,11 @@ gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmK
|
|||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.14.0 h1:uMf5uLi4eQMRrMKhCplNik4U4H8Z6C1br3zOtAa/aDE=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
|
@ -459,20 +467,23 @@ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4
|
|||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
|
||||
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM=
|
||||
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
|
||||
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
|
|
@@ -528,10 +539,10 @@ k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKf
k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
knative.dev/eventing v0.10.0 h1:dDrOjBWWgloOCUjxR8ixUIh8nqCTbmM5BbGo8rvf1bc=
knative.dev/eventing v0.10.0/go.mod h1:UxweNv8yXhsdHJitcb9R6rmfNaUD2DFi9GWwNRyIs58=
knative.dev/pkg v0.0.0-20191107185656-884d50f09454 h1:nkslWFyRWaJp3nPDm+GSQOSvN8NpZg8qIYTR+XssNNg=
knative.dev/pkg v0.0.0-20191107185656-884d50f09454/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q=
knative.dev/eventing v0.11.0 h1:g+vzgmRzYQIrADd8To3MskGdXxIhWXbUhadALPloU10=
knative.dev/eventing v0.11.0/go.mod h1:UxweNv8yXhsdHJitcb9R6rmfNaUD2DFi9GWwNRyIs58=
knative.dev/pkg v0.0.0-20191203221237-94a34e416c44 h1:WzTw8tbzXXHCFk5zOIoIp3H9JgCQb9qcNOfPgucEU4k=
knative.dev/pkg v0.0.0-20191203221237-94a34e416c44/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q=
knative.dev/serving v0.11.0 h1:FK8aPpMuiBHZbfBfT+Avxyk11TaIlOMDEw/Va2sw1w0=
knative.dev/serving v0.11.0/go.mod h1:x2n255JS2XBI39tmjZ8CwTxIf9EKNMCrkVuiOttLRm0=
knative.dev/test-infra v0.0.0-20191205012837-30f1a1f7b77e h1:QTlOxV+M072CNdbKkqn6gmLWPETuLs+EiZ/pOv1jcrw=
@@ -20,7 +20,7 @@ import (

"github.com/spf13/cobra"
v1 "k8s.io/api/core/v1"
apisv1alpha1 "knative.dev/pkg/apis/v1alpha1"
duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1"

"knative.dev/client/pkg/serving/v1alpha1"
)
@@ -33,7 +33,7 @@ func (i *SinkFlags) Add(cmd *cobra.Command) {
cmd.Flags().StringVarP(&i.sink, "sink", "s", "", "Addressable sink for events")
}

func (i *SinkFlags) ResolveSink(client v1alpha1.KnServingClient) (*apisv1alpha1.Destination, error) {
func (i *SinkFlags) ResolveSink(client v1alpha1.KnServingClient) (*duckv1beta1.Destination, error) {
if i.sink == "" {
return nil, nil
}
@@ -44,7 +44,7 @@ func (i *SinkFlags) ResolveSink(client v1alpha1.KnServingClient) (*apisv1alpha1.
if err != nil {
return nil, err
}
return &apisv1alpha1.Destination{
return &duckv1beta1.Destination{
Ref: &v1.ObjectReference{
Kind: service.Kind,
APIVersion: service.APIVersion,
@@ -27,7 +27,7 @@ import (
"knative.dev/client/pkg/kn/commands"
"knative.dev/client/pkg/util"
"knative.dev/eventing/pkg/apis/sources/v1alpha1"
apisv1alpha1 "knative.dev/pkg/apis/v1alpha1"
duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1"
)

var (
@@ -87,7 +87,7 @@ func TestApiServerSourceCreate(t *testing.T) {
}},
ServiceAccountName: "myaccountname",
Mode: "Ref",
Sink: &apisv1alpha1.Destination{
Sink: &duckv1beta1.Destination{
Ref: &v1.ObjectReference{
Kind: "Service",
APIVersion: "serving.knative.dev/v1alpha1",
@@ -20,5 +20,6 @@
# latest release version.

export KNATIVE_SERVING_VERSION="0.11.0"
export KNATIVE_EVENTING_VERSION="0.10.2"
export KNATIVE_EVENTING_VERSION="0.11.0"

$(dirname $0)/presubmit-tests.sh --integration-tests
@ -227,6 +227,9 @@ func InternalIP() (string, error) { return defaultClient.InternalIP() }
|
|||
// ExternalIP returns the instance's primary external (public) IP address.
|
||||
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
|
||||
|
||||
// Email calls Client.Email on the default client.
|
||||
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
|
||||
|
||||
// Hostname returns the instance's hostname. This will be of the form
|
||||
// "<instanceID>.c.<projID>.internal".
|
||||
func Hostname() (string, error) { return defaultClient.Hostname() }
|
||||
|
|
@ -367,6 +370,16 @@ func (c *Client) InternalIP() (string, error) {
|
|||
return c.getTrimmed("instance/network-interfaces/0/ip")
|
||||
}
|
||||
|
||||
// Email returns the email address associated with the service account.
|
||||
// The account may be empty or the string "default" to use the instance's
|
||||
// main account.
|
||||
func (c *Client) Email(serviceAccount string) (string, error) {
|
||||
if serviceAccount == "" {
|
||||
serviceAccount = "default"
|
||||
}
|
||||
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
|
||||
}
|
||||
|
||||
// ExternalIP returns the instance's primary external (public) IP address.
|
||||
func (c *Client) ExternalIP() (string, error) {
|
||||
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
||||
|
|
|
|||
|
|
@ -1,4 +1,17 @@
|
|||
#!/bin/bash
|
||||
# Copyright 2019 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
today=$(date +%Y%m%d)
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ import (
|
|||
|
||||
// Repo is the current version of the client libraries in this
|
||||
// repo. It should be a date in YYYYMMDD format.
|
||||
const Repo = "20180226"
|
||||
const Repo = "20190802"
|
||||
|
||||
// Go returns the Go runtime version. The returned string
|
||||
// has no whitespace.
|
||||
|
|
|
|||
|
|
@ -47,6 +47,8 @@ func defaultAlertPolicyClientOptions() []option.ClientOption {
|
|||
return []option.ClientOption{
|
||||
option.WithEndpoint("monitoring.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -102,4 +102,4 @@ func versionGo() string {
|
|||
return "UNKNOWN"
|
||||
}
|
||||
|
||||
const versionClient = "20190716"
|
||||
const versionClient = "20190819"
|
||||
|
|
|
|||
|
|
@ -49,6 +49,8 @@ func defaultGroupClientOptions() []option.ClientOption {
|
|||
return []option.ClientOption{
|
||||
option.WithEndpoint("monitoring.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -52,6 +52,8 @@ func defaultMetricClientOptions() []option.ClientOption {
|
|||
return []option.ClientOption{
|
||||
option.WithEndpoint("monitoring.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -36,19 +36,24 @@ import (
|
|||
|
||||
// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient.
|
||||
type NotificationChannelCallOptions struct {
|
||||
ListNotificationChannelDescriptors []gax.CallOption
|
||||
GetNotificationChannelDescriptor []gax.CallOption
|
||||
ListNotificationChannels []gax.CallOption
|
||||
GetNotificationChannel []gax.CallOption
|
||||
CreateNotificationChannel []gax.CallOption
|
||||
UpdateNotificationChannel []gax.CallOption
|
||||
DeleteNotificationChannel []gax.CallOption
|
||||
ListNotificationChannelDescriptors []gax.CallOption
|
||||
GetNotificationChannelDescriptor []gax.CallOption
|
||||
ListNotificationChannels []gax.CallOption
|
||||
GetNotificationChannel []gax.CallOption
|
||||
CreateNotificationChannel []gax.CallOption
|
||||
UpdateNotificationChannel []gax.CallOption
|
||||
DeleteNotificationChannel []gax.CallOption
|
||||
SendNotificationChannelVerificationCode []gax.CallOption
|
||||
GetNotificationChannelVerificationCode []gax.CallOption
|
||||
VerifyNotificationChannel []gax.CallOption
|
||||
}
|
||||
|
||||
func defaultNotificationChannelClientOptions() []option.ClientOption {
|
||||
return []option.ClientOption{
|
||||
option.WithEndpoint("monitoring.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -68,13 +73,16 @@ func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions {
|
|||
},
|
||||
}
|
||||
return &NotificationChannelCallOptions{
|
||||
ListNotificationChannelDescriptors: retry[[2]string{"default", "idempotent"}],
|
||||
GetNotificationChannelDescriptor: retry[[2]string{"default", "idempotent"}],
|
||||
ListNotificationChannels: retry[[2]string{"default", "idempotent"}],
|
||||
GetNotificationChannel: retry[[2]string{"default", "idempotent"}],
|
||||
CreateNotificationChannel: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateNotificationChannel: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteNotificationChannel: retry[[2]string{"default", "idempotent"}],
|
||||
ListNotificationChannelDescriptors: retry[[2]string{"default", "idempotent"}],
|
||||
GetNotificationChannelDescriptor: retry[[2]string{"default", "idempotent"}],
|
||||
ListNotificationChannels: retry[[2]string{"default", "idempotent"}],
|
||||
GetNotificationChannel: retry[[2]string{"default", "idempotent"}],
|
||||
CreateNotificationChannel: retry[[2]string{"default", "non_idempotent"}],
|
||||
UpdateNotificationChannel: retry[[2]string{"default", "non_idempotent"}],
|
||||
DeleteNotificationChannel: retry[[2]string{"default", "idempotent"}],
|
||||
SendNotificationChannelVerificationCode: retry[[2]string{"default", "non_idempotent"}],
|
||||
GetNotificationChannelVerificationCode: retry[[2]string{"default", "idempotent"}],
|
||||
VerifyNotificationChannel: retry[[2]string{"default", "idempotent"}],
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -301,6 +309,76 @@ func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Contex
|
|||
return err
|
||||
}
|
||||
|
||||
// SendNotificationChannelVerificationCode causes a verification code to be delivered to the channel. The code
|
||||
// can then be supplied in VerifyNotificationChannel to verify the channel.
|
||||
func (c *NotificationChannelClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.SendNotificationChannelVerificationCode[0:len(c.CallOptions.SendNotificationChannelVerificationCode):len(c.CallOptions.SendNotificationChannelVerificationCode)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.notificationChannelClient.SendNotificationChannelVerificationCode(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetNotificationChannelVerificationCode requests a verification code for an already verified channel that can then
|
||||
// be used in a call to VerifyNotificationChannel() on a different channel
|
||||
// with an equivalent identity in the same or in a different project. This
|
||||
// makes it possible to copy a channel between projects without requiring
|
||||
// manual reverification of the channel. If the channel is not in the
|
||||
// verified state, this method will fail (in other words, this may only be
|
||||
// used if the SendNotificationChannelVerificationCode and
|
||||
// VerifyNotificationChannel paths have already been used to put the given
|
||||
// channel into the verified state).
|
||||
//
|
||||
// There is no guarantee that the verification codes returned by this method
|
||||
// will be of a similar structure or form as the ones that are delivered
|
||||
// to the channel via SendNotificationChannelVerificationCode; while
|
||||
// VerifyNotificationChannel() will recognize both the codes delivered via
|
||||
// SendNotificationChannelVerificationCode() and returned from
|
||||
// GetNotificationChannelVerificationCode(), it is typically the case that
|
||||
// the verification codes delivered via
|
||||
// SendNotificationChannelVerificationCode() will be shorter and also
|
||||
// have a shorter expiration (e.g. codes such as "G-123456") whereas
|
||||
// GetVerificationCode() will typically return a much longer, websafe base
|
||||
// 64 encoded string that has a longer expiration time.
|
||||
func (c *NotificationChannelClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.GetNotificationChannelVerificationCode[0:len(c.CallOptions.GetNotificationChannelVerificationCode):len(c.CallOptions.GetNotificationChannelVerificationCode)], opts...)
|
||||
var resp *monitoringpb.GetNotificationChannelVerificationCodeResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.notificationChannelClient.GetNotificationChannelVerificationCode(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// VerifyNotificationChannel verifies a NotificationChannel by proving receipt of the code
|
||||
// delivered to the channel as a result of calling
|
||||
// SendNotificationChannelVerificationCode.
|
||||
func (c *NotificationChannelClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
|
||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
||||
opts = append(c.CallOptions.VerifyNotificationChannel[0:len(c.CallOptions.VerifyNotificationChannel):len(c.CallOptions.VerifyNotificationChannel)], opts...)
|
||||
var resp *monitoringpb.NotificationChannel
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.notificationChannelClient.VerifyNotificationChannel(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor.
|
||||
type NotificationChannelDescriptorIterator struct {
|
||||
items []*monitoringpb.NotificationChannelDescriptor
|
||||
|
|
|
|||
|
|
@ -48,6 +48,8 @@ func defaultUptimeCheckClientOptions() []option.ClientOption {
|
|||
return []option.ClientOption{
|
||||
option.WithEndpoint("monitoring.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -102,4 +102,4 @@ func versionGo() string {
|
|||
return "UNKNOWN"
|
||||
}
|
||||
|
||||
const versionClient = "20190716"
|
||||
const versionClient = "20190819"
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ package trace
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
|
|
@ -41,6 +42,8 @@ func defaultClientOptions() []option.ClientOption {
|
|||
return []option.ClientOption{
|
||||
option.WithEndpoint("cloudtrace.googleapis.com:443"),
|
||||
option.WithScopes(DefaultAuthScopes()...),
|
||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,5 +1,10 @@
|
|||
/vendor/
|
||||
# GoLand IDEA
|
||||
/.idea/
|
||||
*.iml
|
||||
|
||||
# VS Code
|
||||
.vscode
|
||||
|
||||
# Coverage
|
||||
coverage.txt
|
||||
coverage.html
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
|
||||
go_import_path: contrib.go.opencensus.io/exporter/stackdriver
|
||||
|
||||
|
|
@ -9,16 +9,12 @@ env:
|
|||
global:
|
||||
GO111MODULE=on
|
||||
|
||||
before_script:
|
||||
- GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
|
||||
- PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
|
||||
install:
|
||||
- go mod download
|
||||
- make install-tools
|
||||
|
||||
script:
|
||||
- go build ./... # Ensure dependency updates don't break build
|
||||
- if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
|
||||
- go vet ./...
|
||||
- go test -v -race $PKGS # Run all the tests with the race detector enabled
|
||||
- GO111MODULE=off go get -t ./...
|
||||
- GO111MODULE=off go build ./...
|
||||
- GO111MODULE=off go test -v -race $PKGS # Make sure tests still pass when not using Go modules.
|
||||
- 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
|
||||
- make travis-ci
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
|
|
@ -1,81 +0,0 @@
|
|||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata","internal/version","monitoring/apiv3","trace/apiv2"]
|
||||
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
|
||||
version = "v0.23.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/empty","ptypes/struct","ptypes/timestamp","ptypes/wrappers"]
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/googleapis/gax-go"
|
||||
packages = ["."]
|
||||
revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
|
||||
version = "v2.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "go.opencensus.io"
|
||||
packages = [".","exporter/stackdriver/propagation","exporterutil","internal","internal/tagencoding","plugin/ocgrpc","plugin/ochttp","plugin/ochttp/propagation/b3","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation"]
|
||||
revision = "5897c5ce32247fc8af19c7710abd96e3304fb43c"
|
||||
version = "v0.20.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
|
||||
revision = "1e491301e022f8f977054da4c2d852decd59571f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [".","google","internal","jws","jwt"]
|
||||
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["semaphore"]
|
||||
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||
|
||||
[[projects]]
|
||||
name = "golang.org/x/text"
|
||||
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/api"
|
||||
packages = ["googleapi/transport","internal","iterator","option","support/bundler","transport","transport/grpc","transport/http"]
|
||||
revision = "8e296ef260056b6323d10727db40512dac6d92d5"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/socket","internal/urlfetch","socket","urlfetch"]
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/api/annotations","googleapis/api/distribution","googleapis/api/label","googleapis/api/metric","googleapis/api/monitoredres","googleapis/devtools/cloudtrace/v2","googleapis/monitoring/v3","googleapis/rpc/code","googleapis/rpc/status","protobuf/field_mask"]
|
||||
revision = "81158efcc9f219c511e4d3c0d61a0e6e49c01a24"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [".","balancer","balancer/base","balancer/roundrobin","channelz","codes","connectivity","credentials","credentials/oauth","encoding","encoding/proto","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
|
||||
revision = "41344da2231b913fa3d983840a57a6b1b7b631a1"
|
||||
version = "v1.12.0"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "d587e278f7302f82cb7f5c14e5e7ce831c84f198c05ede6c16a8afa4d6112f9e"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
|
@ -1,62 +0,0 @@
|
|||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
#
|
||||
# [prune]
|
||||
# non-go = false
|
||||
# go-tests = true
|
||||
# unused-packages = true
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "cloud.google.com/go"
|
||||
version = ">=0.23.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/golang/protobuf"
|
||||
version = "1.1.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "go.opencensus.io"
|
||||
version = ">=0.20.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/api"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
|
||||
[[constraint]]
|
||||
name = "google.golang.org/grpc"
|
||||
version = "1.12.0"
|
||||
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
# TODO: Fix this on windows.
|
||||
ALL_SRC := $(shell find . -name '*.go' \
|
||||
-not -path './vendor/*' \
|
||||
-not -path '*/gen-go/*' \
|
||||
-not -path '*/internal/testpb/*' \
|
||||
-not -name 'tools.go' \
|
||||
-type f | sort)
|
||||
ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC))))
|
||||
|
||||
|
|
@ -10,20 +10,21 @@ GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=a
|
|||
GOTEST=go test
|
||||
GOFMT=gofmt
|
||||
GOLINT=golint
|
||||
GOIMPORTS=goimports
|
||||
GOVET=go vet
|
||||
EMBEDMD=embedmd
|
||||
STATICCHECK=staticcheck
|
||||
# TODO decide if we need to change these names.
|
||||
README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ')
|
||||
|
||||
.DEFAULT_GOAL := defaul-goal
|
||||
|
||||
.DEFAULT_GOAL := fmt-lint-vet-embedmd-test
|
||||
.PHONY: defaul-goal
|
||||
defaul-goal: fmt lint vet embedmd goimports staticcheck test
|
||||
|
||||
.PHONY: fmt-lint-vet-embedmd-test
|
||||
fmt-lint-vet-embedmd-test: fmt lint vet embedmd test
|
||||
|
||||
# TODO enable test-with-coverage in tavis
|
||||
# TODO: enable test-with-cover when find out why "scripts/check-test-files.sh: 4: set: Illegal option -o pipefail"
|
||||
.PHONY: travis-ci
|
||||
travis-ci: fmt lint vet embedmd test test-386
|
||||
travis-ci: fmt lint vet embedmd goimports staticcheck test test-386 test-with-coverage
|
||||
|
||||
all-pkgs:
|
||||
@echo $(ALL_PKGS) | tr ' ' '\n' | sort
|
||||
|
|
@ -41,7 +42,19 @@ test-386:
|
|||
|
||||
.PHONY: test-with-coverage
|
||||
test-with-coverage:
|
||||
@echo pre-compiling tests
|
||||
@time go test -i $(ALL_PKGS)
|
||||
$(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS)
|
||||
go tool cover -html=coverage.txt -o coverage.html
|
||||
|
||||
.PHONY: test-with-cover
|
||||
test-with-cover:
|
||||
@echo Verifying that all packages have test files to count in coverage
|
||||
@scripts/check-test-files.sh $(subst contrib.go.opencensus.io/exporter/stackdriver,./,$(ALL_PKGS))
|
||||
@echo pre-compiling tests
|
||||
@time go test -i $(ALL_PKGS)
|
||||
$(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS)
|
||||
go tool cover -html=coverage.txt -o coverage.html
|
||||
|
||||
.PHONY: fmt
|
||||
fmt:
|
||||
|
|
@ -88,8 +101,25 @@ embedmd:
|
|||
echo "Embedmd finished successfully"; \
|
||||
fi
|
||||
|
||||
.PHONY: goimports
|
||||
goimports:
|
||||
@IMPORTSOUT=`$(GOIMPORTS) -d . 2>&1`; \
|
||||
if [ "$$IMPORTSOUT" ]; then \
|
||||
echo "$(GOIMPORTS) FAILED => fix the following goimports errors:\n"; \
|
||||
echo "$$IMPORTSOUT\n"; \
|
||||
exit 1; \
|
||||
else \
|
||||
echo "Goimports finished successfully"; \
|
||||
fi
|
||||
|
||||
.PHONY: staticcheck
|
||||
staticcheck:
|
||||
$(STATICCHECK) ./...
|
||||
|
||||
.PHONY: install-tools
|
||||
install-tools:
|
||||
go get -u golang.org/x/tools/cmd/cover
|
||||
go get -u golang.org/x/lint/golint
|
||||
go get -u github.com/rakyll/embedmd
|
||||
GO111MODULE=on go install \
|
||||
golang.org/x/lint/golint \
|
||||
golang.org/x/tools/cmd/goimports \
|
||||
github.com/rakyll/embedmd \
|
||||
honnef.co/go/tools/cmd/staticcheck
|
||||
|
|
|
|||
|
|
@ -1,19 +1,26 @@
|
|||
module contrib.go.opencensus.io/exporter/stackdriver
|
||||
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.43.0
|
||||
github.com/aws/aws-sdk-go v1.22.1
|
||||
cloud.google.com/go v0.45.1
|
||||
github.com/aws/aws-sdk-go v1.23.20
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/golang/protobuf v1.3.2
|
||||
github.com/google/go-cmp v0.3.1
|
||||
github.com/hashicorp/golang-lru v0.5.3 // indirect
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024
|
||||
github.com/stretchr/testify v1.3.0 // indirect
|
||||
go.opencensus.io v0.22.0
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
|
||||
go.opencensus.io v0.22.1
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422
|
||||
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa // indirect
|
||||
google.golang.org/api v0.7.0
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64
|
||||
google.golang.org/grpc v1.22.1
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect
|
||||
golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8 // indirect
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0
|
||||
google.golang.org/api v0.10.0
|
||||
google.golang.org/appengine v1.6.2 // indirect
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51
|
||||
google.golang.org/grpc v1.23.1
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc
|
||||
)
|
||||
|
|
|
|||
|
|
@ -3,14 +3,20 @@ cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
|
|||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.43.0 h1:banaiRPAM8kUVYneOSkhgcDsLzEvL25FinuiSZaH/2w=
|
||||
cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/aws/aws-sdk-go v1.22.1 h1://WJvJi9iq/i5TWHuK3hIC23xCZYH7Qv7SIN2vZVqxY=
|
||||
github.com/aws/aws-sdk-go v1.22.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.23.20 h1:2CBuL21P0yKdZN5urf2NxKa1ha8fhnY+A3pBCHFeZoA=
|
||||
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
|
|
@ -18,6 +24,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
|||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
|
|
@ -46,10 +54,9 @@ github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCO
|
|||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
|
||||
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
|
|
@ -59,6 +66,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
|
|||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.1 h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50=
|
||||
go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
|
|
@ -68,6 +77,7 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk
|
|||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
|
@ -82,8 +92,8 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn
|
|||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2 h1:4dVFTC832rPn4pomLSz1vA+are2+dU19w1H8OngV7nc=
|
||||
golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
|
@ -98,6 +108,8 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0
|
|||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
|
@ -107,8 +119,8 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M=
|
||||
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8 h1:41hwlulw1prEMBxLQSlMSux1zxJf07B3WPsdjJlKZxE=
|
||||
golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
|
||||
|
|
@ -127,17 +139,24 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn
|
|||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.10.0 h1:7tmAxx3oKE98VMZ+SBZzvYYWRQ9HODBxmC8mXUsraSQ=
|
||||
google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.2 h1:j8RI1yW0SkI+paT6uGwMlrMI/6zwYA6/CFil8rxOzGI=
|
||||
google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
|
|
@ -145,19 +164,21 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRn
|
|||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610 h1:Ygq9/SRJX9+dU0WCIICM8RkWvDw03lvB77hrhJnpxfU=
|
||||
google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.22.1 h1:/7cs52RnTJmD43s3uxzlq2U7nqVTd/37viQwMrMNlOM=
|
||||
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
|
|
|
|||
|
|
@ -21,8 +21,9 @@ directly to Stackdriver Metrics.
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
|
|
@ -34,15 +35,11 @@ import (
|
|||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
||||
"contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
|
||||
"go.opencensus.io/metric/metricdata"
|
||||
"go.opencensus.io/resource"
|
||||
)
|
||||
|
||||
var (
|
||||
errLableExtraction = errors.New("error extracting labels")
|
||||
errUnspecifiedMetricKind = errors.New("metric kind is unpsecified")
|
||||
)
|
||||
|
||||
const (
|
||||
exemplarAttachmentTypeString = "type.googleapis.com/google.protobuf.StringValue"
|
||||
exemplarAttachmentTypeSpanCtx = "type.googleapis.com/google.monitoring.v3.SpanContext"
|
||||
|
|
@ -73,9 +70,11 @@ func (se *statsExporter) handleMetricsUpload(metrics []*metricdata.Metric) {
|
|||
}
|
||||
|
||||
func (se *statsExporter) uploadMetrics(metrics []*metricdata.Metric) error {
|
||||
ctx, cancel := se.o.newContextWithTimeout()
|
||||
ctx, cancel := newContextWithTimeout(se.o.Context, se.o.Timeout)
|
||||
defer cancel()
|
||||
|
||||
var errors []error
|
||||
|
||||
ctx, span := trace.StartSpan(
|
||||
ctx,
|
||||
"contrib.go.opencensus.io/exporter/stackdriver.uploadMetrics",
|
||||
|
|
@ -87,7 +86,7 @@ func (se *statsExporter) uploadMetrics(metrics []*metricdata.Metric) error {
|
|||
// Now create the metric descriptor remotely.
|
||||
if err := se.createMetricDescriptorFromMetric(ctx, metric); err != nil {
|
||||
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
|
||||
//TODO: [rghetia] record error metrics.
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
|
@ -97,7 +96,7 @@ func (se *statsExporter) uploadMetrics(metrics []*metricdata.Metric) error {
|
|||
tsl, err := se.metricToMpbTs(ctx, metric)
|
||||
if err != nil {
|
||||
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
|
||||
//TODO: [rghetia] record error metrics.
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
if tsl != nil {
|
||||
|
|
@ -116,26 +115,35 @@ func (se *statsExporter) uploadMetrics(metrics []*metricdata.Metric) error {
|
|||
for _, ctsreq := range ctsreql {
|
||||
if err := createTimeSeries(ctx, se.c, ctsreq); err != nil {
|
||||
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
|
||||
// TODO(@rghetia): record error metrics
|
||||
// return err
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
numErrors := len(errors)
|
||||
if numErrors == 0 {
|
||||
return nil
|
||||
} else if numErrors == 1 {
|
||||
return errors[0]
|
||||
}
|
||||
errMsgs := make([]string, 0, numErrors)
|
||||
for _, err := range errors {
|
||||
errMsgs = append(errMsgs, err.Error())
|
||||
}
|
||||
return fmt.Errorf("[%s]", strings.Join(errMsgs, "; "))
|
||||
}
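For illustration, the error-aggregation pattern used by uploadMetrics above (nil for no errors, the single error unchanged, otherwise one error joining all messages) can be written as a standalone helper. This is a sketch only; combineErrors is not part of the vendored code.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// combineErrors mirrors the aggregation above: nil for no errors, the sole
// error unchanged, otherwise a single error joining all messages.
func combineErrors(errs []error) error {
	switch len(errs) {
	case 0:
		return nil
	case 1:
		return errs[0]
	}
	msgs := make([]string, 0, len(errs))
	for _, err := range errs {
		msgs = append(msgs, err.Error())
	}
	return fmt.Errorf("[%s]", strings.Join(msgs, "; "))
}

func main() {
	fmt.Println(combineErrors(nil))                                       // <nil>
	fmt.Println(combineErrors([]error{errors.New("a"), errors.New("b")})) // [a; b]
}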
|
||||
|
||||
// metricToMpbTs converts a metric into a list of Stackdriver Monitoring v3 API TimeSeries
|
||||
// but it doesn't invoke any remote API.
|
||||
func (se *statsExporter) metricToMpbTs(ctx context.Context, metric *metricdata.Metric) ([]*monitoringpb.TimeSeries, error) {
|
||||
if metric == nil {
|
||||
return nil, errNilMetric
|
||||
return nil, errNilMetricOrMetricDescriptor
|
||||
}
|
||||
|
||||
resource := se.metricRscToMpbRsc(metric.Resource)
|
||||
|
||||
metricName := metric.Descriptor.Name
|
||||
metricType, _ := se.metricTypeFromProto(metricName)
|
||||
metricType := se.metricTypeFromProto(metricName)
|
||||
metricLabelKeys := metric.Descriptor.LabelKeys
|
||||
metricKind, _ := metricDescriptorTypeToMetricKind(metric)
|
||||
|
||||
|
|
@ -159,12 +167,26 @@ func (se *statsExporter) metricToMpbTs(ctx context.Context, metric *metricdata.M
|
|||
// TODO: (@rghetia) perhaps log this error from labels extraction, if non-nil.
|
||||
continue
|
||||
}
|
||||
|
||||
var rsc *monitoredrespb.MonitoredResource
|
||||
var mr monitoredresource.Interface
|
||||
if se.o.ResourceByDescriptor != nil {
|
||||
labels, mr = se.o.ResourceByDescriptor(&metric.Descriptor, labels)
|
||||
// TODO(rghetia): optimize this. It is inefficient to convert this for all metrics.
|
||||
rsc = convertMonitoredResourceToPB(mr)
|
||||
if rsc.Type == "" {
|
||||
rsc.Type = "global"
|
||||
rsc.Labels = nil
|
||||
}
|
||||
} else {
|
||||
rsc = resource
|
||||
}
|
||||
timeSeries = append(timeSeries, &monitoringpb.TimeSeries{
|
||||
Metric: &googlemetricpb.Metric{
|
||||
Type: metricType,
|
||||
Labels: labels,
|
||||
},
|
||||
Resource: resource,
|
||||
Resource: rsc,
|
||||
Points: sdPoints,
|
||||
})
|
||||
}
|
||||
|
|
@ -173,17 +195,21 @@ func (se *statsExporter) metricToMpbTs(ctx context.Context, metric *metricdata.M
|
|||
}
|
||||
|
||||
func metricLabelsToTsLabels(defaults map[string]labelValue, labelKeys []metricdata.LabelKey, labelValues []metricdata.LabelValue) (map[string]string, error) {
|
||||
// Perform this sanity check now.
|
||||
if len(labelKeys) != len(labelValues) {
|
||||
return nil, fmt.Errorf("length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues))
|
||||
}
|
||||
|
||||
if len(defaults)+len(labelKeys) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
labels := make(map[string]string)
|
||||
// Fill in the defaults firstly, irrespective of if the labelKeys and labelValues are mismatched.
|
||||
for key, label := range defaults {
|
||||
labels[sanitize(key)] = label.val
|
||||
}
|
||||
|
||||
// Perform this sanity check now.
|
||||
if len(labelKeys) != len(labelValues) {
|
||||
return labels, fmt.Errorf("Length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues))
|
||||
}
|
||||
|
||||
for i, labelKey := range labelKeys {
|
||||
labelValue := labelValues[i]
|
||||
labels[sanitize(labelKey.Key)] = labelValue.Value
|
||||
|
|
@ -195,6 +221,11 @@ func metricLabelsToTsLabels(defaults map[string]labelValue, labelKeys []metricda
|
|||
// createMetricDescriptorFromMetric creates a metric descriptor from the OpenCensus metric
|
||||
// and then creates it remotely using Stackdriver's API.
|
||||
func (se *statsExporter) createMetricDescriptorFromMetric(ctx context.Context, metric *metricdata.Metric) error {
|
||||
// Skip create metric descriptor if configured
|
||||
if se.o.SkipCMD {
|
||||
return nil
|
||||
}
|
||||
|
||||
se.metricMu.Lock()
|
||||
defer se.metricMu.Unlock()
|
||||
|
||||
|
|
@ -203,6 +234,11 @@ func (se *statsExporter) createMetricDescriptorFromMetric(ctx context.Context, m
|
|||
return nil
|
||||
}
|
||||
|
||||
if builtinMetric(se.metricTypeFromProto(name)) {
|
||||
se.metricDescriptors[name] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise, we encountered a cache-miss and
|
||||
// should create the metric descriptor remotely.
|
||||
inMD, err := se.metricToMpbMetricDescriptor(metric)
|
||||
|
|
@ -210,35 +246,21 @@ func (se *statsExporter) createMetricDescriptorFromMetric(ctx context.Context, m
|
|||
return err
|
||||
}
|
||||
|
||||
var md *googlemetricpb.MetricDescriptor
|
||||
if builtinMetric(inMD.Type) {
|
||||
gmrdesc := &monitoringpb.GetMetricDescriptorRequest{
|
||||
Name: inMD.Name,
|
||||
}
|
||||
md, err = getMetricDescriptor(ctx, se.c, gmrdesc)
|
||||
} else {
|
||||
|
||||
cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
|
||||
Name: fmt.Sprintf("projects/%s", se.o.ProjectID),
|
||||
MetricDescriptor: inMD,
|
||||
}
|
||||
md, err = createMetricDescriptor(ctx, se.c, cmrdesc)
|
||||
if err = se.createMetricDescriptor(ctx, inMD); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// Now record the metric as having been created.
|
||||
se.metricDescriptors[name] = md
|
||||
}
|
||||
|
||||
return err
|
||||
// Now record the metric as having been created.
|
||||
se.metricDescriptors[name] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (se *statsExporter) metricToMpbMetricDescriptor(metric *metricdata.Metric) (*googlemetricpb.MetricDescriptor, error) {
|
||||
if metric == nil {
|
||||
return nil, errNilMetric
|
||||
return nil, errNilMetricOrMetricDescriptor
|
||||
}
|
||||
|
||||
metricType, _ := se.metricTypeFromProto(metric.Descriptor.Name)
|
||||
metricType := se.metricTypeFromProto(metric.Descriptor.Name)
|
||||
displayName := se.displayName(metric.Descriptor.Name)
|
||||
metricKind, valueType := metricDescriptorTypeToMetricKind(metric)
|
||||
|
||||
|
|
@ -466,11 +488,9 @@ func metricExemplarToPbExemplar(exemplar *metricdata.Exemplar, projectID string)
|
|||
func attachmentsToPbAttachments(attachments metricdata.Attachments, projectID string) []*any.Any {
|
||||
var pbAttachments []*any.Any
|
||||
for _, v := range attachments {
|
||||
switch v.(type) {
|
||||
case trace.SpanContext:
|
||||
spanCtx, _ := v.(trace.SpanContext)
|
||||
if spanCtx, succ := v.(trace.SpanContext); succ {
|
||||
pbAttachments = append(pbAttachments, toPbSpanCtxAttachment(spanCtx, projectID))
|
||||
default:
|
||||
} else {
|
||||
// Treat everything else as plain string for now.
|
||||
// TODO(songy23): add support for dropped label attachments.
|
||||
pbAttachments = append(pbAttachments, toPbStringAttachment(v))
|
||||
|
|
|
|||
201
vendor/contrib.go.opencensus.io/exporter/stackdriver/metrics_batcher.go
generated
vendored
Normal file
|
|
@ -0,0 +1,201 @@
|
|||
// Copyright 2019, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package stackdriver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
monitoring "cloud.google.com/go/monitoring/apiv3"
|
||||
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
)
|
||||
|
||||
const (
|
||||
minNumWorkers = 1
|
||||
minReqsChanSize = 5
|
||||
)
|
||||
|
||||
type metricsBatcher struct {
|
||||
projectName string
|
||||
allTss []*monitoringpb.TimeSeries
|
||||
allErrs []error
|
||||
|
||||
// Counts all dropped TimeSeries by this metricsBatcher.
|
||||
droppedTimeSeries int
|
||||
|
||||
workers []*worker
|
||||
// reqsChan, respsChan and wg are shared between metricsBatcher and worker goroutines.
|
||||
reqsChan chan *monitoringpb.CreateTimeSeriesRequest
|
||||
respsChan chan *response
|
||||
wg *sync.WaitGroup
|
||||
}
|
||||
|
||||
func newMetricsBatcher(ctx context.Context, projectID string, numWorkers int, mc *monitoring.MetricClient, timeout time.Duration) *metricsBatcher {
|
||||
if numWorkers < minNumWorkers {
|
||||
numWorkers = minNumWorkers
|
||||
}
|
||||
workers := make([]*worker, 0, numWorkers)
|
||||
reqsChanSize := numWorkers
|
||||
if reqsChanSize < minReqsChanSize {
|
||||
reqsChanSize = minReqsChanSize
|
||||
}
|
||||
reqsChan := make(chan *monitoringpb.CreateTimeSeriesRequest, reqsChanSize)
|
||||
respsChan := make(chan *response, numWorkers)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(numWorkers)
|
||||
for i := 0; i < numWorkers; i++ {
|
||||
w := newWorker(ctx, mc, reqsChan, respsChan, &wg, timeout)
|
||||
workers = append(workers, w)
|
||||
go w.start()
|
||||
}
|
||||
return &metricsBatcher{
|
||||
projectName: fmt.Sprintf("projects/%s", projectID),
|
||||
allTss: make([]*monitoringpb.TimeSeries, 0, maxTimeSeriesPerUpload),
|
||||
droppedTimeSeries: 0,
|
||||
workers: workers,
|
||||
wg: &wg,
|
||||
reqsChan: reqsChan,
|
||||
respsChan: respsChan,
|
||||
}
|
||||
}
|
||||
|
||||
func (mb *metricsBatcher) recordDroppedTimeseries(numTimeSeries int, errs ...error) {
|
||||
mb.droppedTimeSeries += numTimeSeries
|
||||
for _, err := range errs {
|
||||
if err != nil {
|
||||
mb.allErrs = append(mb.allErrs, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (mb *metricsBatcher) addTimeSeries(ts *monitoringpb.TimeSeries) {
|
||||
mb.allTss = append(mb.allTss, ts)
|
||||
if len(mb.allTss) == maxTimeSeriesPerUpload {
|
||||
mb.sendReqToChan()
|
||||
mb.allTss = make([]*monitoringpb.TimeSeries, 0, maxTimeSeriesPerUpload)
|
||||
}
|
||||
}
|
||||
|
||||
func (mb *metricsBatcher) close(ctx context.Context) error {
|
||||
// Send any remaining time series, must be <200
|
||||
if len(mb.allTss) > 0 {
|
||||
mb.sendReqToChan()
|
||||
}
|
||||
|
||||
close(mb.reqsChan)
|
||||
mb.wg.Wait()
|
||||
for i := 0; i < len(mb.workers); i++ {
|
||||
resp := <-mb.respsChan
|
||||
mb.recordDroppedTimeseries(resp.droppedTimeSeries, resp.errs...)
|
||||
}
|
||||
close(mb.respsChan)
|
||||
|
||||
numErrors := len(mb.allErrs)
|
||||
if numErrors == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if numErrors == 1 {
|
||||
return mb.allErrs[0]
|
||||
}
|
||||
|
||||
errMsgs := make([]string, 0, numErrors)
|
||||
for _, err := range mb.allErrs {
|
||||
errMsgs = append(errMsgs, err.Error())
|
||||
}
|
||||
return fmt.Errorf("[%s]", strings.Join(errMsgs, "; "))
|
||||
}
|
||||
|
||||
// sendReqToChan grabs all the timeseies in this metricsBatcher, puts them
|
||||
// to a CreateTimeSeriesRequest and sends the request to reqsChan.
|
||||
func (mb *metricsBatcher) sendReqToChan() {
|
||||
req := &monitoringpb.CreateTimeSeriesRequest{
|
||||
Name: mb.projectName,
|
||||
TimeSeries: mb.allTss,
|
||||
}
|
||||
mb.reqsChan <- req
|
||||
}
|
||||
|
||||
// sendReq sends create time series requests to Stackdriver,
|
||||
// and returns the count of dropped time series and error.
|
||||
func sendReq(ctx context.Context, c *monitoring.MetricClient, req *monitoringpb.CreateTimeSeriesRequest) (int, error) {
|
||||
if c != nil { // c==nil only happens in unit tests where we don't make real calls to Stackdriver server
|
||||
err := createTimeSeries(ctx, c, req)
|
||||
if err != nil {
|
||||
return len(req.TimeSeries), err
|
||||
}
|
||||
}
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
type worker struct {
|
||||
ctx context.Context
|
||||
timeout time.Duration
|
||||
mc *monitoring.MetricClient
|
||||
|
||||
resp *response
|
||||
|
||||
respsChan chan *response
|
||||
reqsChan chan *monitoringpb.CreateTimeSeriesRequest
|
||||
|
||||
wg *sync.WaitGroup
|
||||
}
|
||||
|
||||
func newWorker(
|
||||
ctx context.Context,
|
||||
mc *monitoring.MetricClient,
|
||||
reqsChan chan *monitoringpb.CreateTimeSeriesRequest,
|
||||
respsChan chan *response,
|
||||
wg *sync.WaitGroup,
|
||||
timeout time.Duration) *worker {
|
||||
return &worker{
|
||||
ctx: ctx,
|
||||
mc: mc,
|
||||
resp: &response{},
|
||||
reqsChan: reqsChan,
|
||||
respsChan: respsChan,
|
||||
wg: wg,
|
||||
}
|
||||
}
|
||||
|
||||
func (w *worker) start() {
|
||||
for req := range w.reqsChan {
|
||||
w.sendReqWithTimeout(req)
|
||||
}
|
||||
w.respsChan <- w.resp
|
||||
w.wg.Done()
|
||||
}
|
||||
|
||||
func (w *worker) sendReqWithTimeout(req *monitoringpb.CreateTimeSeriesRequest) {
|
||||
ctx, cancel := newContextWithTimeout(w.ctx, w.timeout)
|
||||
defer cancel()
|
||||
|
||||
w.recordDroppedTimeseries(sendReq(ctx, w.mc, req))
|
||||
}
|
||||
|
||||
func (w *worker) recordDroppedTimeseries(numTimeSeries int, err error) {
|
||||
w.resp.droppedTimeSeries += numTimeSeries
|
||||
if err != nil {
|
||||
w.resp.errs = append(w.resp.errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
type response struct {
|
||||
droppedTimeSeries int
|
||||
errs []error
|
||||
}
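For illustration, the concurrency shape of the new metricsBatcher above (requests queued on a channel, N workers draining it, each worker reporting a tally once the channel is closed) can be sketched with plain Go; this is a generic sketch, not the exporter's own API.

package main

import (
	"fmt"
	"sync"
)

// A minimal fan-out sketch: requests go on a channel, workers drain it, and
// each worker reports its count on a response channel after the close.
func main() {
	const numWorkers = 2
	reqs := make(chan int, numWorkers)
	resps := make(chan int, numWorkers)
	var wg sync.WaitGroup

	wg.Add(numWorkers)
	for i := 0; i < numWorkers; i++ {
		go func() {
			defer wg.Done()
			handled := 0
			for range reqs {
				handled++ // stand-in for sendReqWithTimeout
			}
			resps <- handled
		}()
	}

	for i := 0; i < 5; i++ {
		reqs <- i
	}
	close(reqs)
	wg.Wait()
	close(resps)

	total := 0
	for n := range resps {
		total += n
	}
	fmt.Println("requests handled:", total) // 5
}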
|
||||
|
|
@ -24,81 +24,74 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"cloud.google.com/go/monitoring/apiv3"
|
||||
distributionpb "google.golang.org/genproto/googleapis/api/distribution"
|
||||
labelpb "google.golang.org/genproto/googleapis/api/label"
|
||||
googlemetricpb "google.golang.org/genproto/googleapis/api/metric"
|
||||
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
"go.opencensus.io/resource"
|
||||
|
||||
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
||||
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
||||
resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
|
||||
"go.opencensus.io/resource"
|
||||
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
|
||||
distributionpb "google.golang.org/genproto/googleapis/api/distribution"
|
||||
labelpb "google.golang.org/genproto/googleapis/api/label"
|
||||
googlemetricpb "google.golang.org/genproto/googleapis/api/metric"
|
||||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
)
|
||||
|
||||
var errNilMetric = errors.New("expecting a non-nil metric")
|
||||
var errNilMetricDescriptor = errors.New("expecting a non-nil metric descriptor")
|
||||
var errNilMetricOrMetricDescriptor = errors.New("non-nil metric or metric descriptor")
|
||||
var percentileLabelKey = &metricspb.LabelKey{
|
||||
Key: "percentile",
|
||||
Description: "the value at a given percentile of a distribution",
|
||||
}
|
||||
var globalResource = &resource.Resource{Type: "global"}
|
||||
var domains = []string{"googleapis.com", "kubernetes.io", "istio.io", "knative.dev"}
|
||||
|
||||
type metricProtoPayload struct {
|
||||
node *commonpb.Node
|
||||
resource *resourcepb.Resource
|
||||
metric *metricspb.Metric
|
||||
additionalLabels map[string]labelValue
|
||||
}
|
||||
|
||||
func (se *statsExporter) addPayload(node *commonpb.Node, rsc *resourcepb.Resource, labels map[string]labelValue, metrics ...*metricspb.Metric) {
|
||||
for _, metric := range metrics {
|
||||
payload := &metricProtoPayload{
|
||||
metric: metric,
|
||||
resource: rsc,
|
||||
node: node,
|
||||
additionalLabels: labels,
|
||||
}
|
||||
se.protoMetricsBundler.Add(payload, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// ExportMetricsProto exports OpenCensus Metrics Proto to Stackdriver Monitoring.
|
||||
func (se *statsExporter) ExportMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) error {
|
||||
// PushMetricsProto exports OpenCensus Metrics Proto to Stackdriver Monitoring synchronously,
|
||||
// without de-duping or adding proto metrics to the bundler.
|
||||
func (se *statsExporter) PushMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) (int, error) {
|
||||
if len(metrics) == 0 {
|
||||
return errNilMetric
|
||||
return 0, errNilMetricOrMetricDescriptor
|
||||
}
|
||||
|
||||
additionalLabels := se.defaultLabels
|
||||
if additionalLabels == nil {
|
||||
// additionalLabels must be stateless because each node is different
|
||||
additionalLabels = getDefaultLabelsFromNode(node)
|
||||
}
|
||||
// Caches the resources seen so far
|
||||
seenResources := make(map[*resourcepb.Resource]*monitoredrespb.MonitoredResource)
|
||||
|
||||
mb := newMetricsBatcher(ctx, se.o.ProjectID, se.o.NumberOfWorkers, se.c, se.o.Timeout)
|
||||
for _, metric := range metrics {
|
||||
if len(metric.GetTimeseries()) == 0 {
|
||||
// No TimeSeries to export, skip this metric.
|
||||
continue
|
||||
}
|
||||
mappedRsc := se.getResource(rsc, metric, seenResources)
|
||||
if metric.GetMetricDescriptor().GetType() == metricspb.MetricDescriptor_SUMMARY {
|
||||
se.addPayload(node, rsc, additionalLabels, se.convertSummaryMetrics(metric)...)
|
||||
summaryMtcs := se.convertSummaryMetrics(metric)
|
||||
for _, summaryMtc := range summaryMtcs {
|
||||
if err := se.createMetricDescriptorFromMetricProto(ctx, summaryMtc); err != nil {
|
||||
mb.recordDroppedTimeseries(len(summaryMtc.GetTimeseries()), err)
|
||||
continue
|
||||
}
|
||||
se.protoMetricToTimeSeries(ctx, mappedRsc, summaryMtc, mb)
|
||||
}
|
||||
} else {
|
||||
se.addPayload(node, rsc, additionalLabels, metric)
|
||||
if err := se.createMetricDescriptorFromMetricProto(ctx, metric); err != nil {
|
||||
mb.recordDroppedTimeseries(len(metric.GetTimeseries()), err)
|
||||
continue
|
||||
}
|
||||
se.protoMetricToTimeSeries(ctx, mappedRsc, metric, mb)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return mb.droppedTimeSeries, mb.close(ctx)
|
||||
}
|
||||
|
||||
func (se *statsExporter) convertSummaryMetrics(summary *metricspb.Metric) []*metricspb.Metric {
|
||||
var metrics []*metricspb.Metric
|
||||
var percentileTss []*metricspb.TimeSeries
|
||||
var countTss []*metricspb.TimeSeries
|
||||
var sumTss []*metricspb.TimeSeries
|
||||
|
||||
for _, ts := range summary.Timeseries {
|
||||
var percentileTss []*metricspb.TimeSeries
|
||||
var countTss []*metricspb.TimeSeries
|
||||
var sumTss []*metricspb.TimeSeries
|
||||
lvs := ts.GetLabelValues()
|
||||
|
||||
startTime := ts.StartTimestamp
|
||||
|
|
@ -141,7 +134,8 @@ func (se *statsExporter) convertSummaryMetrics(summary *metricspb.Metric) []*met
|
|||
for _, percentileValue := range snapshot.GetPercentileValues() {
|
||||
lvsWithPercentile := lvs[0:]
|
||||
lvsWithPercentile = append(lvsWithPercentile, &metricspb.LabelValue{
|
||||
Value: fmt.Sprintf("%f", percentileValue.Percentile),
|
||||
HasValue: true,
|
||||
Value: fmt.Sprintf("%f", percentileValue.Percentile),
|
||||
})
|
||||
percentileTs := &metricspb.TimeSeries{
|
||||
LabelValues: lvsWithPercentile,
|
||||
|
|
@ -207,142 +201,22 @@ func (se *statsExporter) convertSummaryMetrics(summary *metricspb.Metric) []*met
|
|||
return metrics
|
||||
}
|
||||
|
||||
func (se *statsExporter) handleMetricsProtoUpload(payloads []*metricProtoPayload) {
|
||||
err := se.uploadMetricsProto(payloads)
|
||||
if err != nil {
|
||||
se.o.handleError(err)
|
||||
func (se *statsExporter) getResource(rsc *resourcepb.Resource, metric *metricspb.Metric, seenRscs map[*resourcepb.Resource]*monitoredrespb.MonitoredResource) *monitoredrespb.MonitoredResource {
|
||||
var resource = rsc
|
||||
if metric.Resource != nil {
|
||||
resource = metric.Resource
|
||||
}
|
||||
}
|
||||
|
||||
func (se *statsExporter) uploadMetricsProto(payloads []*metricProtoPayload) error {
|
||||
ctx, cancel := se.o.newContextWithTimeout()
|
||||
defer cancel()
|
||||
|
||||
ctx, span := trace.StartSpan(
|
||||
ctx,
|
||||
"contrib.go.opencensus.io/exporter/stackdriver.uploadMetrics",
|
||||
trace.WithSampler(trace.NeverSample()),
|
||||
)
|
||||
defer span.End()
|
||||
|
||||
for _, payload := range payloads {
|
||||
// Now create the metric descriptor remotely.
|
||||
if err := se.createMetricDescriptor(ctx, payload.metric, payload.additionalLabels); err != nil {
|
||||
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
|
||||
return err
|
||||
}
|
||||
mappedRsc, ok := seenRscs[resource]
|
||||
if !ok {
|
||||
mappedRsc = se.o.MapResource(resourcepbToResource(resource))
|
||||
seenRscs[resource] = mappedRsc
|
||||
}
|
||||
|
||||
var allTimeSeries []*monitoringpb.TimeSeries
|
||||
for _, payload := range payloads {
|
||||
tsl, err := se.protoMetricToTimeSeries(ctx, payload.node, payload.resource, payload.metric, payload.additionalLabels)
|
||||
if err != nil {
|
||||
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
|
||||
return err
|
||||
}
|
||||
allTimeSeries = append(allTimeSeries, tsl...)
|
||||
}
|
||||
|
||||
// Now batch timeseries up and then export.
|
||||
for start, end := 0, 0; start < len(allTimeSeries); start = end {
|
||||
end = start + maxTimeSeriesPerUpload
|
||||
if end > len(allTimeSeries) {
|
||||
end = len(allTimeSeries)
|
||||
}
|
||||
batch := allTimeSeries[start:end]
|
||||
ctsreql := se.combineTimeSeriesToCreateTimeSeriesRequest(batch)
|
||||
for _, ctsreq := range ctsreql {
|
||||
if err := createTimeSeries(ctx, se.c, ctsreq); err != nil {
|
||||
span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
|
||||
// TODO(@odeke-em): Don't fail fast here, perhaps batch errors?
|
||||
// return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// metricSignature creates a unique signature consisting of a
|
||||
// metric's type and its lexicographically sorted label values
|
||||
// See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/120
|
||||
func metricSignature(metric *googlemetricpb.Metric) string {
|
||||
labels := metric.GetLabels()
|
||||
labelValues := make([]string, 0, len(labels))
|
||||
|
||||
for _, labelValue := range labels {
|
||||
labelValues = append(labelValues, labelValue)
|
||||
}
|
||||
sort.Strings(labelValues)
|
||||
return fmt.Sprintf("%s:%s", metric.GetType(), strings.Join(labelValues, ","))
|
||||
}
|
||||
|
||||
func (se *statsExporter) combineTimeSeriesToCreateTimeSeriesRequest(ts []*monitoringpb.TimeSeries) (ctsreql []*monitoringpb.CreateTimeSeriesRequest) {
|
||||
if len(ts) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Since there are scenarios in which Metrics with the same Type
|
||||
// can be bunched in the same TimeSeries, we have to ensure that
|
||||
// we create a unique CreateTimeSeriesRequest with entirely unique Metrics
|
||||
// per TimeSeries, lest we'll encounter:
|
||||
//
|
||||
// err: rpc error: code = InvalidArgument desc = One or more TimeSeries could not be written:
|
||||
// Field timeSeries[2] had an invalid value: Duplicate TimeSeries encountered.
|
||||
// Only one point can be written per TimeSeries per request.: timeSeries[2]
|
||||
//
|
||||
// This scenario happens when we are using the OpenCensus Agent in which multiple metrics
|
||||
// are streamed by various client applications.
|
||||
// See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/73
|
||||
uniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts))
|
||||
nonUniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts))
|
||||
seenMetrics := make(map[string]struct{})
|
||||
|
||||
for _, tti := range ts {
|
||||
key := metricSignature(tti.Metric)
|
||||
if _, alreadySeen := seenMetrics[key]; !alreadySeen {
|
||||
uniqueTimeSeries = append(uniqueTimeSeries, tti)
|
||||
seenMetrics[key] = struct{}{}
|
||||
} else {
|
||||
nonUniqueTimeSeries = append(nonUniqueTimeSeries, tti)
|
||||
}
|
||||
}
|
||||
|
||||
// UniqueTimeSeries can be bunched up together
|
||||
// While for each nonUniqueTimeSeries, we have
|
||||
// to make a unique CreateTimeSeriesRequest.
|
||||
ctsreql = append(ctsreql, &monitoringpb.CreateTimeSeriesRequest{
|
||||
Name: monitoring.MetricProjectPath(se.o.ProjectID),
|
||||
TimeSeries: uniqueTimeSeries,
|
||||
})
|
||||
|
||||
// Now recursively also combine the non-unique TimeSeries
|
||||
// that were singly added to nonUniqueTimeSeries.
|
||||
// The reason is that we need optimal combinations
|
||||
// for optimal combinations because:
|
||||
// * "a/b/c"
|
||||
// * "a/b/c"
|
||||
// * "x/y/z"
|
||||
// * "a/b/c"
|
||||
// * "x/y/z"
|
||||
// * "p/y/z"
|
||||
// * "d/y/z"
|
||||
//
|
||||
// should produce:
|
||||
// CreateTimeSeries(uniqueTimeSeries) :: ["a/b/c", "x/y/z", "p/y/z", "d/y/z"]
|
||||
// CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c"]
|
||||
// CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c", "x/y/z"]
|
||||
nonUniqueRequests := se.combineTimeSeriesToCreateTimeSeriesRequest(nonUniqueTimeSeries)
|
||||
ctsreql = append(ctsreql, nonUniqueRequests...)
|
||||
|
||||
return ctsreql
|
||||
return mappedRsc
|
||||
}
|
||||
|
||||
func resourcepbToResource(rsc *resourcepb.Resource) *resource.Resource {
|
||||
if rsc == nil {
|
||||
return &resource.Resource{
|
||||
Type: "global",
|
||||
}
|
||||
return globalResource
|
||||
}
|
||||
res := &resource.Resource{
|
||||
Type: rsc.Type,
|
||||
|
|
@ -357,92 +231,87 @@ func resourcepbToResource(rsc *resourcepb.Resource) *resource.Resource {
|
|||
|
||||
// protoMetricToTimeSeries converts a metric into a Stackdriver Monitoring v3 API CreateTimeSeriesRequest
|
||||
// but it doesn't invoke any remote API.
|
||||
func (se *statsExporter) protoMetricToTimeSeries(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metric *metricspb.Metric, additionalLabels map[string]labelValue) ([]*monitoringpb.TimeSeries, error) {
|
||||
if metric == nil {
|
||||
return nil, errNilMetric
|
||||
func (se *statsExporter) protoMetricToTimeSeries(ctx context.Context, mappedRsc *monitoredrespb.MonitoredResource, metric *metricspb.Metric, mb *metricsBatcher) {
|
||||
if metric == nil || metric.MetricDescriptor == nil {
|
||||
mb.recordDroppedTimeseries(len(metric.GetTimeseries()), errNilMetricOrMetricDescriptor)
|
||||
}
|
||||
|
||||
var resource = rsc
|
||||
if metric.Resource != nil {
|
||||
resource = metric.Resource
|
||||
}
|
||||
|
||||
mappedRes := se.o.MapResource(resourcepbToResource(resource))
|
||||
|
||||
metricName, _, _, err := metricProseFromProto(metric)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metricType, _ := se.metricTypeFromProto(metricName)
|
||||
metricType := se.metricTypeFromProto(metric.GetMetricDescriptor().GetName())
|
||||
metricLabelKeys := metric.GetMetricDescriptor().GetLabelKeys()
|
||||
metricKind, _ := protoMetricDescriptorTypeToMetricKind(metric)
|
||||
metricKind, valueType := protoMetricDescriptorTypeToMetricKind(metric)
|
||||
labelKeys := make([]string, 0, len(metricLabelKeys))
|
||||
for _, key := range metricLabelKeys {
|
||||
labelKeys = append(labelKeys, sanitize(key.GetKey()))
|
||||
}
|
||||
|
||||
timeSeries := make([]*monitoringpb.TimeSeries, 0, len(metric.Timeseries))
|
||||
for _, protoTimeSeries := range metric.Timeseries {
|
||||
if len(protoTimeSeries.Points) == 0 {
|
||||
// No points to send just move forward.
|
||||
continue
|
||||
}
|
||||
|
||||
sdPoints, err := se.protoTimeSeriesToMonitoringPoints(protoTimeSeries, metricKind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
mb.recordDroppedTimeseries(1, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Each TimeSeries has labelValues which MUST be correlated
|
||||
// with that from the MetricDescriptor
|
||||
labels, err := labelsPerTimeSeries(additionalLabels, metricLabelKeys, protoTimeSeries.GetLabelValues())
|
||||
labels, err := labelsPerTimeSeries(se.defaultLabels, labelKeys, protoTimeSeries.GetLabelValues())
|
||||
if err != nil {
|
||||
// TODO: (@odeke-em) perhaps log this error from labels extraction, if non-nil.
|
||||
mb.recordDroppedTimeseries(1, err)
|
||||
continue
|
||||
}
|
||||
timeSeries = append(timeSeries, &monitoringpb.TimeSeries{
|
||||
mb.addTimeSeries(&monitoringpb.TimeSeries{
|
||||
Metric: &googlemetricpb.Metric{
|
||||
Type: metricType,
|
||||
Labels: labels,
|
||||
},
|
||||
Resource: mappedRes,
|
||||
Points: sdPoints,
|
||||
MetricKind: metricKind,
|
||||
ValueType: valueType,
|
||||
Resource: mappedRsc,
|
||||
Points: sdPoints,
|
||||
})
|
||||
}
|
||||
|
||||
return timeSeries, nil
|
||||
}
|
||||
|
||||
func labelsPerTimeSeries(defaults map[string]labelValue, labelKeys []*metricspb.LabelKey, labelValues []*metricspb.LabelValue) (map[string]string, error) {
|
||||
func labelsPerTimeSeries(defaults map[string]labelValue, labelKeys []string, labelValues []*metricspb.LabelValue) (map[string]string, error) {
|
||||
if len(labelKeys) != len(labelValues) {
|
||||
return nil, fmt.Errorf("length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues))
|
||||
}
|
||||
|
||||
if len(defaults)+len(labelKeys) == 0 {
|
||||
// No labels for this metric
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
labels := make(map[string]string)
|
||||
// Fill in the defaults firstly, irrespective of if the labelKeys and labelValues are mismatched.
|
||||
for key, label := range defaults {
|
||||
labels[sanitize(key)] = label.val
|
||||
}
|
||||
|
||||
// Perform this sanity check now.
|
||||
if len(labelKeys) != len(labelValues) {
|
||||
return labels, fmt.Errorf("Length mismatch: len(labelKeys)=%d len(labelValues)=%d", len(labelKeys), len(labelValues))
|
||||
labels[key] = label.val
|
||||
}
|
||||
|
||||
for i, labelKey := range labelKeys {
|
||||
labelValue := labelValues[i]
|
||||
labels[sanitize(labelKey.GetKey())] = labelValue.GetValue()
|
||||
if !labelValue.GetHasValue() {
|
||||
continue
|
||||
}
|
||||
labels[labelKey] = labelValue.GetValue()
|
||||
}
|
||||
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
func (se *statsExporter) protoMetricDescriptorToCreateMetricDescriptorRequest(ctx context.Context, metric *metricspb.Metric, additionalLabels map[string]labelValue) (*monitoringpb.CreateMetricDescriptorRequest, error) {
|
||||
// Otherwise, we encountered a cache-miss and
|
||||
// should create the metric descriptor remotely.
|
||||
inMD, err := se.protoToMonitoringMetricDescriptor(metric, additionalLabels)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func (se *statsExporter) createMetricDescriptorFromMetricProto(ctx context.Context, metric *metricspb.Metric) error {
|
||||
// Skip create metric descriptor if configured
|
||||
if se.o.SkipCMD {
|
||||
return nil
|
||||
}
|
||||
|
||||
cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
|
||||
Name: fmt.Sprintf("projects/%s", se.o.ProjectID),
|
||||
MetricDescriptor: inMD,
|
||||
}
|
||||
ctx, cancel := newContextWithTimeout(ctx, se.o.Timeout)
|
||||
defer cancel()
|
||||
|
||||
return cmrdesc, nil
|
||||
}
|
||||
|
||||
// createMetricDescriptor creates a metric descriptor from the OpenCensus proto metric
|
||||
// and then creates it remotely using Stackdriver's API.
|
||||
func (se *statsExporter) createMetricDescriptor(ctx context.Context, metric *metricspb.Metric, additionalLabels map[string]labelValue) error {
|
||||
se.protoMu.Lock()
|
||||
defer se.protoMu.Unlock()
|
||||
|
||||
|
|
@ -451,46 +320,35 @@ func (se *statsExporter) createMetricDescriptor(ctx context.Context, metric *met
|
|||
return nil
|
||||
}
|
||||
|
||||
if builtinMetric(se.metricTypeFromProto(name)) {
|
||||
se.protoMetricDescriptors[name] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise, we encountered a cache-miss and
|
||||
// should create the metric descriptor remotely.
|
||||
inMD, err := se.protoToMonitoringMetricDescriptor(metric, additionalLabels)
|
||||
inMD, err := se.protoToMonitoringMetricDescriptor(metric, se.defaultLabels)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var md *googlemetricpb.MetricDescriptor
|
||||
if builtinMetric(inMD.Type) {
|
||||
gmrdesc := &monitoringpb.GetMetricDescriptorRequest{
|
||||
Name: inMD.Name,
|
||||
}
|
||||
md, err = getMetricDescriptor(ctx, se.c, gmrdesc)
|
||||
} else {
|
||||
|
||||
cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
|
||||
Name: fmt.Sprintf("projects/%s", se.o.ProjectID),
|
||||
MetricDescriptor: inMD,
|
||||
}
|
||||
md, err = createMetricDescriptor(ctx, se.c, cmrdesc)
|
||||
if err = se.createMetricDescriptor(ctx, inMD); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// Now record the metric as having been created.
|
||||
se.protoMetricDescriptors[name] = md
|
||||
}
|
||||
|
||||
return err
|
||||
se.protoMetricDescriptors[name] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (se *statsExporter) protoTimeSeriesToMonitoringPoints(ts *metricspb.TimeSeries, metricKind googlemetricpb.MetricDescriptor_MetricKind) (sptl []*monitoringpb.Point, err error) {
|
||||
func (se *statsExporter) protoTimeSeriesToMonitoringPoints(ts *metricspb.TimeSeries, metricKind googlemetricpb.MetricDescriptor_MetricKind) ([]*monitoringpb.Point, error) {
|
||||
sptl := make([]*monitoringpb.Point, 0, len(ts.Points))
|
||||
for _, pt := range ts.Points {
|
||||
|
||||
// If we have a last value aggregation point i.e. MetricDescriptor_GAUGE
|
||||
// StartTime should be nil.
|
||||
startTime := ts.StartTimestamp
|
||||
if metricKind == googlemetricpb.MetricDescriptor_GAUGE {
|
||||
startTime = nil
|
||||
}
|
||||
|
||||
spt, err := fromProtoPoint(startTime, pt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
@ -501,15 +359,15 @@ func (se *statsExporter) protoTimeSeriesToMonitoringPoints(ts *metricspb.TimeSer
|
|||
}
|
||||
|
||||
func (se *statsExporter) protoToMonitoringMetricDescriptor(metric *metricspb.Metric, additionalLabels map[string]labelValue) (*googlemetricpb.MetricDescriptor, error) {
|
||||
if metric == nil {
|
||||
return nil, errNilMetric
|
||||
if metric == nil || metric.MetricDescriptor == nil {
|
||||
return nil, errNilMetricOrMetricDescriptor
|
||||
}
|
||||
|
||||
metricName, description, unit, err := metricProseFromProto(metric)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metricType, _ := se.metricTypeFromProto(metricName)
|
||||
md := metric.GetMetricDescriptor()
|
||||
metricName := md.GetName()
|
||||
unit := md.GetUnit()
|
||||
description := md.GetDescription()
|
||||
metricType := se.metricTypeFromProto(metricName)
|
||||
displayName := se.displayName(metricName)
|
||||
metricKind, valueType := protoMetricDescriptorTypeToMetricKind(metric)
|
||||
|
||||
|
|
@ -550,32 +408,32 @@ func labelDescriptorsFromProto(defaults map[string]labelValue, protoLabelKeys []
|
|||
return labelDescriptors
|
||||
}
|
||||
|
||||
func metricProseFromProto(metric *metricspb.Metric) (name, description, unit string, err error) {
|
||||
md := metric.GetMetricDescriptor()
|
||||
if md == nil {
|
||||
return "", "", "", errNilMetricDescriptor
|
||||
func (se *statsExporter) metricTypeFromProto(name string) string {
|
||||
prefix := se.o.MetricPrefix
|
||||
if se.o.GetMetricPrefix != nil {
|
||||
prefix = se.o.GetMetricPrefix(name)
|
||||
}
|
||||
|
||||
name = md.GetName()
|
||||
unit = md.GetUnit()
|
||||
description = md.GetDescription()
|
||||
|
||||
if md.Type == metricspb.MetricDescriptor_CUMULATIVE_INT64 {
|
||||
// If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1",
|
||||
// because this view does not apply to the recorded values.
|
||||
unit = stats.UnitDimensionless
|
||||
if prefix != "" {
|
||||
name = path.Join(prefix, name)
|
||||
}
|
||||
|
||||
return name, description, unit, nil
|
||||
if !hasDomain(name) {
|
||||
// Still needed because the name may or may not have a "/" at the beginning.
|
||||
name = path.Join(defaultDomain, name)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func (se *statsExporter) metricTypeFromProto(name string) (string, bool) {
|
||||
// TODO: (@odeke-em) support non-"custom.googleapis.com" metrics names.
|
||||
name = path.Join("custom.googleapis.com", "opencensus", name)
|
||||
return name, true
|
||||
// hasDomain checks if the metric name already has a domain in it.
|
||||
func hasDomain(name string) bool {
|
||||
for _, domain := range domains {
|
||||
if strings.Contains(name, domain) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
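For illustration, the renaming above (apply the configured prefix, then prepend a default domain unless the name already contains a known one) can be reproduced standalone. The defaultDomain value below is an assumption based on the previous hard-coded "custom.googleapis.com"/"opencensus" join and is not taken from this diff.

package main

import (
	"fmt"
	"path"
	"strings"
)

// Assumed default; the old code always joined this in front of metric names.
const defaultDomain = "custom.googleapis.com/opencensus"

var domains = []string{"googleapis.com", "kubernetes.io", "istio.io", "knative.dev"}

func hasDomain(name string) bool {
	for _, d := range domains {
		if strings.Contains(name, d) {
			return true
		}
	}
	return false
}

// metricType mirrors statsExporter.metricTypeFromProto, with the prefix
// passed in explicitly instead of read from Options.
func metricType(prefix, name string) string {
	if prefix != "" {
		name = path.Join(prefix, name)
	}
	if !hasDomain(name) {
		name = path.Join(defaultDomain, name)
	}
	return name
}

func main() {
	fmt.Println(metricType("", "grpc.io/client/roundtrip_latency"))
	// custom.googleapis.com/opencensus/grpc.io/client/roundtrip_latency
	fmt.Println(metricType("knative.dev/serving", "revision_count"))
	// knative.dev/serving/revision_count
}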
|
||||
|
||||
func fromProtoPoint(startTime *timestamp.Timestamp, pt *metricspb.Point) (*monitoringpb.Point, error) {
|
||||
func fromProtoPoint(startTime *timestamppb.Timestamp, pt *metricspb.Point) (*monitoringpb.Point, error) {
|
||||
if pt == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
|
@ -585,14 +443,13 @@ func fromProtoPoint(startTime *timestamp.Timestamp, pt *metricspb.Point) (*monit
|
|||
return nil, err
|
||||
}
|
||||
|
||||
mpt := &monitoringpb.Point{
|
||||
return &monitoringpb.Point{
|
||||
Value: mptv,
|
||||
Interval: &monitoringpb.TimeInterval{
|
||||
StartTime: startTime,
|
||||
EndTime: pt.Timestamp,
|
||||
},
|
||||
}
|
||||
return mpt, nil
|
||||
}, nil
|
||||
}
|
||||
|
||||
func protoToMetricPoint(value interface{}) (*monitoringpb.TypedValue, error) {
|
||||
|
|
@ -600,8 +457,6 @@ func protoToMetricPoint(value interface{}) (*monitoringpb.TypedValue, error) {
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
var tval *monitoringpb.TypedValue
|
||||
switch v := value.(type) {
|
||||
default:
|
||||
// All the other types are not yet handled.
|
||||
|
|
@ -617,21 +472,21 @@ func protoToMetricPoint(value interface{}) (*monitoringpb.TypedValue, error) {
|
|||
// TODO: Add conversion from SummaryValue when
|
||||
// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/66
|
||||
// has been figured out.
|
||||
err = fmt.Errorf("protoToMetricPoint: unknown Data type: %T", value)
|
||||
return nil, fmt.Errorf("protoToMetricPoint: unknown Data type: %T", value)
|
||||
|
||||
case *metricspb.Point_Int64Value:
|
||||
tval = &monitoringpb.TypedValue{
|
||||
return &monitoringpb.TypedValue{
|
||||
Value: &monitoringpb.TypedValue_Int64Value{
|
||||
Int64Value: v.Int64Value,
|
||||
},
|
||||
}
|
||||
}, nil
|
||||
|
||||
case *metricspb.Point_DoubleValue:
|
||||
tval = &monitoringpb.TypedValue{
|
||||
return &monitoringpb.TypedValue{
|
||||
Value: &monitoringpb.TypedValue_DoubleValue{
|
||||
DoubleValue: v.DoubleValue,
|
||||
},
|
||||
}
|
||||
}, nil
|
||||
|
||||
case *metricspb.Point_DistributionValue:
|
||||
dv := v.DistributionValue
|
||||
|
|
@ -669,10 +524,8 @@ func protoToMetricPoint(value interface{}) (*monitoringpb.TypedValue, error) {
|
|||
mv.DistributionValue.BucketCounts = addZeroBucketCountOnCondition(insertZeroBound, bucketCounts(dv.Buckets)...)
|
||||
|
||||
}
|
||||
tval = &monitoringpb.TypedValue{Value: mv}
|
||||
return &monitoringpb.TypedValue{Value: mv}, nil
|
||||
}
|
||||
|
||||
return tval, err
|
||||
}
|
||||
|
||||
func bucketCounts(buckets []*metricspb.DistributionValue_Bucket) []int64 {
|
||||
|
|
@ -714,13 +567,3 @@ func protoMetricDescriptorTypeToMetricKind(m *metricspb.Metric) (googlemetricpb.
|
|||
return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
|
||||
}
|
||||
}
|
||||
|
||||
func getDefaultLabelsFromNode(node *commonpb.Node) map[string]labelValue {
|
||||
taskValue := fmt.Sprintf("%s-%d@%s", strings.ToLower(node.LibraryInfo.GetLanguage().String()), node.Identifier.Pid, node.Identifier.HostName)
|
||||
return map[string]labelValue{
|
||||
opencensusTaskKey: {
|
||||
val: taskValue,
|
||||
desc: opencensusTaskDescription,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,62 +0,0 @@
|
|||
// Copyright 2019, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package stackdriver
|
||||
|
||||
/*
|
||||
Common test utilities for comparing Stackdriver metrics.
|
||||
*/
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
|
||||
googlemetricpb "google.golang.org/genproto/googleapis/api/metric"
|
||||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
||||
"time"
|
||||
)
|
||||
|
||||
func timestampToTime(ts *timestamp.Timestamp) time.Time {
|
||||
if ts == nil {
|
||||
return time.Unix(0, 0).UTC()
|
||||
}
|
||||
return time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||
}
|
||||
|
||||
func cmpResource(got, want *monitoredrespb.MonitoredResource) string {
|
||||
return cmp.Diff(got, want, cmpopts.IgnoreUnexported(monitoredrespb.MonitoredResource{}))
|
||||
}
|
||||
|
||||
func cmpTSReqs(got, want []*monitoringpb.CreateTimeSeriesRequest) string {
|
||||
return cmp.Diff(got, want, cmpopts.IgnoreUnexported(monitoringpb.CreateTimeSeriesRequest{}))
|
||||
}
|
||||
|
||||
func cmpMD(got, want *googlemetricpb.MetricDescriptor) string {
|
||||
return cmp.Diff(got, want, cmpopts.IgnoreUnexported(googlemetricpb.MetricDescriptor{}))
|
||||
}
|
||||
|
||||
func cmpMDReq(got, want *monitoringpb.CreateMetricDescriptorRequest) string {
|
||||
return cmp.Diff(got, want, cmpopts.IgnoreUnexported(monitoringpb.CreateMetricDescriptorRequest{}))
|
||||
}
|
||||
|
||||
func cmpMDReqs(got, want []*monitoringpb.CreateMetricDescriptorRequest) string {
|
||||
return cmp.Diff(got, want, cmpopts.IgnoreUnexported(monitoringpb.CreateMetricDescriptorRequest{}))
|
||||
}
|
||||
|
||||
func cmpPoint(got, want *monitoringpb.Point) string {
|
||||
return cmp.Diff(got, want, cmpopts.IgnoreUnexported(monitoringpb.Point{}))
|
||||
}
|
||||
|
|
@ -37,8 +37,12 @@ type awsIdentityDocument struct {
// This is only done once.
func retrieveAWSIdentityDocument() *awsIdentityDocument {
awsIdentityDoc := awsIdentityDocument{}
c := ec2metadata.New(session.New())
if c.Available() == false {
sesion, err := session.NewSession()
if err != nil {
return nil
}
c := ec2metadata.New(sesion)
if !c.Available() {
return nil
}
ec2InstanceIdentifyDocument, err := c.GetInstanceIdentityDocument()
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ import (
"strings"

"cloud.google.com/go/compute/metadata"
"cloud.google.com/go/container/apiv1"
container "cloud.google.com/go/container/apiv1"
containerpb "google.golang.org/genproto/googleapis/container/v1"
)
|
||||
|
||||
|
|
|
|||
|
|
@ -22,13 +22,6 @@ import (
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
)

type resourceMap struct {
// Mapping from the input resource type to the monitored resource type in Stackdriver.
srcType, dstType string
// Mapping from Stackdriver monitored resource label to an OpenCensus resource label.
labels map[string]string
}

// Resource labels that are generally internal to the exporter.
// Consider exposing these labels and a type identifier in the future to allow
// for customization.
|
||||
|
|
@ -41,7 +34,7 @@ const (
)

// Mappings for the well-known OpenCensus resources to applicable Stackdriver resources.
var k8sResourceMap = map[string]string{
var k8sContainerMap = map[string]string{
"project_id": stackdriverProjectID,
"location": resourcekeys.CloudKeyZone,
"cluster_name": resourcekeys.K8SKeyClusterName,
|
||||
|
|
@ -50,6 +43,21 @@ var k8sResourceMap = map[string]string{
"container_name": resourcekeys.ContainerKeyName,
}

var k8sPodMap = map[string]string{
"project_id": stackdriverProjectID,
"location": resourcekeys.CloudKeyZone,
"cluster_name": resourcekeys.K8SKeyClusterName,
"namespace_name": resourcekeys.K8SKeyNamespaceName,
"pod_name": resourcekeys.K8SKeyPodName,
}

var k8sNodeMap = map[string]string{
"project_id": stackdriverProjectID,
"location": resourcekeys.CloudKeyZone,
"cluster_name": resourcekeys.K8SKeyClusterName,
"node_name": resourcekeys.HostKeyName,
}

var gcpResourceMap = map[string]string{
"project_id": stackdriverProjectID,
"instance_id": resourcekeys.HostKeyID,
|
||||
|
|
@ -72,14 +80,20 @@ var genericResourceMap = map[string]string{
"task_id": stackdriverGenericTaskID,
}

func transformResource(match, input map[string]string) map[string]string {
// returns transformed label map and true if all labels in match are found
// in input except optional project_id. It returns false if at least one label
// other than project_id is missing.
func transformResource(match, input map[string]string) (map[string]string, bool) {
output := make(map[string]string, len(input))
for dst, src := range match {
if v, ok := input[src]; ok {
v, ok := input[src]
if ok {
output[dst] = v
} else if dst != "project_id" {
return nil, true
}
}
return output
return output, false
}

func defaultMapResource(res *resource.Resource) *monitoredrespb.MonitoredResource {
|
||||
|
|
@ -94,10 +108,13 @@ func defaultMapResource(res *resource.Resource) *monitoredrespb.MonitoredResourc
switch {
case res.Type == resourcekeys.ContainerType:
result.Type = "k8s_container"
match = k8sResourceMap
match = k8sContainerMap
case res.Type == resourcekeys.K8SType:
result.Type = "k8s_pod"
match = k8sResourceMap
match = k8sPodMap
case res.Type == resourcekeys.HostType && res.Labels[resourcekeys.K8SKeyClusterName] != "":
result.Type = "k8s_node"
match = k8sNodeMap
case res.Labels[resourcekeys.CloudKeyProvider] == resourcekeys.CloudProviderGCP:
result.Type = "gce_instance"
match = gcpResourceMap
|
||||
|
|
@ -106,7 +123,17 @@ func defaultMapResource(res *resource.Resource) *monitoredrespb.MonitoredResourc
match = awsResourceMap
}

result.Labels = transformResource(match, res.Labels)
var missing bool
result.Labels, missing = transformResource(match, res.Labels)
if missing {
result.Type = "global"
// if project id specified then transform it.
if v, ok := res.Labels[stackdriverProjectID]; ok {
result.Labels = make(map[string]string, 1)
result.Labels["project_id"] = v
}
return result
}
if result.Type == "aws_ec2_instance" {
if v, ok := result.Labels["region"]; ok {
result.Labels["region"] = fmt.Sprintf("aws:%s", v)
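For illustration, the fallback introduced above (if any required label other than project_id is missing, the resource is reported as "global" with at most a project_id label) can be reproduced in a standalone sketch; the match and input maps below are hypothetical stand-ins, not the real resourcekeys constants.

package main

import "fmt"

// transformLabels mirrors the new transformResource: it reports missing=true
// as soon as a required label other than project_id is absent from the input.
func transformLabels(match, input map[string]string) (map[string]string, bool) {
	output := make(map[string]string, len(input))
	for dst, src := range match {
		v, ok := input[src]
		if ok {
			output[dst] = v
		} else if dst != "project_id" {
			return nil, true
		}
	}
	return output, false
}

func main() {
	// Hypothetical mapping and resource labels, for illustration only.
	match := map[string]string{
		"project_id":   "cloud.project.id",
		"cluster_name": "k8s.cluster.name",
	}
	input := map[string]string{"cloud.project.id": "my-project"}

	labels, missing := transformLabels(match, input)
	if missing {
		// Mirror defaultMapResource's fallback: type "global", project_id only.
		fmt.Println("global", map[string]string{"project_id": input["cloud.project.id"]})
		return
	}
	fmt.Println("k8s_container", labels)
}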
|
||||
|
|
|
|||
|
|
@ -54,6 +54,7 @@ import (
|
|||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
metadataapi "cloud.google.com/go/compute/metadata"
|
||||
|
|
@ -61,7 +62,6 @@ import (
|
|||
"contrib.go.opencensus.io/exporter/stackdriver/monitoredresource"
|
||||
"go.opencensus.io/resource"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/tag"
|
||||
"go.opencensus.io/trace"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/option"
|
||||
|
|
@ -186,11 +186,9 @@ type Options struct {
|
|||
// conversions from auto-detected resources to well-known Stackdriver monitored resources.
|
||||
MapResource func(*resource.Resource) *monitoredrespb.MonitoredResource
|
||||
|
||||
// MetricPrefix overrides the prefix of a Stackdriver metric display names.
|
||||
// Optional. If unset defaults to "OpenCensus/".
|
||||
// Deprecated: Provide GetMetricDisplayName to change the display name of
|
||||
// the metric.
|
||||
// If GetMetricDisplayName is non-nil, this option is ignored.
|
||||
// MetricPrefix overrides the prefix of Stackdriver metric names.
|
||||
// Optional. If unset defaults to "custom.googleapis.com/opencensus/".
|
||||
// If GetMetricPrefix is non-nil, this option is ignored.
|
||||
MetricPrefix string
|
||||
|
||||
// GetMetricDisplayName allows customizing the display name for the metric
|
||||
|
|
@ -203,8 +201,16 @@ type Options struct {
|
|||
// "custom.googleapis.com/opencensus/" + view.Name
|
||||
//
|
||||
// See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor
|
||||
// Deprecated: Use GetMetricPrefix instead.
|
||||
GetMetricType func(view *view.View) string
|
||||
|
||||
// GetMetricPrefix allows customizing the metric prefix for the given metric name.
|
||||
// If it is not set, MetricPrefix is used. If MetricPrefix is not set, it defaults to:
|
||||
// "custom.googleapis.com/opencensus/"
|
||||
//
|
||||
// See: https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricDescriptor
|
||||
GetMetricPrefix func(name string) string
|
||||
|
||||
// DefaultTraceAttributes will be appended to every span that is exported to
|
||||
// Stackdriver Trace.
|
||||
DefaultTraceAttributes map[string]interface{}
|
||||
|
|
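A note for exporter users on the options documented above: GetMetricPrefix takes precedence over MetricPrefix, so the prefix can vary per metric name. The snippet below is only an illustrative sketch, not the client's actual wiring; the project id and prefix are placeholders, and the import alias is assumed.

package main

import (
	"log"

	stackdriver "contrib.go.opencensus.io/exporter/stackdriver"
)

func main() {
	exporter, err := stackdriver.NewExporter(stackdriver.Options{
		ProjectID: "my-gcp-project", // placeholder
		// Overrides MetricPrefix for every exported metric name.
		GetMetricPrefix: func(name string) string {
			return "custom.googleapis.com/knative" // placeholder prefix
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = exporter // in real code, register it with the OpenCensus view/trace APIs
}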
@ -238,31 +244,47 @@ type Options struct {
|
|||
// If unset, context.Background() will be used.
|
||||
Context context.Context
|
||||
|
||||
// SkipCMD enforces to skip all the CreateMetricDescriptor calls.
|
||||
// These calls are important in order to configure the unit of the metrics,
|
||||
// but in some cases all the exported metrics are builtin (unit is configured)
|
||||
// or the unit is not important.
|
||||
SkipCMD bool
|
||||
|
||||
// Timeout for all API calls. If not set, defaults to 5 seconds.
|
||||
Timeout time.Duration
|
||||
|
||||
// GetMonitoredResource may be provided to supply the details of the
|
||||
// monitored resource dynamically based on the tags associated with each
|
||||
// data point. Most users will not need to set this, but should instead
|
||||
// set the MonitoredResource field.
|
||||
//
|
||||
// GetMonitoredResource may add or remove tags by returning a new set of
|
||||
// tags. It is safe for the function to mutate its argument and return it.
|
||||
//
|
||||
// See the documentation on the MonitoredResource field for guidance on the
|
||||
// interaction between monitored resources and labels.
|
||||
//
|
||||
// The MonitoredResource field is ignored if this field is set to a non-nil
|
||||
// value.
|
||||
GetMonitoredResource func(*view.View, []tag.Tag) ([]tag.Tag, monitoredresource.Interface)
|
||||
|
||||
// ReportingInterval sets the interval between reporting metrics.
|
||||
// If it is set to zero then default value is used.
|
||||
ReportingInterval time.Duration
|
||||
|
||||
// NumberOfWorkers sets the number of goroutines that send requests
|
||||
// to Stackdriver Monitoring. This is only used for Proto metrics export
|
||||
// for now. The minimum number of workers is 1.
|
||||
NumberOfWorkers int
|
||||
|
||||
// ResourceByDescriptor may be provided to supply monitored resource dynamically
|
||||
// based on the metric Descriptor. Most users will not need to set this,
|
||||
// but should instead set ResourceDetector.
|
||||
//
|
||||
// The MonitoredResource and ResourceDetector fields are ignored if this
|
||||
// field is set to a non-nil value.
|
||||
//
|
||||
// The ResourceByDescriptor is called to derive monitored resources from
|
||||
// metric.Descriptor and the label map associated with the time-series.
|
||||
// If any label is used for the derived resource then it will be removed
|
||||
// from the label map. The remaining labels in the map are returned to
|
||||
// be used with the time-series.
|
||||
//
|
||||
// If the func set to this field does not return valid resource even for one
|
||||
// time-series then it will result into an error for the entire CreateTimeSeries request
|
||||
// which may contain more than one time-series.
|
||||
ResourceByDescriptor func(*metricdata.Descriptor, map[string]string) (map[string]string, monitoredresource.Interface)
|
||||
}
|
||||
|
||||
const defaultTimeout = 5 * time.Second
|
||||
|
||||
var defaultDomain = path.Join("custom.googleapis.com", "opencensus")
|
||||
|
||||
// Exporter is a stats and trace exporter that uploads data to Stackdriver.
|
||||
//
|
||||
// You can create a single Exporter and register it as both a trace exporter
|
||||
|
|
@ -292,10 +314,6 @@ func NewExporter(o Options) (*Exporter, error) {
|
|||
}
|
||||
if o.Location == "" {
|
||||
if metadataapi.OnGCE() {
|
||||
ctx := o.Context
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
zone, err := metadataapi.Zone()
|
||||
if err != nil {
|
||||
// This error should be logged with a warning level.
|
||||
|
|
@ -336,6 +354,9 @@ func NewExporter(o Options) (*Exporter, error) {
|
|||
|
||||
o.Resource = o.MapResource(res)
|
||||
}
|
||||
if o.MetricPrefix != "" && !strings.HasSuffix(o.MetricPrefix, "/") {
|
||||
o.MetricPrefix = o.MetricPrefix + "/"
|
||||
}
|
||||
|
||||
se, err := newStatsExporter(o)
|
||||
if err != nil {
|
||||
|
|
@ -353,13 +374,21 @@ func NewExporter(o Options) (*Exporter, error) {
|
|||
|
||||
// ExportView exports to the Stackdriver Monitoring if view data
|
||||
// has one or more rows.
|
||||
// Deprecated: use ExportMetrics and StartMetricsExporter instead.
|
||||
func (e *Exporter) ExportView(vd *view.Data) {
|
||||
e.statsExporter.ExportView(vd)
|
||||
}
|
||||
|
||||
// ExportMetricsProto exports OpenCensus Metrics Proto to Stackdriver Monitoring.
|
||||
// ExportMetricsProto exports OpenCensus Metrics Proto to Stackdriver Monitoring synchronously,
|
||||
// without de-duping or adding proto metrics to the bundler.
|
||||
func (e *Exporter) ExportMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) error {
|
||||
return e.statsExporter.ExportMetricsProto(ctx, node, rsc, metrics)
|
||||
_, err := e.statsExporter.PushMetricsProto(ctx, node, rsc, metrics)
|
||||
return err
|
||||
}
|
||||
|
||||
// PushMetricsProto is similar to ExportMetricsProto but returns the number of dropped time series.
|
||||
func (e *Exporter) PushMetricsProto(ctx context.Context, node *commonpb.Node, rsc *resourcepb.Resource, metrics []*metricspb.Metric) (int, error) {
|
||||
return e.statsExporter.PushMetricsProto(ctx, node, rsc, metrics)
|
||||
}
|
||||
|
||||
// ExportMetrics exports OpenCensus Metrics to Stackdriver Monitoring
|
||||
|
|
@ -427,12 +456,10 @@ func (o Options) handleError(err error) {
|
|||
log.Printf("Failed to export to Stackdriver: %v", err)
|
||||
}
|
||||
|
||||
func (o Options) newContextWithTimeout() (context.Context, func()) {
|
||||
ctx := o.Context
|
||||
func newContextWithTimeout(ctx context.Context, timeout time.Duration) (context.Context, func()) {
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
timeout := o.Timeout
|
||||
if timeout <= 0 {
|
||||
timeout = defaultTimeout
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,18 +20,19 @@ import (
|
|||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io"
|
||||
opencensus "go.opencensus.io"
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/tag"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
"cloud.google.com/go/monitoring/apiv3"
|
||||
monitoring "cloud.google.com/go/monitoring/apiv3"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
"go.opencensus.io/metric/metricdata"
|
||||
"go.opencensus.io/metric/metricexport"
|
||||
|
|
@ -40,6 +41,7 @@ import (
|
|||
distributionpb "google.golang.org/genproto/googleapis/api/distribution"
|
||||
labelpb "google.golang.org/genproto/googleapis/api/label"
|
||||
"google.golang.org/genproto/googleapis/api/metric"
|
||||
googlemetricpb "google.golang.org/genproto/googleapis/api/metric"
|
||||
metricpb "google.golang.org/genproto/googleapis/api/metric"
|
||||
monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
|
||||
monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
|
||||
|
|
@ -59,18 +61,14 @@ var userAgent = fmt.Sprintf("opencensus-go %s; stackdriver-exporter %s", opencen
|
|||
type statsExporter struct {
|
||||
o Options
|
||||
|
||||
viewDataBundler *bundler.Bundler
|
||||
protoMetricsBundler *bundler.Bundler
|
||||
metricsBundler *bundler.Bundler
|
||||
|
||||
createdViewsMu sync.Mutex
|
||||
createdViews map[string]*metricpb.MetricDescriptor // Views already created remotely
|
||||
viewDataBundler *bundler.Bundler
|
||||
metricsBundler *bundler.Bundler
|
||||
|
||||
protoMu sync.Mutex
|
||||
protoMetricDescriptors map[string]*metricpb.MetricDescriptor // Saves the metric descriptors that were already created remotely
|
||||
protoMetricDescriptors map[string]bool // Metric descriptors that were already created remotely
|
||||
|
||||
metricMu sync.Mutex
|
||||
metricDescriptors map[string]*metricpb.MetricDescriptor // Saves the metric descriptors that were already created remotely
|
||||
metricDescriptors map[string]bool // Metric descriptors that were already created remotely
|
||||
|
||||
c *monitoring.MetricClient
|
||||
defaultLabels map[string]labelValue
|
||||
|
|
@ -103,39 +101,39 @@ func newStatsExporter(o Options) (*statsExporter, error) {
|
|||
e := &statsExporter{
|
||||
c: client,
|
||||
o: o,
|
||||
createdViews: make(map[string]*metricpb.MetricDescriptor),
|
||||
protoMetricDescriptors: make(map[string]*metricpb.MetricDescriptor),
|
||||
metricDescriptors: make(map[string]*metricpb.MetricDescriptor),
|
||||
protoMetricDescriptors: make(map[string]bool),
|
||||
metricDescriptors: make(map[string]bool),
|
||||
}
|
||||
|
||||
var defaultLablesNotSanitized map[string]labelValue
|
||||
if o.DefaultMonitoringLabels != nil {
|
||||
e.defaultLabels = o.DefaultMonitoringLabels.m
|
||||
defaultLablesNotSanitized = o.DefaultMonitoringLabels.m
|
||||
} else {
|
||||
e.defaultLabels = map[string]labelValue{
|
||||
defaultLablesNotSanitized = map[string]labelValue{
|
||||
opencensusTaskKey: {val: getTaskValue(), desc: opencensusTaskDescription},
|
||||
}
|
||||
}
|
||||
|
||||
e.defaultLabels = make(map[string]labelValue)
|
||||
// Fill in the defaults first, irrespective of whether the labelKeys and labelValues are mismatched.
|
||||
for key, label := range defaultLablesNotSanitized {
|
||||
e.defaultLabels[sanitize(key)] = label
|
||||
}
|
||||
|
||||
e.viewDataBundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
|
||||
vds := bundle.([]*view.Data)
|
||||
e.handleUpload(vds...)
|
||||
})
|
||||
e.protoMetricsBundler = bundler.NewBundler((*metricProtoPayload)(nil), func(bundle interface{}) {
|
||||
payloads := bundle.([]*metricProtoPayload)
|
||||
e.handleMetricsProtoUpload(payloads)
|
||||
})
|
||||
e.metricsBundler = bundler.NewBundler((*metricdata.Metric)(nil), func(bundle interface{}) {
|
||||
metrics := bundle.([]*metricdata.Metric)
|
||||
e.handleMetricsUpload(metrics)
|
||||
})
|
||||
if delayThreshold := e.o.BundleDelayThreshold; delayThreshold > 0 {
|
||||
e.viewDataBundler.DelayThreshold = delayThreshold
|
||||
e.protoMetricsBundler.DelayThreshold = delayThreshold
|
||||
e.metricsBundler.DelayThreshold = delayThreshold
|
||||
}
|
||||
if countThreshold := e.o.BundleCountThreshold; countThreshold > 0 {
|
||||
e.viewDataBundler.BundleCountThreshold = countThreshold
|
||||
e.protoMetricsBundler.BundleCountThreshold = countThreshold
|
||||
e.metricsBundler.BundleCountThreshold = countThreshold
|
||||
}
|
||||
return e, nil
|
||||
|
|
@ -143,7 +141,7 @@ func newStatsExporter(o Options) (*statsExporter, error) {
|
|||
|
||||
func (e *statsExporter) startMetricsReader() error {
|
||||
e.initReaderOnce.Do(func() {
|
||||
e.ir, _ = metricexport.NewIntervalReader(&metricexport.Reader{}, e)
|
||||
e.ir, _ = metricexport.NewIntervalReader(metricexport.NewReader(), e)
|
||||
})
|
||||
e.ir.ReportingInterval = e.o.ReportingInterval
|
||||
return e.ir.Start()
|
||||
|
|
@ -156,10 +154,6 @@ func (e *statsExporter) stopMetricsReader() {
|
|||
}
|
||||
|
||||
func (e *statsExporter) getMonitoredResource(v *view.View, tags []tag.Tag) ([]tag.Tag, *monitoredrespb.MonitoredResource) {
|
||||
if get := e.o.GetMonitoredResource; get != nil {
|
||||
newTags, mr := get(v, tags)
|
||||
return newTags, convertMonitoredResourceToPB(mr)
|
||||
}
|
||||
resource := e.o.Resource
|
||||
if resource == nil {
|
||||
resource = &monitoredrespb.MonitoredResource{
|
||||
|
|
@ -210,12 +204,11 @@ func (e *statsExporter) handleUpload(vds ...*view.Data) {
|
|||
// want to lose data that hasn't yet been exported.
|
||||
func (e *statsExporter) Flush() {
|
||||
e.viewDataBundler.Flush()
|
||||
e.protoMetricsBundler.Flush()
|
||||
e.metricsBundler.Flush()
|
||||
}
|
||||
|
||||
func (e *statsExporter) uploadStats(vds []*view.Data) error {
|
||||
ctx, cancel := e.o.newContextWithTimeout()
|
||||
ctx, cancel := newContextWithTimeout(e.o.Context, e.o.Timeout)
|
||||
defer cancel()
|
||||
ctx, span := trace.StartSpan(
|
||||
ctx,
|
||||
|
|
@ -225,7 +218,7 @@ func (e *statsExporter) uploadStats(vds []*view.Data) error {
|
|||
defer span.End()
|
||||
|
||||
for _, vd := range vds {
|
||||
if err := e.createMeasure(ctx, vd.View); err != nil {
|
||||
if err := e.createMetricDescriptorFromView(ctx, vd.View); err != nil {
|
||||
span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
|
||||
return err
|
||||
}
|
||||
|
|
@ -334,34 +327,27 @@ func (e *statsExporter) viewToMetricDescriptor(ctx context.Context, v *view.View
|
|||
return res, nil
|
||||
}
|
||||
|
||||
func (e *statsExporter) viewToCreateMetricDescriptorRequest(ctx context.Context, v *view.View) (*monitoringpb.CreateMetricDescriptorRequest, error) {
|
||||
inMD, err := e.viewToMetricDescriptor(ctx, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
|
||||
Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
|
||||
MetricDescriptor: inMD,
|
||||
}
|
||||
return cmrdesc, nil
|
||||
}
|
||||
|
||||
// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring.
|
||||
// createMetricDescriptorFromView creates a MetricDescriptor for the given view data in Stackdriver Monitoring.
|
||||
// An error will be returned if there is already a metric descriptor created with the same name
|
||||
// but it has a different aggregation or keys.
|
||||
func (e *statsExporter) createMeasure(ctx context.Context, v *view.View) error {
|
||||
e.createdViewsMu.Lock()
|
||||
defer e.createdViewsMu.Unlock()
|
||||
func (e *statsExporter) createMetricDescriptorFromView(ctx context.Context, v *view.View) error {
|
||||
// Skip create metric descriptor if configured
|
||||
if e.o.SkipCMD {
|
||||
return nil
|
||||
}
|
||||
|
||||
e.metricMu.Lock()
|
||||
defer e.metricMu.Unlock()
|
||||
|
||||
viewName := v.Name
|
||||
|
||||
if md, ok := e.createdViews[viewName]; ok {
|
||||
// [TODO:rghetia] Temporary fix for https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/76#issuecomment-459459091
|
||||
if builtinMetric(md.Type) {
|
||||
return nil
|
||||
}
|
||||
return e.equalMeasureAggTagKeys(md, v.Measure, v.Aggregation, v.TagKeys)
|
||||
if _, created := e.metricDescriptors[viewName]; created {
|
||||
return nil
|
||||
}
|
||||
|
||||
if builtinMetric(e.metricType(v)) {
|
||||
e.metricDescriptors[viewName] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
inMD, err := e.viewToMetricDescriptor(ctx, v)
|
||||
|
|
@ -369,34 +355,92 @@ func (e *statsExporter) createMeasure(ctx context.Context, v *view.View) error {
|
|||
return err
|
||||
}
|
||||
|
||||
var dmd *metric.MetricDescriptor
|
||||
if builtinMetric(inMD.Type) {
|
||||
gmrdesc := &monitoringpb.GetMetricDescriptorRequest{
|
||||
Name: inMD.Name,
|
||||
}
|
||||
dmd, err = getMetricDescriptor(ctx, e.c, gmrdesc)
|
||||
} else {
|
||||
cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
|
||||
Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
|
||||
MetricDescriptor: inMD,
|
||||
}
|
||||
dmd, err = createMetricDescriptor(ctx, e.c, cmrdesc)
|
||||
}
|
||||
if err != nil {
|
||||
if err = e.createMetricDescriptor(ctx, inMD); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Now cache the metric descriptor
|
||||
e.createdViews[viewName] = dmd
|
||||
return err
|
||||
e.metricDescriptors[viewName] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *statsExporter) displayName(suffix string) string {
|
||||
displayNamePrefix := defaultDisplayNamePrefix
|
||||
if e.o.MetricPrefix != "" {
|
||||
displayNamePrefix = e.o.MetricPrefix
|
||||
return path.Join(defaultDisplayNamePrefix, suffix)
|
||||
}
|
||||
|
||||
func (e *statsExporter) combineTimeSeriesToCreateTimeSeriesRequest(ts []*monitoringpb.TimeSeries) (ctsreql []*monitoringpb.CreateTimeSeriesRequest) {
|
||||
if len(ts) == 0 {
|
||||
return nil
|
||||
}
|
||||
return path.Join(displayNamePrefix, suffix)
|
||||
|
||||
// Since there are scenarios in which Metrics with the same Type
|
||||
// can be bunched in the same TimeSeries, we have to ensure that
|
||||
// we create a unique CreateTimeSeriesRequest with entirely unique Metrics
|
||||
// per TimeSeries, lest we'll encounter:
|
||||
//
|
||||
// err: rpc error: code = InvalidArgument desc = One or more TimeSeries could not be written:
|
||||
// Field timeSeries[2] had an invalid value: Duplicate TimeSeries encountered.
|
||||
// Only one point can be written per TimeSeries per request.: timeSeries[2]
|
||||
//
|
||||
// This scenario happens when we are using the OpenCensus Agent in which multiple metrics
|
||||
// are streamed by various client applications.
|
||||
// See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/73
|
||||
uniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts))
|
||||
nonUniqueTimeSeries := make([]*monitoringpb.TimeSeries, 0, len(ts))
|
||||
seenMetrics := make(map[string]struct{})
|
||||
|
||||
for _, tti := range ts {
|
||||
key := metricSignature(tti.Metric)
|
||||
if _, alreadySeen := seenMetrics[key]; !alreadySeen {
|
||||
uniqueTimeSeries = append(uniqueTimeSeries, tti)
|
||||
seenMetrics[key] = struct{}{}
|
||||
} else {
|
||||
nonUniqueTimeSeries = append(nonUniqueTimeSeries, tti)
|
||||
}
|
||||
}
|
||||
|
||||
// UniqueTimeSeries can be bunched up together
|
||||
// While for each nonUniqueTimeSeries, we have
|
||||
// to make a unique CreateTimeSeriesRequest.
|
||||
ctsreql = append(ctsreql, &monitoringpb.CreateTimeSeriesRequest{
|
||||
Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
|
||||
TimeSeries: uniqueTimeSeries,
|
||||
})
|
||||
|
||||
// Now recursively also combine the non-unique TimeSeries
|
||||
// that were singly added to nonUniqueTimeSeries.
|
||||
// The reason is that we want optimal combinations; for example:
|
||||
// * "a/b/c"
|
||||
// * "a/b/c"
|
||||
// * "x/y/z"
|
||||
// * "a/b/c"
|
||||
// * "x/y/z"
|
||||
// * "p/y/z"
|
||||
// * "d/y/z"
|
||||
//
|
||||
// should produce:
|
||||
// CreateTimeSeries(uniqueTimeSeries) :: ["a/b/c", "x/y/z", "p/y/z", "d/y/z"]
|
||||
// CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c"]
|
||||
// CreateTimeSeries(nonUniqueTimeSeries) :: ["a/b/c", "x/y/z"]
|
||||
nonUniqueRequests := e.combineTimeSeriesToCreateTimeSeriesRequest(nonUniqueTimeSeries)
|
||||
ctsreql = append(ctsreql, nonUniqueRequests...)
|
||||
|
||||
return ctsreql
|
||||
}
|
||||
|
||||
// metricSignature creates a unique signature consisting of a
|
||||
// metric's type and its lexicographically sorted label values
|
||||
// See https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/issues/120
|
||||
func metricSignature(metric *googlemetricpb.Metric) string {
|
||||
labels := metric.GetLabels()
|
||||
labelValues := make([]string, 0, len(labels))
|
||||
|
||||
for _, labelValue := range labels {
|
||||
labelValues = append(labelValues, labelValue)
|
||||
}
|
||||
sort.Strings(labelValues)
|
||||
return fmt.Sprintf("%s:%s", metric.GetType(), strings.Join(labelValues, ","))
|
||||
}
|
||||
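To make the de-duplication above concrete: any time series whose metric signature (type plus sorted label values) has already been seen in the current batch is deferred to a follow-up CreateTimeSeries request. A self-contained sketch of that idea, using plain strings instead of the protobuf Metric type:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// signature mimics metricSignature above: metric type plus sorted label values.
func signature(metricType string, labels map[string]string) string {
	vals := make([]string, 0, len(labels))
	for _, v := range labels {
		vals = append(vals, v)
	}
	sort.Strings(vals)
	return metricType + ":" + strings.Join(vals, ",")
}

func main() {
	sigs := []string{
		signature("a/b/c", map[string]string{"zone": "us-east1"}),
		signature("a/b/c", map[string]string{"zone": "us-east1"}), // duplicate
		signature("x/y/z", map[string]string{"zone": "us-east1"}),
	}

	seen := map[string]bool{}
	var unique, deferred []string
	for _, s := range sigs {
		if seen[s] {
			deferred = append(deferred, s) // would go into a separate CreateTimeSeries request
			continue
		}
		seen[s] = true
		unique = append(unique, s)
	}
	fmt.Println(len(unique), len(deferred)) // 2 1
}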
|
||||
func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point {
|
||||
|
|
@ -548,61 +592,21 @@ func newLabelDescriptors(defaults map[string]labelValue, keys []tag.Key) []*labe
|
|||
return labelDescriptors
|
||||
}
|
||||
|
||||
func (e *statsExporter) equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error {
|
||||
var aggTypeMatch bool
|
||||
switch md.ValueType {
|
||||
case metricpb.MetricDescriptor_INT64:
|
||||
if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) {
|
||||
return fmt.Errorf("stackdriver metric descriptor was not created as int64")
|
||||
}
|
||||
aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
|
||||
case metricpb.MetricDescriptor_DOUBLE:
|
||||
if _, ok := m.(*stats.Float64Measure); !ok {
|
||||
return fmt.Errorf("stackdriver metric descriptor was not created as double")
|
||||
}
|
||||
aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue
|
||||
case metricpb.MetricDescriptor_DISTRIBUTION:
|
||||
aggTypeMatch = agg.Type == view.AggTypeDistribution
|
||||
func (e *statsExporter) createMetricDescriptor(ctx context.Context, md *metric.MetricDescriptor) error {
|
||||
ctx, cancel := newContextWithTimeout(ctx, e.o.Timeout)
|
||||
defer cancel()
|
||||
cmrdesc := &monitoringpb.CreateMetricDescriptorRequest{
|
||||
Name: fmt.Sprintf("projects/%s", e.o.ProjectID),
|
||||
MetricDescriptor: md,
|
||||
}
|
||||
|
||||
if !aggTypeMatch {
|
||||
return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %T", agg.Type)
|
||||
}
|
||||
|
||||
labels := make(map[string]struct{}, len(keys)+len(e.defaultLabels))
|
||||
for _, k := range keys {
|
||||
labels[sanitize(k.Name())] = struct{}{}
|
||||
}
|
||||
for k := range e.defaultLabels {
|
||||
labels[sanitize(k)] = struct{}{}
|
||||
}
|
||||
|
||||
for _, k := range md.Labels {
|
||||
if _, ok := labels[k.Key]; !ok {
|
||||
return fmt.Errorf("stackdriver metric descriptor %q was not created with label %q", md.Type, k)
|
||||
}
|
||||
delete(labels, k.Key)
|
||||
}
|
||||
|
||||
if len(labels) > 0 {
|
||||
extra := make([]string, 0, len(labels))
|
||||
for k := range labels {
|
||||
extra = append(extra, k)
|
||||
}
|
||||
return fmt.Errorf("stackdriver metric descriptor %q contains unexpected labels: %s", md.Type, strings.Join(extra, ", "))
|
||||
}
|
||||
|
||||
return nil
|
||||
_, err := createMetricDescriptor(ctx, e.c, cmrdesc)
|
||||
return err
|
||||
}
|
||||
|
||||
var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
|
||||
return c.CreateMetricDescriptor(ctx, mdr)
|
||||
}
|
||||
|
||||
var getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
|
||||
return c.GetMetricDescriptor(ctx, mdr)
|
||||
}
|
||||
|
||||
var createTimeSeries = func(ctx context.Context, c *monitoring.MetricClient, ts *monitoringpb.CreateTimeSeriesRequest) error {
|
||||
return c.CreateTimeSeries(ctx, ts)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -121,7 +121,7 @@ func (e *traceExporter) uploadSpans(spans []*tracepb.Span) {
|
|||
Spans: spans,
|
||||
}
|
||||
// Create a never-sampled span to prevent traces associated with exporter.
|
||||
ctx, cancel := e.o.newContextWithTimeout()
|
||||
ctx, cancel := newContextWithTimeout(e.o.Context, e.o.Timeout)
|
||||
defer cancel()
|
||||
ctx, span := trace.StartSpan(
|
||||
ctx,
|
||||
|
|
|
|||
|
|
@ -14,12 +14,12 @@ import (
|
|||
// struct and override the specific methods. For example, to override only
|
||||
// the MaxRetries method:
|
||||
//
|
||||
// type retryer struct {
|
||||
// client.DefaultRetryer
|
||||
// }
|
||||
// type retryer struct {
|
||||
// client.DefaultRetryer
|
||||
// }
|
||||
//
|
||||
// // This implementation always has 100 max retries
|
||||
// func (d retryer) MaxRetries() int { return 100 }
|
||||
// // This implementation always has 100 max retries
|
||||
// func (d retryer) MaxRetries() int { return 100 }
|
||||
type DefaultRetryer struct {
|
||||
NumMaxRetries int
|
||||
}
|
||||
|
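The doc comment above describes embedding DefaultRetryer and overriding a single method. A hedged sketch of that override pattern, using only the NumMaxRetries field and MaxRetries method shown in this file:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/client"
)

type retryer struct {
	client.DefaultRetryer
}

// MaxRetries always allows 100 attempts, overriding the embedded default.
func (d retryer) MaxRetries() int { return 100 }

func main() {
	r := retryer{DefaultRetryer: client.DefaultRetryer{NumMaxRetries: 3}}
	fmt.Println(r.MaxRetries()) // 100
}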
|
@ -33,25 +33,28 @@ func (d DefaultRetryer) MaxRetries() int {
|
|||
// RetryRules returns the delay duration before retrying this request again
|
||||
func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
|
||||
// Set the upper limit of delay in retrying at ~five minutes
|
||||
minTime := 30
|
||||
throttle := d.shouldThrottle(r)
|
||||
if throttle {
|
||||
if delay, ok := getRetryDelay(r); ok {
|
||||
return delay
|
||||
var minTime int64 = 30
|
||||
var initialDelay time.Duration
|
||||
|
||||
isThrottle := r.IsErrorThrottle()
|
||||
if isThrottle {
|
||||
if delay, ok := getRetryAfterDelay(r); ok {
|
||||
initialDelay = delay
|
||||
}
|
||||
|
||||
minTime = 500
|
||||
}
|
||||
|
||||
retryCount := r.RetryCount
|
||||
if throttle && retryCount > 8 {
|
||||
if isThrottle && retryCount > 8 {
|
||||
retryCount = 8
|
||||
} else if retryCount > 13 {
|
||||
retryCount = 13
|
||||
} else if retryCount > 12 {
|
||||
retryCount = 12
|
||||
}
|
||||
|
||||
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
|
||||
return time.Duration(delay) * time.Millisecond
|
||||
delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Int63n(minTime) + minTime)
|
||||
return (time.Duration(delay) * time.Millisecond) + initialDelay
|
||||
|
||||
}
|
||||
|
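To make the new backoff arithmetic concrete: when the request is throttled the jitter floor rises to 500 ms and the exponent is capped at 8, otherwise the floor is 30 ms and the cap is 12, and any Retry-After hint is added on top. A standalone sketch of that formula (not the SDK's actual code path, which also consults the request state):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryDelay mirrors the arithmetic above: (floor + jitter) scaled by 2^retryCount.
func retryDelay(retryCount int, throttled bool, retryAfter time.Duration) time.Duration {
	var minTime int64 = 30
	if throttled {
		minTime = 500
		if retryCount > 8 {
			retryCount = 8
		}
	} else if retryCount > 12 {
		retryCount = 12
	}
	delay := (1 << uint(retryCount)) * (rand.Int63n(minTime) + minTime)
	return time.Duration(delay)*time.Millisecond + retryAfter
}

func main() {
	fmt.Println(retryDelay(3, false, 0))            // somewhere in [240ms, 480ms)
	fmt.Println(retryDelay(3, true, 2*time.Second)) // throttled: 2s plus [4s, 8s)
}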
||||
// ShouldRetry returns true if the request should be retried.
|
||||
|
|
@ -65,26 +68,13 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
|
|||
if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
|
||||
return true
|
||||
}
|
||||
return r.IsErrorRetryable() || d.shouldThrottle(r)
|
||||
}
|
||||
|
||||
// ShouldThrottle returns true if the request should be throttled.
|
||||
func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
|
||||
switch r.HTTPResponse.StatusCode {
|
||||
case 429:
|
||||
case 502:
|
||||
case 503:
|
||||
case 504:
|
||||
default:
|
||||
return r.IsErrorThrottle()
|
||||
}
|
||||
|
||||
return true
|
||||
return r.IsErrorRetryable() || r.IsErrorThrottle()
|
||||
}
|
||||
|
||||
// This will look in the Retry-After header, RFC 7231, for how long
|
||||
// it will wait before attempting another request
|
||||
func getRetryDelay(r *request.Request) (time.Duration, bool) {
|
||||
func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
|
||||
if !canUseRetryAfterHeader(r) {
|
||||
return 0, false
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ type RequestRetryer interface{}
|
|||
// A Config provides service configuration for service clients. By default,
|
||||
// all clients will use the defaults.DefaultConfig structure.
|
||||
//
|
||||
// // Create Session with MaxRetry configuration to be shared by multiple
|
||||
// // Create Session with MaxRetries configuration to be shared by multiple
|
||||
// // service clients.
|
||||
// sess := session.Must(session.NewSession(&aws.Config{
|
||||
// MaxRetries: aws.Int(3),
|
||||
|
|
@ -251,7 +251,7 @@ type Config struct {
|
|||
// NewConfig returns a new Config pointer that can be chained with builder
|
||||
// methods to set multiple configuration values inline without using pointers.
|
||||
//
|
||||
// // Create Session with MaxRetry configuration to be shared by multiple
|
||||
// // Create Session with MaxRetries configuration to be shared by multiple
|
||||
// // service clients.
|
||||
// sess := session.Must(session.NewSession(aws.NewConfig().
|
||||
// WithMaxRetries(3),
|
||||
|
|
|
|||
|
|
@ -159,9 +159,9 @@ func handleSendError(r *request.Request, err error) {
|
|||
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
|
||||
}
|
||||
}
|
||||
// Catch all other request errors.
|
||||
// Catch all request errors, and let the default retrier determine
|
||||
// if the error is retryable.
|
||||
r.Error = awserr.New("RequestError", "send request failed", err)
|
||||
r.Retryable = aws.Bool(true) // network errors are retryable
|
||||
|
||||
// Override the error with a context canceled error, if that was canceled.
|
||||
ctx := r.Context()
|
||||
|
|
@ -184,37 +184,39 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH
|
|||
|
||||
// AfterRetryHandler performs final checks to determine if the request should
|
||||
// be retried and how long to delay.
|
||||
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
|
||||
// If one of the other handlers already set the retry state
|
||||
// we don't want to override it based on the service's state
|
||||
if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
|
||||
r.Retryable = aws.Bool(r.ShouldRetry(r))
|
||||
}
|
||||
|
||||
if r.WillRetry() {
|
||||
r.RetryDelay = r.RetryRules(r)
|
||||
|
||||
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
|
||||
// Support SleepDelay for backwards compatibility and testing
|
||||
sleepFn(r.RetryDelay)
|
||||
} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
|
||||
r.Error = awserr.New(request.CanceledErrorCode,
|
||||
"request context canceled", err)
|
||||
r.Retryable = aws.Bool(false)
|
||||
return
|
||||
var AfterRetryHandler = request.NamedHandler{
|
||||
Name: "core.AfterRetryHandler",
|
||||
Fn: func(r *request.Request) {
|
||||
// If one of the other handlers already set the retry state
|
||||
// we don't want to override it based on the service's state
|
||||
if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
|
||||
r.Retryable = aws.Bool(r.ShouldRetry(r))
|
||||
}
|
||||
|
||||
// when the expired token exception occurs the credentials
|
||||
// need to be expired locally so that the next request to
|
||||
// get credentials will trigger a credentials refresh.
|
||||
if r.IsErrorExpired() {
|
||||
r.Config.Credentials.Expire()
|
||||
}
|
||||
if r.WillRetry() {
|
||||
r.RetryDelay = r.RetryRules(r)
|
||||
|
||||
r.RetryCount++
|
||||
r.Error = nil
|
||||
}
|
||||
}}
|
||||
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
|
||||
// Support SleepDelay for backwards compatibility and testing
|
||||
sleepFn(r.RetryDelay)
|
||||
} else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
|
||||
r.Error = awserr.New(request.CanceledErrorCode,
|
||||
"request context canceled", err)
|
||||
r.Retryable = aws.Bool(false)
|
||||
return
|
||||
}
|
||||
|
||||
// when the expired token exception occurs the credentials
|
||||
// need to be expired locally so that the next request to
|
||||
// get credentials will trigger a credentials refresh.
|
||||
if r.IsErrorExpired() {
|
||||
r.Config.Credentials.Expire()
|
||||
}
|
||||
|
||||
r.RetryCount++
|
||||
r.Error = nil
|
||||
}
|
||||
}}
|
||||
|
||||
// ValidateEndpointHandler is a request handler to validate a request had the
|
||||
// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
|
||||
|
|
|
|||
|
|
@ -98,8 +98,8 @@ func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint strin
|
|||
return p
|
||||
}
|
||||
|
||||
// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
|
||||
// from an arbitrary endpoint concurrently. The client will request the
|
||||
// NewCredentialsClient returns a pointer to a new Credentials object
|
||||
// wrapping the endpoint credentials Provider.
|
||||
func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
|
||||
return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
|
||||
}
|
||||
|
|
|
|||
7 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go generated vendored
|
|
@ -76,12 +76,15 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
|
|||
// uses unix time in nanoseconds to uniquely identify sessions.
|
||||
sessionName = strconv.FormatInt(now().UnixNano(), 10)
|
||||
}
|
||||
resp, err := p.client.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
|
||||
req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
|
||||
RoleArn: &p.roleARN,
|
||||
RoleSessionName: &sessionName,
|
||||
WebIdentityToken: aws.String(string(b)),
|
||||
})
|
||||
if err != nil {
|
||||
// InvalidIdentityToken error is a temporary error that can occur
|
||||
// when assuming a Role with a JWT web identity token.
|
||||
req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
|
||||
if err := req.Send(); err != nil {
|
||||
return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -16,25 +16,26 @@ var (

type metricChan struct {
	ch     chan metric
	paused int64
	paused *int64
}

func newMetricChan(size int) metricChan {
	return metricChan{
		ch: make(chan metric, size),
		ch:     make(chan metric, size),
		paused: new(int64),
	}
}

func (ch *metricChan) Pause() {
	atomic.StoreInt64(&ch.paused, pausedEnum)
	atomic.StoreInt64(ch.paused, pausedEnum)
}

func (ch *metricChan) Continue() {
	atomic.StoreInt64(&ch.paused, runningEnum)
	atomic.StoreInt64(ch.paused, runningEnum)
}

func (ch *metricChan) IsPaused() bool {
	v := atomic.LoadInt64(&ch.paused)
	v := atomic.LoadInt64(ch.paused)
	return v == pausedEnum
}
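The diff does not say why the paused flag became a pointer, but the observable effect is that value copies of metricChan now share a single atomic word instead of each copy getting its own counter. A tiny sketch of that difference (simplified, without the channel field):

package main

import (
	"fmt"
	"sync/atomic"
)

type metricChan struct {
	paused *int64 // shared flag: copies of metricChan point at the same word
}

func newMetricChan() metricChan { return metricChan{paused: new(int64)} }

func (ch *metricChan) Pause()         { atomic.StoreInt64(ch.paused, 1) }
func (ch *metricChan) IsPaused() bool { return atomic.LoadInt64(ch.paused) == 1 }

func main() {
	a := newMetricChan()
	b := a // a value copy still shares the same paused word
	a.Pause()
	fmt.Println(b.IsPaused()) // true; with a plain int64 field this would print false
}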
|
||||
|
|
|
|||
|
|
@ -152,18 +152,19 @@ type EC2IAMInfo struct {
|
|||
// An EC2InstanceIdentityDocument provides the shape for unmarshaling
|
||||
// an instance identity document
|
||||
type EC2InstanceIdentityDocument struct {
|
||||
DevpayProductCodes []string `json:"devpayProductCodes"`
|
||||
AvailabilityZone string `json:"availabilityZone"`
|
||||
PrivateIP string `json:"privateIp"`
|
||||
Version string `json:"version"`
|
||||
Region string `json:"region"`
|
||||
InstanceID string `json:"instanceId"`
|
||||
BillingProducts []string `json:"billingProducts"`
|
||||
InstanceType string `json:"instanceType"`
|
||||
AccountID string `json:"accountId"`
|
||||
PendingTime time.Time `json:"pendingTime"`
|
||||
ImageID string `json:"imageId"`
|
||||
KernelID string `json:"kernelId"`
|
||||
RamdiskID string `json:"ramdiskId"`
|
||||
Architecture string `json:"architecture"`
|
||||
DevpayProductCodes []string `json:"devpayProductCodes"`
|
||||
MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
|
||||
AvailabilityZone string `json:"availabilityZone"`
|
||||
PrivateIP string `json:"privateIp"`
|
||||
Version string `json:"version"`
|
||||
Region string `json:"region"`
|
||||
InstanceID string `json:"instanceId"`
|
||||
BillingProducts []string `json:"billingProducts"`
|
||||
InstanceType string `json:"instanceType"`
|
||||
AccountID string `json:"accountId"`
|
||||
PendingTime time.Time `json:"pendingTime"`
|
||||
ImageID string `json:"imageId"`
|
||||
KernelID string `json:"kernelId"`
|
||||
RamdiskID string `json:"ramdiskId"`
|
||||
Architecture string `json:"architecture"`
|
||||
}
|
||||
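The struct above is the new shape of the instance identity document, with MarketplaceProductCodes added alongside the existing fields. A hedged, self-contained sketch of decoding such a document; the struct here is a trimmed local copy for illustration (the real one in ec2metadata has more fields), and all values are made up:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// identityDoc is a trimmed stand-in for EC2InstanceIdentityDocument.
type identityDoc struct {
	MarketplaceProductCodes []string  `json:"marketplaceProductCodes"`
	Region                  string    `json:"region"`
	InstanceID              string    `json:"instanceId"`
	PendingTime             time.Time `json:"pendingTime"`
}

func main() {
	// Hypothetical document body matching the JSON field names above.
	raw := `{"marketplaceProductCodes":["abc123"],"region":"us-west-2",
	         "instanceId":"i-0123456789abcdef0","pendingTime":"2019-10-31T07:02:24Z"}`

	var doc identityDoc
	if err := json.Unmarshal([]byte(raw), &doc); err != nil {
		panic(err)
	}
	fmt.Println(doc.Region, doc.MarketplaceProductCodes)
}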
|
|
|
|||
|
|
@ -123,7 +123,7 @@ func unmarshalHandler(r *request.Request) {
|
|||
defer r.HTTPResponse.Body.Close()
|
||||
b := &bytes.Buffer{}
|
||||
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
|
||||
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata respose", err)
|
||||
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -136,7 +136,7 @@ func unmarshalError(r *request.Request) {
|
|||
defer r.HTTPResponse.Body.Close()
|
||||
b := &bytes.Buffer{}
|
||||
if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
|
||||
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error respose", err)
|
||||
r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -11,6 +11,8 @@ const (
|
|||
AwsPartitionID = "aws" // AWS Standard partition.
|
||||
AwsCnPartitionID = "aws-cn" // AWS China partition.
|
||||
AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
|
||||
AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition.
|
||||
AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition.
|
||||
)
|
||||
|
||||
// AWS Standard partition's regions.
|
||||
|
|
@ -47,8 +49,18 @@ const (
|
|||
UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
|
||||
)
|
||||
|
||||
// AWS ISO (US) partition's regions.
|
||||
const (
|
||||
UsIsoEast1RegionID = "us-iso-east-1" // US ISO East.
|
||||
)
|
||||
|
||||
// AWS ISOB (US) partition's regions.
|
||||
const (
|
||||
UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio).
|
||||
)
|
||||
|
||||
// DefaultResolver returns an Endpoint resolver that will be able
|
||||
// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
|
||||
// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
|
||||
//
|
||||
// Use DefaultPartitions() to get the list of the default partitions.
|
||||
func DefaultResolver() Resolver {
|
||||
|
|
@ -56,7 +68,7 @@ func DefaultResolver() Resolver {
|
|||
}
|
||||
|
||||
// DefaultPartitions returns a list of the partitions the SDK is bundled
|
||||
// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
|
||||
// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US).
|
||||
//
|
||||
// partitions := endpoints.DefaultPartitions
|
||||
// for _, p := range partitions {
|
||||
|
|
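Since this change bundles the new ISO partitions into the default resolver, an endpoint in those regions can now be resolved the same way as in the standard partition. A hedged usage sketch; "ec2" and the region are just examples of entries present in the tables below, and the printed URL follows the partition defaults shown there:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Resolve a service endpoint in the newly bundled ISO partition.
	resolved, err := endpoints.DefaultResolver().EndpointFor("ec2", "us-iso-east-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(resolved.URL) // e.g. https://ec2.us-iso-east-1.c2s.ic.gov
}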
@ -70,6 +82,8 @@ var defaultPartitions = partitions{
|
|||
awsPartition,
|
||||
awscnPartition,
|
||||
awsusgovPartition,
|
||||
awsisoPartition,
|
||||
awsisobPartition,
|
||||
}
|
||||
|
||||
// AwsPartition returns the Resolver for AWS Standard.
|
||||
|
|
@ -320,6 +334,7 @@ var awsPartition = partition{
|
|||
"ap-northeast-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
|
|
@ -339,6 +354,7 @@ var awsPartition = partition{
|
|||
"api.sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
|
|
@ -346,8 +362,11 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-1-fips": endpoint{
|
||||
Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
|
||||
|
|
@ -581,6 +600,7 @@ var awsPartition = partition{
|
|||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"me-south-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
|
|
@ -728,6 +748,7 @@ var awsPartition = partition{
|
|||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"me-south-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
|
|
@ -903,6 +924,7 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
|
|
@ -1093,10 +1115,11 @@ var awsPartition = partition{
|
|||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"me-south-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"dax": service{
|
||||
|
|
@ -1124,6 +1147,7 @@ var awsPartition = partition{
|
|||
"directconnect": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
|
|
@ -1553,11 +1577,12 @@ var awsPartition = partition{
|
|||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"me-south-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"events": service{
|
||||
|
|
@ -1586,6 +1611,7 @@ var awsPartition = partition{
|
|||
"firehose": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
|
|
@ -1629,6 +1655,7 @@ var awsPartition = partition{
|
|||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
|
|
@ -1696,6 +1723,7 @@ var awsPartition = partition{
|
|||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"me-south-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
|
|
@ -1749,9 +1777,33 @@ var awsPartition = partition{
|
|||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
"us-east-1-fips": endpoint{
|
||||
Hostname: "guardduty-fips.us-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-1",
|
||||
},
|
||||
},
|
||||
"us-east-2": endpoint{},
|
||||
"us-east-2-fips": endpoint{
|
||||
Hostname: "guardduty-fips.us-east-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-east-2",
|
||||
},
|
||||
},
|
||||
"us-west-1": endpoint{},
|
||||
"us-west-1-fips": endpoint{
|
||||
Hostname: "guardduty-fips.us-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-1",
|
||||
},
|
||||
},
|
||||
"us-west-2": endpoint{},
|
||||
"us-west-2-fips": endpoint{
|
||||
Hostname: "guardduty-fips.us-west-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-west-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"health": service{
|
||||
|
|
@ -1960,11 +2012,14 @@ var awsPartition = partition{
|
|||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
|
|
@ -2004,6 +2059,16 @@ var awsPartition = partition{
|
|||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"lakeformation": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"lambda": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
|
@ -2042,6 +2107,7 @@ var awsPartition = partition{
|
|||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"me-south-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
|
|
@ -2112,6 +2178,7 @@ var awsPartition = partition{
|
|||
"ap-southeast-1": endpoint{},
|
||||
"ap-southeast-2": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
|
|
@ -2465,6 +2532,16 @@ var awsPartition = partition{
|
|||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"qldb": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"ram": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
|
@ -2640,6 +2717,7 @@ var awsPartition = partition{
|
|||
"runtime.sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
|
|
@ -2647,8 +2725,11 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-1-fips": endpoint{
|
||||
Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
|
||||
|
|
@ -3097,6 +3178,16 @@ var awsPartition = partition{
|
|||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"session.qldb": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-northeast-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-2": endpoint{},
|
||||
},
|
||||
},
|
||||
"shield": service{
|
||||
IsRegionalized: boxedFalse,
|
||||
Defaults: endpoint{
|
||||
|
|
@ -3122,6 +3213,7 @@ var awsPartition = partition{
|
|||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"me-south-1": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
|
|
@ -3275,6 +3367,7 @@ var awsPartition = partition{
|
|||
"storagegateway": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ap-east-1": endpoint{},
|
||||
"ap-northeast-1": endpoint{},
|
||||
"ap-northeast-2": endpoint{},
|
||||
"ap-south-1": endpoint{},
|
||||
|
|
@ -3495,9 +3588,11 @@ var awsPartition = partition{
|
|||
"ap-southeast-2": endpoint{},
|
||||
"ca-central-1": endpoint{},
|
||||
"eu-central-1": endpoint{},
|
||||
"eu-north-1": endpoint{},
|
||||
"eu-west-1": endpoint{},
|
||||
"eu-west-2": endpoint{},
|
||||
"eu-west-3": endpoint{},
|
||||
"sa-east-1": endpoint{},
|
||||
"us-east-1": endpoint{},
|
||||
"us-east-2": endpoint{},
|
||||
"us-west-1": endpoint{},
|
||||
|
|
@ -3920,7 +4015,8 @@ var awscnPartition = partition{
|
|||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-north-1": endpoint{},
|
||||
"cn-northwest-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"kinesis": service{
|
||||
|
|
@ -4500,6 +4596,13 @@ var awsusgovPartition = partition{
|
|||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"health": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"iam": service{
|
||||
PartitionEndpoint: "aws-us-gov-global",
|
||||
IsRegionalized: boxedFalse,
|
||||
|
|
@ -4595,6 +4698,23 @@ var awsusgovPartition = partition{
|
|||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"neptune": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-east-1": endpoint{
|
||||
Hostname: "rds.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
},
|
||||
"us-gov-west-1": endpoint{
|
||||
Hostname: "rds.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"organizations": service{
|
||||
PartitionEndpoint: "aws-us-gov-global",
|
||||
IsRegionalized: boxedFalse,
|
||||
|
|
@ -4617,6 +4737,7 @@ var awsusgovPartition = partition{
|
|||
"ram": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-gov-east-1": endpoint{},
|
||||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
|
|
@ -4640,6 +4761,19 @@ var awsusgovPartition = partition{
|
|||
"us-gov-west-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"route53": service{
|
||||
PartitionEndpoint: "aws-us-gov-global",
|
||||
IsRegionalized: boxedFalse,
|
||||
|
||||
Endpoints: endpoints{
|
||||
"aws-us-gov-global": endpoint{
|
||||
Hostname: "route53.us-gov.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"runtime.sagemaker": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
|
|
@ -4720,6 +4854,9 @@ var awsusgovPartition = partition{
|
|||
Protocols: []string{"https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-gov-east-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
"us-gov-west-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
|
|
@ -4850,3 +4987,599 @@ var awsusgovPartition = partition{
|
|||
},
|
||||
},
|
||||
}
|
||||
|
||||
// AwsIsoPartition returns the Resolver for AWS ISO (US).
|
||||
func AwsIsoPartition() Partition {
|
||||
return awsisoPartition.Partition()
|
||||
}
|
||||
|
||||
var awsisoPartition = partition{
|
||||
ID: "aws-iso",
|
||||
Name: "AWS ISO (US)",
|
||||
DNSSuffix: "c2s.ic.gov",
|
||||
RegionRegex: regionRegex{
|
||||
Regexp: func() *regexp.Regexp {
|
||||
reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$")
|
||||
return reg
|
||||
}(),
|
||||
},
|
||||
Defaults: endpoint{
|
||||
Hostname: "{service}.{region}.{dnsSuffix}",
|
||||
Protocols: []string{"https"},
|
||||
SignatureVersions: []string{"v4"},
|
||||
},
|
||||
Regions: regions{
|
||||
"us-iso-east-1": region{
|
||||
Description: "US ISO East",
|
||||
},
|
||||
},
|
||||
Services: services{
|
||||
"api.ecr": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{
|
||||
Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-iso-east-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"application-autoscaling": service{
|
||||
Defaults: endpoint{
|
||||
Hostname: "autoscaling.{region}.amazonaws.com",
|
||||
Protocols: []string{"http", "https"},
|
||||
CredentialScope: credentialScope{
|
||||
Service: "application-autoscaling",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"autoscaling": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"cloudformation": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"cloudtrail": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"codedeploy": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"config": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"datapipeline": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"directconnect": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"dms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"ds": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"dynamodb": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"ec2": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"ec2metadata": service{
|
||||
PartitionEndpoint: "aws-global",
|
||||
IsRegionalized: boxedFalse,
|
||||
|
||||
Endpoints: endpoints{
|
||||
"aws-global": endpoint{
|
||||
Hostname: "169.254.169.254/latest",
|
||||
Protocols: []string{"http"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"ecs": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"elasticache": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"elasticloadbalancing": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"elasticmapreduce": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"events": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"glacier": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"health": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"iam": service{
|
||||
PartitionEndpoint: "aws-iso-global",
|
||||
IsRegionalized: boxedFalse,
|
||||
|
||||
Endpoints: endpoints{
|
||||
"aws-iso-global": endpoint{
|
||||
Hostname: "iam.us-iso-east-1.c2s.ic.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-iso-east-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"kinesis": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"kms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ProdFips": endpoint{
|
||||
Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-iso-east-1",
|
||||
},
|
||||
},
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"lambda": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"logs": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"monitoring": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"rds": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"redshift": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"s3": service{
|
||||
Defaults: endpoint{
|
||||
SignatureVersions: []string{"s3v4"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
SignatureVersions: []string{"s3v4"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"snowball": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"sns": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"sqs": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"states": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"streams.dynamodb": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
CredentialScope: credentialScope{
|
||||
Service: "dynamodb",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"sts": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"support": service{
|
||||
PartitionEndpoint: "aws-iso-global",
|
||||
|
||||
Endpoints: endpoints{
|
||||
"aws-iso-global": endpoint{
|
||||
Hostname: "support.us-iso-east-1.c2s.ic.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-iso-east-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"swf": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"workspaces": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-iso-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// AwsIsoBPartition returns the Resolver for AWS ISOB (US).
|
||||
func AwsIsoBPartition() Partition {
|
||||
return awsisobPartition.Partition()
|
||||
}
|
||||
|
||||
var awsisobPartition = partition{
|
||||
ID: "aws-iso-b",
|
||||
Name: "AWS ISOB (US)",
|
||||
DNSSuffix: "sc2s.sgov.gov",
|
||||
RegionRegex: regionRegex{
|
||||
Regexp: func() *regexp.Regexp {
|
||||
reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$")
|
||||
return reg
|
||||
}(),
|
||||
},
|
||||
Defaults: endpoint{
|
||||
Hostname: "{service}.{region}.{dnsSuffix}",
|
||||
Protocols: []string{"https"},
|
||||
SignatureVersions: []string{"v4"},
|
||||
},
|
||||
Regions: regions{
|
||||
"us-isob-east-1": region{
|
||||
Description: "US ISOB East (Ohio)",
|
||||
},
|
||||
},
|
||||
Services: services{
|
||||
"application-autoscaling": service{
|
||||
Defaults: endpoint{
|
||||
Hostname: "autoscaling.{region}.amazonaws.com",
|
||||
Protocols: []string{"http", "https"},
|
||||
CredentialScope: credentialScope{
|
||||
Service: "application-autoscaling",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"autoscaling": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"cloudformation": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"cloudtrail": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"config": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"directconnect": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"dms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"dynamodb": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"ec2": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"ec2metadata": service{
|
||||
PartitionEndpoint: "aws-global",
|
||||
IsRegionalized: boxedFalse,
|
||||
|
||||
Endpoints: endpoints{
|
||||
"aws-global": endpoint{
|
||||
Hostname: "169.254.169.254/latest",
|
||||
Protocols: []string{"http"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"elasticache": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"elasticloadbalancing": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
},
|
||||
},
|
||||
"elasticmapreduce": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"events": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"glacier": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"health": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"iam": service{
|
||||
PartitionEndpoint: "aws-iso-b-global",
|
||||
IsRegionalized: boxedFalse,
|
||||
|
||||
Endpoints: endpoints{
|
||||
"aws-iso-b-global": endpoint{
|
||||
Hostname: "iam.us-isob-east-1.sc2s.sgov.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-isob-east-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"kinesis": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"kms": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"ProdFips": endpoint{
|
||||
Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-isob-east-1",
|
||||
},
|
||||
},
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"logs": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"monitoring": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"rds": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"redshift": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"s3": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
SignatureVersions: []string{"s3v4"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"snowball": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"sns": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"sqs": service{
|
||||
Defaults: endpoint{
|
||||
SSLCommonName: "{region}.queue.{dnsSuffix}",
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"states": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"streams.dynamodb": service{
|
||||
Defaults: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
CredentialScope: credentialScope{
|
||||
Service: "dynamodb",
|
||||
},
|
||||
},
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"sts": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
"support": service{
|
||||
PartitionEndpoint: "aws-iso-b-global",
|
||||
|
||||
Endpoints: endpoints{
|
||||
"aws-iso-b-global": endpoint{
|
||||
Hostname: "support.us-isob-east-1.sc2s.sgov.gov",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-isob-east-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"swf": service{
|
||||
|
||||
Endpoints: endpoints{
|
||||
"us-isob-east-1": endpoint{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
@ -4,7 +4,6 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
|
|
@ -65,6 +64,15 @@ type Request struct {
|
|||
LastSignedAt time.Time
|
||||
DisableFollowRedirects bool
|
||||
|
||||
// Additional API error codes that should be retried. IsErrorRetryable
|
||||
// will consider these codes in addition to its built in cases.
|
||||
RetryErrorCodes []string
|
||||
|
||||
// Additional API error codes that should be retried with throttle backoff
|
||||
// delay. IsErrorThrottle will consider these codes in addition to its
|
||||
// built in cases.
|
||||
ThrottleErrorCodes []string
|
||||
|
||||
// A value greater than 0 instructs the request to be signed as Presigned URL
|
||||
// You should not set this field directly. Instead use Request's
|
||||
// Presign or PresignRequest methods.
|
||||
|
|
@ -498,21 +506,17 @@ func (r *Request) Send() error {
|
|||
|
||||
if err := r.sendRequest(); err == nil {
|
||||
return nil
|
||||
} else if !shouldRetryError(r.Error) {
|
||||
}
|
||||
r.Handlers.Retry.Run(r)
|
||||
r.Handlers.AfterRetry.Run(r)
|
||||
|
||||
if r.Error != nil || !aws.BoolValue(r.Retryable) {
|
||||
return r.Error
|
||||
}
|
||||
|
||||
if err := r.prepareRetry(); err != nil {
|
||||
r.Error = err
|
||||
return err
|
||||
} else {
|
||||
r.Handlers.Retry.Run(r)
|
||||
r.Handlers.AfterRetry.Run(r)
|
||||
|
||||
if r.Error != nil || !aws.BoolValue(r.Retryable) {
|
||||
return r.Error
|
||||
}
|
||||
|
||||
if err := r.prepareRetry(); err != nil {
|
||||
r.Error = err
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -596,51 +600,6 @@ func AddToUserAgent(r *Request, s string) {
|
|||
r.HTTPRequest.Header.Set("User-Agent", s)
|
||||
}
|
||||
|
||||
type temporary interface {
|
||||
Temporary() bool
|
||||
}
|
||||
|
||||
func shouldRetryError(origErr error) bool {
|
||||
switch err := origErr.(type) {
|
||||
case awserr.Error:
|
||||
if err.Code() == CanceledErrorCode {
|
||||
return false
|
||||
}
|
||||
return shouldRetryError(err.OrigErr())
|
||||
case *url.Error:
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
// Refused connections should be retried as the service may not yet
|
||||
// be running on the port. Go TCP dial considers refused
|
||||
// connections as not temporary.
|
||||
return true
|
||||
}
|
||||
// *url.Error only implements Temporary after golang 1.6 but since
|
||||
// url.Error only wraps the error:
|
||||
return shouldRetryError(err.Err)
|
||||
case temporary:
|
||||
if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
|
||||
return true
|
||||
}
|
||||
// If the error is temporary, we want to allow continuation of the
|
||||
// retry process
|
||||
return err.Temporary() || isErrConnectionReset(origErr)
|
||||
case nil:
|
||||
// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
|
||||
// because we don't know the cause, it is marked as retryable. See
|
||||
// TestRequest4xxUnretryable for an example.
|
||||
return true
|
||||
default:
|
||||
switch err.Error() {
|
||||
case "net/http: request canceled",
|
||||
"net/http: request canceled while waiting for connection":
|
||||
// known 1.5 error case when an http request is cancelled
|
||||
return false
|
||||
}
|
||||
// here we don't know the error; so we allow a retry.
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// SanitizeHostForHeader removes default port from host and updates request.Host
|
||||
func SanitizeHostForHeader(r *http.Request) {
|
||||
host := getHost(r)
|
||||
|
|
@ -1,23 +1,41 @@
|
|||
package request
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
)
|
||||
|
||||
// Retryer is an interface to control retry logic for a given service.
|
||||
// The default implementation used by most services is the client.DefaultRetryer
|
||||
// structure, which contains basic retry logic using exponential backoff.
|
||||
// Retryer provides the interface to drive the SDK's request retry behavior. The
// Retryer implementation is responsible for implementing exponential backoff,
// and determining if a request API error should be retried.
//
// client.DefaultRetryer is the SDK's default implementation of the Retryer. It
// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to
// determine if the request is retried.
|
||||
type Retryer interface {
|
||||
// RetryRules return the retry delay that should be used by the SDK before
|
||||
// making another request attempt for the failed request.
|
||||
RetryRules(*Request) time.Duration
|
||||
|
||||
// ShouldRetry returns if the failed request is retryable.
|
||||
//
|
||||
// Implementations may consider request attempt count when determining if a
|
||||
// request is retryable, but the SDK will use MaxRetries to limit the
|
||||
// number of attempts made for a request.
|
||||
ShouldRetry(*Request) bool
|
||||
|
||||
// MaxRetries is the number of times a request may be retried before
|
||||
// failing.
|
||||
MaxRetries() int
|
||||
}
|
||||
|
||||
// WithRetryer sets a config Retryer value to the given Config returning it
|
||||
// for chaining.
|
||||
// WithRetryer sets a Retryer value to the given Config returning the Config
|
||||
// value for chaining.
|
||||
func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
|
||||
cfg.Retryer = retryer
|
||||
return cfg
|
||||
|
|
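The new Retryer contract above can be satisfied by embedding client.DefaultRetryer and overriding only the hook you care about, then attaching it with WithRetryer. The sketch below is illustrative only: the CustomRetryer type, its one-second cap, and the retry count are assumptions, not part of the vendored SDK code.

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

// CustomRetryer is a hypothetical Retryer that caps the backoff delay while
// reusing the SDK's default retry decisions.
type CustomRetryer struct {
	client.DefaultRetryer
}

// RetryRules clamps the default exponential backoff to one second.
func (r CustomRetryer) RetryRules(req *request.Request) time.Duration {
	d := r.DefaultRetryer.RetryRules(req)
	if d > time.Second {
		d = time.Second
	}
	return d
}

func main() {
	cfg := aws.NewConfig()
	// WithRetryer attaches the custom retryer to the config used by clients.
	request.WithRetryer(cfg, CustomRetryer{client.DefaultRetryer{NumMaxRetries: 5}})

	sess, err := session.NewSession(cfg)
	if err != nil {
		fmt.Println("session error:", err)
		return
	}
	_ = sess // pass sess (or cfg) to service clients, e.g. sts.New(sess)
}
```

Because the embedded DefaultRetryer still supplies ShouldRetry and MaxRetries, this keeps the SDK's IsErrorRetryable/IsErrorThrottle based decisions and only clamps the delay.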
@ -108,32 +126,90 @@ func isNestedErrorRetryable(parentErr awserr.Error) bool {
|
|||
// IsErrorRetryable returns whether the error is retryable, based on its Code.
|
||||
// Returns false if error is nil.
|
||||
func IsErrorRetryable(err error) bool {
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok {
|
||||
return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
|
||||
}
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
return shouldRetryError(err)
|
||||
}
|
||||
|
||||
type temporary interface {
|
||||
Temporary() bool
|
||||
}
|
||||
|
||||
func shouldRetryError(origErr error) bool {
|
||||
switch err := origErr.(type) {
|
||||
case awserr.Error:
|
||||
if err.Code() == CanceledErrorCode {
|
||||
return false
|
||||
}
|
||||
if isNestedErrorRetryable(err) {
|
||||
return true
|
||||
}
|
||||
|
||||
origErr := err.OrigErr()
|
||||
var shouldRetry bool
|
||||
if origErr != nil {
|
||||
shouldRetry := shouldRetryError(origErr)
|
||||
if err.Code() == "RequestError" && !shouldRetry {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if isCodeRetryable(err.Code()) {
|
||||
return true
|
||||
}
|
||||
return shouldRetry
|
||||
|
||||
case *url.Error:
|
||||
if strings.Contains(err.Error(), "connection refused") {
|
||||
// Refused connections should be retried as the service may not yet
|
||||
// be running on the port. Go TCP dial considers refused
|
||||
// connections as not temporary.
|
||||
return true
|
||||
}
|
||||
// *url.Error only implements Temporary after golang 1.6 but since
|
||||
// url.Error only wraps the error:
|
||||
return shouldRetryError(err.Err)
|
||||
|
||||
case temporary:
|
||||
if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
|
||||
return true
|
||||
}
|
||||
// If the error is temporary, we want to allow continuation of the
|
||||
// retry process
|
||||
return err.Temporary() || isErrConnectionReset(origErr)
|
||||
|
||||
case nil:
|
||||
// `awserr.Error.OrigErr()` can be nil, meaning there was an error but
|
||||
// because we don't know the cause, it is marked as retryable. See
|
||||
// TestRequest4xxUnretryable for an example.
|
||||
return true
|
||||
|
||||
default:
|
||||
switch err.Error() {
|
||||
case "net/http: request canceled",
|
||||
"net/http: request canceled while waiting for connection":
|
||||
// known 1.5 error case when an http request is cancelled
|
||||
return false
|
||||
}
|
||||
// here we don't know the error; so we allow a retry.
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsErrorThrottle returns whether the error is to be throttled based on its code.
|
||||
// Returns false if error is nil.
|
||||
func IsErrorThrottle(err error) bool {
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok {
|
||||
return isCodeThrottle(aerr.Code())
|
||||
}
|
||||
if aerr, ok := err.(awserr.Error); ok && aerr != nil {
|
||||
return isCodeThrottle(aerr.Code())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
|
||||
// Returns false if error is nil.
|
||||
// IsErrorExpiredCreds returns whether the error code is a credential expiry
|
||||
// error. Returns false if error is nil.
|
||||
func IsErrorExpiredCreds(err error) bool {
|
||||
if err != nil {
|
||||
if aerr, ok := err.(awserr.Error); ok {
|
||||
return isCodeExpiredCreds(aerr.Code())
|
||||
}
|
||||
if aerr, ok := err.(awserr.Error); ok && aerr != nil {
|
||||
return isCodeExpiredCreds(aerr.Code())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
@ -143,17 +219,44 @@ func IsErrorExpiredCreds(err error) bool {
|
|||
//
|
||||
// Alias for the utility function IsErrorRetryable
|
||||
func (r *Request) IsErrorRetryable() bool {
|
||||
if isErrCode(r.Error, r.RetryErrorCodes) {
|
||||
return true
|
||||
}
|
||||
|
||||
return IsErrorRetryable(r.Error)
|
||||
}
|
||||
|
||||
// IsErrorThrottle returns whether the error is to be throttled based on its code.
|
||||
// Returns false if the request has no Error set
|
||||
// IsErrorThrottle returns whether the error is to be throttled based on its
|
||||
// code. Returns false if the request has no Error set.
|
||||
//
|
||||
// Alias for the utility function IsErrorThrottle
|
||||
func (r *Request) IsErrorThrottle() bool {
|
||||
if isErrCode(r.Error, r.ThrottleErrorCodes) {
|
||||
return true
|
||||
}
|
||||
|
||||
if r.HTTPResponse != nil {
|
||||
switch r.HTTPResponse.StatusCode {
|
||||
case 429, 502, 503, 504:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return IsErrorThrottle(r.Error)
|
||||
}
|
||||
|
||||
func isErrCode(err error, codes []string) bool {
|
||||
if aerr, ok := err.(awserr.Error); ok && aerr != nil {
|
||||
for _, code := range codes {
|
||||
if code == aerr.Code() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// IsErrorExpired returns whether the error code is a credential expiry error.
|
||||
// Returns false if the request has no Error set.
|
||||
//
|
||||
|
|
@ -99,10 +99,10 @@ type envConfig struct {
|
|||
CustomCABundle string
|
||||
|
||||
csmEnabled string
|
||||
CSMEnabled bool
|
||||
CSMEnabled *bool
|
||||
CSMPort string
|
||||
CSMClientID string
|
||||
CSMHost string
|
||||
CSMClientID string
|
||||
|
||||
// Enables endpoint discovery via environment variables.
|
||||
//
|
||||
|
|
@ -230,7 +230,11 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
|
|||
setFromEnvVal(&cfg.CSMHost, csmHostEnvKey)
|
||||
setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
|
||||
setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
|
||||
cfg.CSMEnabled = len(cfg.csmEnabled) > 0
|
||||
|
||||
if len(cfg.csmEnabled) != 0 {
|
||||
v, _ := strconv.ParseBool(cfg.csmEnabled)
|
||||
cfg.CSMEnabled = &v
|
||||
}
|
||||
|
||||
regionKeys := regionEnvKeys
|
||||
profileKeys := profileEnvKeys
|
||||
|
|
@ -104,9 +104,13 @@ func New(cfgs ...*aws.Config) *Session {
|
|||
}
|
||||
|
||||
s := deprecatedNewSession(cfgs...)
|
||||
if envCfg.CSMEnabled {
|
||||
err := enableCSM(&s.Handlers, envCfg.CSMClientID,
|
||||
envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger)
|
||||
|
||||
if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
|
||||
if l := s.Config.Logger; l != nil {
|
||||
l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
|
||||
}
|
||||
} else if csmCfg.Enabled {
|
||||
err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to enable CSM, %v", err)
|
||||
s.Config.Logger.Log("ERROR:", err.Error())
|
||||
|
|
@ -347,15 +351,12 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
|
|||
return s
|
||||
}
|
||||
|
||||
func enableCSM(handlers *request.Handlers,
|
||||
clientID, host, port string,
|
||||
logger aws.Logger,
|
||||
) error {
|
||||
func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
|
||||
if logger != nil {
|
||||
logger.Log("Enabling CSM")
|
||||
}
|
||||
|
||||
r, err := csm.Start(clientID, csm.AddressWithDefaults(host, port))
|
||||
r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -395,7 +396,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
|
|||
// Load additional config from file(s)
|
||||
sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
|
||||
if err != nil {
|
||||
if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
|
||||
if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
|
||||
// Special case where the user has not explicitly specified an AWS_PROFILE,
|
||||
// or session.Options.profile, shared config is not enabled, and the
|
||||
// environment has credentials, allow the shared config file to fail to
|
||||
// load since the user has already provided credentials, and nothing else
|
||||
// is required to be read file. Github(aws/aws-sdk-go#2455)
|
||||
} else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
|
@ -410,9 +417,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
|
|||
}
|
||||
|
||||
initHandlers(s)
|
||||
if envCfg.CSMEnabled {
|
||||
err := enableCSM(&s.Handlers, envCfg.CSMClientID,
|
||||
envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger)
|
||||
|
||||
if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil {
|
||||
if l := s.Config.Logger; l != nil {
|
||||
l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
|
||||
}
|
||||
} else if csmCfg.Enabled {
|
||||
err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -428,6 +439,46 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
|
|||
return s, nil
|
||||
}
|
||||
|
||||
type csmConfig struct {
|
||||
Enabled bool
|
||||
Host string
|
||||
Port string
|
||||
ClientID string
|
||||
}
|
||||
|
||||
var csmProfileName = "aws_csm"
|
||||
|
||||
func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
|
||||
if envCfg.CSMEnabled != nil {
|
||||
if *envCfg.CSMEnabled {
|
||||
return csmConfig{
|
||||
Enabled: true,
|
||||
ClientID: envCfg.CSMClientID,
|
||||
Host: envCfg.CSMHost,
|
||||
Port: envCfg.CSMPort,
|
||||
}, nil
|
||||
}
|
||||
return csmConfig{}, nil
|
||||
}
|
||||
|
||||
sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false)
|
||||
if err != nil {
|
||||
if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
|
||||
return csmConfig{}, err
|
||||
}
|
||||
}
|
||||
if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true {
|
||||
return csmConfig{
|
||||
Enabled: true,
|
||||
ClientID: sharedCfg.CSMClientID,
|
||||
Host: sharedCfg.CSMHost,
|
||||
Port: sharedCfg.CSMPort,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return csmConfig{}, nil
|
||||
}
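Taken together with the envConfig and sharedConfig changes above, CSM can now be enabled either from the environment or from a dedicated aws_csm profile in the shared config file. Below is a minimal opt-in sketch via environment variables; the AWS_CSM_* key names correspond to the csm*EnvKey variables referenced above but are not spelled out in this hunk, so treat them as an assumption.

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Assumed env keys read by the session package for client-side monitoring.
	os.Setenv("AWS_CSM_ENABLED", "true")
	os.Setenv("AWS_CSM_PORT", "31000")
	os.Setenv("AWS_CSM_CLIENT_ID", "my-application")

	// NewSession runs loadCSMConfig and, when CSM is enabled, wires the CSM
	// metric handlers into the session's Handlers.
	sess, err := session.NewSession()
	if err != nil {
		fmt.Println("session error:", err)
		return
	}
	_ = sess
}
```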
|
||||
|
||||
func loadCustomCABundle(s *Session, bundle io.Reader) error {
|
||||
var t *http.Transport
|
||||
switch v := s.Config.HTTPClient.Transport.(type) {
|
||||
|
|
@ -22,6 +22,12 @@ const (
|
|||
mfaSerialKey = `mfa_serial` // optional
|
||||
roleSessionNameKey = `role_session_name` // optional
|
||||
|
||||
// CSM options
|
||||
csmEnabledKey = `csm_enabled`
|
||||
csmHostKey = `csm_host`
|
||||
csmPortKey = `csm_port`
|
||||
csmClientIDKey = `csm_client_id`
|
||||
|
||||
// Additional Config fields
|
||||
regionKey = `region`
|
||||
|
||||
|
|
@ -76,6 +82,12 @@ type sharedConfig struct {
|
|||
//
|
||||
// endpoint_discovery_enabled = true
|
||||
EnableEndpointDiscovery *bool
|
||||
|
||||
// CSM Options
|
||||
CSMEnabled *bool
|
||||
CSMHost string
|
||||
CSMPort string
|
||||
CSMClientID string
|
||||
}
|
||||
|
||||
type sharedConfigFile struct {
|
||||
|
|
@ -251,10 +263,13 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e
|
|||
}
|
||||
|
||||
// Endpoint discovery
|
||||
if section.Has(enableEndpointDiscoveryKey) {
|
||||
v := section.Bool(enableEndpointDiscoveryKey)
|
||||
cfg.EnableEndpointDiscovery = &v
|
||||
}
|
||||
updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
|
||||
|
||||
// CSM options
|
||||
updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey)
|
||||
updateString(&cfg.CSMHost, section, csmHostKey)
|
||||
updateString(&cfg.CSMPort, section, csmPortKey)
|
||||
updateString(&cfg.CSMClientID, section, csmClientIDKey)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -348,6 +363,16 @@ func updateString(dst *string, section ini.Section, key string) {
|
|||
*dst = section.String(key)
|
||||
}
|
||||
|
||||
// updateBoolPtr will only update the dst with the value in the section key,
// if the key is present in the section.
|
||||
func updateBoolPtr(dst **bool, section ini.Section, key string) {
|
||||
if !section.Has(key) {
|
||||
return
|
||||
}
|
||||
*dst = new(bool)
|
||||
**dst = section.Bool(key)
|
||||
}
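The nil-vs-set distinction above is what lets csm_enabled (and endpoint_discovery_enabled) report "explicitly false" separately from "not configured". A generic illustration of the same tri-state pointer idiom, independent of the SDK's internal ini package:

```go
package main

import "fmt"

// setBoolPtr mirrors updateBoolPtr: leave dst nil when the key is absent,
// otherwise point it at the parsed value.
func setBoolPtr(dst **bool, values map[string]bool, key string) {
	v, ok := values[key]
	if !ok {
		return
	}
	*dst = new(bool)
	**dst = v
}

func main() {
	var enabled *bool
	setBoolPtr(&enabled, map[string]bool{"csm_enabled": false}, "csm_enabled")
	fmt.Println(enabled != nil, *enabled) // true false: explicitly disabled, not merely unset
}
```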
|
||||
|
||||
// SharedConfigLoadError is an error for the shared config file failed to load.
|
||||
type SharedConfigLoadError struct {
|
||||
Filename string
|
||||
|
|
@ -5,4 +5,4 @@ package aws
|
|||
const SDKName = "aws-sdk-go"
|
||||
|
||||
// SDKVersion is the version of this SDK
|
||||
const SDKVersion = "1.22.1"
|
||||
const SDKVersion = "1.23.20"
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
// +build go1.10
|
||||
|
||||
package sdkmath
|
||||
|
||||
import "math"
|
||||
|
||||
// Round returns the nearest integer, rounding half away from zero.
|
||||
//
|
||||
// Special cases are:
|
||||
// Round(±0) = ±0
|
||||
// Round(±Inf) = ±Inf
|
||||
// Round(NaN) = NaN
|
||||
func Round(x float64) float64 {
|
||||
return math.Round(x)
|
||||
}
@ -0,0 +1,56 @@
|
|||
// +build !go1.10
|
||||
|
||||
package sdkmath
|
||||
|
||||
import "math"
|
||||
|
||||
// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
|
||||
// Go version prior to Go 1.10.
|
||||
const (
|
||||
uvone = 0x3FF0000000000000
|
||||
mask = 0x7FF
|
||||
shift = 64 - 11 - 1
|
||||
bias = 1023
|
||||
signMask = 1 << 63
|
||||
fracMask = 1<<shift - 1
|
||||
)
|
||||
|
||||
// Round returns the nearest integer, rounding half away from zero.
|
||||
//
|
||||
// Special cases are:
|
||||
// Round(±0) = ±0
|
||||
// Round(±Inf) = ±Inf
|
||||
// Round(NaN) = NaN
|
||||
//
|
||||
// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
|
||||
// Go version prior to Go 1.10.
|
||||
func Round(x float64) float64 {
|
||||
// Round is a faster implementation of:
|
||||
//
|
||||
// func Round(x float64) float64 {
|
||||
// t := Trunc(x)
|
||||
// if Abs(x-t) >= 0.5 {
|
||||
// return t + Copysign(1, x)
|
||||
// }
|
||||
// return t
|
||||
// }
|
||||
bits := math.Float64bits(x)
|
||||
e := uint(bits>>shift) & mask
|
||||
if e < bias {
|
||||
// Round abs(x) < 1 including denormals.
|
||||
bits &= signMask // +-0
|
||||
if e == bias-1 {
|
||||
bits |= uvone // +-1
|
||||
}
|
||||
} else if e < bias+shift {
|
||||
// Round any abs(x) >= 1 containing a fractional component [0,1).
|
||||
//
|
||||
// Numbers with larger exponents are returned unchanged since they
|
||||
// must be either an integer, infinity, or NaN.
|
||||
const half = 1 << (shift - 1)
|
||||
e -= bias
|
||||
bits += half >> e
|
||||
bits &^= fracMask >> e
|
||||
}
|
||||
return math.Float64frombits(bits)
|
||||
}
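Both build variants of Round implement the same half-away-from-zero semantics; since sdkmath is an internal package, external code would call math.Round directly. The snippet below is only a reference for the expected behaviour, not part of the vendored code.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Same semantics the sdkmath shim reproduces for Go < 1.10.
	fmt.Println(math.Round(0.5))   // 1: halves round away from zero
	fmt.Println(math.Round(-0.5))  // -1
	fmt.Println(math.Round(2.4))   // 2
	fmt.Println(math.Round(0.123)) // 0
}
```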
@ -146,6 +146,9 @@ func unmarshalStatusCode(v reflect.Value, statusCode int) {
|
|||
}
|
||||
|
||||
func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
|
||||
if len(headers) == 0 {
|
||||
return nil
|
||||
}
|
||||
switch r.Interface().(type) {
|
||||
case map[string]*string: // we only support string map value types
|
||||
out := map[string]*string{}
|
||||
|
|
@ -155,19 +158,28 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err
|
|||
out[k[len(prefix):]] = &v[0]
|
||||
}
|
||||
}
|
||||
r.Set(reflect.ValueOf(out))
|
||||
if len(out) != 0 {
|
||||
r.Set(reflect.ValueOf(out))
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
|
||||
isJSONValue := tag.Get("type") == "jsonvalue"
|
||||
if isJSONValue {
|
||||
switch tag.Get("type") {
|
||||
case "jsonvalue":
|
||||
if len(header) == 0 {
|
||||
return nil
|
||||
}
|
||||
} else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
|
||||
return nil
|
||||
case "blob":
|
||||
if len(header) == 0 {
|
||||
return nil
|
||||
}
|
||||
default:
|
||||
if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
switch v.Interface().(type) {
|
||||
|
|
@ -178,7 +190,7 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v.Set(reflect.ValueOf(&b))
|
||||
v.Set(reflect.ValueOf(b))
|
||||
case *bool:
|
||||
b, err := strconv.ParseBool(header)
|
||||
if err != nil {
|
||||
|
|
@ -1,8 +1,11 @@
|
|||
package protocol
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/internal/sdkmath"
|
||||
)
|
||||
|
||||
// Names of time formats supported by the SDK
|
||||
|
|
@ -13,12 +16,19 @@ const (
|
|||
)
|
||||
|
||||
// Time formats supported by the SDK
|
||||
// Output time is intended to not contain decimals
|
||||
const (
|
||||
// RFC 7231#section-7.1.1.1 timestamp format. e.g. Tue, 29 Apr 2014 18:30:38 GMT
|
||||
RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
|
||||
|
||||
// This format is used for output time without seconds precision
|
||||
RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
|
||||
|
||||
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
|
||||
ISO8601TimeFormat = "2006-01-02T15:04:05Z"
|
||||
ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
|
||||
|
||||
// This format is used for output time without seconds precision
|
||||
ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z"
|
||||
)
|
||||
|
||||
// IsKnownTimestampFormat returns if the timestamp format name
|
||||
|
|
@ -42,9 +52,9 @@ func FormatTime(name string, t time.Time) string {
|
|||
|
||||
switch name {
|
||||
case RFC822TimeFormatName:
|
||||
return t.Format(RFC822TimeFormat)
|
||||
return t.Format(RFC822OutputTimeFormat)
|
||||
case ISO8601TimeFormatName:
|
||||
return t.Format(ISO8601TimeFormat)
|
||||
return t.Format(ISO8601OutputTimeFormat)
|
||||
case UnixTimeFormatName:
|
||||
return strconv.FormatInt(t.Unix(), 10)
|
||||
default:
|
||||
|
|
@ -62,10 +72,12 @@ func ParseTime(formatName, value string) (time.Time, error) {
|
|||
return time.Parse(ISO8601TimeFormat, value)
|
||||
case UnixTimeFormatName:
|
||||
v, err := strconv.ParseFloat(value, 64)
|
||||
_, dec := math.Modf(v)
|
||||
dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
return time.Unix(int64(v), 0), nil
|
||||
return time.Unix(int64(v), int64(dec*(1e9))), nil
|
||||
default:
|
||||
panic("unknown timestamp format name, " + formatName)
|
||||
}
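With this change a fractional Unix timestamp is rounded to millisecond precision instead of being truncated to whole seconds. A hedged usage sketch follows; it assumes the exported ParseTime helper and UnixTimeFormatName constant from this protocol package.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	// Before this change the fractional part was dropped; now it is preserved
	// (rounded to millisecond precision) in the returned time.Time.
	t, err := protocol.ParseTime(protocol.UnixTimeFormatName, "1257894000.5")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(t.UTC()) // 2009-11-10 23:00:00.5 +0000 UTC
}
```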
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
package sts
|
||||
|
||||
import "github.com/aws/aws-sdk-go/aws/request"
|
||||
|
||||
func init() {
|
||||
initRequest = customizeRequest
|
||||
}
|
||||
|
||||
func customizeRequest(r *request.Request) {
|
||||
r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException)
|
||||
}
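The same per-request RetryErrorCodes hook used by customizeRequest above is open to callers: a handler pushed onto a client can append extra retryable codes before each attempt. The sketch below is an assumption about how one might use it; the TransientFailure code and handler placement are illustrative, not part of the SDK.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// Treat a hypothetical transient error code as retryable for every request
	// built by this client, mirroring what customizeRequest does for
	// ErrCodeIDPCommunicationErrorException.
	svc.Handlers.Build.PushBack(func(r *request.Request) {
		r.RetryErrorCodes = append(r.RetryErrorCodes, "TransientFailure")
	})

	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("caller ARN:", *out.Arn)
}
```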
@ -1,5 +0,0 @@
|
|||
# This is a comment
|
||||
# We can use equal or colon notation
|
||||
DIR: root
|
||||
FLAVOUR: none
|
||||
INSIDE_FOLDER=false
@ -1,29 +0,0 @@
|
|||
*.log
|
||||
.DS_Store
|
||||
doc
|
||||
tmp
|
||||
pkg
|
||||
*.gem
|
||||
*.pid
|
||||
coverage
|
||||
coverage.data
|
||||
build/*
|
||||
*.pbxuser
|
||||
*.mode1v3
|
||||
.svn
|
||||
profile
|
||||
.console_history
|
||||
.sass-cache/*
|
||||
.rake_tasks~
|
||||
*.log.lck
|
||||
solr/
|
||||
.jhw-cache/
|
||||
jhw.*
|
||||
*.sublime*
|
||||
node_modules/
|
||||
dist/
|
||||
generated/
|
||||
.vendor/
|
||||
bin/*
|
||||
gin-bin
|
||||
.idea/
@ -1,3 +0,0 @@
|
|||
{
|
||||
"Enable": ["vet", "golint", "goimports", "deadcode", "gotype", "ineffassign", "misspell", "nakedret", "unconvert", "megacheck", "varcheck"]
|
||||
}
@ -1,26 +0,0 @@
|
|||
language: go
|
||||
|
||||
sudo: false
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- go: "1.9.x"
|
||||
- go: "1.10.x"
|
||||
- go: "1.11.x"
|
||||
env:
|
||||
- GO111MODULE=off
|
||||
- go: "1.11.x"
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
- go: "tip"
|
||||
env:
|
||||
- GO111MODULE=off
|
||||
- go: "tip"
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
allow_failures:
|
||||
- go: "tip"
|
||||
|
||||
install: make deps
|
||||
|
||||
script: make ci-test
@ -1,8 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
Copyright (c) 2018 Mark Bates
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -1,46 +0,0 @@
|
|||
TAGS ?= "sqlite"
|
||||
GO_BIN ?= go
|
||||
|
||||
install:
|
||||
packr
|
||||
$(GO_BIN) install -v .
|
||||
|
||||
deps:
|
||||
$(GO_BIN) get github.com/gobuffalo/release
|
||||
$(GO_BIN) get github.com/gobuffalo/packr/packr
|
||||
$(GO_BIN) get -tags ${TAGS} -t ./...
|
||||
ifeq ($(GO111MODULE),on)
|
||||
$(GO_BIN) mod tidy
|
||||
endif
|
||||
|
||||
build:
|
||||
packr
|
||||
$(GO_BIN) build -v .
|
||||
|
||||
test:
|
||||
packr
|
||||
$(GO_BIN) test -tags ${TAGS} ./...
|
||||
|
||||
ci-test:
|
||||
$(GO_BIN) test -tags ${TAGS} -race ./...
|
||||
|
||||
lint:
|
||||
gometalinter --vendor ./... --deadline=1m --skip=internal
|
||||
|
||||
update:
|
||||
$(GO_BIN) get -u -tags ${TAGS}
|
||||
ifeq ($(GO111MODULE),on)
|
||||
$(GO_BIN) mod tidy
|
||||
endif
|
||||
packr
|
||||
make test
|
||||
make install
|
||||
ifeq ($(GO111MODULE),on)
|
||||
$(GO_BIN) mod tidy
|
||||
endif
|
||||
|
||||
release-test:
|
||||
$(GO_BIN) test -tags ${TAGS} -race ./...
|
||||
|
||||
release:
|
||||
release -y -f version.go
@ -1,93 +0,0 @@
|
|||
# envy
|
||||
[](https://travis-ci.org/gobuffalo/envy)
|
||||
|
||||
Envy makes working with ENV variables in Go trivial.
|
||||
|
||||
* Get ENV variables with default values.
|
||||
* Set ENV variables safely without affecting the underlying system.
|
||||
* Temporarily change ENV vars; useful for testing.
|
||||
* Map all of the key/values in the ENV.
|
||||
* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/))
|
||||
* More!
|
||||
|
||||
## Installation
|
||||
|
||||
```text
|
||||
$ go get -u github.com/gobuffalo/envy
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```go
|
||||
func Test_Get(t *testing.T) {
|
||||
r := require.New(t)
|
||||
r.NotZero(os.Getenv("GOPATH"))
|
||||
r.Equal(os.Getenv("GOPATH"), envy.Get("GOPATH", "foo"))
|
||||
r.Equal("bar", envy.Get("IDONTEXIST", "bar"))
|
||||
}
|
||||
|
||||
func Test_MustGet(t *testing.T) {
|
||||
r := require.New(t)
|
||||
r.NotZero(os.Getenv("GOPATH"))
|
||||
v, err := envy.MustGet("GOPATH")
|
||||
r.NoError(err)
|
||||
r.Equal(os.Getenv("GOPATH"), v)
|
||||
|
||||
_, err = envy.MustGet("IDONTEXIST")
|
||||
r.Error(err)
|
||||
}
|
||||
|
||||
func Test_Set(t *testing.T) {
|
||||
r := require.New(t)
|
||||
_, err := envy.MustGet("FOO")
|
||||
r.Error(err)
|
||||
|
||||
envy.Set("FOO", "foo")
|
||||
r.Equal("foo", envy.Get("FOO", "bar"))
|
||||
}
|
||||
|
||||
func Test_Temp(t *testing.T) {
|
||||
r := require.New(t)
|
||||
|
||||
_, err := envy.MustGet("BAR")
|
||||
r.Error(err)
|
||||
|
||||
envy.Temp(func() {
|
||||
envy.Set("BAR", "foo")
|
||||
r.Equal("foo", envy.Get("BAR", "bar"))
|
||||
_, err = envy.MustGet("BAR")
|
||||
r.NoError(err)
|
||||
})
|
||||
|
||||
_, err = envy.MustGet("BAR")
|
||||
r.Error(err)
|
||||
}
|
||||
```
|
||||
## .env files support
|
||||
|
||||
Envy now supports loading `.env` files by using the [godotenv library](https://github.com/joho/godotenv/).
|
||||
That means one can use and define multiple `.env` files which will be loaded on-demand. By default, no env files will be loaded. To load one or more, you need to call the `envy.Load` function in one of the following ways:
|
||||
|
||||
```go
|
||||
envy.Load() // 1
|
||||
|
||||
envy.Load("MY_ENV_FILE") // 2
|
||||
|
||||
envy.Load(".env", ".env.prod") // 3
|
||||
|
||||
envy.Load(".env", "NON_EXISTING_FILE") // 4
|
||||
|
||||
// 5
|
||||
envy.Load(".env")
|
||||
envy.Load("NON_EXISTING_FILE")
|
||||
|
||||
// 6
|
||||
envy.Load(".env", "NON_EXISTING_FILE", ".env.prod")
|
||||
```
|
||||
|
||||
1. Will load the default `.env` file
|
||||
2. Will load the file `MY_ENV_FILE`, **but not** `.env`
|
||||
3. Will load the file `.env`, and after that will load the `.env.prod` file. If any variable is redefined in `. env.prod` it will be overwritten (will contain the `env.prod` value)
|
||||
4. Will load the `.env` file and return an error as the second file does not exist. The values in `.env` will be loaded and available.
|
||||
5. Same as 4
|
||||
6. Will load the `.env` file and return an error as the second file does not exist. The values in `.env` will be loaded and available, **but the ones in** `.env.prod` **won't**.
@ -1,238 +0,0 @@
|
|||
/*
|
||||
package envy makes working with ENV variables in Go trivial.
|
||||
|
||||
* Get ENV variables with default values.
|
||||
* Set ENV variables safely without affecting the underlying system.
|
||||
* Temporarily change ENV vars; useful for testing.
|
||||
* Map all of the key/values in the ENV.
|
||||
* Loads .env files (by using [godotenv](https://github.com/joho/godotenv/))
|
||||
* More!
|
||||
*/
|
||||
package envy
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/joho/godotenv"
|
||||
)
|
||||
|
||||
var gil = &sync.RWMutex{}
|
||||
var env = map[string]string{}
|
||||
|
||||
func init() {
|
||||
Load()
|
||||
loadEnv()
|
||||
}
|
||||
|
||||
// Load the ENV variables to the env map
|
||||
func loadEnv() {
|
||||
gil.Lock()
|
||||
defer gil.Unlock()
|
||||
// Detect the Go version on the user system, not the one that was used to compile the binary
|
||||
v := ""
|
||||
out, err := exec.Command("go", "version").Output()
|
||||
if err == nil {
|
||||
// This will break when Go 2 lands
|
||||
v = strings.Split(string(out), " ")[2][4:]
|
||||
} else {
|
||||
v = runtime.Version()[4:]
|
||||
}
|
||||
|
||||
goRuntimeVersion, _ := strconv.ParseFloat(runtime.Version()[4:], 64)
|
||||
|
||||
goVersion, err := strconv.ParseFloat(v, 64)
|
||||
if err != nil {
|
||||
goVersion = goRuntimeVersion
|
||||
}
|
||||
|
||||
if os.Getenv("GO_ENV") == "" {
|
||||
// if the flag "test.v" is *defined*, we're running as a unit test. Note that we don't care
|
||||
// about v.Value (verbose test mode); we just want to know if the test environment has defined
|
||||
// it. It's also possible that the flags are not yet fully parsed (i.e. flag.Parsed() == false),
|
||||
// so we could not depend on v.Value anyway.
|
||||
//
|
||||
if v := flag.Lookup("test.v"); v != nil {
|
||||
env["GO_ENV"] = "test"
|
||||
}
|
||||
}
|
||||
|
||||
// set the GOPATH if using >= 1.8 and the GOPATH isn't set
|
||||
if goVersion >= 8 && os.Getenv("GOPATH") == "" {
|
||||
out, err := exec.Command("go", "env", "GOPATH").Output()
|
||||
if err == nil {
|
||||
gp := strings.TrimSpace(string(out))
|
||||
os.Setenv("GOPATH", gp)
|
||||
}
|
||||
}
|
||||
|
||||
for _, e := range os.Environ() {
|
||||
pair := strings.Split(e, "=")
|
||||
env[pair[0]] = os.Getenv(pair[0])
|
||||
}
|
||||
}
|
||||
|
||||
// Reload the ENV variables. Useful if
|
||||
// an external ENV manager has been used
|
||||
func Reload() {
|
||||
env = map[string]string{}
|
||||
loadEnv()
|
||||
}
|
||||
|
||||
// Load .env files. Files will be loaded in the same order that are received.
|
||||
// Redefined vars will override previously existing values.
|
||||
// IE: envy.Load(".env", "test_env/.env") will result in DIR=test_env
|
||||
// If no arg passed, it will try to load a .env file.
|
||||
func Load(files ...string) error {
|
||||
|
||||
// If no files received, load the default one
|
||||
if len(files) == 0 {
|
||||
err := godotenv.Overload()
|
||||
if err == nil {
|
||||
Reload()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// We received a list of files
|
||||
for _, file := range files {
|
||||
|
||||
// Check if it exists or we can access
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
// It does not exist or we can not access.
|
||||
// Return and stop loading
|
||||
return err
|
||||
}
|
||||
|
||||
// It exists and we have permission. Load it
|
||||
if err := godotenv.Overload(file); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reload the env so all new changes are noticed
|
||||
Reload()
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get a value from the ENV. If it doesn't exist the
|
||||
// default value will be returned.
|
||||
func Get(key string, value string) string {
|
||||
gil.RLock()
|
||||
defer gil.RUnlock()
|
||||
if v, ok := env[key]; ok {
|
||||
return v
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
// Get a value from the ENV. If it doesn't exist
|
||||
// an error will be returned
|
||||
func MustGet(key string) (string, error) {
|
||||
gil.RLock()
|
||||
defer gil.RUnlock()
|
||||
if v, ok := env[key]; ok {
|
||||
return v, nil
|
||||
}
|
||||
return "", fmt.Errorf("could not find ENV var with %s", key)
|
||||
}
|
||||
|
||||
// Set a value into the ENV. This is NOT permanent. It will
|
||||
// only affect values accessed through envy.
|
||||
func Set(key string, value string) {
|
||||
gil.Lock()
|
||||
defer gil.Unlock()
|
||||
env[key] = value
|
||||
}
|
||||
|
||||
// MustSet the value into the underlying ENV, as well as envy.
|
||||
// This may return an error if there is a problem setting the
|
||||
// underlying ENV value.
|
||||
func MustSet(key string, value string) error {
|
||||
gil.Lock()
|
||||
defer gil.Unlock()
|
||||
err := os.Setenv(key, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
env[key] = value
|
||||
return nil
|
||||
}
|
||||
|
||||
// Map all of the keys/values set in envy.
|
||||
func Map() map[string]string {
|
||||
gil.RLock()
|
||||
defer gil.RUnlock()
|
||||
cp := map[string]string{}
|
||||
for k, v := range env {
|
||||
cp[k] = v
|
||||
}
|
||||
return env
|
||||
}
|
||||
|
||||
// Temp makes a copy of the values and allows operation on
|
||||
// those values temporarily during the run of the function.
|
||||
// At the end of the function run the copy is discarded and
|
||||
// the original values are replaced. This is useful for testing.
|
||||
// Warning: This function is NOT safe to use from a goroutine or
|
||||
// from code which may access any Get or Set function from a goroutine
|
||||
func Temp(f func()) {
|
||||
oenv := env
|
||||
env = map[string]string{}
|
||||
for k, v := range oenv {
|
||||
env[k] = v
|
||||
}
|
||||
defer func() { env = oenv }()
|
||||
f()
|
||||
}
|
||||
|
||||
func GoPath() string {
|
||||
return Get("GOPATH", "")
|
||||
}
|
||||
|
||||
// GoPaths returns all possible GOPATHS that are set.
|
||||
func GoPaths() []string {
|
||||
gp := Get("GOPATH", "")
|
||||
if runtime.GOOS == "windows" {
|
||||
return strings.Split(gp, ";") // Windows uses a different separator
|
||||
}
|
||||
return strings.Split(gp, ":")
|
||||
}
|
||||
|
||||
func importPath(path string) string {
|
||||
for _, gopath := range GoPaths() {
|
||||
srcpath := filepath.Join(gopath, "src")
|
||||
rel, err := filepath.Rel(srcpath, path)
|
||||
if err == nil {
|
||||
return filepath.ToSlash(rel)
|
||||
}
|
||||
}
|
||||
|
||||
// fallback to trim
|
||||
rel := strings.TrimPrefix(path, filepath.Join(GoPath(), "src"))
|
||||
rel = strings.TrimPrefix(rel, string(filepath.Separator))
|
||||
return filepath.ToSlash(rel)
|
||||
}
|
||||
|
||||
func CurrentPackage() string {
|
||||
pwd, _ := os.Getwd()
|
||||
return importPath(pwd)
|
||||
}
|
||||
|
||||
func Environ() []string {
|
||||
gil.RLock()
|
||||
defer gil.RUnlock()
|
||||
var e []string
|
||||
for k, v := range env {
|
||||
e = append(e, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
return e
|
||||
}
@ -1,8 +0,0 @@
|
|||
module github.com/gobuffalo/envy
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/joho/godotenv v1.3.0
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/stretchr/testify v1.2.2
|
||||
)
@ -1,8 +0,0 @@
|
|||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
|
||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@ -1,10 +0,0 @@
|
|||
# github.com/gobuffalo/envy Stands on the Shoulders of Giants
|
||||
|
||||
github.com/gobuffalo/envy does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants this project would not be possible. Please make sure to check them out and thank them for all of their hard work.
|
||||
|
||||
Thank you to the following **GIANTS**:
|
||||
|
||||
|
||||
* [github.com/gobuffalo/envy](https://godoc.org/github.com/gobuffalo/envy)
|
||||
|
||||
* [github.com/joho/godotenv](https://godoc.org/github.com/joho/godotenv)
@ -1,3 +0,0 @@
|
|||
package envy
|
||||
|
||||
const Version = "v1.6.5"
@ -1,3 +1 @@
|
|||
module github.com/hashicorp/golang-lru
|
||||
|
||||
go 1.12
|
||||
|
|
@ -86,35 +86,17 @@ func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
|
|||
}
|
||||
|
||||
// Remove removes the provided key from the cache.
|
||||
func (c *Cache) Remove(key interface{}) (present bool) {
|
||||
func (c *Cache) Remove(key interface{}) {
|
||||
c.lock.Lock()
|
||||
present = c.lru.Remove(key)
|
||||
c.lru.Remove(key)
|
||||
c.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Resize changes the cache size.
|
||||
func (c *Cache) Resize(size int) (evicted int) {
|
||||
c.lock.Lock()
|
||||
evicted = c.lru.Resize(size)
|
||||
c.lock.Unlock()
|
||||
return evicted
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) {
|
||||
func (c *Cache) RemoveOldest() {
|
||||
c.lock.Lock()
|
||||
key, value, ok = c.lru.RemoveOldest()
|
||||
c.lru.RemoveOldest()
|
||||
c.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// GetOldest returns the oldest entry
|
||||
func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) {
|
||||
c.lock.Lock()
|
||||
key, value, ok = c.lru.GetOldest()
|
||||
c.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
||||
|
|
@ -73,9 +73,6 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) {
|
|||
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.MoveToFront(ent)
|
||||
if ent.Value.(*entry) == nil {
|
||||
return nil, false
|
||||
}
|
||||
return ent.Value.(*entry).value, true
|
||||
}
|
||||
return
|
||||
|
|
@ -145,19 +142,6 @@ func (c *LRU) Len() int {
|
|||
return c.evictList.Len()
|
||||
}
|
||||
|
||||
// Resize changes the cache size.
|
||||
func (c *LRU) Resize(size int) (evicted int) {
|
||||
diff := c.Len() - size
|
||||
if diff < 0 {
|
||||
diff = 0
|
||||
}
|
||||
for i := 0; i < diff; i++ {
|
||||
c.removeOldest()
|
||||
}
|
||||
c.size = size
|
||||
return diff
|
||||
}
|
||||
|
||||
// removeOldest removes the oldest item from the cache.
|
||||
func (c *LRU) removeOldest() {
|
||||
ent := c.evictList.Back()
|
||||
|
|
@ -10,7 +10,7 @@ type LRUCache interface {
|
|||
// updates the "recently used"-ness of the key. #value, isFound
|
||||
Get(key interface{}) (value interface{}, ok bool)
|
||||
|
||||
// Checks if a key exists in cache without updating the recent-ness.
|
||||
// Check if a key exsists in cache without updating the recent-ness.
|
||||
Contains(key interface{}) (ok bool)
|
||||
|
||||
// Returns key's value without updating the "recently used"-ness of the key.
|
||||
|
|
@ -31,9 +31,6 @@ type LRUCache interface {
|
|||
// Returns the number of items in the cache.
|
||||
Len() int
|
||||
|
||||
// Clears all cache entries.
|
||||
// Clear all cache entries
|
||||
Purge()
|
||||
|
||||
// Resizes cache, returning number evicted
|
||||
Resize(int) int
|
||||
}
|
||||
|
|
@ -1 +0,0 @@
|
|||
.DS_Store
@ -1,8 +0,0 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.x
|
||||
|
||||
os:
|
||||
- linux
|
||||
- osx
@ -1,23 +0,0 @@
|
|||
Copyright (c) 2013 John Barton
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
@ -1,163 +0,0 @@
|
|||
# GoDotEnv [](https://travis-ci.org/joho/godotenv) [](https://ci.appveyor.com/project/joho/godotenv) [](https://goreportcard.com/report/github.com/joho/godotenv)
|
||||
|
||||
A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file)
|
||||
|
||||
From the original Library:
|
||||
|
||||
> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables.
|
||||
>
|
||||
> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped.
|
||||
|
||||
It can be used as a library (for loading in env for your own daemons etc) or as a bin command.
|
||||
|
||||
There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows.
|
||||
|
||||
## Installation
|
||||
|
||||
As a library
|
||||
|
||||
```shell
|
||||
go get github.com/joho/godotenv
|
||||
```
|
||||
|
||||
or if you want to use it as a bin command
|
||||
```shell
|
||||
go get github.com/joho/godotenv/cmd/godotenv
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Add your application configuration to your `.env` file in the root of your project:
|
||||
|
||||
```shell
|
||||
S3_BUCKET=YOURS3BUCKET
|
||||
SECRET_KEY=YOURSECRETKEYGOESHERE
|
||||
```
|
||||
|
||||
Then in your Go app you can do something like
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/joho/godotenv"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
func main() {
|
||||
err := godotenv.Load()
|
||||
if err != nil {
|
||||
log.Fatal("Error loading .env file")
|
||||
}
|
||||
|
||||
s3Bucket := os.Getenv("S3_BUCKET")
|
||||
secretKey := os.Getenv("SECRET_KEY")
|
||||
|
||||
// now do something with s3 or whatever
|
||||
}
|
||||
```
|
||||
|
||||
If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import
|
||||
|
||||
```go
|
||||
import _ "github.com/joho/godotenv/autoload"
|
||||
```
|
||||
|
||||
While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit
|
||||
|
||||
```go
|
||||
_ = godotenv.Load("somerandomfile")
|
||||
_ = godotenv.Load("filenumberone.env", "filenumbertwo.env")
|
||||
```
|
||||
|
||||
If you want to be really fancy with your env file you can do comments and exports (below is a valid env file)
|
||||
|
||||
```shell
|
||||
# I am a comment and that is OK
|
||||
SOME_VAR=someval
|
||||
FOO=BAR # comments at line end are OK too
|
||||
export BAR=BAZ
|
||||
```
|
||||
|
||||
Or finally you can do YAML(ish) style
|
||||
|
||||
```yaml
|
||||
FOO: bar
|
||||
BAR: baz
|
||||
```
|
||||
|
||||
as a final aside, if you don't want godotenv munging your env you can just get a map back instead
|
||||
|
||||
```go
|
||||
var myEnv map[string]string
|
||||
myEnv, err := godotenv.Read()
|
||||
|
||||
s3Bucket := myEnv["S3_BUCKET"]
|
||||
```
|
||||
|
||||
... or from an `io.Reader` instead of a local file
|
||||
|
||||
```go
|
||||
reader := getRemoteFile()
|
||||
myEnv, err := godotenv.Parse(reader)
|
||||
```
|
||||
|
||||
... or from a `string` if you so desire
|
||||
|
||||
```go
|
||||
content := getRemoteFileContent()
|
||||
myEnv, err := godotenv.Unmarshal(content)
|
||||
```
|
||||
|
||||
### Command Mode
|
||||
|
||||
Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH`
|
||||
|
||||
```
|
||||
godotenv -f /some/path/to/.env some_command with some args
|
||||
```
|
||||
|
||||
If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD`
|
||||
|
||||
### Writing Env Files
|
||||
|
||||
Godotenv can also write a map representing the environment to a correctly-formatted and escaped file
|
||||
|
||||
```go
|
||||
env, err := godotenv.Unmarshal("KEY=value")
|
||||
err := godotenv.Write(env, "./.env")
|
||||
```
|
||||
|
||||
... or to a string
|
||||
|
||||
```go
|
||||
env, err := godotenv.Unmarshal("KEY=value")
|
||||
content, err := godotenv.Marshal(env)
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases.
|
||||
|
||||
*code changes without tests will not be accepted*
|
||||
|
||||
1. Fork it
|
||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
||||
3. Commit your changes (`git commit -am 'Added some feature'`)
|
||||
4. Push to the branch (`git push origin my-new-feature`)
|
||||
5. Create new Pull Request
|
||||
|
||||
## Releases
|
||||
|
||||
Releases should follow [Semver](http://semver.org/) though the first couple of releases are `v1` and `v1.1`.
|
||||
|
||||
Use [annotated tags for all releases](https://github.com/joho/godotenv/issues/30). Example `git tag -a v1.2.1`
|
||||
|
||||
## CI
|
||||
|
||||
Linux: [](https://travis-ci.org/joho/godotenv) Windows: [](https://ci.appveyor.com/project/joho/godotenv)
|
||||
|
||||
## Who?
|
||||
|
||||
The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](https://johnbarton.co/) based off the tests/fixtures in the original library.
|
||||
|
|
@ -1,346 +0,0 @@
|
|||
// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
|
||||
//
|
||||
// Examples/readme can be found on the github page at https://github.com/joho/godotenv
|
||||
//
|
||||
// The TL;DR is that you make a .env file that looks something like
|
||||
//
|
||||
// SOME_ENV_VAR=somevalue
|
||||
//
|
||||
// and then in your go code you can call
|
||||
//
|
||||
// godotenv.Load()
|
||||
//
|
||||
// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR")
|
||||
package godotenv
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const doubleQuoteSpecialChars = "\\\n\r\"!$`"
|
||||
|
||||
// Load will read your env file(s) and load them into ENV for this process.
|
||||
//
|
||||
// Call this function as close as possible to the start of your program (ideally in main)
|
||||
//
|
||||
// If you call Load without any args it will default to loading .env in the current path
|
||||
//
|
||||
// You can otherwise tell it which files to load (there can be more than one) like
|
||||
//
|
||||
// godotenv.Load("fileone", "filetwo")
|
||||
//
|
||||
// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults
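// For example, if FOO is already exported in the shell, a FOO entry in .env is
// left alone by Load; use Overload if the value from the file should win instead.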
|
||||
func Load(filenames ...string) (err error) {
|
||||
filenames = filenamesOrDefault(filenames)
|
||||
|
||||
for _, filename := range filenames {
|
||||
err = loadFile(filename, false)
|
||||
if err != nil {
|
||||
return // return early on error
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Overload will read your env file(s) and load them into ENV for this process.
|
||||
//
|
||||
// Call this function as close as possible to the start of your program (ideally in main)
|
||||
//
|
||||
// If you call Overload without any args it will default to loading .env in the current path
|
||||
//
|
||||
// You can otherwise tell it which files to load (there can be more than one) like
|
||||
//
|
||||
// godotenv.Overload("fileone", "filetwo")
|
||||
//
|
||||
// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars.
|
||||
func Overload(filenames ...string) (err error) {
|
||||
filenames = filenamesOrDefault(filenames)
|
||||
|
||||
for _, filename := range filenames {
|
||||
err = loadFile(filename, true)
|
||||
if err != nil {
|
||||
return // return early on error
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Read all env (with same file loading semantics as Load) but return values as
|
||||
// a map rather than automatically writing values into env
|
||||
func Read(filenames ...string) (envMap map[string]string, err error) {
|
||||
filenames = filenamesOrDefault(filenames)
|
||||
envMap = make(map[string]string)
|
||||
|
||||
for _, filename := range filenames {
|
||||
individualEnvMap, individualErr := readFile(filename)
|
||||
|
||||
if individualErr != nil {
|
||||
err = individualErr
|
||||
return // return early on error
|
||||
}
|
||||
|
||||
for key, value := range individualEnvMap {
|
||||
envMap[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Parse reads an env file from io.Reader, returning a map of keys and values.
|
||||
func Parse(r io.Reader) (envMap map[string]string, err error) {
|
||||
envMap = make(map[string]string)
|
||||
|
||||
var lines []string
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
lines = append(lines, scanner.Text())
|
||||
}
|
||||
|
||||
if err = scanner.Err(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, fullLine := range lines {
|
||||
if !isIgnoredLine(fullLine) {
|
||||
var key, value string
|
||||
key, value, err = parseLine(fullLine, envMap)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
envMap[key] = value
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//Unmarshal reads an env file from a string, returning a map of keys and values.
|
||||
func Unmarshal(str string) (envMap map[string]string, err error) {
|
||||
return Parse(strings.NewReader(str))
|
||||
}
|
||||
|
||||
// Exec loads env vars from the specified filenames (empty map falls back to default)
|
||||
// then executes the cmd specified.
|
||||
//
|
||||
// Simply hooks up os.Stdin/err/out to the command and calls Run()
|
||||
//
|
||||
// If you want more fine grained control over your command it's recommended
|
||||
// that you use `Load()` or `Read()` and the `os/exec` package yourself.
|
||||
func Exec(filenames []string, cmd string, cmdArgs []string) error {
|
||||
Load(filenames...)
|
||||
|
||||
command := exec.Command(cmd, cmdArgs...)
|
||||
command.Stdin = os.Stdin
|
||||
command.Stdout = os.Stdout
|
||||
command.Stderr = os.Stderr
|
||||
return command.Run()
|
||||
}
|
||||
|
||||
// Write serializes the given environment and writes it to a file
|
||||
func Write(envMap map[string]string, filename string) error {
|
||||
content, err := Marshal(envMap)
if err != nil {
return err
}
file, err := os.Create(filename)
if err != nil {
return err
}
// make sure the file handle is released even if the write below fails
defer file.Close()
_, err = file.WriteString(content)
return err
|
||||
}
|
||||
|
||||
// Marshal outputs the given environment as a dotenv-formatted environment file.
|
||||
// Each line is in the format: KEY="VALUE" where VALUE is backslash-escaped.
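// For example, the entry {"KEY": `va"lue`} is rendered as the line KEY="va\"lue".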
|
||||
func Marshal(envMap map[string]string) (string, error) {
|
||||
lines := make([]string, 0, len(envMap))
|
||||
for k, v := range envMap {
|
||||
lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v)))
|
||||
}
|
||||
sort.Strings(lines)
|
||||
return strings.Join(lines, "\n"), nil
|
||||
}
|
||||
|
||||
func filenamesOrDefault(filenames []string) []string {
|
||||
if len(filenames) == 0 {
|
||||
return []string{".env"}
|
||||
}
|
||||
return filenames
|
||||
}
|
||||
|
||||
func loadFile(filename string, overload bool) error {
|
||||
envMap, err := readFile(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
currentEnv := map[string]bool{}
|
||||
rawEnv := os.Environ()
|
||||
for _, rawEnvLine := range rawEnv {
|
||||
key := strings.Split(rawEnvLine, "=")[0]
|
||||
currentEnv[key] = true
|
||||
}
|
||||
|
||||
for key, value := range envMap {
|
||||
if !currentEnv[key] || overload {
|
||||
os.Setenv(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func readFile(filename string) (envMap map[string]string, err error) {
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
return Parse(file)
|
||||
}
|
||||
|
||||
func parseLine(line string, envMap map[string]string) (key string, value string, err error) {
|
||||
if len(line) == 0 {
|
||||
err = errors.New("zero length string")
|
||||
return
|
||||
}
|
||||
|
||||
// ditch the comments (but keep quoted hashes)
|
||||
if strings.Contains(line, "#") {
|
||||
segmentsBetweenHashes := strings.Split(line, "#")
|
||||
quotesAreOpen := false
|
||||
var segmentsToKeep []string
|
||||
for _, segment := range segmentsBetweenHashes {
|
||||
if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 {
|
||||
if quotesAreOpen {
|
||||
quotesAreOpen = false
|
||||
segmentsToKeep = append(segmentsToKeep, segment)
|
||||
} else {
|
||||
quotesAreOpen = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(segmentsToKeep) == 0 || quotesAreOpen {
|
||||
segmentsToKeep = append(segmentsToKeep, segment)
|
||||
}
|
||||
}
|
||||
|
||||
line = strings.Join(segmentsToKeep, "#")
|
||||
}
|
||||
|
||||
firstEquals := strings.Index(line, "=")
|
||||
firstColon := strings.Index(line, ":")
|
||||
splitString := strings.SplitN(line, "=", 2)
|
||||
if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) {
|
||||
//this is a yaml-style line
|
||||
splitString = strings.SplitN(line, ":", 2)
|
||||
}
|
||||
|
||||
if len(splitString) != 2 {
|
||||
err = errors.New("Can't separate key from value")
|
||||
return
|
||||
}
|
||||
|
||||
// Parse the key
|
||||
key = splitString[0]
|
||||
if strings.HasPrefix(key, "export") {
|
||||
key = strings.TrimPrefix(key, "export")
|
||||
}
|
||||
key = strings.Trim(key, " ")
|
||||
|
||||
// Parse the value
|
||||
value = parseValue(splitString[1], envMap)
|
||||
return
|
||||
}
|
||||
|
||||
func parseValue(value string, envMap map[string]string) string {
|
||||
|
||||
// trim
|
||||
value = strings.Trim(value, " ")
|
||||
|
||||
// check if we've got quoted values or possible escapes
|
||||
if len(value) > 1 {
|
||||
rs := regexp.MustCompile(`\A'(.*)'\z`)
|
||||
singleQuotes := rs.FindStringSubmatch(value)
|
||||
|
||||
rd := regexp.MustCompile(`\A"(.*)"\z`)
|
||||
doubleQuotes := rd.FindStringSubmatch(value)
|
||||
|
||||
if singleQuotes != nil || doubleQuotes != nil {
|
||||
// pull the quotes off the edges
|
||||
value = value[1 : len(value)-1]
|
||||
}
|
||||
|
||||
if doubleQuotes != nil {
|
||||
// expand newlines
|
||||
escapeRegex := regexp.MustCompile(`\\.`)
|
||||
value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string {
|
||||
c := strings.TrimPrefix(match, `\`)
|
||||
switch c {
|
||||
case "n":
|
||||
return "\n"
|
||||
case "r":
|
||||
return "\r"
|
||||
default:
|
||||
return match
|
||||
}
|
||||
})
|
||||
// unescape characters
|
||||
e := regexp.MustCompile(`\\([^$])`)
|
||||
value = e.ReplaceAllString(value, "$1")
|
||||
}
|
||||
|
||||
if singleQuotes == nil {
|
||||
value = expandVariables(value, envMap)
|
||||
}
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func expandVariables(v string, m map[string]string) string {
|
||||
r := regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`)
|
||||
|
||||
return r.ReplaceAllStringFunc(v, func(s string) string {
|
||||
submatch := r.FindStringSubmatch(s)
|
||||
|
||||
if submatch == nil {
|
||||
return s
|
||||
}
|
||||
if submatch[1] == "\\" || submatch[2] == "(" {
|
||||
return submatch[0][1:]
|
||||
} else if submatch[4] != "" {
|
||||
return m[submatch[4]]
|
||||
}
|
||||
return s
|
||||
})
|
||||
}
|
||||
|
||||
func isIgnoredLine(line string) bool {
|
||||
trimmedLine := strings.Trim(line, " \n\t")
|
||||
return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#")
|
||||
}
|
||||
|
||||
func doubleQuoteEscape(line string) string {
|
||||
for _, c := range doubleQuoteSpecialChars {
|
||||
toReplace := "\\" + string(c)
|
||||
if c == '\n' {
|
||||
toReplace = `\n`
|
||||
}
|
||||
if c == '\r' {
|
||||
toReplace = `\r`
|
||||
}
|
||||
line = strings.Replace(line, string(c), toReplace, -1)
|
||||
}
|
||||
return line
|
||||
}
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
*.log
|
||||
.DS_Store
|
||||
doc
|
||||
tmp
|
||||
pkg
|
||||
*.gem
|
||||
*.pid
|
||||
coverage
|
||||
coverage.data
|
||||
build/*
|
||||
*.pbxuser
|
||||
*.mode1v3
|
||||
.svn
|
||||
profile
|
||||
.console_history
|
||||
.sass-cache/*
|
||||
.rake_tasks~
|
||||
*.log.lck
|
||||
solr/
|
||||
.jhw-cache/
|
||||
jhw.*
|
||||
*.sublime*
|
||||
node_modules/
|
||||
dist/
|
||||
generated/
|
||||
.vendor/
|
||||
bin/*
|
||||
gin-bin
|
||||
.idea/
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
{
|
||||
"Enable": ["vet", "golint", "goimports", "deadcode", "gotype", "ineffassign", "misspell", "nakedret", "unconvert", "megacheck", "varcheck"]
|
||||
}
|
||||
|
|
@ -1 +0,0 @@
|
|||
swp$
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
language: go
|
||||
|
||||
sudo: false
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- go: "1.9.x"
|
||||
- go: "1.10.x"
|
||||
- go: "1.11.x"
|
||||
env:
|
||||
- GO111MODULE=off
|
||||
- go: "1.11.x"
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
- go: "tip"
|
||||
env:
|
||||
- GO111MODULE=off
|
||||
- go: "tip"
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
allow_failures:
|
||||
- go: "tip"
|
||||
|
||||
install: make deps
|
||||
|
||||
script: make ci-test
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
Copyright (c) 2011 Chris Farmiloe
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@ -1,55 +0,0 @@
|
|||
TAGS ?= "sqlite"
|
||||
GO_BIN ?= go
|
||||
|
||||
install:
|
||||
packr
|
||||
$(GO_BIN) install -tags ${TAGS} -v .
|
||||
make tidy
|
||||
|
||||
tidy:
|
||||
ifeq ($(GO111MODULE),on)
|
||||
$(GO_BIN) mod tidy
|
||||
else
|
||||
echo skipping go mod tidy
|
||||
endif
|
||||
|
||||
deps:
|
||||
$(GO_BIN) get github.com/gobuffalo/release
|
||||
$(GO_BIN) get github.com/gobuffalo/packr/packr
|
||||
$(GO_BIN) get -tags ${TAGS} -t ./...
|
||||
make tidy
|
||||
|
||||
build:
|
||||
packr
|
||||
$(GO_BIN) build -v .
|
||||
make tidy
|
||||
|
||||
test:
|
||||
packr
|
||||
$(GO_BIN) test -tags ${TAGS} ./...
|
||||
make tidy
|
||||
|
||||
ci-test:
|
||||
$(GO_BIN) test -tags ${TAGS} -race ./...
|
||||
make tidy
|
||||
|
||||
lint:
|
||||
gometalinter --vendor ./... --deadline=1m --skip=internal
|
||||
make tidy
|
||||
|
||||
update:
|
||||
$(GO_BIN) get -u -tags ${TAGS}
|
||||
make tidy
|
||||
packr
|
||||
make test
|
||||
make install
|
||||
make tidy
|
||||
|
||||
release-test:
|
||||
$(GO_BIN) test -tags ${TAGS} -race ./...
|
||||
make tidy
|
||||
|
||||
release:
|
||||
make tidy
|
||||
release -y -f version.go
|
||||
make tidy
|
||||
|
|
@ -1,214 +0,0 @@
|
|||
[](https://travis-ci.org/markbates/inflect)
|
||||
|
||||
#### INSTALLATION
|
||||
|
||||
go get github.com/markbates/inflect
|
||||
|
||||
#### PACKAGE
|
||||
package inflect
|
||||
|
||||
|
||||
#### FUNCTIONS
|
||||
```go
|
||||
func AddAcronym(word string)
|
||||
func AddHuman(suffix, replacement string)
|
||||
func AddIrregular(singular, plural string)
|
||||
func AddPlural(suffix, replacement string)
|
||||
func AddSingular(suffix, replacement string)
|
||||
func AddUncountable(word string)
|
||||
func Asciify(word string) string
|
||||
func Camelize(word string) string
|
||||
func CamelizeDownFirst(word string) string
|
||||
func Capitalize(word string) string
|
||||
func Dasherize(word string) string
|
||||
func ForeignKey(word string) string
|
||||
func ForeignKeyCondensed(word string) string
|
||||
func Humanize(word string) string
|
||||
func Ordinalize(word string) string
|
||||
func Parameterize(word string) string
|
||||
func ParameterizeJoin(word, sep string) string
|
||||
func Pluralize(word string) string
|
||||
func Singularize(word string) string
|
||||
func Tableize(word string) string
|
||||
func Titleize(word string) string
|
||||
func Typeify(word string) string
|
||||
func Uncountables() map[string]bool
|
||||
func Underscore(word string) string
|
||||
```
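A quick sketch of a few of these package-level helpers in use; the expected outputs follow the examples given for the corresponding `Ruleset` methods further down, so they assume the default ruleset:

```go
package main

import (
	"fmt"

	"github.com/markbates/inflect"
)

func main() {
	fmt.Println(inflect.Pluralize("person"))    // "people"
	fmt.Println(inflect.Camelize("dino_party")) // "DinoParty"
	fmt.Println(inflect.Underscore("BigBen"))   // "big_ben"
	fmt.Println(inflect.Ordinalize("1031"))     // "1031st"
}
```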
|
||||
|
||||
#### TYPES
|
||||
```go
|
||||
type Rule struct {
|
||||
// contains filtered or unexported fields
|
||||
}
|
||||
```
|
||||
|
||||
used by rulesets
|
||||
|
||||
```go
|
||||
type Ruleset struct {
|
||||
// contains filtered or unexported fields
|
||||
}
|
||||
```
|
||||
|
||||
a Ruleset is the config of pluralization rules
|
||||
you can extend the rules with the Add* methods
|
||||
|
||||
```
|
||||
func NewDefaultRuleset() *Ruleset
|
||||
```
|
||||
create a new ruleset and load it with the default
|
||||
set of common English pluralization rules
|
||||
|
||||
```
|
||||
func NewRuleset() *Ruleset
|
||||
```
|
||||
|
||||
create a blank ruleset. Unless you are going to
|
||||
build your own rules from scratch you probably
|
||||
won't need this and can just use the defaultRuleset
|
||||
via the global inflect.* methods
|
||||
|
||||
```
|
||||
func (rs *Ruleset) AddAcronym(word string)
|
||||
```
|
||||
if you use acronyms you may need to add them to the ruleset
|
||||
to prevent underscored versions of words like "HTML" coming out
|
||||
as "h_t_m_l"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) AddHuman(suffix, replacement string)
|
||||
```
|
||||
|
||||
Human rules are applied by humanize to show more friendly
|
||||
versions of words
|
||||
|
||||
```
|
||||
func (rs *Ruleset) AddIrregular(singular, plural string)
|
||||
```
|
||||
|
||||
Add any inconsistent pluralizing/singularizing rules
|
||||
to the set here.
|
||||
|
||||
```
|
||||
func (rs *Ruleset) AddPlural(suffix, replacement string)
|
||||
```
|
||||
|
||||
add a pluralization rule
|
||||
|
||||
```
|
||||
func (rs *Ruleset) AddPluralExact(suffix, replacement string, exact bool)
|
||||
```
|
||||
|
||||
add a pluralization rule with full string match
|
||||
|
||||
```
|
||||
func (rs *Ruleset) AddSingular(suffix, replacement string)
|
||||
```
|
||||
|
||||
add a singular rule
|
||||
|
||||
```
|
||||
func (rs *Ruleset) AddSingularExact(suffix, replacement string, exact bool)
|
||||
```
|
||||
same as AddSingular but you can set `exact` to force
|
||||
a full string match
|
||||
|
||||
```
|
||||
func (rs *Ruleset) AddUncountable(word string)
|
||||
```
|
||||
add a word to this ruleset that has the same singular and plural form
|
||||
for example: "rice"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Asciify(word string) string
|
||||
```
|
||||
transforms Latin characters like é -> e
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Camelize(word string) string
|
||||
```
|
||||
"dino_party" -> "DinoParty"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) CamelizeDownFirst(word string) string
|
||||
```
|
||||
same as Camelcase but with first letter downcased
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Capitalize(word string) string
|
||||
```
|
||||
uppercase first character
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Dasherize(word string) string
|
||||
```
|
||||
"SomeText" -> "some-text"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) ForeignKey(word string) string
|
||||
```
|
||||
an underscored foreign key name "Person" -> "person_id"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) ForeignKeyCondensed(word string) string
|
||||
```
|
||||
a foreign key (with an underscore) "Person" -> "personid"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Humanize(word string) string
|
||||
```
|
||||
First letter of sentence capitalized
|
||||
Uses custom friendly replacements via AddHuman()
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Ordinalize(str string) string
|
||||
```
|
||||
"1031" -> "1031st"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Parameterize(word string) string
|
||||
```
|
||||
param safe dasherized names like "my-param"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) ParameterizeJoin(word, sep string) string
|
||||
```
|
||||
param safe dasherized names with custom separator
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Pluralize(word string) string
|
||||
```
|
||||
returns the plural form of a singular word
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Singularize(word string) string
|
||||
```
|
||||
returns the singular form of a plural word
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Tableize(word string) string
|
||||
```
|
||||
Rails style pluralized table names: "SuperPerson" -> "super_people"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Titleize(word string) string
|
||||
```
|
||||
Capitalize every word in sentence "hello there" -> "Hello There"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Typeify(word string) string
|
||||
```
|
||||
"something_like_this" -> "SomethingLikeThis"
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Uncountables() map[string]bool
|
||||
```
|
||||
|
||||
```
|
||||
func (rs *Ruleset) Underscore(word string) string
|
||||
```
|
||||
|
||||
lowercase underscore version "BigBen" -> "big_ben"
|
||||
|
||||
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
module github.com/markbates/inflect
|
||||
|
||||
require (
|
||||
github.com/gobuffalo/envy v1.6.5
|
||||
github.com/stretchr/testify v1.2.2
|
||||
)
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/gobuffalo/envy v1.6.5 h1:X3is06x7v0nW2xiy2yFbbIjwHz57CD6z6MkvqULTCm8=
|
||||
github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ=
|
||||
github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
|
||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
package inflect
|
||||
|
||||
// Helpers is a map of the helper names to their corresponding inflect functions
|
||||
var Helpers = map[string]interface{}{
|
||||
"asciffy": Asciify,
|
||||
"camelize": Camelize,
|
||||
"camelize_down_first": CamelizeDownFirst,
|
||||
"capitalize": Capitalize,
|
||||
"dasherize": Dasherize,
|
||||
"humanize": Humanize,
|
||||
"ordinalize": Ordinalize,
|
||||
"parameterize": Parameterize,
|
||||
"pluralize": Pluralize,
|
||||
"pluralize_with_size": PluralizeWithSize,
|
||||
"singularize": Singularize,
|
||||
"tableize": Tableize,
|
||||
"typeify": Typeify,
|
||||
"underscore": Underscore,
|
||||
}
|
||||
|
|
@ -1,892 +0,0 @@
|
|||
package inflect
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// baseAcronyms comes from https://en.wikipedia.org/wiki/List_of_information_technology_acronymss
|
||||
const baseAcronyms = `JSON,JWT,ID,UUID,SQL,ACK,ACL,ADSL,AES,ANSI,API,ARP,ATM,BGP,BSS,CAT,CCITT,CHAP,CIDR,CIR,CLI,CPE,CPU,CRC,CRT,CSMA,CMOS,DCE,DEC,DES,DHCP,DNS,DRAM,DSL,DSLAM,DTE,DMI,EHA,EIA,EIGRP,EOF,ESS,FCC,FCS,FDDI,FTP,GBIC,gbps,GEPOF,HDLC,HTTP,HTTPS,IANA,ICMP,IDF,IDS,IEEE,IETF,IMAP,IP,IPS,ISDN,ISP,kbps,LACP,LAN,LAPB,LAPF,LLC,MAC,MAN,Mbps,MC,MDF,MIB,MoCA,MPLS,MTU,NAC,NAT,NBMA,NIC,NRZ,NRZI,NVRAM,OSI,OSPF,OUI,PAP,PAT,PC,PIM,PIM,PCM,PDU,POP3,POP,POTS,PPP,PPTP,PTT,PVST,RADIUS,RAM,RARP,RFC,RIP,RLL,ROM,RSTP,RTP,RCP,SDLC,SFD,SFP,SLARP,SLIP,SMTP,SNA,SNAP,SNMP,SOF,SRAM,SSH,SSID,STP,SYN,TDM,TFTP,TIA,TOFU,UDP,URL,URI,USB,UTP,VC,VLAN,VLSM,VPN,W3C,WAN,WEP,WiFi,WPA,WWW`
|
||||
|
||||
// Rule used by rulesets
|
||||
type Rule struct {
|
||||
suffix string
|
||||
replacement string
|
||||
exact bool
|
||||
}
|
||||
|
||||
// Ruleset a Ruleset is the config of pluralization rules
|
||||
// you can extend the rules with the Add* methods
|
||||
type Ruleset struct {
|
||||
uncountables map[string]bool
|
||||
plurals []*Rule
|
||||
singulars []*Rule
|
||||
humans []*Rule
|
||||
acronyms []*Rule
|
||||
}
|
||||
|
||||
// NewRuleset creates a blank ruleset. Unless you are going to
|
||||
// build your own rules from scratch you probably
|
||||
// won't need this and can just use the defaultRuleset
|
||||
// via the global inflect.* methods
|
||||
func NewRuleset() *Ruleset {
|
||||
rs := new(Ruleset)
|
||||
rs.uncountables = make(map[string]bool)
|
||||
rs.plurals = make([]*Rule, 0)
|
||||
rs.singulars = make([]*Rule, 0)
|
||||
rs.humans = make([]*Rule, 0)
|
||||
rs.acronyms = make([]*Rule, 0)
|
||||
return rs
|
||||
}
|
||||
|
||||
// NewDefaultRuleset creates a new ruleset and load it with the default
|
||||
// set of common English pluralization rules
|
||||
func NewDefaultRuleset() *Ruleset {
|
||||
rs := NewRuleset()
|
||||
rs.AddPlural("movie", "movies")
|
||||
rs.AddPlural("s", "s")
|
||||
rs.AddPlural("testis", "testes")
|
||||
rs.AddPlural("axis", "axes")
|
||||
rs.AddPlural("octopus", "octopi")
|
||||
rs.AddPlural("virus", "viri")
|
||||
rs.AddPlural("octopi", "octopi")
|
||||
rs.AddPlural("viri", "viri")
|
||||
rs.AddPlural("alias", "aliases")
|
||||
rs.AddPlural("status", "statuses")
|
||||
rs.AddPlural("Status", "Statuses")
|
||||
rs.AddPlural("campus", "campuses")
|
||||
rs.AddPlural("bus", "buses")
|
||||
rs.AddPlural("buffalo", "buffaloes")
|
||||
rs.AddPlural("tomato", "tomatoes")
|
||||
rs.AddPlural("tum", "ta")
|
||||
rs.AddPlural("ium", "ia")
|
||||
rs.AddPlural("ta", "ta")
|
||||
rs.AddPlural("ia", "ia")
|
||||
rs.AddPlural("sis", "ses")
|
||||
rs.AddPlural("lf", "lves")
|
||||
rs.AddPlural("rf", "rves")
|
||||
rs.AddPlural("afe", "aves")
|
||||
rs.AddPlural("bfe", "bves")
|
||||
rs.AddPlural("cfe", "cves")
|
||||
rs.AddPlural("dfe", "dves")
|
||||
rs.AddPlural("efe", "eves")
|
||||
rs.AddPlural("gfe", "gves")
|
||||
rs.AddPlural("hfe", "hves")
|
||||
rs.AddPlural("ife", "ives")
|
||||
rs.AddPlural("jfe", "jves")
|
||||
rs.AddPlural("kfe", "kves")
|
||||
rs.AddPlural("lfe", "lves")
|
||||
rs.AddPlural("mfe", "mves")
|
||||
rs.AddPlural("nfe", "nves")
|
||||
rs.AddPlural("ofe", "oves")
|
||||
rs.AddPlural("pfe", "pves")
|
||||
rs.AddPlural("qfe", "qves")
|
||||
rs.AddPlural("rfe", "rves")
|
||||
rs.AddPlural("sfe", "sves")
|
||||
rs.AddPlural("tfe", "tves")
|
||||
rs.AddPlural("ufe", "uves")
|
||||
rs.AddPlural("vfe", "vves")
|
||||
rs.AddPlural("wfe", "wves")
|
||||
rs.AddPlural("xfe", "xves")
|
||||
rs.AddPlural("yfe", "yves")
|
||||
rs.AddPlural("zfe", "zves")
|
||||
rs.AddPlural("hive", "hives")
|
||||
rs.AddPlural("quy", "quies")
|
||||
rs.AddPlural("by", "bies")
|
||||
rs.AddPlural("cy", "cies")
|
||||
rs.AddPlural("dy", "dies")
|
||||
rs.AddPlural("fy", "fies")
|
||||
rs.AddPlural("gy", "gies")
|
||||
rs.AddPlural("hy", "hies")
|
||||
rs.AddPlural("jy", "jies")
|
||||
rs.AddPlural("ky", "kies")
|
||||
rs.AddPlural("ly", "lies")
|
||||
rs.AddPlural("my", "mies")
|
||||
rs.AddPlural("ny", "nies")
|
||||
rs.AddPlural("py", "pies")
|
||||
rs.AddPlural("qy", "qies")
|
||||
rs.AddPlural("ry", "ries")
|
||||
rs.AddPlural("sy", "sies")
|
||||
rs.AddPlural("ty", "ties")
|
||||
rs.AddPlural("vy", "vies")
|
||||
rs.AddPlural("wy", "wies")
|
||||
rs.AddPlural("xy", "xies")
|
||||
rs.AddPlural("zy", "zies")
|
||||
rs.AddPlural("x", "xes")
|
||||
rs.AddPlural("ch", "ches")
|
||||
rs.AddPlural("ss", "sses")
|
||||
rs.AddPlural("sh", "shes")
|
||||
rs.AddPlural("matrix", "matrices")
|
||||
rs.AddPlural("vertix", "vertices")
|
||||
rs.AddPlural("indix", "indices")
|
||||
rs.AddPlural("matrex", "matrices")
|
||||
rs.AddPlural("vertex", "vertices")
|
||||
rs.AddPlural("index", "indices")
|
||||
rs.AddPlural("mouse", "mice")
|
||||
rs.AddPlural("louse", "lice")
|
||||
rs.AddPlural("mice", "mice")
|
||||
rs.AddPlural("lice", "lice")
|
||||
rs.AddPlural("ress", "resses")
|
||||
rs.AddPluralExact("ox", "oxen", true)
|
||||
rs.AddPluralExact("oxen", "oxen", true)
|
||||
rs.AddPluralExact("quiz", "quizzes", true)
|
||||
rs.AddSingular("s", "")
|
||||
rs.AddSingular("ss", "ss")
|
||||
rs.AddSingular("news", "news")
|
||||
rs.AddSingular("ta", "tum")
|
||||
rs.AddSingular("ia", "ium")
|
||||
rs.AddSingular("analyses", "analysis")
|
||||
rs.AddSingular("bases", "basis")
|
||||
rs.AddSingularExact("basis", "basis", true)
|
||||
rs.AddSingular("diagnoses", "diagnosis")
|
||||
rs.AddSingularExact("diagnosis", "diagnosis", true)
|
||||
rs.AddSingular("parentheses", "parenthesis")
|
||||
rs.AddSingular("prognoses", "prognosis")
|
||||
rs.AddSingular("synopses", "synopsis")
|
||||
rs.AddSingular("theses", "thesis")
|
||||
rs.AddSingular("analyses", "analysis")
|
||||
rs.AddSingularExact("analysis", "analysis", true)
|
||||
rs.AddSingular("ovies", "ovie")
|
||||
rs.AddSingular("aves", "afe")
|
||||
rs.AddSingular("bves", "bfe")
|
||||
rs.AddSingular("cves", "cfe")
|
||||
rs.AddSingular("dves", "dfe")
|
||||
rs.AddSingular("eves", "efe")
|
||||
rs.AddSingular("gves", "gfe")
|
||||
rs.AddSingular("hves", "hfe")
|
||||
rs.AddSingular("ives", "ife")
|
||||
rs.AddSingular("jves", "jfe")
|
||||
rs.AddSingular("kves", "kfe")
|
||||
rs.AddSingular("lves", "lfe")
|
||||
rs.AddSingular("mves", "mfe")
|
||||
rs.AddSingular("nves", "nfe")
|
||||
rs.AddSingular("oves", "ofe")
|
||||
rs.AddSingular("pves", "pfe")
|
||||
rs.AddSingular("qves", "qfe")
|
||||
rs.AddSingular("rves", "rfe")
|
||||
rs.AddSingular("sves", "sfe")
|
||||
rs.AddSingular("tves", "tfe")
|
||||
rs.AddSingular("uves", "ufe")
|
||||
rs.AddSingular("vves", "vfe")
|
||||
rs.AddSingular("wves", "wfe")
|
||||
rs.AddSingular("xves", "xfe")
|
||||
rs.AddSingular("yves", "yfe")
|
||||
rs.AddSingular("zves", "zfe")
|
||||
rs.AddSingular("hives", "hive")
|
||||
rs.AddSingular("tives", "tive")
|
||||
rs.AddSingular("lves", "lf")
|
||||
rs.AddSingular("rves", "rf")
|
||||
rs.AddSingular("quies", "quy")
|
||||
rs.AddSingular("bies", "by")
|
||||
rs.AddSingular("cies", "cy")
|
||||
rs.AddSingular("dies", "dy")
|
||||
rs.AddSingular("fies", "fy")
|
||||
rs.AddSingular("gies", "gy")
|
||||
rs.AddSingular("hies", "hy")
|
||||
rs.AddSingular("jies", "jy")
|
||||
rs.AddSingular("kies", "ky")
|
||||
rs.AddSingular("lies", "ly")
|
||||
rs.AddSingular("mies", "my")
|
||||
rs.AddSingular("nies", "ny")
|
||||
rs.AddSingular("pies", "py")
|
||||
rs.AddSingular("qies", "qy")
|
||||
rs.AddSingular("ries", "ry")
|
||||
rs.AddSingular("sies", "sy")
|
||||
rs.AddSingular("ties", "ty")
|
||||
// rs.AddSingular("vies", "vy")
|
||||
rs.AddSingular("wies", "wy")
|
||||
rs.AddSingular("xies", "xy")
|
||||
rs.AddSingular("zies", "zy")
|
||||
rs.AddSingular("series", "series")
|
||||
rs.AddSingular("xes", "x")
|
||||
rs.AddSingular("ches", "ch")
|
||||
rs.AddSingular("sses", "ss")
|
||||
rs.AddSingular("shes", "sh")
|
||||
rs.AddSingular("mice", "mouse")
|
||||
rs.AddSingular("lice", "louse")
|
||||
rs.AddSingular("buses", "bus")
|
||||
rs.AddSingularExact("bus", "bus", true)
|
||||
rs.AddSingular("oes", "o")
|
||||
rs.AddSingular("shoes", "shoe")
|
||||
rs.AddSingular("crises", "crisis")
|
||||
rs.AddSingularExact("crisis", "crisis", true)
|
||||
rs.AddSingular("axes", "axis")
|
||||
rs.AddSingularExact("axis", "axis", true)
|
||||
rs.AddSingular("testes", "testis")
|
||||
rs.AddSingularExact("testis", "testis", true)
|
||||
rs.AddSingular("octopi", "octopus")
|
||||
rs.AddSingularExact("octopus", "octopus", true)
|
||||
rs.AddSingular("viri", "virus")
|
||||
rs.AddSingularExact("virus", "virus", true)
|
||||
rs.AddSingular("statuses", "status")
|
||||
rs.AddSingular("Statuses", "Status")
|
||||
rs.AddSingular("campuses", "campus")
|
||||
rs.AddSingularExact("status", "status", true)
|
||||
rs.AddSingularExact("Status", "Status", true)
|
||||
rs.AddSingularExact("campus", "campus", true)
|
||||
rs.AddSingular("aliases", "alias")
|
||||
rs.AddSingularExact("alias", "alias", true)
|
||||
rs.AddSingularExact("oxen", "ox", true)
|
||||
rs.AddSingular("vertices", "vertex")
|
||||
rs.AddSingular("indices", "index")
|
||||
rs.AddSingular("matrices", "matrix")
|
||||
rs.AddSingularExact("quizzes", "quiz", true)
|
||||
rs.AddSingular("databases", "database")
|
||||
rs.AddSingular("resses", "ress")
|
||||
rs.AddSingular("ress", "ress")
|
||||
rs.AddIrregular("person", "people")
|
||||
rs.AddIrregular("man", "men")
|
||||
rs.AddIrregular("child", "children")
|
||||
rs.AddIrregular("sex", "sexes")
|
||||
rs.AddIrregular("move", "moves")
|
||||
rs.AddIrregular("zombie", "zombies")
|
||||
rs.AddIrregular("Status", "Statuses")
|
||||
rs.AddIrregular("status", "statuses")
|
||||
rs.AddIrregular("campus", "campuses")
|
||||
rs.AddIrregular("human", "humans")
|
||||
rs.AddUncountable("equipment")
|
||||
rs.AddUncountable("information")
|
||||
rs.AddUncountable("rice")
|
||||
rs.AddUncountable("money")
|
||||
rs.AddUncountable("species")
|
||||
rs.AddUncountable("series")
|
||||
rs.AddUncountable("fish")
|
||||
rs.AddUncountable("sheep")
|
||||
rs.AddUncountable("jeans")
|
||||
rs.AddUncountable("police")
|
||||
|
||||
acronyms := strings.Split(baseAcronyms, ",")
|
||||
for _, acr := range acronyms {
|
||||
rs.AddAcronym(acr)
|
||||
}
|
||||
|
||||
return rs
|
||||
}
|
||||
|
||||
// Uncountables returns a map of uncountables in the ruleset
|
||||
func (rs *Ruleset) Uncountables() map[string]bool {
|
||||
return rs.uncountables
|
||||
}
|
||||
|
||||
// AddPlural add a pluralization rule
|
||||
func (rs *Ruleset) AddPlural(suffix, replacement string) {
|
||||
rs.AddPluralExact(suffix, replacement, false)
|
||||
}
|
||||
|
||||
// AddPluralExact add a pluralization rule with full string match
|
||||
func (rs *Ruleset) AddPluralExact(suffix, replacement string, exact bool) {
|
||||
// remove uncountable
|
||||
delete(rs.uncountables, suffix)
|
||||
// create rule
|
||||
r := new(Rule)
|
||||
r.suffix = suffix
|
||||
r.replacement = replacement
|
||||
r.exact = exact
|
||||
// prepend
|
||||
rs.plurals = append([]*Rule{r}, rs.plurals...)
|
||||
}
|
||||
|
||||
// AddSingular add a singular rule
|
||||
func (rs *Ruleset) AddSingular(suffix, replacement string) {
|
||||
rs.AddSingularExact(suffix, replacement, false)
|
||||
}
|
||||
|
||||
// AddSingularExact same as AddSingular but you can set `exact` to force
|
||||
// a full string match
|
||||
func (rs *Ruleset) AddSingularExact(suffix, replacement string, exact bool) {
|
||||
// remove from uncountable
|
||||
delete(rs.uncountables, suffix)
|
||||
// create rule
|
||||
r := new(Rule)
|
||||
r.suffix = suffix
|
||||
r.replacement = replacement
|
||||
r.exact = exact
|
||||
rs.singulars = append([]*Rule{r}, rs.singulars...)
|
||||
}
|
||||
|
||||
// AddHuman Human rules are applied by humanize to show more friendly
|
||||
// versions of words
|
||||
func (rs *Ruleset) AddHuman(suffix, replacement string) {
|
||||
r := new(Rule)
|
||||
r.suffix = suffix
|
||||
r.replacement = replacement
|
||||
rs.humans = append([]*Rule{r}, rs.humans...)
|
||||
}
|
||||
|
||||
// AddIrregular Add any inconsistent pluralizing/singularizing rules
|
||||
// to the set here.
|
||||
func (rs *Ruleset) AddIrregular(singular, plural string) {
|
||||
delete(rs.uncountables, singular)
|
||||
delete(rs.uncountables, plural)
|
||||
rs.AddPlural(singular, plural)
|
||||
rs.AddPlural(plural, plural)
|
||||
rs.AddSingular(plural, singular)
|
||||
}
|
||||
|
||||
// AddAcronym if you use acronyms you may need to add them to the ruleset
|
||||
// to prevent underscored versions of words like "HTML" coming out
|
||||
// as "h_t_m_l"
|
||||
func (rs *Ruleset) AddAcronym(word string) {
|
||||
r := new(Rule)
|
||||
r.suffix = word
|
||||
r.replacement = rs.Titleize(strings.ToLower(word))
|
||||
rs.acronyms = append(rs.acronyms, r)
|
||||
}
|
||||
|
||||
// AddUncountable add a word to this ruleset that has the same singular and plural form
|
||||
// for example: "rice"
|
||||
func (rs *Ruleset) AddUncountable(word string) {
|
||||
rs.uncountables[strings.ToLower(word)] = true
|
||||
}
|
||||
|
||||
func (rs *Ruleset) isUncountable(word string) bool {
|
||||
// handle multiple words by using the last one
|
||||
words := strings.Split(word, " ")
|
||||
if _, exists := rs.uncountables[strings.ToLower(words[len(words)-1])]; exists {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isAcronym reports whether a word is an acronym.
|
||||
func (rs *Ruleset) isAcronym(word string) bool {
|
||||
for _, rule := range rs.acronyms {
|
||||
if strings.ToUpper(rule.suffix) == strings.ToUpper(word) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// PluralizeWithSize pluralizes a word, taking the given count into account
|
||||
func (rs *Ruleset) PluralizeWithSize(word string, size int) string {
|
||||
if size == 1 {
|
||||
return rs.Singularize(word)
|
||||
}
|
||||
return rs.Pluralize(word)
|
||||
}
|
||||
|
||||
// Pluralize returns the plural form of a singular word
|
||||
func (rs *Ruleset) Pluralize(word string) string {
|
||||
if len(word) == 0 {
|
||||
return word
|
||||
}
|
||||
lWord := strings.ToLower(word)
|
||||
if rs.isUncountable(lWord) {
|
||||
return word
|
||||
}
|
||||
|
||||
var candidate string
|
||||
for _, rule := range rs.plurals {
|
||||
if rule.exact {
|
||||
if lWord == rule.suffix {
|
||||
// Capitalized word
|
||||
if lWord[0] != word[0] && lWord[1:] == word[1:] {
|
||||
return rs.Capitalize(rule.replacement)
|
||||
}
|
||||
return rule.replacement
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.EqualFold(word, rule.suffix) {
|
||||
candidate = rule.replacement
|
||||
}
|
||||
|
||||
if strings.HasSuffix(word, rule.suffix) {
|
||||
return replaceLast(word, rule.suffix, rule.replacement)
|
||||
}
|
||||
}
|
||||
|
||||
if candidate != "" {
|
||||
return candidate
|
||||
}
|
||||
return word + "s"
|
||||
}
|
||||
|
||||
//Singularize returns the singular form of a plural word
|
||||
func (rs *Ruleset) Singularize(word string) string {
|
||||
if len(word) <= 1 {
|
||||
return word
|
||||
}
|
||||
lWord := strings.ToLower(word)
|
||||
if rs.isUncountable(lWord) {
|
||||
return word
|
||||
}
|
||||
|
||||
var candidate string
|
||||
|
||||
for _, rule := range rs.singulars {
|
||||
if rule.exact {
|
||||
if lWord == rule.suffix {
|
||||
// Capitalized word
|
||||
if lWord[0] != word[0] && lWord[1:] == word[1:] {
|
||||
return rs.Capitalize(rule.replacement)
|
||||
}
|
||||
return rule.replacement
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.EqualFold(word, rule.suffix) {
|
||||
candidate = rule.replacement
|
||||
}
|
||||
|
||||
if strings.HasSuffix(word, rule.suffix) {
|
||||
return replaceLast(word, rule.suffix, rule.replacement)
|
||||
}
|
||||
}
|
||||
|
||||
if candidate != "" {
|
||||
return candidate
|
||||
}
|
||||
|
||||
return word
|
||||
}
|
||||
|
||||
//Capitalize uppercase first character
|
||||
func (rs *Ruleset) Capitalize(word string) string {
|
||||
if rs.isAcronym(word) {
|
||||
return strings.ToUpper(word)
|
||||
}
|
||||
return strings.ToUpper(word[:1]) + word[1:]
|
||||
}
|
||||
|
||||
//Camelize "dino_party" -> "DinoParty"
|
||||
func (rs *Ruleset) Camelize(word string) string {
|
||||
if rs.isAcronym(word) {
|
||||
return strings.ToUpper(word)
|
||||
}
|
||||
words := splitAtCaseChangeWithTitlecase(word)
|
||||
return strings.Join(words, "")
|
||||
}
|
||||
|
||||
//CamelizeDownFirst same as Camelcase but with first letter downcased
|
||||
func (rs *Ruleset) CamelizeDownFirst(word string) string {
|
||||
word = Camelize(word)
|
||||
return strings.ToLower(word[:1]) + word[1:]
|
||||
}
|
||||
|
||||
//Titleize Capitalize every word in sentence "hello there" -> "Hello There"
|
||||
func (rs *Ruleset) Titleize(word string) string {
|
||||
words := splitAtCaseChangeWithTitlecase(word)
|
||||
result := strings.Join(words, " ")
|
||||
|
||||
var acronymWords []string
|
||||
for index, word := range words {
|
||||
if len(word) == 1 {
|
||||
acronymWords = append(acronymWords, word)
|
||||
}
|
||||
|
||||
if len(word) > 1 || index == len(words)-1 || len(acronymWords) > 1 {
|
||||
acronym := strings.Join(acronymWords, "")
|
||||
if !rs.isAcronym(acronym) {
|
||||
acronymWords = acronymWords[:len(acronymWords)]
|
||||
continue
|
||||
}
|
||||
|
||||
result = strings.Replace(result, strings.Join(acronymWords, " "), acronym, 1)
|
||||
acronymWords = []string{}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (rs *Ruleset) safeCaseAcronyms(word string) string {
|
||||
// convert an acronym like HTML into Html
|
||||
for _, rule := range rs.acronyms {
|
||||
word = strings.Replace(word, rule.suffix, rule.replacement, -1)
|
||||
}
|
||||
return word
|
||||
}
|
||||
|
||||
func (rs *Ruleset) separatedWords(word, sep string) string {
|
||||
word = rs.safeCaseAcronyms(word)
|
||||
words := splitAtCaseChange(word)
|
||||
return strings.Join(words, sep)
|
||||
}
|
||||
|
||||
//Underscore lowercase underscore version "BigBen" -> "big_ben"
|
||||
func (rs *Ruleset) Underscore(word string) string {
|
||||
return rs.separatedWords(word, "_")
|
||||
}
|
||||
|
||||
//Humanize First letter of sentence capitalized
|
||||
// Uses custom friendly replacements via AddHuman()
|
||||
func (rs *Ruleset) Humanize(word string) string {
|
||||
word = replaceLast(word, "_id", "") // strip foreign key kinds
|
||||
// replace and strings in humans list
|
||||
for _, rule := range rs.humans {
|
||||
word = strings.Replace(word, rule.suffix, rule.replacement, -1)
|
||||
}
|
||||
sentence := rs.separatedWords(word, " ")
|
||||
|
||||
r, n := utf8.DecodeRuneInString(sentence)
|
||||
return string(unicode.ToUpper(r)) + sentence[n:]
|
||||
}
|
||||
|
||||
//ForeignKey an underscored foreign key name "Person" -> "person_id"
|
||||
func (rs *Ruleset) ForeignKey(word string) string {
|
||||
return rs.Underscore(rs.Singularize(word)) + "_id"
|
||||
}
|
||||
|
||||
//ForeignKeyCondensed a foreign key (with an underscore) "Person" -> "personid"
|
||||
func (rs *Ruleset) ForeignKeyCondensed(word string) string {
|
||||
return rs.Underscore(word) + "id"
|
||||
}
|
||||
|
||||
//Tableize Rails style pluralized table names: "SuperPerson" -> "super_people"
|
||||
func (rs *Ruleset) Tableize(word string) string {
|
||||
return rs.Pluralize(rs.Underscore(rs.Typeify(word)))
|
||||
}
|
||||
|
||||
var notUrlSafe *regexp.Regexp = regexp.MustCompile(`[^\w\d\-_ ]`)
|
||||
|
||||
//Parameterize param safe dasherized names like "my-param"
|
||||
func (rs *Ruleset) Parameterize(word string) string {
|
||||
return ParameterizeJoin(word, "-")
|
||||
}
|
||||
|
||||
//ParameterizeJoin param safe dasherized names with custom separator
|
||||
func (rs *Ruleset) ParameterizeJoin(word, sep string) string {
|
||||
word = strings.ToLower(word)
|
||||
word = rs.Asciify(word)
|
||||
word = notUrlSafe.ReplaceAllString(word, "")
|
||||
word = strings.Replace(word, " ", sep, -1)
|
||||
if len(sep) > 0 {
|
||||
squash, err := regexp.Compile(sep + "+")
|
||||
if err == nil {
|
||||
word = squash.ReplaceAllString(word, sep)
|
||||
}
|
||||
}
|
||||
word = strings.Trim(word, sep+" ")
|
||||
return word
|
||||
}
|
||||
|
||||
var lookalikes = map[string]*regexp.Regexp{
|
||||
"A": regexp.MustCompile(`À|Á|Â|Ã|Ä|Å`),
|
||||
"AE": regexp.MustCompile(`Æ`),
|
||||
"C": regexp.MustCompile(`Ç`),
|
||||
"E": regexp.MustCompile(`È|É|Ê|Ë`),
|
||||
"G": regexp.MustCompile(`Ğ`),
|
||||
"I": regexp.MustCompile(`Ì|Í|Î|Ï|İ`),
|
||||
"N": regexp.MustCompile(`Ñ`),
|
||||
"O": regexp.MustCompile(`Ò|Ó|Ô|Õ|Ö|Ø`),
|
||||
"S": regexp.MustCompile(`Ş`),
|
||||
"U": regexp.MustCompile(`Ù|Ú|Û|Ü`),
|
||||
"Y": regexp.MustCompile(`Ý`),
|
||||
"ss": regexp.MustCompile(`ß`),
|
||||
"a": regexp.MustCompile(`à|á|â|ã|ä|å`),
|
||||
"ae": regexp.MustCompile(`æ`),
|
||||
"c": regexp.MustCompile(`ç`),
|
||||
"e": regexp.MustCompile(`è|é|ê|ë`),
|
||||
"g": regexp.MustCompile(`ğ`),
|
||||
"i": regexp.MustCompile(`ì|í|î|ï|ı`),
|
||||
"n": regexp.MustCompile(`ñ`),
|
||||
"o": regexp.MustCompile(`ò|ó|ô|õ|ö|ø`),
|
||||
"s": regexp.MustCompile(`ş`),
|
||||
"u": regexp.MustCompile(`ù|ú|û|ü|ũ|ū|ŭ|ů|ű|ų`),
|
||||
"y": regexp.MustCompile(`ý|ÿ`),
|
||||
}
|
||||
|
||||
//Asciify transforms Latin characters like é -> e
|
||||
func (rs *Ruleset) Asciify(word string) string {
|
||||
for repl, regex := range lookalikes {
|
||||
word = regex.ReplaceAllString(word, repl)
|
||||
}
|
||||
return word
|
||||
}
|
||||
|
||||
var tablePrefix = regexp.MustCompile(`^[^.]*\.`)
|
||||
|
||||
//Typeify "something_like_this" -> "SomethingLikeThis"
|
||||
func (rs *Ruleset) Typeify(word string) string {
|
||||
word = tablePrefix.ReplaceAllString(word, "")
|
||||
return rs.Camelize(rs.Singularize(word))
|
||||
}
|
||||
|
||||
//Dasherize "SomeText" -> "some-text"
|
||||
func (rs *Ruleset) Dasherize(word string) string {
|
||||
return rs.separatedWords(word, "-")
|
||||
}
|
||||
|
||||
//Ordinalize "1031" -> "1031st"
|
||||
func (rs *Ruleset) Ordinalize(str string) string {
|
||||
number, err := strconv.Atoi(str)
|
||||
if err != nil {
|
||||
return str
|
||||
}
|
||||
switch abs(number) % 100 {
|
||||
case 11, 12, 13:
|
||||
return fmt.Sprintf("%dth", number)
|
||||
default:
|
||||
switch abs(number) % 10 {
|
||||
case 1:
|
||||
return fmt.Sprintf("%dst", number)
|
||||
case 2:
|
||||
return fmt.Sprintf("%dnd", number)
|
||||
case 3:
|
||||
return fmt.Sprintf("%drd", number)
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%dth", number)
|
||||
}
|
||||
|
||||
//ForeignKeyToAttribute returns the attribute name from the foreign key
|
||||
func (rs *Ruleset) ForeignKeyToAttribute(str string) string {
|
||||
w := rs.Camelize(str)
|
||||
if strings.HasSuffix(w, "Id") {
|
||||
return strings.TrimSuffix(w, "Id") + "ID"
|
||||
}
|
||||
return w
|
||||
}
|
||||
|
||||
//LoadReader loads rules from io.Reader param
|
||||
func (rs *Ruleset) LoadReader(r io.Reader) error {
|
||||
m := map[string]string{}
|
||||
err := json.NewDecoder(r).Decode(&m)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not decode inflection JSON from reader: %s", err)
|
||||
}
|
||||
for s, p := range m {
|
||||
defaultRuleset.AddIrregular(s, p)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/////////////////////////////////////////
|
||||
// the default global ruleset
|
||||
//////////////////////////////////////////
|
||||
|
||||
var defaultRuleset *Ruleset
|
||||
|
||||
//LoadReader loads rules from io.Reader param
|
||||
func LoadReader(r io.Reader) error {
|
||||
return defaultRuleset.LoadReader(r)
|
||||
}
|
||||
|
||||
func init() {
|
||||
defaultRuleset = NewDefaultRuleset()
|
||||
|
||||
pwd, _ := os.Getwd()
|
||||
cfg := filepath.Join(pwd, "inflections.json")
|
||||
if p := os.Getenv("INFLECT_PATH"); p != "" {
|
||||
cfg = p
|
||||
}
|
||||
if _, err := os.Stat(cfg); err == nil {
|
||||
b, err := ioutil.ReadFile(cfg)
|
||||
if err != nil {
|
||||
fmt.Printf("could not read inflection file %s (%s)\n", cfg, err)
|
||||
return
|
||||
}
|
||||
if err = defaultRuleset.LoadReader(bytes.NewReader(b)); err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Uncountables returns the map of uncountable words in the default ruleset
|
||||
func Uncountables() map[string]bool {
|
||||
return defaultRuleset.Uncountables()
|
||||
}
|
||||
|
||||
//AddPlural adds plural to the ruleset
|
||||
func AddPlural(suffix, replacement string) {
|
||||
defaultRuleset.AddPlural(suffix, replacement)
|
||||
}
|
||||
|
||||
//AddSingular adds singular to the ruleset
|
||||
func AddSingular(suffix, replacement string) {
|
||||
defaultRuleset.AddSingular(suffix, replacement)
|
||||
}
|
||||
|
||||
//AddHuman adds human
|
||||
func AddHuman(suffix, replacement string) {
|
||||
defaultRuleset.AddHuman(suffix, replacement)
|
||||
}
|
||||
|
||||
func AddIrregular(singular, plural string) {
|
||||
defaultRuleset.AddIrregular(singular, plural)
|
||||
}
|
||||
|
||||
func AddAcronym(word string) {
|
||||
defaultRuleset.AddAcronym(word)
|
||||
}
|
||||
|
||||
func AddUncountable(word string) {
|
||||
defaultRuleset.AddUncountable(word)
|
||||
}
|
||||
|
||||
func Pluralize(word string) string {
|
||||
return defaultRuleset.Pluralize(word)
|
||||
}
|
||||
|
||||
func PluralizeWithSize(word string, size int) string {
|
||||
return defaultRuleset.PluralizeWithSize(word, size)
|
||||
}
|
||||
|
||||
func Singularize(word string) string {
|
||||
return defaultRuleset.Singularize(word)
|
||||
}
|
||||
|
||||
func Capitalize(word string) string {
|
||||
return defaultRuleset.Capitalize(word)
|
||||
}
|
||||
|
||||
func Camelize(word string) string {
|
||||
return defaultRuleset.Camelize(word)
|
||||
}
|
||||
|
||||
func CamelizeDownFirst(word string) string {
|
||||
return defaultRuleset.CamelizeDownFirst(word)
|
||||
}
|
||||
|
||||
func Titleize(word string) string {
|
||||
return defaultRuleset.Titleize(word)
|
||||
}
|
||||
|
||||
func Underscore(word string) string {
|
||||
return defaultRuleset.Underscore(word)
|
||||
}
|
||||
|
||||
func Humanize(word string) string {
|
||||
return defaultRuleset.Humanize(word)
|
||||
}
|
||||
|
||||
func ForeignKey(word string) string {
|
||||
return defaultRuleset.ForeignKey(word)
|
||||
}
|
||||
|
||||
func ForeignKeyCondensed(word string) string {
|
||||
return defaultRuleset.ForeignKeyCondensed(word)
|
||||
}
|
||||
|
||||
func Tableize(word string) string {
|
||||
return defaultRuleset.Tableize(word)
|
||||
}
|
||||
|
||||
func Parameterize(word string) string {
|
||||
return defaultRuleset.Parameterize(word)
|
||||
}
|
||||
|
||||
func ParameterizeJoin(word, sep string) string {
|
||||
return defaultRuleset.ParameterizeJoin(word, sep)
|
||||
}
|
||||
|
||||
func Typeify(word string) string {
|
||||
return defaultRuleset.Typeify(word)
|
||||
}
|
||||
|
||||
func Dasherize(word string) string {
|
||||
return defaultRuleset.Dasherize(word)
|
||||
}
|
||||
|
||||
func Ordinalize(word string) string {
|
||||
return defaultRuleset.Ordinalize(word)
|
||||
}
|
||||
|
||||
func Asciify(word string) string {
|
||||
return defaultRuleset.Asciify(word)
|
||||
}
|
||||
|
||||
func ForeignKeyToAttribute(word string) string {
|
||||
return defaultRuleset.ForeignKeyToAttribute(word)
|
||||
}
|
||||
|
||||
// helper funcs
|
||||
|
||||
func reverse(s string) string {
|
||||
o := make([]rune, utf8.RuneCountInString(s))
|
||||
i := len(o)
|
||||
for _, c := range s {
|
||||
i--
|
||||
o[i] = c
|
||||
}
|
||||
return string(o)
|
||||
}
|
||||
|
||||
func isSpacerChar(c rune) bool {
|
||||
switch {
|
||||
case c == rune("_"[0]):
|
||||
return true
|
||||
case c == rune(" "[0]):
|
||||
return true
|
||||
case c == rune(":"[0]):
|
||||
return true
|
||||
case c == rune("-"[0]):
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func splitAtCaseChange(s string) []string {
|
||||
words := make([]string, 0)
|
||||
word := make([]rune, 0)
|
||||
for _, c := range s {
|
||||
spacer := isSpacerChar(c)
|
||||
if len(word) > 0 {
|
||||
if unicode.IsUpper(c) || spacer {
|
||||
words = append(words, string(word))
|
||||
word = make([]rune, 0)
|
||||
}
|
||||
}
|
||||
if !spacer {
|
||||
word = append(word, unicode.ToLower(c))
|
||||
}
|
||||
}
|
||||
words = append(words, string(word))
|
||||
return words
|
||||
}
|
||||
|
||||
func splitAtCaseChangeWithTitlecase(s string) []string {
|
||||
words := make([]string, 0)
|
||||
word := make([]rune, 0)
|
||||
|
||||
for _, c := range s {
|
||||
spacer := isSpacerChar(c)
|
||||
if len(word) > 0 {
|
||||
if unicode.IsUpper(c) || spacer {
|
||||
words = append(words, string(word))
|
||||
word = make([]rune, 0)
|
||||
}
|
||||
}
|
||||
if !spacer {
|
||||
if len(word) > 0 {
|
||||
word = append(word, unicode.ToLower(c))
|
||||
} else {
|
||||
word = append(word, unicode.ToUpper(c))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
words = append(words, string(word))
|
||||
return words
|
||||
}
|
||||
|
||||
func replaceLast(s, match, repl string) string {
|
||||
// reverse strings
|
||||
srev := reverse(s)
|
||||
mrev := reverse(match)
|
||||
rrev := reverse(repl)
|
||||
// match first and reverse back
|
||||
return reverse(strings.Replace(srev, mrev, rrev, 1))
|
||||
}
|
||||
|
||||
func abs(x int) int {
|
||||
if x < 0 {
|
||||
return -x
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
{
|
||||
"feedback": "feedback",
|
||||
"buffalo!": "buffalos!"
|
||||
}
|
||||
|
|
@ -1,163 +0,0 @@
|
|||
package inflect
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/gobuffalo/envy"
|
||||
)
|
||||
|
||||
// Name is a string that represents the "name" of a thing, like an app, model, etc...
|
||||
type Name string
|
||||
|
||||
// Title version of a name. ie. "foo_bar" => "Foo Bar"
|
||||
func (n Name) Title() string {
|
||||
x := strings.Split(string(n), "/")
|
||||
for i, s := range x {
|
||||
x[i] = Titleize(s)
|
||||
}
|
||||
|
||||
return strings.Join(x, " ")
|
||||
}
|
||||
|
||||
// Underscore version of a name. ie. "FooBar" => "foo_bar"
|
||||
func (n Name) Underscore() string {
	w := string(n)
	if strings.ToUpper(w) == w {
		return strings.ToLower(w)
	}
	return Underscore(w)
}

// Plural version of a name
func (n Name) Plural() string {
	return Pluralize(string(n))
}

// Singular version of a name
func (n Name) Singular() string {
	return Singularize(string(n))
}

// Camel version of a name
func (n Name) Camel() string {
	c := Camelize(string(n))
	if strings.HasSuffix(c, "Id") {
		c = strings.TrimSuffix(c, "Id")
		c += "ID"
	}
	return c
}

// Model version of a name. ie. "user" => "User"
func (n Name) Model() string {
	x := strings.Split(string(n), "/")
	for i, s := range x {
		x[i] = Camelize(Singularize(s))
	}

	return strings.Join(x, "")
}

// Resource version of a name
func (n Name) Resource() string {
	name := n.Underscore()
	x := strings.FieldsFunc(name, func(r rune) bool {
		return r == '_' || r == '/'
	})

	for i, w := range x {
		if i == len(x)-1 {
			x[i] = Camelize(Pluralize(strings.ToLower(w)))
			continue
		}

		x[i] = Camelize(w)
	}

	return strings.Join(x, "")
}

// ModelPlural version of a name. ie. "user" => "Users"
func (n Name) ModelPlural() string {
	return Camelize(Pluralize(n.Model()))
}

// File version of a name
func (n Name) File() string {
	return Underscore(Camelize(string(n)))
}

// Table version of a name
func (n Name) Table() string {
	return Underscore(Pluralize(string(n)))
}

// UnderSingular version of a name
func (n Name) UnderSingular() string {
	return Underscore(Singularize(string(n)))
}

// PluralCamel version of a name
func (n Name) PluralCamel() string {
	return Pluralize(Camelize(string(n)))
}

// PluralUnder version of a name
func (n Name) PluralUnder() string {
	return Pluralize(Underscore(string(n)))
}

// URL version of a name
func (n Name) URL() string {
	return n.PluralUnder()
}

// CamelSingular version of a name
func (n Name) CamelSingular() string {
	return Camelize(Singularize(string(n)))
}

// VarCaseSingular version of a name. ie. "FooBar" => "fooBar"
func (n Name) VarCaseSingular() string {
	return CamelizeDownFirst(Singularize(Underscore(n.Resource())))
}

// VarCasePlural version of a name. ie. "FooBar" => "fooBar"
func (n Name) VarCasePlural() string {
	return CamelizeDownFirst(n.Resource())
}

// Lower case version of a string
func (n Name) Lower() string {
	return strings.ToLower(string(n))
}

// ParamID returns foo_bar_id
func (n Name) ParamID() string {
	return fmt.Sprintf("%s_id", strings.Replace(n.UnderSingular(), "/", "_", -1))
}

// Package returns go package
func (n Name) Package() string {
	key := string(n)

	for _, gp := range envy.GoPaths() {
		key = strings.TrimPrefix(key, filepath.Join(gp, "src"))
		key = strings.TrimPrefix(key, gp)
	}
	key = strings.TrimPrefix(key, string(filepath.Separator))

	key = strings.Replace(key, "\\", "/", -1)
	return key
}

// Char returns first character in lower case, this is useful for methods inside a struct.
func (n Name) Char() string {
	return strings.ToLower(string(n[0]))
}

func (n Name) String() string {
	return string(n)
}

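The Name helpers above are plain string transformations that build on the package's Camelize, Pluralize, Singularize and Underscore functions. A minimal sketch of what they produce, assuming the package is importable as github.com/markbates/inflect and that Name is an exported string type there (as the method bodies suggest); the expected outputs in the comments are illustrative:

```go
package main

import (
	"fmt"

	"github.com/markbates/inflect"
)

func main() {
	n := inflect.Name("admin_user")

	fmt.Println(n.Camel())   // expected: "AdminUser"
	fmt.Println(n.Table())   // expected: "admin_users"
	fmt.Println(n.ParamID()) // expected: "admin_user_id"
	fmt.Println(n.Model())   // expected: "AdminUser" (singularized, then camelized)
}
```
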
@ -1,12 +0,0 @@
# github.com/markbates/inflect Stands on the Shoulders of Giants

github.com/markbates/inflect does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants this project would not be possible. Please make sure to check them out and thank them for all of their hard work.

Thank you to the following **GIANTS**:

* [github.com/gobuffalo/envy](https://godoc.org/github.com/gobuffalo/envy)
* [github.com/joho/godotenv](https://godoc.org/github.com/joho/godotenv)
* [github.com/markbates/inflect](https://godoc.org/github.com/markbates/inflect)

@ -1,3 +0,0 @@
package inflect

const Version = "v1.0.4"

@ -78,7 +78,7 @@ Package `tag` allows adding or modifying tags in the current context.

[embedmd]:# (internal/readme/tags.go new)
```go
ctx, err = tag.New(ctx,
ctx, err := tag.New(ctx,
	tag.Insert(osKey, "macOS-10.12.5"),
	tag.Upsert(userIDKey, "cde36753ed"),
)
```

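For context, the README hunk above only fixes the short variable declaration (`:=`); the tag map it creates travels with the context. A minimal sketch of the round trip, where osKey and userIDKey are placeholder keys defined here for illustration and not part of the diff:

```go
package main

import (
	"context"
	"fmt"

	"go.opencensus.io/tag"
)

func main() {
	// Hypothetical keys, mirroring the README example.
	osKey := tag.MustNewKey("example.com/keys/user-os")
	userIDKey := tag.MustNewKey("example.com/keys/user-id")

	ctx, err := tag.New(context.Background(),
		tag.Insert(osKey, "macOS-10.12.5"),
		tag.Upsert(userIDKey, "cde36753ed"),
	)
	if err != nil {
		panic(err)
	}

	// The tag map can be read back from the context later.
	if m := tag.FromContext(ctx); m != nil {
		v, ok := m.Value(osKey)
		fmt.Println(v, ok) // "macOS-10.12.5" true
	}
}
```
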
@ -1,12 +1,14 @@
module go.opencensus.io

require (
	github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6
	github.com/golang/protobuf v1.3.1
	github.com/google/go-cmp v0.3.0
	github.com/hashicorp/golang-lru v0.5.1
	golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09
	golang.org/x/net v0.0.0-20190620200207-3b0461eec859
	golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect
	golang.org/x/text v0.3.2 // indirect
	google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect
	google.golang.org/grpc v1.20.1
)

go 1.13

@ -3,6 +3,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=

@ -10,20 +12,19 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@ -45,6 +46,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@ -33,5 +33,5 @@ var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version())
// end as a monotonic time.
// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
func MonotonicEndTime(start time.Time) time.Time {
	return start.Add(time.Now().Sub(start))
	return start.Add(time.Since(start))
}

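The rewrite above is behaviour-preserving: time.Since(start) is defined as time.Now().Sub(start), and adding the elapsed duration back onto start yields an end time that still carries a monotonic clock reading. A small standalone sketch of the idea (names here are illustrative, not the vendored code):

```go
package main

import (
	"fmt"
	"time"
)

// monotonicEndTime mirrors the helper above: it returns an end time
// derived from start plus the elapsed monotonic duration.
func monotonicEndTime(start time.Time) time.Time {
	return start.Add(time.Since(start))
}

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond)
	end := monotonicEndTime(start)
	fmt.Println(end.Sub(start) >= 10*time.Millisecond) // true
}
```
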
@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io"

// Version is the current release version of OpenCensus in use.
func Version() string {
	return "0.22.0"
	return "0.23.0"
}

@ -16,9 +16,9 @@
package ocgrpc

import (
	"context"
	"time"

	"context"
	"go.opencensus.io/tag"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/stats"

@ -30,7 +30,7 @@ func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo)
	startTime := time.Now()
	if info == nil {
		if grpclog.V(2) {
			grpclog.Infof("clientHandler.TagRPC called with nil info.", info.FullMethodName)
			grpclog.Info("clientHandler.TagRPC called with nil info.")
		}
		return ctx
	}

@ -61,14 +61,14 @@ var (
// Server tags are applied to the context used to process each RPC, as well as
// the measures at the end of each RPC.
var (
	KeyServerMethod, _ = tag.NewKey("grpc_server_method")
	KeyServerStatus, _ = tag.NewKey("grpc_server_status")
	KeyServerMethod = tag.MustNewKey("grpc_server_method")
	KeyServerStatus = tag.MustNewKey("grpc_server_status")
)

// Client tags are applied to measures at the end of each RPC.
var (
	KeyClientMethod, _ = tag.NewKey("grpc_client_method")
	KeyClientStatus, _ = tag.NewKey("grpc_client_status")
	KeyClientMethod = tag.MustNewKey("grpc_client_method")
	KeyClientStatus = tag.MustNewKey("grpc_client_status")
)

var (

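The switch from tag.NewKey (which returns an error that the old declarations discarded with `_`) to tag.MustNewKey (which panics on an invalid key name) removes the silently ignored error values from these package-level declarations. A minimal sketch of the difference, assuming the vendored go.opencensus.io/tag package:

```go
package main

import (
	"fmt"

	"go.opencensus.io/tag"
)

func main() {
	// Old style: the error has to be handled (or, as in the old code, dropped).
	k1, err := tag.NewKey("grpc_server_method")
	if err != nil {
		panic(err)
	}

	// New style: an invalid name panics at initialization time instead,
	// which suits hard-coded, package-level key names.
	k2 := tag.MustNewKey("grpc_server_status")

	fmt.Println(k1.Name(), k2.Name())
}
```
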
@ -128,7 +128,7 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ
		// TODO: Handle cases where ContentLength is not set.
	} else if r.ContentLength > 0 {
		span.AddMessageReceiveEvent(0, /* TODO: messageID */
			int64(r.ContentLength), -1)
			r.ContentLength, -1)
	}
	return r.WithContext(ctx), span.End
}

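The conversion removed above was redundant: r.ContentLength is already an int64 in net/http, and the span event method takes int64 arguments. A hedged sketch of the call shape, assuming the trace.Span API of this vendored OpenCensus version; the function and span name are illustrative only:

```go
package main

import (
	"context"
	"net/http"

	"go.opencensus.io/trace"
)

// recordRequestSize sketches the pattern above: record the request body
// size on a span when Content-Length is known.
func recordRequestSize(ctx context.Context, r *http.Request) {
	_, span := trace.StartSpan(ctx, "example.Request")
	defer span.End()

	if r.ContentLength > 0 {
		// messageID 0 is a placeholder; -1 means the compressed size is unknown.
		span.AddMessageReceiveEvent(0, r.ContentLength, -1)
	}
}

func main() {
	req, _ := http.NewRequest("POST", "https://example.com", nil)
	req.ContentLength = 42
	recordRequestSize(context.Background(), req)
}
```
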
@ -174,8 +174,6 @@ type trackingResponseWriter struct {
// Compile time assertion for ResponseWriter interface
var _ http.ResponseWriter = (*trackingResponseWriter)(nil)

var logTagsErrorOnce sync.Once

func (t *trackingResponseWriter) end(tags *addedTags) {
	t.endOnce.Do(func() {
		if t.statusCode == 0 {

@ -92,38 +92,38 @@ var (
	// The value of this tag can be controlled by the HTTP client, so you need
	// to watch out for potentially generating high-cardinality labels in your
	// metrics backend if you use this tag in views.
	Host, _ = tag.NewKey("http.host")
	Host = tag.MustNewKey("http.host")

	// StatusCode is the numeric HTTP response status code,
	// or "error" if a transport error occurred and no status code was read.
	StatusCode, _ = tag.NewKey("http.status")
	StatusCode = tag.MustNewKey("http.status")

	// Path is the URL path (not including query string) in the request.
	//
	// The value of this tag can be controlled by the HTTP client, so you need
	// to watch out for potentially generating high-cardinality labels in your
	// metrics backend if you use this tag in views.
	Path, _ = tag.NewKey("http.path")
	Path = tag.MustNewKey("http.path")

	// Method is the HTTP method of the request, capitalized (GET, POST, etc.).
	Method, _ = tag.NewKey("http.method")
	Method = tag.MustNewKey("http.method")

	// KeyServerRoute is a low cardinality string representing the logical
	// handler of the request. This is usually the pattern registered on the a
	// ServeMux (or similar string).
	KeyServerRoute, _ = tag.NewKey("http_server_route")
	KeyServerRoute = tag.MustNewKey("http_server_route")
)

// Client tag keys.
var (
	// KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.).
	KeyClientMethod, _ = tag.NewKey("http_client_method")
	KeyClientMethod = tag.MustNewKey("http_client_method")
	// KeyClientPath is the URL path (not including query string).
	KeyClientPath, _ = tag.NewKey("http_client_path")
	KeyClientPath = tag.MustNewKey("http_client_path")
	// KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received.
	KeyClientStatus, _ = tag.NewKey("http_client_status")
	KeyClientStatus = tag.MustNewKey("http_client_status")
	// KeyClientHost is the value of the request Host header.
	KeyClientHost, _ = tag.NewKey("http_client_host")
	KeyClientHost = tag.MustNewKey("http_client_host")
)

// Default distributions used by views in this package.

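These ochttp tag keys are typically consumed by views. A minimal sketch of registering a custom view broken down by two of the keys above, assuming the vendored go.opencensus.io packages; the view name and choice of measure are illustrative, not part of the diff:

```go
package main

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

func main() {
	// Illustrative view: count server requests grouped by HTTP method and route.
	v := &view.View{
		Name:        "example.com/http/server/request_count_by_method",
		Description: "Server request count grouped by method and route",
		Measure:     ochttp.ServerLatency,
		TagKeys:     []tag.Key{ochttp.Method, ochttp.KeyServerRoute},
		Aggregation: view.Count(),
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}
}
```
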
@ -186,6 +186,8 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status {
		code = trace.StatusCodeCancelled
	case http.StatusBadRequest:
		code = trace.StatusCodeInvalidArgument
	case http.StatusUnprocessableEntity:
		code = trace.StatusCodeInvalidArgument
	case http.StatusGatewayTimeout:
		code = trace.StatusCodeDeadlineExceeded
	case http.StatusNotFound:

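With the added case, a 422 response now maps to an InvalidArgument trace status rather than falling through to the default mapping. A quick sketch, assuming TraceStatus remains exported from the vendored ochttp package as shown in the hunk header:

```go
package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	s := ochttp.TraceStatus(http.StatusUnprocessableEntity, "422 Unprocessable Entity")
	fmt.Println(s.Code == trace.StatusCodeInvalidArgument) // true with the change above
}
```
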
@ -23,9 +23,10 @@ const (
	// does not have cluster names as an internal concept so this may be
	// set to any meaningful value within the environment. For example,
	// GKE clusters have a name which can be used for this label.
	K8SKeyClusterName = "k8s.cluster.name"
	K8SKeyNamespaceName = "k8s.namespace.name"
	K8SKeyPodName = "k8s.pod.name"
	K8SKeyClusterName    = "k8s.cluster.name"
	K8SKeyNamespaceName  = "k8s.namespace.name"
	K8SKeyPodName        = "k8s.pod.name"
	K8SKeyDeploymentName = "k8s.deployment.name"
)

// Constants for Container resources.

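The new K8SKeyDeploymentName constant joins the existing Kubernetes label keys. A minimal sketch of how such keys are typically attached to a resource description, assuming the vendored go.opencensus.io/resource package and its resourcekeys constants; the label values here are illustrative:

```go
package main

import (
	"fmt"

	"go.opencensus.io/resource"
	"go.opencensus.io/resource/resourcekeys"
)

func main() {
	// Illustrative resource describing a container running in a Kubernetes deployment.
	res := &resource.Resource{
		Type: resourcekeys.ContainerType,
		Labels: map[string]string{
			resourcekeys.K8SKeyClusterName:    "demo-cluster",
			resourcekeys.K8SKeyNamespaceName:  "default",
			resourcekeys.K8SKeyPodName:        "demo-pod-abc123",
			resourcekeys.K8SKeyDeploymentName: "demo-deployment",
		},
	}
	fmt.Printf("%s %v\n", res.Type, res.Labels)
}
```
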