mirror of https://github.com/kubernetes/kops.git

commit 0bc3015ce6

Merge pull request #15060 from kubernetes/dependencies/update-1674799703

Update dependencies

go.mod (10 lines changed)
@@ -14,15 +14,15 @@ require (
 	github.com/Masterminds/sprig/v3 v3.2.3
 	github.com/apparentlymart/go-cidr v1.1.0
 	github.com/aws/amazon-ec2-instance-selector/v2 v2.4.0
-	github.com/aws/aws-sdk-go v1.44.183
+	github.com/aws/aws-sdk-go v1.44.188
 	github.com/blang/semver/v4 v4.0.0
 	github.com/cert-manager/cert-manager v1.11.0
-	github.com/digitalocean/godo v1.93.0
+	github.com/digitalocean/godo v1.95.0
 	github.com/go-ini/ini v1.67.0
 	github.com/go-logr/logr v1.2.3
 	github.com/gogo/protobuf v1.3.2
 	github.com/google/go-cmp v0.5.9
-	github.com/google/go-containerregistry v0.12.1
+	github.com/google/go-containerregistry v0.13.0
 	github.com/google/go-tpm v0.3.3
 	github.com/google/go-tpm-tools v0.3.10
 	github.com/google/uuid v1.3.0
@@ -43,7 +43,7 @@ require (
 	github.com/weaveworks/mesh v0.0.0-20191105120815-58dbcc3e8e63
 	go.uber.org/multierr v1.9.0
 	golang.org/x/crypto v0.5.0
-	golang.org/x/exp v0.0.0-20230118134722-a68e582fa157
+	golang.org/x/exp v0.0.0-20230126173853-a67bb567ff2e
 	golang.org/x/net v0.5.0
 	golang.org/x/oauth2 v0.4.0
 	golang.org/x/sync v0.1.0
@@ -62,7 +62,7 @@ require (
 	k8s.io/cloud-provider-gcp/providers v0.25.5
 	k8s.io/component-base v0.26.1
 	k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9
-	k8s.io/klog/v2 v2.80.1
+	k8s.io/klog/v2 v2.90.0
 	k8s.io/kubectl v0.26.1
 	k8s.io/kubelet v0.26.1
 	k8s.io/mount-utils v0.26.1
go.sum (20 lines changed)
@@ -122,8 +122,8 @@ github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z
 github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
 github.com/aws/amazon-ec2-instance-selector/v2 v2.4.0 h1:9l68/pwVYm6EOAeBmoVUL4ekw6VlbwtPyX9/F+IpMxQ=
 github.com/aws/amazon-ec2-instance-selector/v2 v2.4.0/go.mod h1:AEJrtkLkCkfIBIazidrVrgZqaXl+9dxI/wRgjdw+7G0=
-github.com/aws/aws-sdk-go v1.44.183 h1:mUk45JZTIMMg9m8GmrbvACCsIOKtKezXRxp06uI5Ahk=
-github.com/aws/aws-sdk-go v1.44.183/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.188 h1:NCN6wFDWKU72Ka+f7cCk3HRj1KxkEXhRdr7lO8oBRRQ=
+github.com/aws/aws-sdk-go v1.44.188/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -186,8 +186,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitalocean/godo v1.93.0 h1:N0K9z2yssZVP7nBHQ32P1Wemd5yeiJdH4ROg+7ySRxY=
-github.com/digitalocean/godo v1.93.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
+github.com/digitalocean/godo v1.95.0 h1:S48/byPKui7RHZc1wYEPfRvkcEvToADNb5I3guu95xg=
+github.com/digitalocean/godo v1.95.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA=
 github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
 github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc=
@@ -330,8 +330,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-containerregistry v0.12.1 h1:W1mzdNUTx4Zla4JaixCRLhORcR7G6KxE5hHl5fkPsp8=
-github.com/google/go-containerregistry v0.12.1/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k=
+github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k=
+github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
 github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
 github.com/google/go-sev-guest v0.4.1 h1:IjxtGAvzR+zSyAqMc1FWfYKCg1cwPkBly9+Xog3YMZc=
@@ -768,8 +768,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230118134722-a68e582fa157 h1:fiNkyhJPUvxbRPbCqY/D9qdjmPzfHcpK3P4bM4gioSY=
-golang.org/x/exp v0.0.0-20230118134722-a68e582fa157/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230126173853-a67bb567ff2e h1:nEzRHNOazEST44vMvEwxGxnYGrzXEmxJmnti5mKSWTk=
+golang.org/x/exp v0.0.0-20230126173853-a67bb567ff2e/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1269,8 +1269,8 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
-k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M=
+k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s=
 k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
 k8s.io/kubectl v0.26.1 h1:K8A0Jjlwg8GqrxOXxAbjY5xtmXYeYjLU96cHp2WMQ7s=
@@ -15,7 +15,7 @@ require (
 	k8s.io/api v0.26.1
 	k8s.io/apimachinery v0.26.1
 	k8s.io/client-go v11.0.1-0.20190805182717-6502b5e7b1b5+incompatible
-	k8s.io/klog/v2 v2.80.1
+	k8s.io/klog/v2 v2.90.0
 	k8s.io/kops v1.24.1
 	sigs.k8s.io/boskos v0.0.0-20220704141725-37bd9bb41b86
 	sigs.k8s.io/kubetest2 v0.0.0-20220801170629-1284e5ada592
@@ -35,7 +35,7 @@ require (
 	github.com/StackExchange/wmi v1.2.1 // indirect
 	github.com/acomagu/bufpipe v1.0.3 // indirect
 	github.com/apparentlymart/go-cidr v1.1.0 // indirect
-	github.com/aws/aws-sdk-go v1.44.183 // indirect
+	github.com/aws/aws-sdk-go v1.44.188 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver v3.5.1+incompatible // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -65,7 +65,7 @@ require (
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/gnostic v0.6.9 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
-	github.com/google/go-containerregistry v0.12.1 // indirect
+	github.com/google/go-containerregistry v0.13.0 // indirect
 	github.com/google/go-github/v33 v33.0.0 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/gofuzz v1.2.1-0.20210504230335-f78f29fc09ea // indirect
@@ -113,7 +113,7 @@ require (
 	go.opencensus.io v0.24.0 // indirect
 	go4.org v0.0.0-20201209231011-d4a079459e60 // indirect
 	golang.org/x/crypto v0.5.0 // indirect
-	golang.org/x/exp v0.0.0-20230118134722-a68e582fa157 // indirect
+	golang.org/x/exp v0.0.0-20230126173853-a67bb567ff2e // indirect
 	golang.org/x/mod v0.7.0 // indirect
 	golang.org/x/net v0.5.0 // indirect
 	golang.org/x/oauth2 v0.4.0 // indirect
@@ -305,8 +305,8 @@ github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU
 github.com/aws/aws-sdk-go v1.31.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.37.22/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.183 h1:mUk45JZTIMMg9m8GmrbvACCsIOKtKezXRxp06uI5Ahk=
-github.com/aws/aws-sdk-go v1.44.183/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.188 h1:NCN6wFDWKU72Ka+f7cCk3HRj1KxkEXhRdr7lO8oBRRQ=
+github.com/aws/aws-sdk-go v1.44.188/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
 github.com/bazelbuild/buildtools v0.0.0-20190917191645-69366ca98f89/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU=
 github.com/bazelbuild/buildtools v0.0.0-20200922170545-10384511ce98/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU=
@@ -1015,8 +1015,8 @@ github.com/google/go-containerregistry v0.0.0-20200331213917-3d03ed9b1ca2/go.mod
 github.com/google/go-containerregistry v0.1.1/go.mod h1:npTSyywOeILcgWqd+rvtzGWflIPPcBQhYoOONaY4ltM=
 github.com/google/go-containerregistry v0.3.0/go.mod h1:BJ7VxR1hAhdiZBGGnvGETHEmFs1hzXc4VM1xjOPO9wA=
 github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
-github.com/google/go-containerregistry v0.12.1 h1:W1mzdNUTx4Zla4JaixCRLhORcR7G6KxE5hHl5fkPsp8=
-github.com/google/go-containerregistry v0.12.1/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k=
+github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k=
+github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-github/v27 v27.0.6/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0=
 github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
@@ -2088,8 +2088,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230118134722-a68e582fa157 h1:fiNkyhJPUvxbRPbCqY/D9qdjmPzfHcpK3P4bM4gioSY=
-golang.org/x/exp v0.0.0-20230118134722-a68e582fa157/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230126173853-a67bb567ff2e h1:nEzRHNOazEST44vMvEwxGxnYGrzXEmxJmnti5mKSWTk=
+golang.org/x/exp v0.0.0-20230126173853-a67bb567ff2e/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -2936,8 +2936,8 @@ k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
 k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
 k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
-k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M=
+k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
 k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
 k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
@@ -27,6 +27,7 @@ const (
 	ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
 	ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
 	ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta).
+	ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne).
 	CaCentral1RegionID = "ca-central-1" // Canada (Central).
 	EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt).
 	EuCentral2RegionID = "eu-central-2" // Europe (Zurich).
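The hunks that follow register the new ap-southeast-4 (Melbourne) region across dozens of service entries in the SDK's built-in endpoint table. A minimal sketch, assuming the standard aws-sdk-go v1 resolver API, of how a caller resolves one of these endpoints from the updated table (the "ec2" service name and the printed URL are illustrative):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// Resolve the EC2 endpoint for the newly added Melbourne region.
		resolved, err := endpoints.DefaultResolver().EndpointFor(
			"ec2", endpoints.ApSoutheast4RegionID,
		)
		if err != nil {
			panic(err)
		}
		fmt.Println(resolved.URL) // e.g. https://ec2.ap-southeast-4.amazonaws.com
	}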
@@ -172,6 +173,9 @@ var awsPartition = partition{
 		"ap-southeast-3": region{
 			Description: "Asia Pacific (Jakarta)",
 		},
+		"ap-southeast-4": region{
+			Description: "Asia Pacific (Melbourne)",
+		},
 		"ca-central-1": region{
 			Description: "Canada (Central)",
 		},
@@ -261,6 +265,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -432,6 +439,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -935,6 +945,15 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-northeast-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-2",
+			}: endpoint{},
+			endpointKey{
+				Region: "eu-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
@@ -1168,6 +1187,14 @@ var awsPartition = partition{
 				Region: "ap-southeast-3",
 			},
 		},
+		endpointKey{
+			Region: "ap-southeast-4",
+		}: endpoint{
+			Hostname: "api.ecr.ap-southeast-4.amazonaws.com",
+			CredentialScope: credentialScope{
+				Region: "ap-southeast-4",
+			},
+		},
 		endpointKey{
 			Region: "ca-central-1",
 		}: endpoint{
@@ -2096,6 +2123,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -2287,6 +2317,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -2493,6 +2526,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -3550,6 +3586,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -4018,12 +4057,32 @@ var awsPartition = partition{
 		},
+		"cases": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+			},
+		},
 		"cassandra": service{
@@ -4291,6 +4350,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -4479,6 +4541,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -4760,6 +4825,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -4963,6 +5031,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "eu-west-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "me-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "me-south-1",
 			}: endpoint{},
@@ -5237,6 +5308,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -6289,6 +6363,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -7440,6 +7517,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -7604,6 +7684,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -7680,6 +7763,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -8188,6 +8274,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -8354,6 +8443,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -8522,6 +8614,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -8709,6 +8804,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -9017,6 +9115,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -9394,6 +9495,15 @@ var awsPartition = partition{
 			}: endpoint{
 				Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
 			},
+			endpointKey{
+				Region: "eu-south-2",
+			}: endpoint{},
+			endpointKey{
+				Region: "eu-south-2",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com",
+			},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
@@ -9547,6 +9657,15 @@ var awsPartition = partition{
 				},
 				Deprecated: boxedTrue,
 			},
+			endpointKey{
+				Region: "fips-eu-south-2",
+			}: endpoint{
+				Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "eu-south-2",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "fips-eu-west-1",
 			}: endpoint{
@@ -9739,6 +9858,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -9887,6 +10009,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -10442,6 +10567,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -10593,6 +10721,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -11606,6 +11737,9 @@ var awsPartition = partition{
 				},
 				Deprecated: boxedTrue,
 			},
+			endpointKey{
+				Region: "me-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "me-south-1",
 			}: endpoint{},
@@ -14205,6 +14339,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -14628,6 +14765,15 @@ var awsPartition = partition{
 				},
 				Deprecated: boxedTrue,
 			},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "kms-fips.ap-southeast-4.amazonaws.com",
+			},
 			endpointKey{
 				Region: "ap-southeast-4-fips",
 			}: endpoint{
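The KMS entry above adds both a standard and a FIPS-variant ap-southeast-4 endpoint. A sketch of selecting the FIPS variant, assuming the UseFIPSEndpoint resolver option behaves as in other SDK v1 releases:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// Ask the resolver for the fipsVariant registered above.
		resolved, err := endpoints.DefaultResolver().EndpointFor(
			"kms", "ap-southeast-4",
			func(o *endpoints.Options) {
				o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
			},
		)
		if err != nil {
			panic(err)
		}
		fmt.Println(resolved.URL) // expected: https://kms-fips.ap-southeast-4.amazonaws.com
	}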
@@ -15146,6 +15292,15 @@ var awsPartition = partition{
 			}: endpoint{
 				Hostname: "lambda.ap-southeast-3.api.aws",
 			},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+				Variant: dualStackVariant,
+			}: endpoint{
+				Hostname: "lambda.ap-southeast-4.api.aws",
+			},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -15802,6 +15957,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -16910,6 +17068,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -17389,6 +17550,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -18006,6 +18170,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -18626,6 +18793,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -19623,6 +19793,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -19780,6 +19953,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -20181,6 +20357,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -20718,6 +20897,11 @@ var awsPartition = partition{
 			}: endpoint{
 				Hostname: "resource-explorer-2.ap-southeast-2.api.aws",
 			},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{
+				Hostname: "resource-explorer-2.ap-southeast-4.api.aws",
+			},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{
@@ -20800,6 +20984,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-south-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-south-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "ap-southeast-1",
 			}: endpoint{},
@@ -20809,18 +20996,27 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-central-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-north-1",
 			}: endpoint{},
 			endpointKey{
 				Region: "eu-south-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-south-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
@@ -21550,6 +21746,15 @@ var awsPartition = partition{
 			}: endpoint{
 				Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com",
 			},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+				Variant: dualStackVariant,
+			}: endpoint{
+				Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com",
+			},
 			endpointKey{
 				Region: "aws-global",
 			}: endpoint{
@@ -22639,6 +22844,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -24412,6 +24620,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -24560,6 +24771,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -24705,6 +24919,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -25054,6 +25271,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -25214,12 +25434,18 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "eu-central-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-central-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-north-1",
 			}: endpoint{},
 			endpointKey{
 				Region: "eu-south-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "eu-south-2",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
@@ -25361,6 +25587,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -25453,6 +25682,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "aws-global",
 			}: endpoint{
@@ -25629,6 +25861,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -25771,6 +26006,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -25913,6 +26151,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -28359,6 +28600,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-3",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-4",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -30629,6 +30873,9 @@ var awsusgovPartition = partition{
 				},
 			},
 			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "us-gov-west-1",
 				}: endpoint{},
@@ -31585,9 +31832,24 @@ var awsusgovPartition = partition{
 		},
+		"databrew": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "databrew.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "databrew.us-gov-west-1.amazonaws.com",
+				},
+			},
+		},
 		"datasync": service{
@@ -36552,6 +36814,13 @@ var awsisobPartition = partition{
 				}: endpoint{},
 			},
 		},
+		"dlm": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-isob-east-1",
+				}: endpoint{},
+			},
+		},
 		"dms": service{
 			Defaults: endpointDefaults{
 				defaultKey{}: endpoint{},
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.183"
+const SDKVersion = "1.44.188"
@@ -22,7 +22,7 @@ const (
 // UnmarshalTypedError provides unmarshaling errors API response errors
 // for both typed and untyped errors.
 type UnmarshalTypedError struct {
-	exceptions map[string]func(protocol.ResponseMetadata) error
+	exceptions      map[string]func(protocol.ResponseMetadata) error
+	queryExceptions map[string]func(protocol.ResponseMetadata, string) error
 }
 
@@ -30,11 +30,13 @@ type UnmarshalTypedError struct {
 // set of exception names to the error unmarshalers
 func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
 	return &UnmarshalTypedError{
-		exceptions: exceptions,
+		exceptions:      exceptions,
+		queryExceptions: map[string]func(protocol.ResponseMetadata, string) error{},
 	}
 }
 
+// NewUnmarshalTypedErrorWithOptions works similar to NewUnmarshalTypedError applying options to the UnmarshalTypedError
+// before returning it
+func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError {
+	unmarshaledError := NewUnmarshalTypedError(exceptions)
+	for _, fn := range optFns {
@@ -43,6 +45,11 @@ func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.Respo
 	return unmarshaledError
 }
 
+// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions.
+// The queryExceptions given act as an override for unmarshalling errors when query compatible error codes are found.
+// See also [awsQueryCompatible trait]
+//
+// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait
+func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) {
+	return func(typedError *UnmarshalTypedError) {
+		typedError.queryExceptions = queryExceptions
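A sketch of how the new pieces compose, using only identifiers introduced in these hunks; the two exception constructors are hypothetical placeholders, and the snippet is assumed to live in the same protocol package:

	// newAccessDeniedError and newQueueDoesNotExistError are hypothetical
	// constructors, shown only to illustrate the wiring.
	exceptions := map[string]func(protocol.ResponseMetadata) error{
		"AccessDeniedException": newAccessDeniedError,
	}
	queryExceptions := map[string]func(protocol.ResponseMetadata, string) error{
		// Keyed by the query-compatible error code that services expose
		// when the awsQueryCompatible trait applies (assumption).
		"AWS.SimpleQueueService.NonExistentQueue": newQueueDoesNotExistError,
	}

	// The functional option installs the overrides on the unmarshaler.
	u := NewUnmarshalTypedErrorWithOptions(exceptions,
		WithQueryCompatibility(queryExceptions))
	_ = u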
(File diff suppressed because it is too large.)
@@ -140,6 +140,10 @@ type EC2API interface {
 	AssociateInstanceEventWindowWithContext(aws.Context, *ec2.AssociateInstanceEventWindowInput, ...request.Option) (*ec2.AssociateInstanceEventWindowOutput, error)
 	AssociateInstanceEventWindowRequest(*ec2.AssociateInstanceEventWindowInput) (*request.Request, *ec2.AssociateInstanceEventWindowOutput)
 
+	AssociateIpamResourceDiscovery(*ec2.AssociateIpamResourceDiscoveryInput) (*ec2.AssociateIpamResourceDiscoveryOutput, error)
+	AssociateIpamResourceDiscoveryWithContext(aws.Context, *ec2.AssociateIpamResourceDiscoveryInput, ...request.Option) (*ec2.AssociateIpamResourceDiscoveryOutput, error)
+	AssociateIpamResourceDiscoveryRequest(*ec2.AssociateIpamResourceDiscoveryInput) (*request.Request, *ec2.AssociateIpamResourceDiscoveryOutput)
+
 	AssociateRouteTable(*ec2.AssociateRouteTableInput) (*ec2.AssociateRouteTableOutput, error)
 	AssociateRouteTableWithContext(aws.Context, *ec2.AssociateRouteTableInput, ...request.Option) (*ec2.AssociateRouteTableOutput, error)
 	AssociateRouteTableRequest(*ec2.AssociateRouteTableInput) (*request.Request, *ec2.AssociateRouteTableOutput)
@@ -348,6 +352,10 @@ type EC2API interface {
 	CreateIpamPoolWithContext(aws.Context, *ec2.CreateIpamPoolInput, ...request.Option) (*ec2.CreateIpamPoolOutput, error)
 	CreateIpamPoolRequest(*ec2.CreateIpamPoolInput) (*request.Request, *ec2.CreateIpamPoolOutput)
 
+	CreateIpamResourceDiscovery(*ec2.CreateIpamResourceDiscoveryInput) (*ec2.CreateIpamResourceDiscoveryOutput, error)
+	CreateIpamResourceDiscoveryWithContext(aws.Context, *ec2.CreateIpamResourceDiscoveryInput, ...request.Option) (*ec2.CreateIpamResourceDiscoveryOutput, error)
+	CreateIpamResourceDiscoveryRequest(*ec2.CreateIpamResourceDiscoveryInput) (*request.Request, *ec2.CreateIpamResourceDiscoveryOutput)
+
 	CreateIpamScope(*ec2.CreateIpamScopeInput) (*ec2.CreateIpamScopeOutput, error)
 	CreateIpamScopeWithContext(aws.Context, *ec2.CreateIpamScopeInput, ...request.Option) (*ec2.CreateIpamScopeOutput, error)
 	CreateIpamScopeRequest(*ec2.CreateIpamScopeInput) (*request.Request, *ec2.CreateIpamScopeOutput)
@@ -644,6 +652,10 @@ type EC2API interface {
 	DeleteIpamPoolWithContext(aws.Context, *ec2.DeleteIpamPoolInput, ...request.Option) (*ec2.DeleteIpamPoolOutput, error)
 	DeleteIpamPoolRequest(*ec2.DeleteIpamPoolInput) (*request.Request, *ec2.DeleteIpamPoolOutput)
 
+	DeleteIpamResourceDiscovery(*ec2.DeleteIpamResourceDiscoveryInput) (*ec2.DeleteIpamResourceDiscoveryOutput, error)
+	DeleteIpamResourceDiscoveryWithContext(aws.Context, *ec2.DeleteIpamResourceDiscoveryInput, ...request.Option) (*ec2.DeleteIpamResourceDiscoveryOutput, error)
+	DeleteIpamResourceDiscoveryRequest(*ec2.DeleteIpamResourceDiscoveryInput) (*request.Request, *ec2.DeleteIpamResourceDiscoveryOutput)
+
 	DeleteIpamScope(*ec2.DeleteIpamScopeInput) (*ec2.DeleteIpamScopeOutput, error)
 	DeleteIpamScopeWithContext(aws.Context, *ec2.DeleteIpamScopeInput, ...request.Option) (*ec2.DeleteIpamScopeOutput, error)
 	DeleteIpamScopeRequest(*ec2.DeleteIpamScopeInput) (*request.Request, *ec2.DeleteIpamScopeOutput)
@@ -1227,6 +1239,20 @@ type EC2API interface {
 	DescribeIpamPoolsPages(*ec2.DescribeIpamPoolsInput, func(*ec2.DescribeIpamPoolsOutput, bool) bool) error
 	DescribeIpamPoolsPagesWithContext(aws.Context, *ec2.DescribeIpamPoolsInput, func(*ec2.DescribeIpamPoolsOutput, bool) bool, ...request.Option) error
 
+	DescribeIpamResourceDiscoveries(*ec2.DescribeIpamResourceDiscoveriesInput) (*ec2.DescribeIpamResourceDiscoveriesOutput, error)
+	DescribeIpamResourceDiscoveriesWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveriesInput, ...request.Option) (*ec2.DescribeIpamResourceDiscoveriesOutput, error)
+	DescribeIpamResourceDiscoveriesRequest(*ec2.DescribeIpamResourceDiscoveriesInput) (*request.Request, *ec2.DescribeIpamResourceDiscoveriesOutput)
+
+	DescribeIpamResourceDiscoveriesPages(*ec2.DescribeIpamResourceDiscoveriesInput, func(*ec2.DescribeIpamResourceDiscoveriesOutput, bool) bool) error
+	DescribeIpamResourceDiscoveriesPagesWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveriesInput, func(*ec2.DescribeIpamResourceDiscoveriesOutput, bool) bool, ...request.Option) error
+
+	DescribeIpamResourceDiscoveryAssociations(*ec2.DescribeIpamResourceDiscoveryAssociationsInput) (*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, error)
+	DescribeIpamResourceDiscoveryAssociationsWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveryAssociationsInput, ...request.Option) (*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, error)
+	DescribeIpamResourceDiscoveryAssociationsRequest(*ec2.DescribeIpamResourceDiscoveryAssociationsInput) (*request.Request, *ec2.DescribeIpamResourceDiscoveryAssociationsOutput)
+
+	DescribeIpamResourceDiscoveryAssociationsPages(*ec2.DescribeIpamResourceDiscoveryAssociationsInput, func(*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, bool) bool) error
+	DescribeIpamResourceDiscoveryAssociationsPagesWithContext(aws.Context, *ec2.DescribeIpamResourceDiscoveryAssociationsInput, func(*ec2.DescribeIpamResourceDiscoveryAssociationsOutput, bool) bool, ...request.Option) error
+
 	DescribeIpamScopes(*ec2.DescribeIpamScopesInput) (*ec2.DescribeIpamScopesOutput, error)
 	DescribeIpamScopesWithContext(aws.Context, *ec2.DescribeIpamScopesInput, ...request.Option) (*ec2.DescribeIpamScopesOutput, error)
 	DescribeIpamScopesRequest(*ec2.DescribeIpamScopesInput) (*request.Request, *ec2.DescribeIpamScopesOutput)
@@ -1884,6 +1910,10 @@ type EC2API interface {
 	DisassociateInstanceEventWindowWithContext(aws.Context, *ec2.DisassociateInstanceEventWindowInput, ...request.Option) (*ec2.DisassociateInstanceEventWindowOutput, error)
 	DisassociateInstanceEventWindowRequest(*ec2.DisassociateInstanceEventWindowInput) (*request.Request, *ec2.DisassociateInstanceEventWindowOutput)
 
+	DisassociateIpamResourceDiscovery(*ec2.DisassociateIpamResourceDiscoveryInput) (*ec2.DisassociateIpamResourceDiscoveryOutput, error)
+	DisassociateIpamResourceDiscoveryWithContext(aws.Context, *ec2.DisassociateIpamResourceDiscoveryInput, ...request.Option) (*ec2.DisassociateIpamResourceDiscoveryOutput, error)
+	DisassociateIpamResourceDiscoveryRequest(*ec2.DisassociateIpamResourceDiscoveryInput) (*request.Request, *ec2.DisassociateIpamResourceDiscoveryOutput)
+
 	DisassociateRouteTable(*ec2.DisassociateRouteTableInput) (*ec2.DisassociateRouteTableOutput, error)
 	DisassociateRouteTableWithContext(aws.Context, *ec2.DisassociateRouteTableInput, ...request.Option) (*ec2.DisassociateRouteTableOutput, error)
 	DisassociateRouteTableRequest(*ec2.DisassociateRouteTableInput) (*request.Request, *ec2.DisassociateRouteTableOutput)
@@ -2063,6 +2093,20 @@ type EC2API interface {
 	GetIpamAddressHistoryPages(*ec2.GetIpamAddressHistoryInput, func(*ec2.GetIpamAddressHistoryOutput, bool) bool) error
 	GetIpamAddressHistoryPagesWithContext(aws.Context, *ec2.GetIpamAddressHistoryInput, func(*ec2.GetIpamAddressHistoryOutput, bool) bool, ...request.Option) error
 
+	GetIpamDiscoveredAccounts(*ec2.GetIpamDiscoveredAccountsInput) (*ec2.GetIpamDiscoveredAccountsOutput, error)
+	GetIpamDiscoveredAccountsWithContext(aws.Context, *ec2.GetIpamDiscoveredAccountsInput, ...request.Option) (*ec2.GetIpamDiscoveredAccountsOutput, error)
+	GetIpamDiscoveredAccountsRequest(*ec2.GetIpamDiscoveredAccountsInput) (*request.Request, *ec2.GetIpamDiscoveredAccountsOutput)
+
+	GetIpamDiscoveredAccountsPages(*ec2.GetIpamDiscoveredAccountsInput, func(*ec2.GetIpamDiscoveredAccountsOutput, bool) bool) error
+	GetIpamDiscoveredAccountsPagesWithContext(aws.Context, *ec2.GetIpamDiscoveredAccountsInput, func(*ec2.GetIpamDiscoveredAccountsOutput, bool) bool, ...request.Option) error
+
+	GetIpamDiscoveredResourceCidrs(*ec2.GetIpamDiscoveredResourceCidrsInput) (*ec2.GetIpamDiscoveredResourceCidrsOutput, error)
+	GetIpamDiscoveredResourceCidrsWithContext(aws.Context, *ec2.GetIpamDiscoveredResourceCidrsInput, ...request.Option) (*ec2.GetIpamDiscoveredResourceCidrsOutput, error)
+	GetIpamDiscoveredResourceCidrsRequest(*ec2.GetIpamDiscoveredResourceCidrsInput) (*request.Request, *ec2.GetIpamDiscoveredResourceCidrsOutput)
+
+	GetIpamDiscoveredResourceCidrsPages(*ec2.GetIpamDiscoveredResourceCidrsInput, func(*ec2.GetIpamDiscoveredResourceCidrsOutput, bool) bool) error
+	GetIpamDiscoveredResourceCidrsPagesWithContext(aws.Context, *ec2.GetIpamDiscoveredResourceCidrsInput, func(*ec2.GetIpamDiscoveredResourceCidrsOutput, bool) bool, ...request.Option) error
+
 	GetIpamPoolAllocations(*ec2.GetIpamPoolAllocationsInput) (*ec2.GetIpamPoolAllocationsOutput, error)
 	GetIpamPoolAllocationsWithContext(aws.Context, *ec2.GetIpamPoolAllocationsInput, ...request.Option) (*ec2.GetIpamPoolAllocationsOutput, error)
 	GetIpamPoolAllocationsRequest(*ec2.GetIpamPoolAllocationsInput) (*request.Request, *ec2.GetIpamPoolAllocationsOutput)
@@ -2332,6 +2376,10 @@ type EC2API interface {
 	ModifyIpamResourceCidrWithContext(aws.Context, *ec2.ModifyIpamResourceCidrInput, ...request.Option) (*ec2.ModifyIpamResourceCidrOutput, error)
 	ModifyIpamResourceCidrRequest(*ec2.ModifyIpamResourceCidrInput) (*request.Request, *ec2.ModifyIpamResourceCidrOutput)
 
+	ModifyIpamResourceDiscovery(*ec2.ModifyIpamResourceDiscoveryInput) (*ec2.ModifyIpamResourceDiscoveryOutput, error)
+	ModifyIpamResourceDiscoveryWithContext(aws.Context, *ec2.ModifyIpamResourceDiscoveryInput, ...request.Option) (*ec2.ModifyIpamResourceDiscoveryOutput, error)
+	ModifyIpamResourceDiscoveryRequest(*ec2.ModifyIpamResourceDiscoveryInput) (*request.Request, *ec2.ModifyIpamResourceDiscoveryOutput)
+
 	ModifyIpamScope(*ec2.ModifyIpamScopeInput) (*ec2.ModifyIpamScopeOutput, error)
 	ModifyIpamScopeWithContext(aws.Context, *ec2.ModifyIpamScopeInput, ...request.Option) (*ec2.ModifyIpamScopeOutput, error)
 	ModifyIpamScopeRequest(*ec2.ModifyIpamScopeInput) (*request.Request, *ec2.ModifyIpamScopeOutput)
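Because these IPAM resource-discovery methods are added to the EC2API interface, any mock implementing the interface must grow them too. A minimal sketch of driving one of the new paginated operations through the interface (the empty input and the output field name are assumptions based on the EC2 API shape):

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/ec2"
		"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
	)

	// countDiscoveries takes the interface so a test can substitute a mock
	// that implements the methods added in this change.
	func countDiscoveries(api ec2iface.EC2API) (int, error) {
		n := 0
		err := api.DescribeIpamResourceDiscoveriesPages(
			&ec2.DescribeIpamResourceDiscoveriesInput{},
			func(page *ec2.DescribeIpamResourceDiscoveriesOutput, lastPage bool) bool {
				n += len(page.IpamResourceDiscoveries) // field name assumed
				return !lastPage
			})
		return n, err
	}

	func main() {
		svc := ec2.New(session.Must(session.NewSession()))
		n, err := countDiscoveries(svc)
		if err != nil {
			panic(err)
		}
		fmt.Println(n)
	}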
@@ -4163,6 +4163,8 @@ func (c *EventBridge) PutTargetsRequest(input *PutTargetsInput) (req *request.Re
 //
 //   - Redshift cluster
 //
+//   - Redshift Serverless workgroup
+//
 //   - SageMaker Pipeline
 //
 //   - SNS topic
@@ -4191,9 +4193,9 @@ func (c *EventBridge) PutTargetsRequest(input *PutTargetsInput) (req *request.Re
 // To be able to make API calls against the resources that you own, Amazon EventBridge
 // needs the appropriate permissions. For Lambda and Amazon SNS resources, EventBridge
 // relies on resource-based policies. For EC2 instances, Kinesis Data Streams,
-// Step Functions state machines and API Gateway REST APIs, EventBridge relies
-// on IAM roles that you specify in the RoleARN argument in PutTargets. For
-// more information, see Authentication and Access Control (https://docs.aws.amazon.com/eventbridge/latest/userguide/auth-and-access-control-eventbridge.html)
+// Step Functions state machines and API Gateway APIs, EventBridge relies on
+// IAM roles that you specify in the RoleARN argument in PutTargets. For more
+// information, see Authentication and Access Control (https://docs.aws.amazon.com/eventbridge/latest/userguide/auth-and-access-control-eventbridge.html)
 // in the Amazon EventBridge User Guide.
 //
 // If another Amazon Web Services account is in the same region and has granted
@@ -6079,6 +6081,8 @@ type Connection struct {
 	_ struct{} `type:"structure"`
 
 	// The authorization type specified for the connection.
+	//
+	// OAUTH tokens are refreshed when a 401 or 407 response is returned.
 	AuthorizationType *string `type:"string" enum:"ConnectionAuthorizationType"`
 
 	// The ARN of the connection.
@@ -7159,6 +7163,8 @@ type CreateConnectionInput struct {
 
 	// The type of authorization to use for the connection.
 	//
+	// OAUTH tokens are refreshed when a 401 or 407 response is returned.
+	//
 	// AuthorizationType is a required field
 	AuthorizationType *string `type:"string" required:"true" enum:"ConnectionAuthorizationType"`
 
@@ -7479,7 +7485,9 @@ type CreateEndpointInput struct {
 	// Name is a required field
 	Name *string `min:"1" type:"string" required:"true"`
 
-	// Enable or disable event replication.
+	// Enable or disable event replication. The default state is ENABLED which means
+	// you must supply a RoleArn. If you don't have a RoleArn or you don't want
+	// event replication enabled, set the state to DISABLED.
 	ReplicationConfig *ReplicationConfig `type:"structure"`
 
 	// The ARN of the role used for replication.
@@ -7682,12 +7690,13 @@ type CreateEventBusInput struct {
 
 	// The name of the new event bus.
 	//
-	// Event bus names cannot contain the / character. You can't use the name default
-	// for a custom event bus, as this name is already used for your account's default
-	// event bus.
+	// Custom event bus names can't contain the / character, but you can use the
+	// / character in partner event bus names. In addition, for partner event buses,
+	// the name must exactly match the name of the partner event source that this
+	// event bus is matched to.
 	//
-	// If this is a partner event bus, the name must exactly match the name of the
-	// partner event source that this event bus is matched to.
+	// You can't use the name default for a custom event bus, as this name is already
+	// used for your account's default event bus.
 	//
 	// Name is a required field
 	Name *string `min:"1" type:"string" required:"true"`
@@ -10498,11 +10507,11 @@ func (s EnableRuleOutput) GoString() string {
 	return s.String()
 }
 
-// An global endpoint used to improve your application's availability by making
+// A global endpoint used to improve your application's availability by making
 // it regional-fault tolerant. For more information about global endpoints,
 // see Making applications Regional-fault tolerant with global endpoints and
 // event replication (https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-global-endpoints.html)
-// in the Amazon EventBridge User Guide..
+// in the Amazon EventBridge User Guide.
 type Endpoint struct {
 	_ struct{} `type:"structure"`
 
@@ -10516,7 +10525,7 @@ type Endpoint struct {
 	Description *string `type:"string"`
 
 	// The URL subdomain of the endpoint. For example, if the URL for Endpoint is
-	// abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo.
+	// https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo.
 	EndpointId *string `min:"1" type:"string"`
 
 	// The URL of the endpoint.
@@ -10531,7 +10540,10 @@ type Endpoint struct {
 	// The name of the endpoint.
 	Name *string `min:"1" type:"string"`
 
-	// Whether event replication was enabled or disabled for this endpoint.
+	// Whether event replication was enabled or disabled for this endpoint. The
+	// default state is ENABLED which means you must supply a RoleArn. If you don't
+	// have a RoleArn or you don't want event replication enabled, set the state
+	// to DISABLED.
 	ReplicationConfig *ReplicationConfig `type:"structure"`
 
 	// The ARN of the role used by event replication for the endpoint.
@@ -10693,12 +10705,13 @@ func (s *EndpointEventBus) SetEventBusArn(v string) *EndpointEventBus {
 	return s
 }
 
-// An event bus receives events from a source and routes them to rules associated
-// with that event bus. Your account's default event bus receives events from
-// Amazon Web Services services. A custom event bus can receive events from
-// your custom applications and services. A partner event bus receives events
-// from an event source created by an SaaS partner. These events come from the
-// partners services or applications.
+// An event bus receives events from a source, uses rules to evaluate them,
+// applies any configured input transformation, and routes them to the appropriate
+// target(s). Your account's default event bus receives events from Amazon Web
+// Services services. A custom event bus can receive events from your custom
+// applications and services. A partner event bus receives events from an event
+// source created by an SaaS partner. These events come from the partners services
+// or applications.
 type EventBus struct {
 	_ struct{} `type:"structure"`
 
@@ -10906,23 +10919,23 @@ func (s *FailoverConfig) SetSecondary(v *Secondary) *FailoverConfig {
 	return s
 }
 
-// These are custom parameter to be used when the target is an API Gateway REST
-// APIs or EventBridge ApiDestinations. In the latter case, these are merged
-// with any InvocationParameters specified on the Connection, with any values
-// from the Connection taking precedence.
+// These are custom parameter to be used when the target is an API Gateway APIs
+// or EventBridge ApiDestinations. In the latter case, these are merged with
+// any InvocationParameters specified on the Connection, with any values from
+// the Connection taking precedence.
 type HttpParameters struct {
 	_ struct{} `type:"structure"`
 
 	// The headers that need to be sent as part of request invoking the API Gateway
-	// REST API or EventBridge ApiDestination.
+	// API or EventBridge ApiDestination.
 	HeaderParameters map[string]*string `type:"map"`
 
-	// The path parameter values to be used to populate API Gateway REST API or
-	// EventBridge ApiDestination path wildcards ("*").
+	// The path parameter values to be used to populate API Gateway API or EventBridge
+	// ApiDestination path wildcards ("*").
 	PathParameterValues []*string `type:"list"`
 
 	// The query string keys/values that need to be sent as part of request invoking
-	// the API Gateway REST API or EventBridge ApiDestination.
+	// the API Gateway API or EventBridge ApiDestination.
 	QueryStringParameters map[string]*string `type:"map"`
 }
 
@@ -11045,8 +11058,7 @@ type InputTransformer struct {
 
 	// Input template where you specify placeholders that will be filled with the
 	// values of the keys from InputPathsMap to customize the data sent to the target.
-	// Enclose each InputPathsMaps value in brackets: <value> The InputTemplate
-	// must be valid JSON.
+	// Enclose each InputPathsMaps value in brackets: <value>
 	//
 	// If InputTemplate is a JSON object (surrounded by curly braces), the following
 	// restrictions apply:
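A sketch of an InputTransformer built against the doc comment above; the JSON path and template text are illustrative values:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/eventbridge"
	)

	func main() {
		// Pull the instance ID out of the matched event, then reference it
		// in the template as <instance>, enclosed in brackets as required.
		transformer := &eventbridge.InputTransformer{
			InputPathsMap: map[string]*string{
				"instance": aws.String("$.detail.instance-id"),
			},
			InputTemplate: aws.String(`"Instance <instance> changed state."`),
		}
		fmt.Println(transformer)
	}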
@@ -13509,7 +13521,7 @@ type PutEventsInput struct {
 	_ struct{} `type:"structure"`
 
 	// The URL subdomain of the endpoint. For example, if the URL for Endpoint is
-	// abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo.
+	// https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is abcde.veo.
 	//
 	// When using Java, you must include auth-crt on the class path.
 	//
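A sketch of publishing through a global endpoint with the EndpointId described above; the subdomain, source, detail type, and bus name are illustrative:

	package main

	import (
		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/eventbridge"
	)

	func main() {
		svc := eventbridge.New(session.Must(session.NewSession()))
		// EndpointId is the URL subdomain ("abcde.veo" in the doc example),
		// not the full endpoint URL.
		_, err := svc.PutEvents(&eventbridge.PutEventsInput{
			EndpointId: aws.String("abcde.veo"),
			Entries: []*eventbridge.PutEventsRequestEntry{{
				Source:       aws.String("com.example.app"),
				DetailType:   aws.String("example.event"),
				Detail:       aws.String(`{"status":"ok"}`),
				EventBusName: aws.String("my-global-bus"),
			}},
		})
		if err != nil {
			panic(err)
		}
	}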
@@ -13589,6 +13601,9 @@ type PutEventsOutput struct {
	// The successfully and unsuccessfully ingested events results. If the ingestion
	// was successful, the entry has the event ID in it. Otherwise, you can use
	// the error code and error message to identify the problem with the entry.
	//
	// For each record, the index of the response element is the same as the index
	// in the request array.
	Entries []*PutEventsResultEntry `type:"list"`

	// The number of failed entries.
@@ -13633,7 +13648,8 @@ type PutEventsRequestEntry struct {
	// contain fields and nested subobjects.
	Detail *string `type:"string"`

	// Free-form string used to decide what fields to expect in the event detail.
	// Free-form string, with a maximum of 128 characters, used to decide what fields
	// to expect in the event detail.
	DetailType *string `type:"string"`

	// The name or ARN of the event bus to receive the event. Only the rules that
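// A usage sketch for PutEvents with the entry type above, assuming an
// aws-sdk-go session; the bus name, source, and detail payload are
// hypothetical.
//
//	svc := eventbridge.New(session.Must(session.NewSession()))
//	out, err := svc.PutEvents(&eventbridge.PutEventsInput{
//		Entries: []*eventbridge.PutEventsRequestEntry{{
//			EventBusName: aws.String("my-custom-bus"),
//			Source:       aws.String("com.example.orders"),
//			DetailType:   aws.String("order.created"), // free-form, at most 128 characters
//			Detail:       aws.String(`{"orderId":"1234"}`),
//		}},
//	})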
@@ -13902,7 +13918,8 @@ type PutPartnerEventsRequestEntry struct {
	// contain fields and nested subobjects.
	Detail *string `type:"string"`

	// A free-form string used to decide what fields to expect in the event detail.
	// A free-form string, with a maximum of 128 characters, used to decide what
	// fields to expect in the event detail.
	DetailType *string `type:"string"`

	// Amazon Web Services resources, identified by Amazon Resource Name (ARN),
@@ -14187,7 +14204,8 @@ type PutRuleInput struct {
	// this, the default event bus is used.
	EventBusName *string `min:"1" type:"string"`

	// The event pattern. For more information, see EventBridge event patterns (https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html.html)
	// The event pattern. For more information, see Amazon EventBridge event patterns
	// (https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-event-patterns.html)
	// in the Amazon EventBridge User Guide.
	EventPattern *string `type:"string"`
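// A sketch of a rule using an event pattern, assuming the PutRule operation
// from this package; the rule name and pattern are hypothetical.
//
//	_, err := svc.PutRule(&eventbridge.PutRuleInput{
//		Name:         aws.String("ec2-state-changes"),
//		EventPattern: aws.String(`{"source":["aws.ec2"]}`),
//	})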
@@ -14525,8 +14543,8 @@ func (s *PutTargetsResultEntry) SetTargetId(v string) *PutTargetsResultEntry {
}

// These are custom parameters to be used when the target is a Amazon Redshift
// cluster to invoke the Amazon Redshift Data API ExecuteStatement based on
// EventBridge events.
// cluster or Redshift Serverless workgroup to invoke the Amazon Redshift Data
// API ExecuteStatement based on EventBridge events.
type RedshiftDataParameters struct {
	_ struct{} `type:"structure"`
@@ -14536,6 +14554,8 @@ type RedshiftDataParameters struct {
	Database *string `min:"1" type:"string" required:"true"`

	// The database user name. Required when authenticating using temporary credentials.
	//
	// Do not provide this parameter when connecting to a Redshift Serverless workgroup.
	DbUser *string `min:"1" type:"string"`

	// The name or ARN of the secret that enables access to the database. Required
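// A sketch of RedshiftDataParameters for a provisioned cluster, assuming the
// fields above; the database, user, and SQL are hypothetical. Per the doc
// comment, DbUser would be omitted for a Redshift Serverless workgroup.
//
//	rd := &eventbridge.RedshiftDataParameters{
//		Database: aws.String("dev"),
//		DbUser:   aws.String("awsuser"),
//		Sql:      aws.String("SELECT 1"),
//	}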
@@ -16177,13 +16197,13 @@ type Target struct {
	// in the Amazon EC2 Container Service Developer Guide.
	EcsParameters *EcsParameters `type:"structure"`

	// Contains the HTTP parameters to use when the target is a API Gateway REST
	// endpoint or EventBridge ApiDestination.
	// Contains the HTTP parameters to use when the target is a API Gateway endpoint
	// or EventBridge ApiDestination.
	//
	// If you specify an API Gateway REST API or EventBridge ApiDestination as a
	// target, you can use this parameter to specify headers, path parameters, and
	// query string keys/values as part of your target invoking request. If you're
	// using ApiDestinations, the corresponding Connection can also have these values
	// If you specify an API Gateway API or EventBridge ApiDestination as a target,
	// you can use this parameter to specify headers, path parameters, and query
	// string keys/values as part of your target invoking request. If you're using
	// ApiDestinations, the corresponding Connection can also have these values
	// configured. In case of any conflicting keys, values from the Connection take
	// precedence.
	HttpParameters *HttpParameters `type:"structure"`
@@ -16201,8 +16221,8 @@ type Target struct {
	Input *string `type:"string"`

	// The value of the JSONPath that is used for extracting part of the matched
	// event when passing it to the target. You must use JSON dot notation, not
	// bracket notation. For more information about JSON paths, see JSONPath (http://goessner.net/articles/JsonPath/).
	// event when passing it to the target. You may use JSON dot notation or bracket
	// notation. For more information about JSON paths, see JSONPath (http://goessner.net/articles/JsonPath/).
	InputPath *string `type:"string"`

	// Settings to enable you to provide custom input to a target based on certain
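// A sketch of a target that forwards only part of the matched event,
// assuming the Target fields above; the ID and ARN are hypothetical.
//
//	t := &eventbridge.Target{
//		Id:        aws.String("lambda-target"),
//		Arn:       aws.String("arn:aws:lambda:us-east-1:123456789012:function:handler"),
//		InputPath: aws.String("$.detail"), // dot or bracket notation, per the doc above
//	}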
@@ -17429,7 +17449,7 @@ type UpdateEndpointInput struct {
	// The ARN of the role used by event replication for this request.
	RoleArn *string `min:"1" type:"string"`

	// Configure the routing policy, including the health check and secondary Region..
	// Configure the routing policy, including the health check and secondary Region.
	RoutingConfig *RoutingConfig `type:"structure"`
}
@@ -20424,6 +20424,9 @@ const (

	// CloudWatchRegionUsIsobEast1 is a CloudWatchRegion enum value
	CloudWatchRegionUsIsobEast1 = "us-isob-east-1"

	// CloudWatchRegionApSoutheast4 is a CloudWatchRegion enum value
	CloudWatchRegionApSoutheast4 = "ap-southeast-4"
)

// CloudWatchRegion_Values returns all elements of the CloudWatchRegion enum

@@ -20462,6 +20465,7 @@ func CloudWatchRegion_Values() []string {
		CloudWatchRegionUsIsoEast1,
		CloudWatchRegionUsIsoWest1,
		CloudWatchRegionUsIsobEast1,
		CloudWatchRegionApSoutheast4,
	}
}
@@ -20789,6 +20793,9 @@ const (

	// ResourceRecordSetRegionEuSouth2 is a ResourceRecordSetRegion enum value
	ResourceRecordSetRegionEuSouth2 = "eu-south-2"

	// ResourceRecordSetRegionApSoutheast4 is a ResourceRecordSetRegion enum value
	ResourceRecordSetRegionApSoutheast4 = "ap-southeast-4"
)

// ResourceRecordSetRegion_Values returns all elements of the ResourceRecordSetRegion enum

@@ -20822,6 +20829,7 @@ func ResourceRecordSetRegion_Values() []string {
		ResourceRecordSetRegionAfSouth1,
		ResourceRecordSetRegionEuSouth1,
		ResourceRecordSetRegionEuSouth2,
		ResourceRecordSetRegionApSoutheast4,
	}
}
@@ -20977,6 +20985,9 @@ const (

	// VPCRegionEuSouth2 is a VPCRegion enum value
	VPCRegionEuSouth2 = "eu-south-2"

	// VPCRegionApSoutheast4 is a VPCRegion enum value
	VPCRegionApSoutheast4 = "ap-southeast-4"
)

// VPCRegion_Values returns all elements of the VPCRegion enum

@@ -21014,5 +21025,6 @@ func VPCRegion_Values() []string {
		VPCRegionAfSouth1,
		VPCRegionEuSouth1,
		VPCRegionEuSouth2,
		VPCRegionApSoutheast4,
	}
}
@@ -56,12 +56,11 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// AssumeRole API operation for AWS Security Token Service.
//
// Returns a set of temporary security credentials that you can use to access
// Amazon Web Services resources that you might not normally have access to.
// These temporary credentials consist of an access key ID, a secret access
// key, and a security token. Typically, you use AssumeRole within your account
// or for cross-account access. For a comparison of AssumeRole with other API
// operations that produce temporary credentials, see Requesting Temporary Security
// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// Amazon Web Services resources. These temporary credentials consist of an
// access key ID, a secret access key, and a security token. Typically, you
// use AssumeRole within your account or for cross-account access. For a comparison
// of AssumeRole with other API operations that produce temporary credentials,
// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
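// A usage sketch for AssumeRole, assuming an aws-sdk-go session; the role ARN
// and session name are hypothetical.
//
//	stsSvc := sts.New(session.Must(session.NewSession()))
//	out, err := stsSvc.AssumeRole(&sts.AssumeRoleInput{
//		RoleArn:         aws.String("arn:aws:iam::123456789012:role/demo"),
//		RoleSessionName: aws.String("example-session"),
//	})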
@@ -1103,13 +1102,15 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// # Permissions
//
// You can use the temporary credentials created by GetFederationToken in any
// Amazon Web Services service except the following:
// Amazon Web Services service with the following exceptions:
//
//   - You cannot call any IAM operations using the CLI or the Amazon Web Services
//     API.
//     API. This limitation does not apply to console sessions.
//
//   - You cannot call any STS operations except GetCallerIdentity.
//
// You can use temporary credentials for single sign-on (SSO) to the console.
//
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
// inline session policy. You can also specify up to 10 managed policy Amazon
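// A usage sketch for GetFederationToken with an inline session policy,
// assuming the same sts client; the user name and policy are hypothetical.
//
//	tok, err := stsSvc.GetFederationToken(&sts.GetFederationTokenInput{
//		Name:   aws.String("federated-user"),
//		Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`),
//	})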
@@ -1,5 +1,14 @@
# Change Log

## [v1.95.0] - 2023-01-23

- #595 - @dweinshenker - Add UpgradeMajorVersion to godo

## [v1.94.0] - 2023-01-23

- #596 - @DMW2151 - DBAAS-3906: Include updatePool for DB Clusters
- #593 - @danaelhe - Add Uptime Checks and Alerts Support

## [v1.93.0] - 2022-12-15

- #591 - @andrewsomething - tokens: Add initial support for new API.
@@ -9,27 +9,28 @@ import (
)

const (
	databaseBasePath           = "/v2/databases"
	databaseSinglePath         = databaseBasePath + "/%s"
	databaseCAPath             = databaseBasePath + "/%s/ca"
	databaseConfigPath         = databaseBasePath + "/%s/config"
	databaseResizePath         = databaseBasePath + "/%s/resize"
	databaseMigratePath        = databaseBasePath + "/%s/migrate"
	databaseMaintenancePath    = databaseBasePath + "/%s/maintenance"
	databaseBackupsPath        = databaseBasePath + "/%s/backups"
	databaseUsersPath          = databaseBasePath + "/%s/users"
	databaseUserPath           = databaseBasePath + "/%s/users/%s"
	databaseResetUserAuthPath  = databaseUserPath + "/reset_auth"
	databaseDBPath             = databaseBasePath + "/%s/dbs/%s"
	databaseDBsPath            = databaseBasePath + "/%s/dbs"
	databasePoolPath           = databaseBasePath + "/%s/pools/%s"
	databasePoolsPath          = databaseBasePath + "/%s/pools"
	databaseReplicaPath        = databaseBasePath + "/%s/replicas/%s"
	databaseReplicasPath       = databaseBasePath + "/%s/replicas"
	databaseEvictionPolicyPath = databaseBasePath + "/%s/eviction_policy"
	databaseSQLModePath        = databaseBasePath + "/%s/sql_mode"
	databaseFirewallRulesPath  = databaseBasePath + "/%s/firewall"
	databaseOptionsPath        = databaseBasePath + "/options"
	databaseBasePath                = "/v2/databases"
	databaseSinglePath              = databaseBasePath + "/%s"
	databaseCAPath                  = databaseBasePath + "/%s/ca"
	databaseConfigPath              = databaseBasePath + "/%s/config"
	databaseResizePath              = databaseBasePath + "/%s/resize"
	databaseMigratePath             = databaseBasePath + "/%s/migrate"
	databaseMaintenancePath         = databaseBasePath + "/%s/maintenance"
	databaseBackupsPath             = databaseBasePath + "/%s/backups"
	databaseUsersPath               = databaseBasePath + "/%s/users"
	databaseUserPath                = databaseBasePath + "/%s/users/%s"
	databaseResetUserAuthPath       = databaseUserPath + "/reset_auth"
	databaseDBPath                  = databaseBasePath + "/%s/dbs/%s"
	databaseDBsPath                 = databaseBasePath + "/%s/dbs"
	databasePoolPath                = databaseBasePath + "/%s/pools/%s"
	databasePoolsPath               = databaseBasePath + "/%s/pools"
	databaseReplicaPath             = databaseBasePath + "/%s/replicas/%s"
	databaseReplicasPath            = databaseBasePath + "/%s/replicas"
	databaseEvictionPolicyPath      = databaseBasePath + "/%s/eviction_policy"
	databaseSQLModePath             = databaseBasePath + "/%s/sql_mode"
	databaseFirewallRulesPath       = databaseBasePath + "/%s/firewall"
	databaseOptionsPath             = databaseBasePath + "/options"
	databaseUpgradeMajorVersionPath = databaseBasePath + "/%s/upgrade"
)

// SQL Mode constants allow for MySQL-specific SQL flavor configuration.
@@ -124,6 +125,7 @@ type DatabasesService interface {
	CreatePool(context.Context, string, *DatabaseCreatePoolRequest) (*DatabasePool, *Response, error)
	GetPool(context.Context, string, string) (*DatabasePool, *Response, error)
	DeletePool(context.Context, string, string) (*Response, error)
	UpdatePool(context.Context, string, string, *DatabaseUpdatePoolRequest) (*Response, error)
	GetReplica(context.Context, string, string) (*DatabaseReplica, *Response, error)
	ListReplicas(context.Context, string, *ListOptions) ([]DatabaseReplica, *Response, error)
	CreateReplica(context.Context, string, *DatabaseCreateReplicaRequest) (*DatabaseReplica, *Response, error)

@@ -141,6 +143,7 @@ type DatabasesService interface {
	UpdateRedisConfig(context.Context, string, *RedisConfig) (*Response, error)
	UpdateMySQLConfig(context.Context, string, *MySQLConfig) (*Response, error)
	ListOptions(todo context.Context) (*DatabaseOptions, *Response, error)
	UpgradeMajorVersion(context.Context, string, *UpgradeVersionRequest) (*Response, error)
}

// DatabasesServiceOp handles communication with the Databases related methods
@@ -299,6 +302,14 @@ type DatabaseCreatePoolRequest struct {
	Mode string `json:"mode"`
}

// DatabaseUpdatePoolRequest is used to update a database connection pool
type DatabaseUpdatePoolRequest struct {
	User     string `json:"user,omitempty"`
	Size     int    `json:"size"`
	Database string `json:"db"`
	Mode     string `json:"mode"`
}

// DatabaseCreateUserRequest is used to create a new database user
type DatabaseCreateUserRequest struct {
	Name string `json:"name"`
@@ -521,6 +532,10 @@ type evictionPolicyRoot struct {
	EvictionPolicy string `json:"eviction_policy"`
}

type UpgradeVersionRequest struct {
	Version string `json:"version"`
}

type sqlModeRoot struct {
	SQLMode string `json:"sql_mode"`
}
@@ -904,6 +919,37 @@ func (svc *DatabasesServiceOp) DeletePool(ctx context.Context, databaseID, name
	return resp, nil
}

// UpdatePool will update an existing database connection pool
func (svc *DatabasesServiceOp) UpdatePool(ctx context.Context, databaseID, name string, updatePool *DatabaseUpdatePoolRequest) (*Response, error) {
	path := fmt.Sprintf(databasePoolPath, databaseID, name)

	if updatePool == nil {
		return nil, NewArgError("updatePool", "cannot be nil")
	}

	if updatePool.Mode == "" {
		return nil, NewArgError("mode", "cannot be empty")
	}

	if updatePool.Database == "" {
		return nil, NewArgError("database", "cannot be empty")
	}

	if updatePool.Size < 1 {
		return nil, NewArgError("size", "cannot be less than 1")
	}

	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, updatePool)
	if err != nil {
		return nil, err
	}
	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// GetReplica returns a single database replica
func (svc *DatabasesServiceOp) GetReplica(ctx context.Context, databaseID, name string) (*DatabaseReplica, *Response, error) {
	path := fmt.Sprintf(databaseReplicaPath, databaseID, name)
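// A usage sketch for the new UpdatePool method, assuming a godo client and an
// existing cluster/pool; the IDs and size are hypothetical. Mode, Database,
// and Size must be set, per the validation above.
//
//	_, err := client.Databases.UpdatePool(ctx, dbID, "my-pool", &godo.DatabaseUpdatePoolRequest{
//		Mode:     "transaction",
//		Database: "defaultdb",
//		Size:     10,
//	})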
@@ -1179,3 +1225,19 @@ func (svc *DatabasesServiceOp) ListOptions(ctx context.Context) (*DatabaseOption

	return root.Options, resp, nil
}

// UpgradeMajorVersion upgrades the major version of a cluster.
func (svc *DatabasesServiceOp) UpgradeMajorVersion(ctx context.Context, databaseID string, upgradeReq *UpgradeVersionRequest) (*Response, error) {
	path := fmt.Sprintf(databaseUpgradeMajorVersionPath, databaseID)
	req, err := svc.client.NewRequest(ctx, http.MethodPut, path, upgradeReq)
	if err != nil {
		return nil, err
	}

	resp, err := svc.client.Do(ctx, req, nil)
	if err != nil {
		return resp, err
	}

	return resp, nil
}
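// A usage sketch for UpgradeMajorVersion, assuming a godo client and an
// existing cluster; the database ID and target version are hypothetical.
//
//	client := godo.NewFromToken(os.Getenv("DIGITALOCEAN_TOKEN"))
//	_, err := client.Databases.UpgradeMajorVersion(ctx, dbID, &godo.UpgradeVersionRequest{
//		Version: "15",
//	})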
@@ -21,7 +21,7 @@ import (
)

const (
	libraryVersion = "1.92.0"
	libraryVersion = "1.95.0"
	defaultBaseURL = "https://api.digitalocean.com/"
	userAgent      = "godo/" + libraryVersion
	mediaType      = "application/json"

@@ -82,6 +82,7 @@ type Client struct {
	StorageActions StorageActionsService
	Tags           TagsService
	Tokens         TokensService
	UptimeChecks   UptimeChecksService
	VPCs           VPCsService

	// Optional function called after every successful request made to the DO APIs

@@ -252,6 +253,7 @@ func NewClient(httpClient *http.Client) *Client {
	c.StorageActions = &StorageActionsServiceOp{client: c}
	c.Tags = &TagsServiceOp{client: c}
	c.Tokens = &TokensServiceOp{client: c}
	c.UptimeChecks = &UptimeChecksServiceOp{client: c}
	c.VPCs = &VPCsServiceOp{client: c}

	c.headers = make(map[string]string)
@@ -0,0 +1,342 @@
package godo

import (
	"context"
	"fmt"
	"net/http"
	"path"
)

const uptimeChecksBasePath = "/v2/uptime/checks"

// UptimeChecksService is an interface for creating and managing Uptime checks with the DigitalOcean API.
// See: https://docs.digitalocean.com/reference/api/api-reference/#tag/Uptime
type UptimeChecksService interface {
	List(context.Context, *ListOptions) ([]UptimeCheck, *Response, error)
	Get(context.Context, string) (*UptimeCheck, *Response, error)
	GetState(context.Context, string) (*UptimeCheckState, *Response, error)
	Create(context.Context, *CreateUptimeCheckRequest) (*UptimeCheck, *Response, error)
	Update(context.Context, string, *UpdateUptimeCheckRequest) (*UptimeCheck, *Response, error)
	Delete(context.Context, string) (*Response, error)
	GetAlert(context.Context, string, string) (*UptimeAlert, *Response, error)
	ListAlerts(context.Context, string, *ListOptions) ([]UptimeAlert, *Response, error)
	CreateAlert(context.Context, string, *CreateUptimeAlertRequest) (*UptimeAlert, *Response, error)
	UpdateAlert(context.Context, string, string, *UpdateUptimeAlertRequest) (*UptimeAlert, *Response, error)
	DeleteAlert(context.Context, string, string) (*Response, error)
}

// UptimeChecksServiceOp handles communication with Uptime Check methods of the DigitalOcean API.
type UptimeChecksServiceOp struct {
	client *Client
}

// UptimeCheck represents a DigitalOcean UptimeCheck configuration.
type UptimeCheck struct {
	ID      string   `json:"id"`
	Name    string   `json:"name"`
	Type    string   `json:"type"`
	Target  string   `json:"target"`
	Regions []string `json:"regions"`
	Enabled bool     `json:"enabled"`
}

// UptimeAlert represents a DigitalOcean Uptime Alert configuration.
type UptimeAlert struct {
	ID            string         `json:"id"`
	Name          string         `json:"name"`
	Type          string         `json:"type"`
	Threshold     int            `json:"threshold"`
	Comparison    string         `json:"comparison"`
	Notifications *Notifications `json:"notifications"`
	Period        string         `json:"period"`
}

// Notifications represents a DigitalOcean Notifications configuration.
type Notifications struct {
	Email []string       `json:"email"`
	Slack []SlackDetails `json:"slack"`
}

// UptimeCheckState represents a DigitalOcean Uptime Check's state configuration.
type UptimeCheckState struct {
	Regions        map[string]UptimeRegion `json:"regions"`
	PreviousOutage UptimePreviousOutage    `json:"previous_outage"`
}

type UptimeRegion struct {
	Status                    string  `json:"status"`
	StatusChangedAt           string  `json:"status_changed_at"`
	ThirtyDayUptimePercentage float32 `json:"thirty_day_uptime_percentage"`
}

// UptimePreviousOutage represents a DigitalOcean Uptime Check's previous outage configuration.
type UptimePreviousOutage struct {
	Region          string `json:"region"`
	StartedAt       string `json:"started_at"`
	EndedAt         string `json:"ended_at"`
	DurationSeconds int    `json:"duration_seconds"`
}

// CreateUptimeCheckRequest represents the request to create a new uptime check.
type CreateUptimeCheckRequest struct {
	Name    string   `json:"name"`
	Type    string   `json:"type"`
	Target  string   `json:"target"`
	Regions []string `json:"regions"`
	Enabled bool     `json:"enabled"`
}

// UpdateUptimeCheckRequest represents the request to update uptime check information.
type UpdateUptimeCheckRequest struct {
	Name    string   `json:"name"`
	Type    string   `json:"type"`
	Target  string   `json:"target"`
	Regions []string `json:"regions"`
	Enabled bool     `json:"enabled"`
}

// CreateUptimeAlertRequest represents the request to create a new Uptime Alert.
type CreateUptimeAlertRequest struct {
	Name          string         `json:"name"`
	Type          string         `json:"type"`
	Threshold     int            `json:"threshold"`
	Comparison    string         `json:"comparison"`
	Notifications *Notifications `json:"notifications"`
	Period        string         `json:"period"`
}

// UpdateUptimeAlertRequest represents the request to update an existing alert.
type UpdateUptimeAlertRequest struct {
	Name          string         `json:"name"`
	Type          string         `json:"type"`
	Threshold     int            `json:"threshold"`
	Comparison    string         `json:"comparison"`
	Notifications *Notifications `json:"notifications"`
	Period        string         `json:"period"`
}

type uptimeChecksRoot struct {
	UptimeChecks []UptimeCheck `json:"checks"`
	Links        *Links        `json:"links"`
	Meta         *Meta         `json:"meta"`
}

type uptimeCheckStateRoot struct {
	UptimeCheckState UptimeCheckState `json:"state"`
}

type uptimeAlertsRoot struct {
	UptimeAlerts []UptimeAlert `json:"alerts"`
	Links        *Links        `json:"links"`
	Meta         *Meta         `json:"meta"`
}

type uptimeCheckRoot struct {
	UptimeCheck *UptimeCheck `json:"check"`
}

type uptimeAlertRoot struct {
	UptimeAlert *UptimeAlert `json:"alert"`
}

var _ UptimeChecksService = &UptimeChecksServiceOp{}
// List Checks.
func (p *UptimeChecksServiceOp) List(ctx context.Context, opts *ListOptions) ([]UptimeCheck, *Response, error) {
	path, err := addOptions(uptimeChecksBasePath, opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(uptimeChecksRoot)
	resp, err := p.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	if l := root.Links; l != nil {
		resp.Links = l
	}
	if m := root.Meta; m != nil {
		resp.Meta = m
	}

	return root.UptimeChecks, resp, err
}

// GetState of uptime check.
func (p *UptimeChecksServiceOp) GetState(ctx context.Context, uptimeCheckID string) (*UptimeCheckState, *Response, error) {
	path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/state")

	req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(uptimeCheckStateRoot)
	resp, err := p.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return &root.UptimeCheckState, resp, err
}

// Get retrieves a single uptime check by its ID.
func (p *UptimeChecksServiceOp) Get(ctx context.Context, uptimeCheckID string) (*UptimeCheck, *Response, error) {
	path := path.Join(uptimeChecksBasePath, uptimeCheckID)

	req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(uptimeCheckRoot)
	resp, err := p.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return root.UptimeCheck, resp, err
}

// Create a new uptime check.
func (p *UptimeChecksServiceOp) Create(ctx context.Context, cr *CreateUptimeCheckRequest) (*UptimeCheck, *Response, error) {
	req, err := p.client.NewRequest(ctx, http.MethodPost, uptimeChecksBasePath, cr)
	if err != nil {
		return nil, nil, err
	}

	root := new(uptimeCheckRoot)
	resp, err := p.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return root.UptimeCheck, resp, err
}

// Update an uptime check.
func (p *UptimeChecksServiceOp) Update(ctx context.Context, uptimeCheckID string, ur *UpdateUptimeCheckRequest) (*UptimeCheck, *Response, error) {
	path := path.Join(uptimeChecksBasePath, uptimeCheckID)
	req, err := p.client.NewRequest(ctx, http.MethodPut, path, ur)
	if err != nil {
		return nil, nil, err
	}

	root := new(uptimeCheckRoot)
	resp, err := p.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return root.UptimeCheck, resp, err
}

// Delete an existing uptime check.
func (p *UptimeChecksServiceOp) Delete(ctx context.Context, uptimeCheckID string) (*Response, error) {
	path := path.Join(uptimeChecksBasePath, uptimeCheckID)
	req, err := p.client.NewRequest(ctx, http.MethodDelete, path, nil)
	if err != nil {
		return nil, err
	}

	return p.client.Do(ctx, req, nil)
}
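// A usage sketch for creating a check, assuming a godo client; the target URL
// and region slugs are hypothetical.
//
//	check, _, err := client.UptimeChecks.Create(ctx, &godo.CreateUptimeCheckRequest{
//		Name:    "example-homepage",
//		Type:    "https",
//		Target:  "https://example.com",
//		Regions: []string{"us_east", "eu_west"},
//		Enabled: true,
//	})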
// alerts

// ListAlerts lists alerts for a check.
func (p *UptimeChecksServiceOp) ListAlerts(ctx context.Context, uptimeCheckID string, opts *ListOptions) ([]UptimeAlert, *Response, error) {
	fullPath := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts")
	path, err := addOptions(fullPath, opts)
	if err != nil {
		return nil, nil, err
	}

	req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(uptimeAlertsRoot)
	resp, err := p.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}
	if l := root.Links; l != nil {
		resp.Links = l
	}
	if m := root.Meta; m != nil {
		resp.Meta = m
	}

	return root.UptimeAlerts, resp, err
}

// CreateAlert creates a new check alert.
func (p *UptimeChecksServiceOp) CreateAlert(ctx context.Context, uptimeCheckID string, cr *CreateUptimeAlertRequest) (*UptimeAlert, *Response, error) {
	fullPath := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts")
	req, err := p.client.NewRequest(ctx, http.MethodPost, fullPath, cr)
	if err != nil {
		return nil, nil, err
	}

	root := new(uptimeAlertRoot)
	resp, err := p.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return root.UptimeAlert, resp, err
}

// GetAlert retrieves a single uptime check alert by its ID.
func (p *UptimeChecksServiceOp) GetAlert(ctx context.Context, uptimeCheckID string, alertID string) (*UptimeAlert, *Response, error) {
	path := fmt.Sprintf("v2/uptime/checks/%s/alerts/%s", uptimeCheckID, alertID)

	req, err := p.client.NewRequest(ctx, http.MethodGet, path, nil)
	if err != nil {
		return nil, nil, err
	}

	root := new(uptimeAlertRoot)
	resp, err := p.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return root.UptimeAlert, resp, err
}

// UpdateAlert updates a check's alert.
func (p *UptimeChecksServiceOp) UpdateAlert(ctx context.Context, uptimeCheckID string, alertID string, ur *UpdateUptimeAlertRequest) (*UptimeAlert, *Response, error) {
	path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts/", alertID)
	req, err := p.client.NewRequest(ctx, http.MethodPut, path, ur)
	if err != nil {
		return nil, nil, err
	}

	root := new(uptimeAlertRoot)
	resp, err := p.client.Do(ctx, req, root)
	if err != nil {
		return nil, resp, err
	}

	return root.UptimeAlert, resp, err
}

// DeleteAlert deletes an existing check's alert.
func (p *UptimeChecksServiceOp) DeleteAlert(ctx context.Context, uptimeCheckID string, alertID string) (*Response, error) {
	path := path.Join(uptimeChecksBasePath, uptimeCheckID, "/alerts/", alertID)
	req, err := p.client.NewRequest(ctx, http.MethodDelete, path, nil)
	if err != nil {
		return nil, err
	}

	return p.client.Do(ctx, req, nil)
}
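// A usage sketch for attaching an alert to a check, assuming a godo client;
// the alert type, comparison, period, and email are hypothetical values.
//
//	alert, _, err := client.UptimeChecks.CreateAlert(ctx, check.ID, &godo.CreateUptimeAlertRequest{
//		Name:          "homepage-down",
//		Type:          "down",
//		Comparison:    "greater_than",
//		Threshold:     1,
//		Period:        "2m",
//		Notifications: &godo.Notifications{Email: []string{"ops@example.com"}},
//	})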
vendor/github.com/google/go-containerregistry/internal/compression/compression.go (new file, 97 lines, generated, vendored)
@@ -0,0 +1,97 @@
// Copyright 2022 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package compression abstracts over gzip and zstd.
package compression

import (
	"bufio"
	"bytes"
	"io"

	"github.com/google/go-containerregistry/internal/gzip"
	"github.com/google/go-containerregistry/internal/zstd"
	"github.com/google/go-containerregistry/pkg/compression"
)

// Opener represents e.g. opening a file.
type Opener = func() (io.ReadCloser, error)

// GetCompression detects whether an Opener is compressed and which algorithm is used.
func GetCompression(opener Opener) (compression.Compression, error) {
	rc, err := opener()
	if err != nil {
		return compression.None, err
	}
	defer rc.Close()

	cp, _, err := PeekCompression(rc)
	if err != nil {
		return compression.None, err
	}

	return cp, nil
}

// PeekCompression detects whether the input stream is compressed and which algorithm is used.
//
// If r implements Peek, we will use that directly, otherwise a small number
// of bytes are buffered to Peek at the gzip/zstd header, and the returned
// PeekReader can be used as a replacement for the consumed input io.Reader.
func PeekCompression(r io.Reader) (compression.Compression, PeekReader, error) {
	pr := intoPeekReader(r)

	if isGZip, _, err := checkHeader(pr, gzip.MagicHeader); err != nil {
		return compression.None, pr, err
	} else if isGZip {
		return compression.GZip, pr, nil
	}

	if isZStd, _, err := checkHeader(pr, zstd.MagicHeader); err != nil {
		return compression.None, pr, err
	} else if isZStd {
		return compression.ZStd, pr, nil
	}

	return compression.None, pr, nil
}

// PeekReader is an io.Reader that also implements Peek a la bufio.Reader.
type PeekReader interface {
	io.Reader
	Peek(n int) ([]byte, error)
}

// intoPeekReader creates a PeekReader from an io.Reader.
// If the reader already has a Peek method, it will just return the passed reader.
func intoPeekReader(r io.Reader) PeekReader {
	if p, ok := r.(PeekReader); ok {
		return p
	}

	return bufio.NewReader(r)
}

// checkHeader checks whether the first bytes from a PeekReader match an expected header
func checkHeader(pr PeekReader, expectedHeader []byte) (bool, PeekReader, error) {
	header, err := pr.Peek(len(expectedHeader))
	if err != nil {
		// https://github.com/google/go-containerregistry/issues/367
		if err == io.EOF {
			return false, pr, nil
		}
		return false, pr, err
	}
	return bytes.Equal(header, expectedHeader), pr, nil
}
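// A usage sketch (within this module, since the package is internal); the
// file name is hypothetical. GetCompression consumes one reader from the
// opener and reports gzip, zstd, or none, using the pkg/compression values.
//
//	opener := func() (io.ReadCloser, error) { return os.Open("layer.tar.gz") }
//	algo, err := compression.GetCompression(opener)
//	// algo is one of None, GZip, or ZStd from pkg/compression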
@@ -18,7 +18,6 @@ package estargz
import (
	"bytes"
	"io"
	"io/ioutil"

	"github.com/containerd/stargz-snapshotter/estargz"
	v1 "github.com/google/go-containerregistry/pkg/v1"

@@ -29,9 +28,9 @@ var _ io.ReadCloser = (*estargz.Blob)(nil)

// ReadCloser reads uncompressed tarball input from the io.ReadCloser and
// returns:
// * An io.ReadCloser from which compressed data may be read, and
// * A v1.Hash with the hash of the estargz table of contents, or
// * An error if the estargz processing encountered a problem.
//   - An io.ReadCloser from which compressed data may be read, and
//   - A v1.Hash with the hash of the estargz table of contents, or
//   - An error if the estargz processing encountered a problem.
//
// Refer to estargz for the options:
// https://pkg.go.dev/github.com/containerd/stargz-snapshotter/estargz@v0.4.1#Option

@@ -39,7 +38,7 @@ func ReadCloser(r io.ReadCloser, opts ...estargz.Option) (*estargz.Blob, v1.Hash
	defer r.Close()

	// TODO(#876): Avoid buffering into memory.
	bs, err := ioutil.ReadAll(r)
	bs, err := io.ReadAll(r)
	if err != nil {
		return nil, v1.Hash{}, err
	}
@@ -24,7 +24,8 @@ import (
	"github.com/google/go-containerregistry/internal/and"
)

var gzipMagicHeader = []byte{'\x1f', '\x8b'}
// MagicHeader is the start of gzip files.
var MagicHeader = []byte{'\x1f', '\x8b'}

// ReadCloser reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.

@@ -84,7 +85,7 @@ func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
}

// UnzipReadCloser reads compressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which uncompessed data may be read.
// returns an io.ReadCloser from which uncompressed data may be read.
func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
	gr, err := gzip.NewReader(r)
	if err != nil {

@@ -113,34 +114,5 @@ func Is(r io.Reader) (bool, error) {
	if err != nil {
		return false, err
	}
	return bytes.Equal(magicHeader, gzipMagicHeader), nil
}

// PeekReader is an io.Reader that also implements Peek a la bufio.Reader.
type PeekReader interface {
	io.Reader
	Peek(n int) ([]byte, error)
}

// Peek detects whether the input stream is gzip compressed.
//
// If r implements Peek, we will use that directly, otherwise a small number
// of bytes are buffered to Peek at the gzip header, and the returned
// PeekReader can be used as a replacement for the consumed input io.Reader.
func Peek(r io.Reader) (bool, PeekReader, error) {
	var pr PeekReader
	if p, ok := r.(PeekReader); ok {
		pr = p
	} else {
		pr = bufio.NewReader(r)
	}
	header, err := pr.Peek(2)
	if err != nil {
		// https://github.com/google/go-containerregistry/issues/367
		if err == io.EOF {
			return false, pr, nil
		}
		return false, pr, err
	}
	return bytes.Equal(header, gzipMagicHeader), pr, nil
	return bytes.Equal(magicHeader, MagicHeader), nil
}
@@ -76,3 +76,19 @@ func Retry(f func() error, p Predicate, backoff wait.Backoff) (err error) {
	wait.ExponentialBackoff(backoff, condition)
	return
}

type contextKey string

var key = contextKey("never")

// Never returns a context that signals something should not be retried.
// This is a hack and can be used to communicate across package boundaries
// to avoid retry amplification.
func Never(ctx context.Context) context.Context {
	return context.WithValue(ctx, key, true)
}

// Ever returns true if the context was not wrapped by Never.
func Ever(ctx context.Context) bool {
	return ctx.Value(key) == nil
}
@@ -20,7 +20,6 @@ import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"path"
	"strings"

@@ -104,7 +103,7 @@ func Windows(layer v1.Layer) (v1.Layer, error) {
	b := w.Bytes()
	// gzip the contents, then create the layer
	opener := func() (io.ReadCloser, error) {
		return gzip.ReadCloser(ioutil.NopCloser(bytes.NewReader(b))), nil
		return gzip.ReadCloser(io.NopCloser(bytes.NewReader(b))), nil
	}
	layer, err = tarball.LayerFromOpener(opener)
	if err != nil {
vendor/github.com/google/go-containerregistry/internal/zstd/zstd.go (new file, 116 lines, generated, vendored)
@@ -0,0 +1,116 @@
// Copyright 2022 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package zstd provides helper functions for interacting with zstd streams.
package zstd

import (
	"bufio"
	"bytes"
	"io"

	"github.com/google/go-containerregistry/internal/and"
	"github.com/klauspost/compress/zstd"
)

// MagicHeader is the start of zstd files.
var MagicHeader = []byte{'\x28', '\xb5', '\x2f', '\xfd'}

// ReadCloser reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
// This uses zstd level 1 for the compression.
func ReadCloser(r io.ReadCloser) io.ReadCloser {
	return ReadCloserLevel(r, 1)
}

// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which compressed data may be read.
func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
	pr, pw := io.Pipe()

	// For highly compressible layers, zstd.Writer will output a very small
	// number of bytes per Write(). This is normally fine, but when pushing
	// to a registry, we want to ensure that we're taking full advantage of
	// the available bandwidth instead of sending tons of tiny writes over
	// the wire.
	// 64K ought to be small enough for anybody.
	bw := bufio.NewWriterSize(pw, 2<<16)

	// Returns err so we can pw.CloseWithError(err)
	go func() error {
		// TODO(go1.14): Just defer {pw,zw,r}.Close like you'd expect.
		// Context: https://golang.org/issue/24283
		zw, err := zstd.NewWriter(bw, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level)))
		if err != nil {
			return pw.CloseWithError(err)
		}

		if _, err := io.Copy(zw, r); err != nil {
			defer r.Close()
			defer zw.Close()
			return pw.CloseWithError(err)
		}

		// Close zstd writer to Flush it and write zstd trailers.
		if err := zw.Close(); err != nil {
			return pw.CloseWithError(err)
		}

		// Flush bufio writer to ensure we write out everything.
		if err := bw.Flush(); err != nil {
			return pw.CloseWithError(err)
		}

		// We don't really care if these fail.
		defer pw.Close()
		defer r.Close()

		return nil
	}()

	return pr
}

// UnzipReadCloser reads compressed input data from the io.ReadCloser and
// returns an io.ReadCloser from which uncompressed data may be read.
func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
	gr, err := zstd.NewReader(r)
	if err != nil {
		return nil, err
	}
	return &and.ReadCloser{
		Reader: gr,
		CloseFunc: func() error {
			// If the unzip fails, then this seems to return the same
			// error as the read. We don't want this to interfere with
			// us closing the main ReadCloser, since this could leave
			// an open file descriptor (fails on Windows).
			gr.Close()
			return r.Close()
		},
	}, nil
}

// Is detects whether the input stream is compressed.
func Is(r io.Reader) (bool, error) {
	magicHeader := make([]byte, 4)
	n, err := r.Read(magicHeader)
	if n == 0 && err == io.EOF {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return bytes.Equal(magicHeader, MagicHeader), nil
}
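// A usage sketch of the pipe-based streaming compression above (within this
// module, since the package is internal); the payload is hypothetical. Reads
// from zrc yield zstd-compressed bytes as the goroutine copies and closes.
//
//	zrc := zstd.ReadCloser(io.NopCloser(bytes.NewReader(payload)))
//	defer zrc.Close()
//	compressed, err := io.ReadAll(zrc)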
@@ -123,6 +123,10 @@ func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) {
	if err != nil {
		return nil, err
	}
	// cf.GetAuthConfig automatically sets the ServerAddress attribute. Since
	// we don't make use of it, clear the value for a proper "is-empty" test.
	// See: https://github.com/google/go-containerregistry/issues/1510
	cfg.ServerAddress = ""
	if cfg != empty {
		break
	}
vendor/github.com/google/go-containerregistry/pkg/compression/compression.go (new file, 26 lines, generated, vendored)
@@ -0,0 +1,26 @@
// Copyright 2022 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package compression abstracts over gzip and zstd.
package compression

// Compression is an enumeration of the supported compression algorithms
type Compression string

// The collection of known MediaType values.
const (
	None Compression = "none"
	GZip Compression = "gzip"
	ZStd Compression = "zstd"
)
@@ -18,7 +18,6 @@ import (
	"archive/tar"
	"bytes"
	"io"
	"io/ioutil"
	"sort"

	v1 "github.com/google/go-containerregistry/pkg/v1"

@@ -57,7 +56,7 @@ func Layer(filemap map[string][]byte) (v1.Layer, error) {

	// Return a new copy of the buffer each time it's opened.
	return tarball.LayerFromOpener(func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewBuffer(b.Bytes())), nil
		return io.NopCloser(bytes.NewBuffer(b.Bytes())), nil
	})
}
@@ -187,9 +187,11 @@ func filterEmpty(h []v1.History) []v1.History {
// One manifest.json file at the top level containing information about several images.
// One repositories file mapping from the image <registry>/<repo name> to <tag> to the id of the top most layer.
// For every layer, a directory named with the layer ID is created with the following contents:
// layer.tar - The uncompressed layer tarball.
// <layer id>.json- Layer metadata json.
// VERSION- Schema version string. Always set to "1.0".
//
//	layer.tar - The uncompressed layer tarball.
//	<layer id>.json- Layer metadata json.
//	VERSION- Schema version string. Always set to "1.0".
//
// One file for the config blob, named after its SHA.
func MultiWrite(refToImage map[name.Reference]v1.Image, w io.Writer) error {
	tf := tar.NewWriter(w)
@@ -16,24 +16,24 @@
package logs

import (
	"io/ioutil"
	"io"
	"log"
)

var (
	// Warn is used to log non-fatal errors.
	Warn = log.New(ioutil.Discard, "", log.LstdFlags)
	Warn = log.New(io.Discard, "", log.LstdFlags)

	// Progress is used to log notable, successful events.
	Progress = log.New(ioutil.Discard, "", log.LstdFlags)
	Progress = log.New(io.Discard, "", log.LstdFlags)

	// Debug is used to log information that is useful for debugging.
	Debug = log.New(ioutil.Discard, "", log.LstdFlags)
	Debug = log.New(io.Discard, "", log.LstdFlags)
)

// Enabled checks to see if the logger's writer is set to something other
// than ioutil.Discard. This allows callers to avoid expensive operations
// than io.Discard. This allows callers to avoid expensive operations
// that will end up in /dev/null anyway.
func Enabled(l *log.Logger) bool {
	return l.Writer() != ioutil.Discard
	return l.Writer() != io.Discard
}
@@ -35,7 +35,7 @@ func (e *ErrBadName) Is(target error) bool {
}

// newErrBadName returns a ErrBadName which returns the given formatted string from Error().
func newErrBadName(fmtStr string, args ...interface{}) *ErrBadName {
func newErrBadName(fmtStr string, args ...any) *ErrBadName {
	return &ErrBadName{fmt.Sprintf(fmtStr, args...)}
}
@@ -56,16 +56,16 @@ type stringConst string
// To discourage its use in scenarios where the value is not known at code
// authoring time, it must be passed a string constant:
//
// const str = "valid/string"
// MustParseReference(str)
// MustParseReference("another/valid/string")
// MustParseReference(str + "/and/more")
//	const str = "valid/string"
//	MustParseReference(str)
//	MustParseReference("another/valid/string")
//	MustParseReference(str + "/and/more")
//
// These will not compile:
//
// var str = "valid/string"
// MustParseReference(str)
// MustParseReference(strings.Join([]string{"valid", "string"}, "/"))
//	var str = "valid/string"
//	MustParseReference(str)
//	MustParseReference(strings.Join([]string{"valid", "string"}, "/"))
func MustParseReference(s stringConst, opts ...Option) Reference {
	ref, err := ParseReference(string(s), opts...)
	if err != nil {
@@ -90,8 +90,10 @@ type HealthConfig struct {
}

// Config is a submessage of the config file described as:
// The execution parameters which SHOULD be used as a base when running
// a container using the image.
//
//	The execution parameters which SHOULD be used as a base when running
//	a container using the image.
//
// The names of the fields in this message are chosen to reflect the JSON
// payload of the Config as defined here:
// https://git.io/vrAET
@@ -16,7 +16,6 @@ package layout

import (
	"io"
	"io/ioutil"
	"os"

	v1 "github.com/google/go-containerregistry/pkg/v1"

@@ -30,7 +29,7 @@ func (l Path) Blob(h v1.Hash) (io.ReadCloser, error) {
// Bytes is a convenience function to return a blob from the Path as
// a byte slice.
func (l Path) Bytes(h v1.Hash) ([]byte, error) {
	return ioutil.ReadFile(l.blobPath(h))
	return os.ReadFile(l.blobPath(h))
}

func (l Path) blobPath(h v1.Hash) string {
@@ -19,7 +19,7 @@ import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/partial"

@@ -45,7 +45,7 @@ func ImageIndexFromPath(path string) (v1.ImageIndex, error) {

// ImageIndex returns a v1.ImageIndex for the Path.
func (l Path) ImageIndex() (v1.ImageIndex, error) {
	rawIndex, err := ioutil.ReadFile(l.path("index.json"))
	rawIndex, err := os.ReadFile(l.path("index.json"))
	if err != nil {
		return nil, err
	}
@@ -1,3 +1,17 @@
// Copyright 2019 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package layout

import v1 "github.com/google/go-containerregistry/pkg/v1"
@@ -20,7 +20,6 @@ import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

@@ -187,7 +186,7 @@ func (l Path) WriteFile(name string, data []byte, perm os.FileMode) error {
		return err
	}

	return ioutil.WriteFile(l.path(name), data, perm)
	return os.WriteFile(l.path(name), data, perm)
}

// WriteBlob copies a file to the blobs/ directory in the Path from the given ReadCloser at

@@ -215,7 +214,7 @@ func (l Path) writeBlob(hash v1.Hash, size int64, rc io.ReadCloser, renamer func
	// If a renamer func was provided write to a temporary file
	open := func() (*os.File, error) { return os.Create(file) }
	if renamer != nil {
		open = func() (*os.File, error) { return ioutil.TempFile(dir, hash.Hex) }
		open = func() (*os.File, error) { return os.CreateTemp(dir, hash.Hex) }
	}
	w, err := open()
	if err != nil {

@@ -273,7 +272,7 @@ func (l Path) writeLayer(layer v1.Layer) error {
	if errors.Is(err, stream.ErrNotComputed) {
		// Allow digest errors, since streams may not have calculated the hash
		// yet. Instead, use an empty value, which will be transformed into a
		// random file name with `ioutil.TempFile` and the final digest will be
		// random file name with `os.CreateTemp` and the final digest will be
		// calculated after writing to a temp file and before renaming to the
		// final path.
		d = v1.Hash{Algorithm: "sha256", Hex: ""}

@@ -351,7 +350,7 @@ func (l Path) WriteImage(img v1.Image) error {
	if err != nil {
		return err
	}
	if err := l.WriteBlob(cfgName, ioutil.NopCloser(bytes.NewReader(cfgBlob))); err != nil {
	if err := l.WriteBlob(cfgName, io.NopCloser(bytes.NewReader(cfgBlob))); err != nil {
		return err
	}

@@ -365,7 +364,7 @@ func (l Path) WriteImage(img v1.Image) error {
		return err
	}

	return l.WriteBlob(d, ioutil.NopCloser(bytes.NewReader(manifest)))
	return l.WriteBlob(d, io.NopCloser(bytes.NewReader(manifest)))
}

type withLayer interface {
@@ -25,7 +25,9 @@ import (
type Matcher func(desc v1.Descriptor) bool

// Name returns a match.Matcher that matches based on the value of the
// "org.opencontainers.image.ref.name" annotation:
//
// "org.opencontainers.image.ref.name" annotation:
//
// github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys
func Name(name string) Matcher {
	return Annotation(imagespec.AnnotationRefName, name)
@@ -21,7 +21,6 @@ import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"path/filepath"
	"strings"
	"time"

@@ -126,15 +125,15 @@ type Annotatable interface {
// The annotatable input is expected to be a v1.Image or v1.ImageIndex, and
// returns the same type. You can type-assert the result like so:
//
// img := Annotations(empty.Image, map[string]string{
// "foo": "bar",
// }).(v1.Image)
//	img := Annotations(empty.Image, map[string]string{
//		"foo": "bar",
//	}).(v1.Image)
//
// Or for an index:
//
// idx := Annotations(empty.Index, map[string]string{
// "foo": "bar",
// }).(v1.ImageIndex)
//	idx := Annotations(empty.Index, map[string]string{
//		"foo": "bar",
//	}).(v1.ImageIndex)
//
// If the input Annotatable is not an Image or ImageIndex, the result will
// attempt to lazily annotate the raw manifest.

@@ -164,7 +163,7 @@ func (a arbitraryRawManifest) RawManifest() ([]byte, error) {
	if err != nil {
		return nil, err
	}
	var m map[string]interface{}
	var m map[string]any
	if err := json.Unmarshal(b, &m); err != nil {
		return nil, err
	}
@@ -333,6 +332,13 @@ func inWhiteoutDir(fileMap map[string]bool, file string) bool {
	return false
}

func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// Time sets all timestamps in an image to the given timestamp.
func Time(img v1.Image, t time.Time) (v1.Image, error) {
	newImage := empty.Image
@@ -342,26 +348,45 @@ func Time(img v1.Image, t time.Time) (v1.Image, error) {
		return nil, fmt.Errorf("getting image layers: %w", err)
	}

	// Strip away all timestamps from layers
	newLayers := make([]v1.Layer, len(layers))
	for idx, layer := range layers {
		newLayer, err := layerTime(layer, t)
		if err != nil {
			return nil, fmt.Errorf("setting layer times: %w", err)
		}
		newLayers[idx] = newLayer
	}

	newImage, err = AppendLayers(newImage, newLayers...)
	if err != nil {
		return nil, fmt.Errorf("appending layers: %w", err)
	}

	ocf, err := img.ConfigFile()
	if err != nil {
		return nil, fmt.Errorf("getting original config file: %w", err)
	}

	addendums := make([]Addendum, max(len(ocf.History), len(layers)))
	var historyIdx, addendumIdx int
	for layerIdx := 0; layerIdx < len(layers); addendumIdx, layerIdx = addendumIdx+1, layerIdx+1 {
		newLayer, err := layerTime(layers[layerIdx], t)
		if err != nil {
			return nil, fmt.Errorf("setting layer times: %w", err)
		}

		// try to search for the history entry that corresponds to this layer
		for ; historyIdx < len(ocf.History); historyIdx++ {
			addendums[addendumIdx].History = ocf.History[historyIdx]
			// if it's an EmptyLayer, do not set the Layer and have the Addendum with just the History
			// and move on to the next History entry
			if ocf.History[historyIdx].EmptyLayer {
				addendumIdx++
				continue
			}
			// otherwise, we can exit from the cycle
			historyIdx++
			break
		}
		addendums[addendumIdx].Layer = newLayer
	}

	// add all leftover History entries
	for ; historyIdx < len(ocf.History); historyIdx, addendumIdx = historyIdx+1, addendumIdx+1 {
		addendums[addendumIdx].History = ocf.History[historyIdx]
	}

	newImage, err = Append(newImage, addendums...)
	if err != nil {
		return nil, fmt.Errorf("appending layers: %w", err)
	}

	cf, err := newImage.ConfigFile()
	if err != nil {
		return nil, fmt.Errorf("setting config file: %w", err)
@@ -384,6 +409,7 @@ func Time(img v1.Image, t time.Time) (v1.Image, error) {
 		h.Comment = ocf.History[i].Comment
 		h.EmptyLayer = ocf.History[i].EmptyLayer
 		// Explicitly ignore Author field; which hinders reproducibility
+		h.Author = ""
 		cfg.History[i] = h
 	}
@@ -430,7 +456,7 @@ func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) {
 	b := w.Bytes()
 	// gzip the contents, then create the layer
 	opener := func() (io.ReadCloser, error) {
-		return gzip.ReadCloser(ioutil.NopCloser(bytes.NewReader(b))), nil
+		return gzip.ReadCloser(io.NopCloser(bytes.NewReader(b))), nil
 	}
 	layer, err = tarball.LayerFromOpener(opener)
 	if err != nil {
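For reference, here is a standalone sketch (not part of the vendored diff) of the pairing logic the rewritten mutate.Time adopts above. History and Addendum are simplified stand-ins for the real v1/mutate types; the loop structure mirrors the hunk:

package main

import "fmt"

type History struct {
	CreatedBy  string
	EmptyLayer bool
}

type Addendum struct {
	History History
	Layer   string // stand-in for v1.Layer
}

func max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	history := []History{
		{CreatedBy: "ENV FOO=bar", EmptyLayer: true}, // no layer for this entry
		{CreatedBy: "RUN make"},
		{CreatedBy: "COPY . /app"},
	}
	layers := []string{"layer-0", "layer-1"}

	addendums := make([]Addendum, max(len(history), len(layers)))
	var historyIdx, addendumIdx int
	for layerIdx := 0; layerIdx < len(layers); addendumIdx, layerIdx = addendumIdx+1, layerIdx+1 {
		// find the history entry that corresponds to this layer
		for ; historyIdx < len(history); historyIdx++ {
			addendums[addendumIdx].History = history[historyIdx]
			if history[historyIdx].EmptyLayer {
				addendumIdx++ // empty-layer entries keep their Addendum without a Layer
				continue
			}
			historyIdx++
			break
		}
		addendums[addendumIdx].Layer = layers[layerIdx]
	}
	// leftover history entries (e.g. trailing empty layers)
	for ; historyIdx < len(history); historyIdx, addendumIdx = historyIdx+1, addendumIdx+1 {
		addendums[addendumIdx].History = history[historyIdx]
	}
	fmt.Printf("%+v\n", addendums) // ENV gets no layer; RUN/COPY get layer-0/layer-1
}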
@@ -18,7 +18,10 @@ import (
 	"io"
 
 	"github.com/google/go-containerregistry/internal/and"
+	"github.com/google/go-containerregistry/internal/compression"
 	"github.com/google/go-containerregistry/internal/gzip"
+	"github.com/google/go-containerregistry/internal/zstd"
+	comp "github.com/google/go-containerregistry/pkg/compression"
 	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/types"
 )
@@ -51,23 +54,27 @@ func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) {
 		return nil, err
 	}
 
-	// Often, the "compressed" bytes are not actually gzip-compressed.
-	// Peek at the first two bytes to determine whether or not it's correct to
-	// wrap this with gzip.UnzipReadCloser.
-	gzipped, pr, err := gzip.Peek(rc)
+	// Often, the "compressed" bytes are not actually-compressed.
+	// Peek at the first two bytes to determine whether it's correct to
+	// wrap this with gzip.UnzipReadCloser or zstd.UnzipReadCloser.
+	cp, pr, err := compression.PeekCompression(rc)
 	if err != nil {
 		return nil, err
 	}
 
 	prc := &and.ReadCloser{
 		Reader:    pr,
 		CloseFunc: rc.Close,
 	}
 
-	if !gzipped {
+	switch cp {
+	case comp.GZip:
+		return gzip.UnzipReadCloser(prc)
+	case comp.ZStd:
+		return zstd.UnzipReadCloser(prc)
+	default:
 		return prc, nil
 	}
-
-	return gzip.UnzipReadCloser(prc)
 }
 
 // DiffID implements v1.Layer
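Aside: the PeekCompression detection adopted above can be approximated with bufio and the well-known magic bytes (gzip: 1f 8b; zstd: 28 b5 2f fd). A minimal sketch, not the vendored implementation (which also hands back the buffered reader so no bytes are lost):

package main

import (
	"bufio"
	"bytes"
	"fmt"
)

func detect(r *bufio.Reader) string {
	// Peek does not consume input, so r can still be read in full afterwards.
	if b, err := r.Peek(4); err == nil {
		if bytes.HasPrefix(b, []byte{0x1f, 0x8b}) {
			return "gzip"
		}
		if bytes.HasPrefix(b, []byte{0x28, 0xb5, 0x2f, 0xfd}) {
			return "zstd"
		}
	}
	return "none"
}

func main() {
	r := bufio.NewReader(bytes.NewReader([]byte{0x1f, 0x8b, 0x08, 0x00}))
	fmt.Println(detect(r)) // gzip
}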
@@ -19,7 +19,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 
 	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/types"
@@ -67,12 +66,12 @@ func (cl *configLayer) DiffID() (v1.Hash, error) {
 
 // Uncompressed implements v1.Layer
 func (cl *configLayer) Uncompressed() (io.ReadCloser, error) {
-	return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
+	return io.NopCloser(bytes.NewBuffer(cl.content)), nil
 }
 
 // Compressed implements v1.Layer
 func (cl *configLayer) Compressed() (io.ReadCloser, error) {
-	return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil
+	return io.NopCloser(bytes.NewBuffer(cl.content)), nil
 }
 
 // Size implements v1.Layer
@@ -355,7 +354,7 @@ func UncompressedSize(l v1.Layer) (int64, error) {
 	}
 	defer rc.Close()
 
-	return io.Copy(ioutil.Discard, rc)
+	return io.Copy(io.Discard, rc)
 }
 
 type withExists interface {
@@ -385,7 +384,7 @@ func Exists(l v1.Layer) (bool, error) {
 
 // Recursively unwrap our wrappers so that we can check for the original implementation.
 // We might want to expose this?
-func unwrap(i interface{}) interface{} {
+func unwrap(i any) any {
 	if ule, ok := i.(*uncompressedLayerExtender); ok {
 		return unwrap(ule.UncompressedLayer)
 	}
@@ -1,3 +1,17 @@
+// Copyright 2019 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 package remote
 
 import (
@@ -35,11 +49,10 @@ func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTrip
 	// authorize a push. Figure out how to return early here when we can,
 	// to avoid a roundtrip for spec-compliant registries.
 	w := writer{
-		repo:    ref.Context(),
-		client:  &http.Client{Transport: tr},
-		context: context.Background(),
+		repo:   ref.Context(),
+		client: &http.Client{Transport: tr},
 	}
-	loc, _, err := w.initiateUpload("", "", "")
+	loc, _, err := w.initiateUpload(context.Background(), "", "", "")
 	if loc != "" {
 		// Since we're only initiating the upload to check whether we
 		// can, we should attempt to cancel it, in case initiating
@@ -19,7 +19,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"
@@ -260,7 +259,7 @@ func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType
 		return nil, nil, err
 	}
 
-	manifest, err := ioutil.ReadAll(resp.Body)
+	manifest, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -17,7 +17,6 @@ package remote
 import (
 	"bytes"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"sync"
@@ -115,7 +114,7 @@ func (r *remoteImage) RawConfigFile() ([]byte, error) {
 	}
 	defer body.Close()
 
-	r.config, err = ioutil.ReadAll(body)
+	r.config, err = io.ReadAll(body)
 	if err != nil {
 		return nil, err
 	}
@@ -153,7 +152,7 @@ func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) {
 	}
 
 	if d.Data != nil {
-		return verify.ReadCloser(ioutil.NopCloser(bytes.NewReader(d.Data)), d.Size, d.Digest)
+		return verify.ReadCloser(io.NopCloser(bytes.NewReader(d.Data)), d.Size, d.Digest)
 	}
 
 	// We don't want to log binary layers -- this can break terminals.
@@ -194,10 +194,12 @@ func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) {
 // This naively matches the first manifest with matching platform attributes.
 //
 // We should probably use this instead:
-// github.com/containerd/containerd/platforms
+//
+//	github.com/containerd/containerd/platforms
 //
 // But first we'd need to migrate to:
-// github.com/opencontainers/image-spec/specs-go/v1
+//
+//	github.com/opencontainers/image-spec/specs-go/v1
 func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) {
 	index, err := r.IndexManifest()
 	if err != nil {
@@ -89,7 +89,6 @@ func MultiWrite(m map[name.Reference]Taggable, options ...Option) (rerr error) {
 	w := writer{
 		repo:      repo,
 		client:    &http.Client{Transport: tr},
-		context:   o.context,
 		backoff:   o.retryBackoff,
 		predicate: o.retryPredicate,
 	}
@@ -74,6 +74,22 @@ var defaultRetryBackoff = Backoff{
 	Steps:    3,
 }
 
+// Useful for tests
+var fastBackoff = Backoff{
+	Duration: 1.0 * time.Millisecond,
+	Factor:   3.0,
+	Jitter:   0.1,
+	Steps:    3,
+}
+
+var retryableStatusCodes = []int{
+	http.StatusRequestTimeout,
+	http.StatusInternalServerError,
+	http.StatusBadGateway,
+	http.StatusServiceUnavailable,
+	http.StatusGatewayTimeout,
+}
+
 const (
 	defaultJobs = 4
 
@@ -87,10 +103,7 @@ const (
 var DefaultTransport http.RoundTripper = &http.Transport{
 	Proxy: http.ProxyFromEnvironment,
 	DialContext: (&net.Dialer{
-		// By default we wrap the transport in retries, so reduce the
-		// default dial timeout to 5s to avoid 5x 30s of connection
-		// timeouts when doing the "ping" on certain http registries.
-		Timeout:   5 * time.Second,
+		Timeout:   30 * time.Second,
 		KeepAlive: 30 * time.Second,
 	}).DialContext,
 	ForceAttemptHTTP2: true,
@@ -143,7 +156,7 @@ func makeOptions(target authn.Resource, opts ...Option) (*options, error) {
 	}
 
 	// Wrap the transport in something that can retry network flakes.
-	o.transport = transport.NewRetry(o.transport)
+	o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(retryableStatusCodes...))
 
 	// Wrap this last to prevent transport.New from double-wrapping.
 	if o.userAgent != "" {
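Hedged usage sketch of the retry options wired up above. NewRetry and WithRetryStatusCodes come from the vendored transport package (their signatures appear in the retry.go hunks below); newClient is a hypothetical helper, not part of this diff:

package main

import (
	"net/http"

	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func newClient() *http.Client {
	// Retry flaky registry responses on the same codes the diff registers.
	rt := transport.NewRetry(http.DefaultTransport,
		transport.WithRetryStatusCodes(
			http.StatusRequestTimeout,
			http.StatusInternalServerError,
			http.StatusBadGateway,
			http.StatusServiceUnavailable,
			http.StatusGatewayTimeout,
		),
	)
	return &http.Client{Transport: rt}
}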
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go (generated, vendored; 14 lines changed)
@@ -19,7 +19,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"net/http"
 	"net/url"
@@ -268,11 +268,13 @@ func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) {
 	defer resp.Body.Close()
 
 	if err := CheckError(resp, http.StatusOK); err != nil {
-		logs.Warn.Printf("No matching credentials were found for %q", bt.registry)
+		if bt.basic == authn.Anonymous {
+			logs.Warn.Printf("No matching credentials were found for %q", bt.registry)
+		}
 		return nil, err
 	}
 
-	return ioutil.ReadAll(resp.Body)
+	return io.ReadAll(resp.Body)
 }
 
 // https://docs.docker.com/registry/spec/auth/token/
@@ -308,9 +310,11 @@ func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) {
 	defer resp.Body.Close()
 
 	if err := CheckError(resp, http.StatusOK); err != nil {
-		logs.Warn.Printf("No matching credentials were found for %q", bt.registry)
+		if bt.basic == authn.Anonymous {
+			logs.Warn.Printf("No matching credentials were found for %q", bt.registry)
+		}
 		return nil, err
 	}
 
-	return ioutil.ReadAll(resp.Body)
+	return io.ReadAll(resp.Body)
 }
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go (generated, vendored; 10 lines changed)
@@ -17,7 +17,7 @@ package transport
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"strings"
 
@@ -86,9 +86,9 @@ func (e *Error) Temporary() bool {
 
 // Diagnostic represents a single error returned by a Docker registry interaction.
 type Diagnostic struct {
-	Code    ErrorCode   `json:"code"`
-	Message string      `json:"message,omitempty"`
-	Detail  interface{} `json:"detail,omitempty"`
+	Code    ErrorCode `json:"code"`
+	Message string    `json:"message,omitempty"`
+	Detail  any       `json:"detail,omitempty"`
 }
 
 // String stringifies the Diagnostic in the form: $Code: $Message[; $Detail]
@@ -153,7 +153,7 @@ func CheckError(resp *http.Response, codes ...int) error {
 			return nil
 		}
 	}
-	b, err := ioutil.ReadAll(resp.Body)
+	b, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return err
 	}
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go (generated, vendored; 173 lines changed)
@@ -19,11 +19,12 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"strings"
+	"time"
 
 	authchallenge "github.com/docker/distribution/registry/client/auth/challenge"
+	"github.com/google/go-containerregistry/pkg/logs"
 	"github.com/google/go-containerregistry/pkg/name"
 )
 
@@ -35,6 +36,9 @@ const (
 	bearer    challenge = "bearer"
 )
 
+// 300ms is the default fallback period for go's DNS dialer but we could make this configurable.
+var fallbackDelay = 300 * time.Millisecond
+
 type pingResp struct {
 	challenge challenge
@@ -50,27 +54,7 @@ func (c challenge) Canonical() challenge {
 	return challenge(strings.ToLower(string(c)))
 }
 
-func parseChallenge(suffix string) map[string]string {
-	kv := make(map[string]string)
-	for _, token := range strings.Split(suffix, ",") {
-		// Trim any whitespace around each token.
-		token = strings.Trim(token, " ")
-
-		// Break the token into a key/value pair
-		if parts := strings.SplitN(token, "=", 2); len(parts) == 2 {
-			// Unquote the value, if it is quoted.
-			kv[parts[0]] = strings.Trim(parts[1], `"`)
-		} else {
-			// If there was only one part, treat is as a key with an empty value
-			kv[token] = ""
-		}
-	}
-	return kv
-}
-
 func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingResp, error) {
-	client := http.Client{Transport: t}
-
 	// This first attempts to use "https" for every request, falling back to http
 	// if the registry matches our localhost heuristic or if it is intentionally
 	// set to insecure via name.NewInsecureRegistry.
@@ -78,54 +62,117 @@ func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingRes
 	if reg.Scheme() == "http" {
 		schemes = append(schemes, "http")
 	}
+	if len(schemes) == 1 {
+		return pingSingle(ctx, reg, t, schemes[0])
+	}
+	return pingParallel(ctx, reg, t, schemes)
+}
 
-	var errs []error
-	for _, scheme := range schemes {
-		url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name())
-		req, err := http.NewRequest(http.MethodGet, url, nil)
-		if err != nil {
-			return nil, err
-		}
-		resp, err := client.Do(req.WithContext(ctx))
-		if err != nil {
-			errs = append(errs, err)
-			// Potentially retry with http.
-			continue
-		}
-		defer func() {
-			// By draining the body, make sure to reuse the connection made by
-			// the ping for the following access to the registry
-			io.Copy(ioutil.Discard, resp.Body)
-			resp.Body.Close()
-		}()
+func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, scheme string) (*pingResp, error) {
+	client := http.Client{Transport: t}
+	url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name())
+	req, err := http.NewRequest(http.MethodGet, url, nil)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// By draining the body, make sure to reuse the connection made by
+		// the ping for the following access to the registry
+		io.Copy(io.Discard, resp.Body)
+		resp.Body.Close()
+	}()
 
-		switch resp.StatusCode {
-		case http.StatusOK:
-			// If we get a 200, then no authentication is needed.
+	switch resp.StatusCode {
+	case http.StatusOK:
+		// If we get a 200, then no authentication is needed.
+		return &pingResp{
+			challenge: anonymous,
+			scheme:    scheme,
+		}, nil
+	case http.StatusUnauthorized:
+		if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 {
+			// If we hit more than one, let's try to find one that we know how to handle.
+			wac := pickFromMultipleChallenges(challenges)
 			return &pingResp{
-				challenge: anonymous,
-				scheme:    scheme,
+				challenge:  challenge(wac.Scheme).Canonical(),
+				parameters: wac.Parameters,
+				scheme:     scheme,
 			}, nil
-		case http.StatusUnauthorized:
-			if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 {
-				// If we hit more than one, let's try to find one that we know how to handle.
-				wac := pickFromMultipleChallenges(challenges)
-				return &pingResp{
-					challenge:  challenge(wac.Scheme).Canonical(),
-					parameters: wac.Parameters,
-					scheme:     scheme,
-				}, nil
-			}
-			// Otherwise, just return the challenge without parameters.
-			return &pingResp{
-				challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(),
-				scheme:    scheme,
-			}, nil
-		default:
-			return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized)
 		}
+		// Otherwise, just return the challenge without parameters.
+		return &pingResp{
+			challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(),
+			scheme:    scheme,
+		}, nil
+	default:
+		return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized)
 	}
-	return nil, multierrs(errs)
+}
+
+// Based on the golang happy eyeballs dialParallel impl in net/dial.go.
+func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, schemes []string) (*pingResp, error) {
+	returned := make(chan struct{})
+	defer close(returned)
+
+	type pingResult struct {
+		*pingResp
+		error
+		primary bool
+		done    bool
+	}
+
+	results := make(chan pingResult)
+
+	startRacer := func(ctx context.Context, scheme string) {
+		pr, err := pingSingle(ctx, reg, t, scheme)
+		select {
+		case results <- pingResult{pingResp: pr, error: err, primary: scheme == "https", done: true}:
+		case <-returned:
+			if pr != nil {
+				logs.Debug.Printf("%s lost race", scheme)
+			}
+		}
+	}
+
+	var primary, fallback pingResult
+
+	primaryCtx, primaryCancel := context.WithCancel(ctx)
+	defer primaryCancel()
+	go startRacer(primaryCtx, schemes[0])
+
+	fallbackTimer := time.NewTimer(fallbackDelay)
+	defer fallbackTimer.Stop()
+
+	for {
+		select {
+		case <-fallbackTimer.C:
+			fallbackCtx, fallbackCancel := context.WithCancel(ctx)
+			defer fallbackCancel()
+			go startRacer(fallbackCtx, schemes[1])
+
+		case res := <-results:
+			if res.error == nil {
+				return res.pingResp, nil
+			}
+			if res.primary {
+				primary = res
+			} else {
+				fallback = res
+			}
+			if primary.done && fallback.done {
+				return nil, multierrs([]error{primary.error, fallback.error})
+			}
+			if res.primary && fallbackTimer.Stop() {
+				// Primary failed and we haven't started the fallback,
+				// reset time to start fallback immediately.
+				fallbackTimer.Reset(0)
+			}
+		}
+	}
+}
 
 func pickFromMultipleChallenges(challenges []authchallenge.Challenge) authchallenge.Challenge {
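The pingParallel function above follows the happy-eyeballs pattern from Go's net/dial.go. A generic, self-contained illustration of that pattern (not the vendored code; attempt, race and the schemes are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// race starts the primary attempt immediately and the fallback either after
// delay or as soon as the primary fails; the first success wins.
func race(ctx context.Context, attempt func(context.Context, string) error, primary, fallback string, delay time.Duration) error {
	results := make(chan error, 2) // buffered so the losing attempt never blocks

	go func() { results <- attempt(ctx, primary) }()

	timer := time.NewTimer(delay)
	defer timer.Stop()

	failures := 0
	for {
		select {
		case <-timer.C:
			go func() { results <- attempt(ctx, fallback) }()
		case err := <-results:
			if err == nil {
				return nil
			}
			if failures++; failures == 2 {
				return err // both attempts failed
			}
			if timer.Stop() {
				timer.Reset(0) // first attempt failed early: launch the fallback now
			}
		}
	}
}

func main() {
	err := race(context.Background(),
		func(ctx context.Context, scheme string) error {
			if scheme == "https" {
				return errors.New("https unreachable")
			}
			return nil // pretend plain http succeeds
		},
		"https", "http", 300*time.Millisecond)
	fmt.Println(err) // <nil>: the http fallback won the race
}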
@@ -161,7 +208,7 @@ func (m multierrs) Error() string {
 	return b.String()
 }
 
-func (m multierrs) As(target interface{}) bool {
+func (m multierrs) As(target any) bool {
 	for _, err := range m {
 		if errors.As(err, target) {
 			return true
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go (generated, vendored; 24 lines changed)
@@ -21,12 +21,12 @@ import (
 	"github.com/google/go-containerregistry/internal/retry"
 )
 
-// Sleep for 0.1, 0.3, 0.9, 2.7 seconds. This should cover networking blips.
+// Sleep for 0.1 then 0.3 seconds. This should cover networking blips.
 var defaultBackoff = retry.Backoff{
 	Duration: 100 * time.Millisecond,
 	Factor:   3.0,
 	Jitter:   0.1,
-	Steps:    5,
+	Steps:    3,
 }
 
 var _ http.RoundTripper = (*retryTransport)(nil)
@@ -36,6 +36,7 @@ type retryTransport struct {
 	inner     http.RoundTripper
 	backoff   retry.Backoff
 	predicate retry.Predicate
+	codes     []int
 }
 
 // Option is a functional option for retryTransport.
@@ -44,6 +45,7 @@ type Option func(*options)
 type options struct {
 	backoff   retry.Backoff
 	predicate retry.Predicate
+	codes     []int
 }
 
 // Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib
@@ -63,6 +65,13 @@ func WithRetryPredicate(predicate func(error) bool) Option {
 	}
 }
 
+// WithRetryStatusCodes sets which http response codes will be retried.
+func WithRetryStatusCodes(codes ...int) Option {
+	return func(o *options) {
+		o.codes = codes
+	}
+}
+
 // NewRetry returns a transport that retries errors.
 func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper {
 	o := &options{
@@ -78,12 +87,23 @@ func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper {
 		inner:     inner,
 		backoff:   o.backoff,
 		predicate: o.predicate,
+		codes:     o.codes,
 	}
 }
 
 func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) {
 	roundtrip := func() error {
 		out, err = t.inner.RoundTrip(in)
+		if !retry.Ever(in.Context()) {
+			return nil
+		}
+		if out != nil {
+			for _, code := range t.codes {
+				if out.StatusCode == code {
+					return CheckError(out)
+				}
+			}
+		}
 		return err
 	}
 	retry.Retry(roundtrip, t.predicate, t.backoff)
@@ -75,7 +75,6 @@ func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *option
 	w := writer{
 		repo:      ref.Context(),
 		client:    &http.Client{Transport: tr},
-		context:   ctx,
 		progress:  progress,
 		backoff:   o.retryBackoff,
 		predicate: o.retryPredicate,
@@ -169,9 +168,8 @@ func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *option
 
 // writer writes the elements of an image to a remote image reference.
 type writer struct {
-	repo    name.Repository
-	client  *http.Client
-	context context.Context
+	repo   name.Repository
+	client *http.Client
 
 	progress *progress
 	backoff  Backoff
@@ -207,7 +205,7 @@ func (w *writer) nextLocation(resp *http.Response) (string, error) {
 // HEAD request to the blob store API. GCR performs an existence check on the
 // initiation if "mount" is specified, even if no "from" sources are specified.
 // However, this is not broadly applicable to all registries, e.g. ECR.
-func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) {
+func (w *writer) checkExistingBlob(ctx context.Context, h v1.Hash) (bool, error) {
 	u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.repo.RepositoryStr(), h.String()))
 
 	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
@@ -215,7 +213,7 @@ func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) {
 		return false, err
 	}
 
-	resp, err := w.client.Do(req.WithContext(w.context))
+	resp, err := w.client.Do(req.WithContext(ctx))
 	if err != nil {
 		return false, err
 	}
@@ -230,7 +228,7 @@ func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) {
 
 // checkExistingManifest checks if a manifest exists already in the repository
 // by making a HEAD request to the manifest API.
-func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) {
+func (w *writer) checkExistingManifest(ctx context.Context, h v1.Hash, mt types.MediaType) (bool, error) {
 	u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), h.String()))
 
 	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
@@ -239,7 +237,7 @@ func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, err
 	}
 	req.Header.Set("Accept", string(mt))
 
-	resp, err := w.client.Do(req.WithContext(w.context))
+	resp, err := w.client.Do(req.WithContext(ctx))
 	if err != nil {
 		return false, err
 	}
@@ -258,7 +256,7 @@ func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, err
 // On success, the layer was either mounted (nothing more to do) or a blob
 // upload was initiated and the body of that blob should be sent to the returned
 // location.
-func (w *writer) initiateUpload(from, mount, origin string) (location string, mounted bool, err error) {
+func (w *writer) initiateUpload(ctx context.Context, from, mount, origin string) (location string, mounted bool, err error) {
 	u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr()))
 	uv := url.Values{}
 	if mount != "" && from != "" {
@@ -277,7 +275,7 @@ func (w *writer) initiateUpload(from, mount, origin string) (location string, mo
 		return "", false, err
 	}
 	req.Header.Set("Content-Type", "application/json")
-	resp, err := w.client.Do(req.WithContext(w.context))
+	resp, err := w.client.Do(req.WithContext(ctx))
 	if err != nil {
 		return "", false, err
 	}
@@ -287,7 +285,7 @@ func (w *writer) initiateUpload(from, mount, origin string) (location string, mo
 	if origin != "" && origin != w.repo.RegistryStr() {
 		// https://github.com/google/go-containerregistry/issues/1404
 		logs.Warn.Printf("retrying without mount: %v", err)
-		return w.initiateUpload("", "", "")
+		return w.initiateUpload(ctx, "", "", "")
 	}
 	return "", false, err
 }
@@ -364,7 +362,7 @@ func (w *writer) streamBlob(ctx context.Context, layer v1.Layer, streamLocation
 
 // commitBlob commits this blob by sending a PUT to the location returned from
 // streaming the blob.
-func (w *writer) commitBlob(location, digest string) error {
+func (w *writer) commitBlob(ctx context.Context, location, digest string) error {
 	u, err := url.Parse(location)
 	if err != nil {
 		return err
@@ -379,7 +377,7 @@ func (w *writer) commitBlob(location, digest string) error {
 	}
 	req.Header.Set("Content-Type", "application/octet-stream")
 
-	resp, err := w.client.Do(req.WithContext(w.context))
+	resp, err := w.client.Do(req.WithContext(ctx))
 	if err != nil {
 		return err
 	}
@@ -399,11 +397,12 @@ func (w *writer) incrProgress(written int64) {
 // uploadOne performs a complete upload of a single layer.
 func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error {
 	tryUpload := func() error {
+		ctx := retry.Never(ctx)
 		var from, mount, origin string
 		if h, err := l.Digest(); err == nil {
 			// If we know the digest, this isn't a streaming layer. Do an existence
 			// check so we can skip uploading the layer if possible.
-			existing, err := w.checkExistingBlob(h)
+			existing, err := w.checkExistingBlob(ctx, h)
 			if err != nil {
 				return err
 			}
@@ -424,7 +423,7 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error {
 			origin = ml.Reference.Context().RegistryStr()
 		}
 
-		location, mounted, err := w.initiateUpload(from, mount, origin)
+		location, mounted, err := w.initiateUpload(ctx, from, mount, origin)
 		if err != nil {
 			return err
 		} else if mounted {
@@ -463,7 +462,7 @@ func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error {
 		}
 		digest := h.String()
 
-		if err := w.commitBlob(location, digest); err != nil {
+		if err := w.commitBlob(ctx, location, digest); err != nil {
 			return err
 		}
 		logs.Progress.Printf("pushed blob: %s", digest)
@@ -491,7 +490,7 @@ func (w *writer) writeIndex(ctx context.Context, ref name.Reference, ii v1.Image
 	// TODO(#803): Pipe through remote.WithJobs and upload these in parallel.
 	for _, desc := range index.Manifests {
 		ref := ref.Context().Digest(desc.Digest.String())
-		exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType)
+		exists, err := w.checkExistingManifest(ctx, desc.Digest, desc.MediaType)
 		if err != nil {
 			return err
 		}
@@ -581,6 +580,7 @@ func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) {
 // commitManifest does a PUT of the image's manifest.
 func (w *writer) commitManifest(ctx context.Context, t Taggable, ref name.Reference) error {
 	tryUpload := func() error {
+		ctx := retry.Never(ctx)
 		raw, desc, err := unpackTaggable(t)
 		if err != nil {
 			return err
@@ -656,7 +656,6 @@ func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr e
 	w := writer{
 		repo:      ref.Context(),
 		client:    &http.Client{Transport: tr},
-		context:   o.context,
 		backoff:   o.retryBackoff,
 		predicate: o.retryPredicate,
 	}
@@ -799,7 +798,6 @@ func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr e
 	w := writer{
 		repo:      repo,
 		client:    &http.Client{Transport: tr},
-		context:   o.context,
 		backoff:   o.retryBackoff,
 		predicate: o.retryPredicate,
 	}
@@ -870,7 +868,6 @@ func Put(ref name.Reference, t Taggable, options ...Option) error {
 	w := writer{
 		repo:      ref.Context(),
 		client:    &http.Client{Transport: tr},
-		context:   o.context,
 		backoff:   o.retryBackoff,
 		predicate: o.retryPredicate,
 	}
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// Package stream implements a single-pass streaming v1.Layer.
 package stream
 
 import (
@@ -21,13 +21,13 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 	"path/filepath"
 	"sync"
 
-	"github.com/google/go-containerregistry/internal/gzip"
+	comp "github.com/google/go-containerregistry/internal/compression"
+	"github.com/google/go-containerregistry/pkg/compression"
 	"github.com/google/go-containerregistry/pkg/name"
 	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/partial"
@@ -166,7 +166,13 @@ func (i *image) areLayersCompressed() (bool, error) {
 		return false, err
 	}
 	defer blob.Close()
-	return gzip.Is(blob)
+
+	cp, _, err := comp.PeekCompression(blob)
+	if err != nil {
+		return false, err
+	}
+
+	return cp != compression.None, nil
 }
 
 func (i *image) loadTarDescriptorAndConfig() error {
@@ -195,7 +201,7 @@ func (i *image) loadTarDescriptorAndConfig() error {
 	}
 	defer cfg.Close()
 
-	i.config, err = ioutil.ReadAll(cfg)
+	i.config, err = io.ReadAll(cfg)
 	if err != nil {
 		return err
 	}
@@ -19,14 +19,17 @@ import (
 	"compress/gzip"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"sync"
 
 	"github.com/containerd/stargz-snapshotter/estargz"
 	"github.com/google/go-containerregistry/internal/and"
+	comp "github.com/google/go-containerregistry/internal/compression"
 	gestargz "github.com/google/go-containerregistry/internal/estargz"
 	ggzip "github.com/google/go-containerregistry/internal/gzip"
+	"github.com/google/go-containerregistry/internal/zstd"
+	"github.com/google/go-containerregistry/pkg/compression"
 	"github.com/google/go-containerregistry/pkg/logs"
 	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/types"
 )
@@ -37,7 +40,8 @@ type layer struct {
 	size               int64
 	compressedopener   Opener
 	uncompressedopener Opener
-	compression        int
+	compression        compression.Compression
+	compressionLevel   int
 	annotations        map[string]string
 	estgzopts          []estargz.Option
 	mediaType          types.MediaType
@@ -90,11 +94,32 @@ func (l *layer) MediaType() (types.MediaType, error) {
 // LayerOption applies options to layer
 type LayerOption func(*layer)
 
+// WithCompression is a functional option for overriding the default
+// compression algorithm used for compressing uncompressed tarballs.
+// Please note that WithCompression(compression.ZStd) should be used
+// in conjunction with WithMediaType(types.OCILayerZStd)
+func WithCompression(comp compression.Compression) LayerOption {
+	return func(l *layer) {
+		switch comp {
+		case compression.ZStd:
+			l.compression = compression.ZStd
+		case compression.GZip:
+			l.compression = compression.GZip
+		case compression.None:
+			logs.Warn.Printf("Compression type 'none' is not supported for tarball layers; using gzip compression.")
+			l.compression = compression.GZip
+		default:
+			logs.Warn.Printf("Unexpected compression type for WithCompression(): %s; using gzip compression instead.", comp)
+			l.compression = compression.GZip
+		}
+	}
+}
+
 // WithCompressionLevel is a functional option for overriding the default
 // compression level used for compressing uncompressed tarballs.
 func WithCompressionLevel(level int) LayerOption {
 	return func(l *layer) {
-		l.compression = level
+		l.compressionLevel = level
 	}
 }
@@ -128,7 +153,7 @@ func WithCompressedCaching(l *layer) {
 			return nil, err
 		}
 
-		return ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
+		return io.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
 	}
 }
 
@@ -149,7 +174,7 @@ func WithEstargz(l *layer) {
 	if err != nil {
 		return nil, err
 	}
-	eopts := append(l.estgzopts, estargz.WithCompressionLevel(l.compression))
+	eopts := append(l.estgzopts, estargz.WithCompressionLevel(l.compressionLevel))
 	rc, h, err := gestargz.ReadCloser(crc, eopts...)
 	if err != nil {
 		return nil, err
@@ -196,31 +221,28 @@ func LayerFromFile(path string, opts ...LayerOption) (v1.Layer, error) {
 // the uncompressed path may end up gzipping things multiple times:
 //  1. Compute the layer SHA256
 //  2. Upload the compressed layer.
 //
 // Since gzip can be expensive, we support an option to memoize the
 // compression that can be passed here: tarball.WithCompressedCaching
 func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
-	rc, err := opener()
-	if err != nil {
-		return nil, err
-	}
-	defer rc.Close()
-
-	compressed, err := ggzip.Is(rc)
+	comp, err := comp.GetCompression(opener)
 	if err != nil {
 		return nil, err
 	}
 
 	layer := &layer{
-		compression: gzip.BestSpeed,
-		annotations: make(map[string]string, 1),
-		mediaType:   types.DockerLayer,
+		compression:      compression.GZip,
+		compressionLevel: gzip.BestSpeed,
+		annotations:      make(map[string]string, 1),
+		mediaType:        types.DockerLayer,
 	}
 
 	if estgz := os.Getenv("GGCR_EXPERIMENT_ESTARGZ"); estgz == "1" {
 		opts = append([]LayerOption{WithEstargz}, opts...)
 	}
 
-	if compressed {
+	switch comp {
+	case compression.GZip:
 		layer.compressedopener = opener
 		layer.uncompressedopener = func() (io.ReadCloser, error) {
 			urc, err := opener()
@@ -229,14 +251,28 @@ func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
 			}
 			return ggzip.UnzipReadCloser(urc)
 		}
-	} else {
+	case compression.ZStd:
+		layer.compressedopener = opener
+		layer.uncompressedopener = func() (io.ReadCloser, error) {
+			urc, err := opener()
+			if err != nil {
+				return nil, err
+			}
+			return zstd.UnzipReadCloser(urc)
+		}
+	default:
 		layer.uncompressedopener = opener
 		layer.compressedopener = func() (io.ReadCloser, error) {
 			crc, err := opener()
 			if err != nil {
 				return nil, err
 			}
-			return ggzip.ReadCloserLevel(crc, layer.compression), nil
+
+			if layer.compression == compression.ZStd {
+				return zstd.ReadCloserLevel(crc, layer.compressionLevel), nil
+			}
+
+			return ggzip.ReadCloserLevel(crc, layer.compressionLevel), nil
 		}
 	}
@@ -244,6 +280,23 @@ func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
 		opt(layer)
 	}
 
+	// Warn if media type does not match compression
+	var mediaTypeMismatch = false
+	switch layer.compression {
+	case compression.GZip:
+		mediaTypeMismatch =
+			layer.mediaType != types.OCILayer &&
+				layer.mediaType != types.OCIRestrictedLayer &&
+				layer.mediaType != types.DockerLayer
+
+	case compression.ZStd:
+		mediaTypeMismatch = layer.mediaType != types.OCILayerZStd
+	}
+
+	if mediaTypeMismatch {
+		logs.Warn.Printf("Unexpected mediaType (%s) for selected compression in %s in LayerFromOpener().", layer.mediaType, layer.compression)
+	}
+
 	if layer.digest, layer.size, err = computeDigest(layer.compressedopener); err != nil {
 		return nil, err
 	}
@@ -264,7 +317,7 @@ func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) {
 //
 // Deprecated: Use LayerFromOpener or stream.NewLayer instead, if possible.
 func LayerFromReader(reader io.Reader, opts ...LayerOption) (v1.Layer, error) {
-	tmp, err := ioutil.TempFile("", "")
+	tmp, err := os.CreateTemp("", "")
 	if err != nil {
 		return nil, fmt.Errorf("creating temp file to buffer reader: %w", err)
 	}
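Hedged usage sketch for the new compression support: LayerFromFile, WithCompression and WithMediaType are from the vendored tarball package, and the pairing with types.OCILayerZStd follows the comment in the WithCompression hunk above; the layer path is hypothetical:

package main

import (
	"log"

	"github.com/google/go-containerregistry/pkg/compression"
	"github.com/google/go-containerregistry/pkg/v1/tarball"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

func main() {
	// Build a zstd-compressed layer from an uncompressed tarball.
	layer, err := tarball.LayerFromFile("/tmp/layer.tar",
		tarball.WithCompression(compression.ZStd),
		tarball.WithMediaType(types.OCILayerZStd), // keep media type and compression in sync
	)
	if err != nil {
		log.Fatal(err)
	}
	d, _ := layer.Digest()
	log.Printf("zstd layer digest: %s", d)
}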
@@ -315,6 +315,7 @@ func getSizeAndManifest(refToImage map[name.Reference]v1.Image) (int64, []byte,
 func calculateTarballSize(refToImage map[name.Reference]v1.Image, mBytes []byte) (size int64, err error) {
 	imageToTags := dedupRefToImage(refToImage)
 
+	seenLayerDigests := make(map[string]struct{})
 	for img, name := range imageToTags {
 		manifest, err := img.Manifest()
 		if err != nil {
@@ -322,6 +323,11 @@ func calculateTarballSize(refToImage map[name.Reference]v1.Image, mBytes []byte)
 		}
 		size += calculateSingleFileInTarSize(manifest.Config.Size)
 		for _, l := range manifest.Layers {
+			hex := l.Digest.Hex
+			if _, ok := seenLayerDigests[hex]; ok {
+				continue
+			}
+			seenLayerDigests[hex] = struct{}{}
 			size += calculateSingleFileInTarSize(l.Size)
 		}
 	}
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+// Package types holds common OCI media types.
 package types
 
 // MediaType is an enumeration of the supported mime types that an element of an image might have.
@@ -24,6 +25,7 @@ const (
 	OCIManifestSchema1             MediaType = "application/vnd.oci.image.manifest.v1+json"
 	OCIConfigJSON                  MediaType = "application/vnd.oci.image.config.v1+json"
 	OCILayer                       MediaType = "application/vnd.oci.image.layer.v1.tar+gzip"
+	OCILayerZStd                   MediaType = "application/vnd.oci.image.layer.v1.tar+zstd"
 	OCIRestrictedLayer             MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
 	OCIUncompressedLayer           MediaType = "application/vnd.oci.image.layer.v1.tar"
 	OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar"
@@ -40,44 +40,22 @@ type Buffer struct {
 	next *Buffer
 }
 
-// Buffers manages the reuse of individual buffer instances. It is thread-safe.
-type Buffers struct {
-	// mu protects the free list. It is separate from the main mutex
-	// so buffers can be grabbed and printed to without holding the main lock,
-	// for better parallelization.
-	mu sync.Mutex
-
-	// freeList is a list of byte buffers, maintained under mu.
-	freeList *Buffer
+var buffers = sync.Pool{
+	New: func() interface{} {
+		return new(Buffer)
+	},
 }
 
 // GetBuffer returns a new, ready-to-use buffer.
-func (bl *Buffers) GetBuffer() *Buffer {
-	bl.mu.Lock()
-	b := bl.freeList
-	if b != nil {
-		bl.freeList = b.next
-	}
-	bl.mu.Unlock()
-	if b == nil {
-		b = new(Buffer)
-	} else {
-		b.next = nil
-		b.Reset()
-	}
+func GetBuffer() *Buffer {
+	b := buffers.Get().(*Buffer)
+	b.Reset()
 	return b
 }
 
 // PutBuffer returns a buffer to the free list.
-func (bl *Buffers) PutBuffer(b *Buffer) {
-	if b.Len() >= 256 {
-		// Let big buffers die a natural death.
-		return
-	}
-	bl.mu.Lock()
-	b.next = bl.freeList
-	bl.freeList = b
-	bl.mu.Unlock()
+func PutBuffer(b *Buffer) {
+	buffers.Put(b)
 }
 
 // Some custom tiny helper functions to print the log header efficiently.
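A minimal sketch of the pattern this hunk adopts: sync.Pool replaces the hand-rolled, mutex-guarded free list, and the pool handles concurrency and reclamation itself:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var pool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

func main() {
	b := pool.Get().(*bytes.Buffer)
	b.Reset() // pooled buffers may hold stale content from a previous user
	b.WriteString("hello")
	fmt.Println(b.String())
	pool.Put(b) // return the buffer for reuse
}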
@@ -24,6 +24,10 @@ import (
 	"github.com/go-logr/logr"
 )
 
+type textWriter interface {
+	WriteText(*bytes.Buffer)
+}
+
 // WithValues implements LogSink.WithValues. The old key/value pairs are
 // assumed to be well-formed, the new ones are checked and padded if
 // necessary. It returns a new slice.
@@ -91,6 +95,51 @@ func MergeKVs(first, second []interface{}) []interface{} {
 	return merged
 }
 
+// MergeKVsInto is a variant of MergeKVs which directly formats the key/value
+// pairs into a buffer.
+func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
+	if len(first) == 0 && len(second) == 0 {
+		// Nothing to do at all.
+		return
+	}
+
+	if len(first) == 0 && len(second)%2 == 0 {
+		// Nothing to be overridden, second slice is well-formed
+		// and can be used directly.
+		for i := 0; i < len(second); i += 2 {
+			KVFormat(b, second[i], second[i+1])
+		}
+		return
+	}
+
+	// Determine which keys are in the second slice so that we can skip
+	// them when iterating over the first one. The code intentionally
+	// favors performance over completeness: we assume that keys are string
+	// constants and thus compare equal when the string values are equal. A
+	// string constant being overridden by, for example, a fmt.Stringer is
+	// not handled.
+	overrides := map[interface{}]bool{}
+	for i := 0; i < len(second); i += 2 {
+		overrides[second[i]] = true
+	}
+	for i := 0; i < len(first); i += 2 {
+		key := first[i]
+		if overrides[key] {
+			continue
+		}
+		KVFormat(b, key, first[i+1])
+	}
+	// Round down.
+	l := len(second)
+	l = l / 2 * 2
+	for i := 1; i < l; i += 2 {
+		KVFormat(b, second[i-1], second[i])
+	}
+	if len(second)%2 == 1 {
+		KVFormat(b, second[len(second)-1], missingValue)
+	}
+}
+
 const missingValue = "(MISSING)"
 
 // KVListFormat serializes all key/value pairs into the provided buffer.
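Standalone, simplified illustration of the override semantics MergeAndFormatKVs implements above (keys in the second slice win over the first); kvFormat is a stand-in for the real serialize.KVFormat:

package main

import (
	"bytes"
	"fmt"
)

func kvFormat(b *bytes.Buffer, k, v interface{}) {
	fmt.Fprintf(b, " %v=%q", k, v)
}

func mergeAndFormat(b *bytes.Buffer, first, second []interface{}) {
	// Record keys from the second slice so matching keys in the first are skipped.
	overrides := map[interface{}]bool{}
	for i := 0; i+1 < len(second); i += 2 {
		overrides[second[i]] = true
	}
	for i := 0; i+1 < len(first); i += 2 {
		if !overrides[first[i]] {
			kvFormat(b, first[i], first[i+1])
		}
	}
	for i := 0; i+1 < len(second); i += 2 {
		kvFormat(b, second[i], second[i+1])
	}
}

func main() {
	var b bytes.Buffer
	mergeAndFormat(&b,
		[]interface{}{"ns", "default", "pod", "a"},
		[]interface{}{"pod", "b"})
	fmt.Println(b.String()) // ns="default" pod="b"
}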
@@ -104,66 +153,74 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
 		} else {
 			v = missingValue
 		}
-		b.WriteByte(' ')
-		// Keys are assumed to be well-formed according to
-		// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
-		// for the sake of performance. Keys with spaces,
-		// special characters, etc. will break parsing.
-		if sK, ok := k.(string); ok {
-			// Avoid one allocation when the key is a string, which
-			// normally it should be.
-			b.WriteString(sK)
-		} else {
-			b.WriteString(fmt.Sprintf("%s", k))
-		}
-
-		// The type checks are sorted so that more frequently used ones
-		// come first because that is then faster in the common
-		// cases. In Kubernetes, ObjectRef (a Stringer) is more common
-		// than plain strings
-		// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
-		switch v := v.(type) {
-		case fmt.Stringer:
-			writeStringValue(b, true, StringerToString(v))
-		case string:
-			writeStringValue(b, true, v)
-		case error:
-			writeStringValue(b, true, ErrorToString(v))
-		case logr.Marshaler:
-			value := MarshalerToValue(v)
-			// A marshaler that returns a string is useful for
-			// delayed formatting of complex values. We treat this
-			// case like a normal string. This is useful for
-			// multi-line support.
-			//
-			// We could do this by recursively formatting a value,
-			// but that comes with the risk of infinite recursion
-			// if a marshaler returns itself. Instead we call it
-			// only once and rely on it returning the intended
-			// value directly.
-			switch value := value.(type) {
-			case string:
-				writeStringValue(b, true, value)
-			default:
-				writeStringValue(b, false, fmt.Sprintf("%+v", value))
-			}
-		case []byte:
-			// In https://github.com/kubernetes/klog/pull/237 it was decided
-			// to format byte slices with "%+q". The advantages of that are:
-			// - readable output if the bytes happen to be printable
-			// - non-printable bytes get represented as unicode escape
-			//   sequences (\uxxxx)
-			//
-			// The downsides are that we cannot use the faster
-			// strconv.Quote here and that multi-line output is not
-			// supported. If developers know that a byte array is
-			// printable and they want multi-line output, they can
-			// convert the value to string before logging it.
-			b.WriteByte('=')
-			b.WriteString(fmt.Sprintf("%+q", v))
-		default:
-			writeStringValue(b, false, fmt.Sprintf("%+v", v))
-		}
+		KVFormat(b, k, v)
 	}
 }
 
+// KVFormat serializes one key/value pair into the provided buffer.
+// A space gets inserted before the pair.
+func KVFormat(b *bytes.Buffer, k, v interface{}) {
+	b.WriteByte(' ')
+	// Keys are assumed to be well-formed according to
+	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
+	// for the sake of performance. Keys with spaces,
+	// special characters, etc. will break parsing.
+	if sK, ok := k.(string); ok {
+		// Avoid one allocation when the key is a string, which
+		// normally it should be.
+		b.WriteString(sK)
+	} else {
+		b.WriteString(fmt.Sprintf("%s", k))
+	}
+
+	// The type checks are sorted so that more frequently used ones
+	// come first because that is then faster in the common
+	// cases. In Kubernetes, ObjectRef (a Stringer) is more common
+	// than plain strings
+	// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
+	switch v := v.(type) {
+	case textWriter:
+		writeTextWriterValue(b, v)
+	case fmt.Stringer:
+		writeStringValue(b, true, StringerToString(v))
+	case string:
+		writeStringValue(b, true, v)
+	case error:
+		writeStringValue(b, true, ErrorToString(v))
+	case logr.Marshaler:
+		value := MarshalerToValue(v)
+		// A marshaler that returns a string is useful for
+		// delayed formatting of complex values. We treat this
+		// case like a normal string. This is useful for
+		// multi-line support.
+		//
+		// We could do this by recursively formatting a value,
+		// but that comes with the risk of infinite recursion
+		// if a marshaler returns itself. Instead we call it
+		// only once and rely on it returning the intended
+		// value directly.
+		switch value := value.(type) {
+		case string:
+			writeStringValue(b, true, value)
+		default:
+			writeStringValue(b, false, fmt.Sprintf("%+v", value))
+		}
+	case []byte:
+		// In https://github.com/kubernetes/klog/pull/237 it was decided
+		// to format byte slices with "%+q". The advantages of that are:
+		// - readable output if the bytes happen to be printable
+		// - non-printable bytes get represented as unicode escape
+		//   sequences (\uxxxx)
+		//
+		// The downsides are that we cannot use the faster
+		// strconv.Quote here and that multi-line output is not
+		// supported. If developers know that a byte array is
+		// printable and they want multi-line output, they can
+		// convert the value to string before logging it.
+		b.WriteByte('=')
+		b.WriteString(fmt.Sprintf("%+q", v))
+	default:
+		writeStringValue(b, false, fmt.Sprintf("%+v", v))
+	}
+}
@@ -203,6 +260,16 @@ func ErrorToString(err error) (ret string) {
 	return
 }
 
+func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
+	b.WriteRune('=')
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintf(b, `"<panic: %s>"`, err)
+		}
+	}()
+	v.WriteText(b)
+}
+
 func writeStringValue(b *bytes.Buffer, quote bool, v string) {
 	data := []byte(v)
 	index := bytes.IndexByte(data, '\n')
@@ -17,8 +17,10 @@ limitations under the License.
 package klog
 
 import (
+	"bytes"
 	"fmt"
 	"reflect"
+	"strings"
 
 	"github.com/go-logr/logr"
 )
@@ -31,11 +33,30 @@ type ObjectRef struct {
 
 func (ref ObjectRef) String() string {
 	if ref.Namespace != "" {
-		return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name)
+		var builder strings.Builder
+		builder.Grow(len(ref.Namespace) + len(ref.Name) + 1)
+		builder.WriteString(ref.Namespace)
+		builder.WriteRune('/')
+		builder.WriteString(ref.Name)
+		return builder.String()
 	}
 	return ref.Name
 }
 
+func (ref ObjectRef) WriteText(out *bytes.Buffer) {
+	out.WriteRune('"')
+	ref.writeUnquoted(out)
+	out.WriteRune('"')
+}
+
+func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) {
+	if ref.Namespace != "" {
+		out.WriteString(ref.Namespace)
+		out.WriteRune('/')
+	}
+	out.WriteString(ref.Name)
+}
+
 // MarshalLog ensures that loggers with support for structured output will log
 // as a struct by removing the String method via a custom type.
 func (ref ObjectRef) MarshalLog() interface{} {
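Why the hunk above replaces fmt.Sprintf with strings.Builder: Grow pre-sizes the buffer so the namespace/name join costs a single allocation instead of the reflection and formatting overhead of Sprintf. A minimal sketch:

package main

import (
	"fmt"
	"strings"
)

func join(namespace, name string) string {
	var b strings.Builder
	b.Grow(len(namespace) + len(name) + 1) // one allocation for the whole result
	b.WriteString(namespace)
	b.WriteByte('/')
	b.WriteString(name)
	return b.String()
}

func main() {
	fmt.Println(join("kube-system", "coredns")) // kube-system/coredns
}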
@@ -117,31 +138,31 @@ var _ fmt.Stringer = kobjSlice{}
 var _ logr.Marshaler = kobjSlice{}
 
 func (ks kobjSlice) String() string {
-	objectRefs, err := ks.process()
-	if err != nil {
-		return err.Error()
+	objectRefs, errStr := ks.process()
+	if errStr != "" {
+		return errStr
 	}
 	return fmt.Sprintf("%v", objectRefs)
 }
 
 func (ks kobjSlice) MarshalLog() interface{} {
-	objectRefs, err := ks.process()
-	if err != nil {
-		return err.Error()
+	objectRefs, errStr := ks.process()
+	if errStr != "" {
+		return errStr
 	}
 	return objectRefs
 }
 
-func (ks kobjSlice) process() ([]interface{}, error) {
+func (ks kobjSlice) process() (objs []interface{}, err string) {
 	s := reflect.ValueOf(ks.arg)
 	switch s.Kind() {
 	case reflect.Invalid:
 		// nil parameter, print as nil.
-		return nil, nil
+		return nil, ""
 	case reflect.Slice:
 		// Okay, handle below.
 	default:
-		return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg)
+		return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg)
 	}
 	objectRefs := make([]interface{}, 0, s.Len())
 	for i := 0; i < s.Len(); i++ {
@@ -151,8 +172,41 @@ func (ks kobjSlice) process() ([]interface{}, error) {
 		} else if v, ok := item.(KMetadata); ok {
 			objectRefs = append(objectRefs, KObj(v))
 		} else {
-			return nil, fmt.Errorf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
+			return nil, fmt.Sprintf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
 		}
 	}
-	return objectRefs, nil
+	return objectRefs, ""
+}
+
+var nilToken = []byte("<nil>")
+
+func (ks kobjSlice) WriteText(out *bytes.Buffer) {
+	s := reflect.ValueOf(ks.arg)
+	switch s.Kind() {
+	case reflect.Invalid:
+		// nil parameter, print as empty slice.
+		out.WriteString("[]")
+		return
+	case reflect.Slice:
+		// Okay, handle below.
+	default:
+		fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg)
+		return
+	}
+	out.Write([]byte{'['})
+	defer out.Write([]byte{']'})
+	for i := 0; i < s.Len(); i++ {
+		if i > 0 {
+			out.Write([]byte{' '})
+		}
+		item := s.Index(i).Interface()
+		if item == nil {
+			out.Write(nilToken)
+		} else if v, ok := item.(KMetadata); ok {
+			KObj(v).writeUnquoted(out)
+		} else {
+			fmt.Fprintf(out, "<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
+			return
+		}
+	}
 }
@@ -532,11 +532,6 @@ func (s settings) deepCopy() settings {
 type loggingT struct {
 	settings
 
-	// bufferCache maintains the free list. It uses its own mutex
-	// so buffers can be grabbed and printed to without holding the main lock,
-	// for better parallelization.
-	bufferCache buffer.Buffers
-
 	// flushD holds a flushDaemon that frequently flushes log file buffers.
 	// Uses its own mutex.
 	flushD *flushDaemon
@@ -664,7 +659,7 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin
 
 // formatHeader formats a log header using the provided file name and line number.
 func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer {
-	buf := l.bufferCache.GetBuffer()
+	buf := buffer.GetBuffer()
 	if l.skipHeaders {
 		return buf
 	}
@@ -682,8 +677,8 @@ func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter
 	// if logger is set, we clear the generated header as we rely on the backing
 	// logger implementation to print headers
 	if logger != nil {
-		l.bufferCache.PutBuffer(buf)
-		buf = l.bufferCache.GetBuffer()
+		buffer.PutBuffer(buf)
+		buf = buffer.GetBuffer()
 	}
 	if filter != nil {
 		args = filter.Filter(args)
@@ -701,8 +696,8 @@ func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter L
 	// if logr is set, we clear the generated header as we rely on the backing
 	// logr implementation to print headers
 	if logger != nil {
-		l.bufferCache.PutBuffer(buf)
-		buf = l.bufferCache.GetBuffer()
+		buffer.PutBuffer(buf)
+		buf = buffer.GetBuffer()
 	}
 	if filter != nil {
 		args = filter.Filter(args)
@@ -723,8 +718,8 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter
 	// if logr is set, we clear the generated header as we rely on the backing
 	// logr implementation to print headers
 	if logger != nil {
-		l.bufferCache.PutBuffer(buf)
-		buf = l.bufferCache.GetBuffer()
+		buffer.PutBuffer(buf)
+		buf = buffer.GetBuffer()
 	}
 	if filter != nil {
 		format, args = filter.FilterF(format, args)
@@ -744,8 +739,8 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, f
 	// if logr is set, we clear the generated header as we rely on the backing
 	// logr implementation to print headers
 	if logger != nil {
-		l.bufferCache.PutBuffer(buf)
-		buf = l.bufferCache.GetBuffer()
+		buffer.PutBuffer(buf)
+		buf = buffer.GetBuffer()
 	}
 	if filter != nil {
 		args = filter.Filter(args)
@@ -785,7 +780,7 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s
 // set log severity by s
 func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) {
 	// Only create a new buffer if we don't have one cached.
-	b := l.bufferCache.GetBuffer()
+	b := buffer.GetBuffer()
 	// The message is always quoted, even if it contains line breaks.
 	// If developers want multi-line output, they should use a small, fixed
 	// message and put the multi-line output into a value.
@@ -796,7 +791,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
 	serialize.KVListFormat(&b.Buffer, keysAndValues...)
 	l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer)
 	// Make the buffer available for reuse.
-	l.bufferCache.PutBuffer(b)
+	buffer.PutBuffer(b)
 }
 
 // redirectBuffer is used to set an alternate destination for the logs
@@ -948,7 +943,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf
 		timeoutFlush(ExitFlushTimeout)
 		OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
 	}
-	l.bufferCache.PutBuffer(buf)
+	buffer.PutBuffer(buf)
 
 	if stats := severityStats[s]; stats != nil {
 		atomic.AddInt64(&stats.lines, 1)
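
The hunks above swap loggingT's per-instance bufferCache free list for
package-level GetBuffer/PutBuffer calls in klog's internal buffer package. A
minimal sketch of that pattern built on sync.Pool (klog's real implementation
adds header-formatting fields and discards oversized buffers; names here are
illustrative):

package buffer

import (
	"bytes"
	"sync"
)

// Buffer wraps bytes.Buffer so log lines can be assembled without holding
// the logger's main mutex.
type Buffer struct {
	bytes.Buffer
}

var pool = sync.Pool{
	New: func() interface{} { return new(Buffer) },
}

// GetBuffer returns a reset buffer, reusing a pooled one when available.
func GetBuffer() *Buffer {
	b := pool.Get().(*Buffer)
	b.Reset()
	return b
}

// PutBuffer makes the buffer available for reuse.
func PutBuffer(b *Buffer) {
	pool.Put(b)
}
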
@@ -1313,6 +1308,13 @@ func newVerbose(level Level, b bool) Verbose {
 // less than or equal to the value of the -vmodule pattern matching the source file
 // containing the call.
 func V(level Level) Verbose {
+	return VDepth(1, level)
+}
+
+// VDepth is a variant of V that accepts a number of stack frames that will be
+// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to
+// V().
+func VDepth(depth int, level Level) Verbose {
 	// This function tries hard to be cheap unless there's work to do.
 	// The fast path is two atomic loads and compares.
@@ -1329,7 +1331,7 @@ func V(level Level) Verbose {
 	// but if V logging is enabled we're slow anyway.
 	logging.mu.Lock()
 	defer logging.mu.Unlock()
-	if runtime.Callers(2, logging.pcs[:]) == 0 {
+	if runtime.Callers(2+depth, logging.pcs[:]) == 0 {
 		return newVerbose(level, false)
 	}
 	// runtime.Callers returns "return PCs", but we want

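VDepth exists so that wrappers which add stack frames can still have -vmodule
patterns matched against their caller's source file. A usage sketch assuming
the klog v2.90.0 API shown in this diff; infoForCaller is a hypothetical
helper, not part of klog:

package main

import "k8s.io/klog/v2"

// infoForCaller logs msg at the given verbosity while attributing both the
// -vmodule lookup and the reported call site to infoForCaller's caller.
func infoForCaller(level klog.Level, msg string) {
	// Depth 1 skips this wrapper frame; VDepth(0, level) behaves like V(level).
	if v := klog.VDepth(1, level); v.Enabled() {
		v.InfoSDepth(1, msg)
	}
}

func main() {
	infoForCaller(2, "hello from the caller's file")
}
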
@@ -42,19 +42,21 @@ func (l *klogger) Init(info logr.RuntimeInfo) {
 	l.callDepth += info.CallDepth
 }
 
-func (l klogger) Info(level int, msg string, kvList ...interface{}) {
+func (l *klogger) Info(level int, msg string, kvList ...interface{}) {
 	merged := serialize.MergeKVs(l.values, kvList)
 	if l.prefix != "" {
 		msg = l.prefix + ": " + msg
 	}
-	V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
+	// Skip this function.
+	VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
 }
 
-func (l klogger) Enabled(level int) bool {
-	return V(Level(level)).Enabled()
+func (l *klogger) Enabled(level int) bool {
+	// Skip this function and logr.Logger.Info where Enabled is called.
+	return VDepth(l.callDepth+2, Level(level)).Enabled()
 }
 
-func (l klogger) Error(err error, msg string, kvList ...interface{}) {
+func (l *klogger) Error(err error, msg string, kvList ...interface{}) {
 	merged := serialize.MergeKVs(l.values, kvList)
 	if l.prefix != "" {
 		msg = l.prefix + ": " + msg

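The receiver change from klogger to *klogger above keeps the method set
consistent: Init already mutates callDepth through a pointer receiver, so the
logging methods move to pointer receivers too and only *klogger satisfies the
sink interface. A toy illustration with hypothetical types, not klog's code:

package main

import "fmt"

type sink struct{ callDepth int }

// Init mutates state, so it must use a pointer receiver.
func (s *sink) Init(depth int) { s.callDepth += depth }

// Info uses a pointer receiver as well, keeping the method set uniform.
func (s *sink) Info(msg string) { fmt.Println(s.callDepth, msg) }

type infoSink interface {
	Init(int)
	Info(string)
}

func main() {
	var is infoSink = &sink{} // only *sink, not sink, implements infoSink
	is.Init(2)
	is.Info("hello")
}
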
@@ -119,27 +119,28 @@ func pretty(value interface{}) string {
 	return strings.TrimSpace(string(buffer.Bytes()))
 }
 
-func (l klogger) Info(level int, msg string, kvList ...interface{}) {
+func (l *klogger) Info(level int, msg string, kvList ...interface{}) {
 	switch l.format {
 	case FormatSerialize:
 		msgStr := flatten("msg", msg)
 		merged := serialize.MergeKVs(l.values, kvList)
 		kvStr := flatten(merged...)
-		klog.V(klog.Level(level)).InfoDepth(l.callDepth+1, l.prefix, " ", msgStr, " ", kvStr)
+		klog.VDepth(l.callDepth+1, klog.Level(level)).InfoDepth(l.callDepth+1, l.prefix, " ", msgStr, " ", kvStr)
 	case FormatKlog:
 		merged := serialize.MergeKVs(l.values, kvList)
 		if l.prefix != "" {
 			msg = l.prefix + ": " + msg
 		}
-		klog.V(klog.Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
+		klog.VDepth(l.callDepth+1, klog.Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
 	}
 }
 
-func (l klogger) Enabled(level int) bool {
-	return klog.V(klog.Level(level)).Enabled()
+func (l *klogger) Enabled(level int) bool {
+	// Skip this function and logr.Logger.Info where Enabled is called.
+	return klog.VDepth(l.callDepth+2, klog.Level(level)).Enabled()
 }
 
-func (l klogger) Error(err error, msg string, kvList ...interface{}) {
+func (l *klogger) Error(err error, msg string, kvList ...interface{}) {
 	msgStr := flatten("msg", msg)
 	var loggableErr interface{}
 	if err != nil {

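The callDepth+1 and callDepth+2 arithmetic above counts the frames between
runtime.Caller and the user's call site; Enabled skips one extra frame because
logr.Logger.Info calls it internally. A toy demonstration of that accounting
(hypothetical helpers, not klog's code):

package main

import (
	"fmt"
	"runtime"
)

// caller reports the file:line that is depth frames above our caller.
func caller(depth int) string {
	// +1 skips the caller function itself, mirroring klog's callDepth+1.
	_, file, line, ok := runtime.Caller(depth + 1)
	if !ok {
		return "unknown"
	}
	return fmt.Sprintf("%s:%d", file, line)
}

// viaHelper adds one more frame, so it passes depth 1 to skip itself.
func viaHelper() string {
	return caller(1)
}

func main() {
	fmt.Println(caller(0))   // a line in main
	fmt.Println(viaHelper()) // also a line in main, not inside viaHelper
}
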
@@ -91,7 +91,7 @@ github.com/aws/amazon-ec2-instance-selector/v2/pkg/instancetypes
 github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector
 github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector/outputs
 github.com/aws/amazon-ec2-instance-selector/v2/pkg/sorter
-# github.com/aws/aws-sdk-go v1.44.183
+# github.com/aws/aws-sdk-go v1.44.188
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/arn
@@ -240,7 +240,7 @@ github.com/cpuguy83/go-md2man/v2/md2man
 # github.com/davecgh/go-spew v1.1.1
 ## explicit
 github.com/davecgh/go-spew/spew
-# github.com/digitalocean/godo v1.93.0
+# github.com/digitalocean/godo v1.95.0
 ## explicit; go 1.18
 github.com/digitalocean/godo
 github.com/digitalocean/godo/metrics
@@ -377,9 +377,10 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.12.1
-## explicit; go 1.17
+# github.com/google/go-containerregistry v0.13.0
+## explicit; go 1.18
 github.com/google/go-containerregistry/internal/and
+github.com/google/go-containerregistry/internal/compression
 github.com/google/go-containerregistry/internal/estargz
 github.com/google/go-containerregistry/internal/gzip
 github.com/google/go-containerregistry/internal/legacy
@@ -388,7 +389,9 @@ github.com/google/go-containerregistry/internal/retry
 github.com/google/go-containerregistry/internal/retry/wait
 github.com/google/go-containerregistry/internal/verify
 github.com/google/go-containerregistry/internal/windows
+github.com/google/go-containerregistry/internal/zstd
 github.com/google/go-containerregistry/pkg/authn
+github.com/google/go-containerregistry/pkg/compression
 github.com/google/go-containerregistry/pkg/crane
 github.com/google/go-containerregistry/pkg/legacy
 github.com/google/go-containerregistry/pkg/legacy/tarball
@@ -876,7 +879,7 @@ golang.org/x/crypto/scrypt
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/exp v0.0.0-20230118134722-a68e582fa157
+# golang.org/x/exp v0.0.0-20230126173853-a67bb567ff2e
 ## explicit; go 1.18
 golang.org/x/exp/constraints
 # golang.org/x/mod v0.7.0
@@ -1637,7 +1640,7 @@ k8s.io/gengo/types
 # k8s.io/klog v1.0.0
 ## explicit; go 1.12
 k8s.io/klog
-# k8s.io/klog/v2 v2.80.1
+# k8s.io/klog/v2 v2.90.0
 ## explicit; go 1.13
 k8s.io/klog/v2
 k8s.io/klog/v2/internal/buffer