mirror of https://github.com/containers/podman.git
Add `podman {image,manifest} push --sign-by-sigstore=param-file.yaml`

`podman push` and `podman manifest push` now support --sign-by-sigstore=param-file,
using the containers-sigstore-signing-params.yaml(5) file format. That notably adds
support for Fulcio and Rekor signing.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>

This commit is contained in:
parent 356f7b6c9d
commit 069edc3adf
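For illustration only: the minimal parameter file added for the e2e test in this commit
(referenced as testdata/sigstore-signing-params.yaml) signs with a pre-existing private key
and contains just:

    privateKeyFile: "testdata/sigstore-key.key"
    privateKeyPassphraseFile: "testdata/sigstore-key.key.pass"

Per the vendored params.SigningParameterFile type further below, a parameter file may instead
(or additionally) configure a fulcio: section (fulcioURL, oidcMode and the related OIDC fields)
and a rekorURL, to sign with a short-lived Fulcio-issued certificate and to upload the
signature to a Rekor transparency log.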
@@ -2,28 +2,34 @@ package common

import (
    "fmt"
    "os"

    "github.com/containers/common/pkg/ssh"
    "github.com/containers/image/v5/pkg/cli"
    "github.com/containers/image/v5/pkg/cli/sigstore"
    "github.com/containers/image/v5/signature/signer"
    "github.com/containers/podman/v4/pkg/domain/entities"
)

// PrepareSigningPassphrase updates pushOpts.SignPassphrase and SignSigstorePrivateKeyPassphrase based on a --sign-passphrase-file value signPassphraseFile,
// and validates pushOpts.Sign* consistency.
// It may interactively prompt for a passphrase if one is required and wasn’t provided otherwise.
func PrepareSigningPassphrase(pushOpts *entities.ImagePushOptions, signPassphraseFile string) error {
// PrepareSigning updates pushOpts.Signers, pushOpts.SignPassphrase and SignSigstorePrivateKeyPassphrase based on a --sign-passphrase-file
// value signPassphraseFile and a --sign-by-sigstore value signBySigstoreParamFile, and validates pushOpts.Sign* consistency.
// It may interactively prompt for a passphrase if one is required and wasn’t provided otherwise;
// or it may interactively trigger an OIDC authentication, using standard input/output, or even open a web browser.
// Returns a cleanup callback on success, which must be called when done.
func PrepareSigning(pushOpts *entities.ImagePushOptions,
    signPassphraseFile, signBySigstoreParamFile string) (func(), error) {
    // c/common/libimage.Image does allow creating both simple signing and sigstore signatures simultaneously,
    // with independent passphrases, but that would make the CLI probably too confusing.
    // For now, use the passphrase with either, but only one of them.
    if signPassphraseFile != "" && pushOpts.SignBy != "" && pushOpts.SignBySigstorePrivateKeyFile != "" {
        return fmt.Errorf("only one of --sign-by and sign-by-sigstore-private-key can be used with --sign-passphrase-file")
        return nil, fmt.Errorf("only one of --sign-by and sign-by-sigstore-private-key can be used with --sign-passphrase-file")
    }

    var passphrase string
    if signPassphraseFile != "" {
        p, err := cli.ReadPassphraseFile(signPassphraseFile)
        if err != nil {
            return err
            return nil, err
        }
        passphrase = p
    } else if pushOpts.SignBySigstorePrivateKeyFile != "" {

@@ -32,5 +38,29 @@ func PrepareSigningPassphrase(pushOpts *entities.ImagePushOptions, signPassphras
    } // pushOpts.SignBy triggers a GPG-agent passphrase prompt, possibly using a more secure channel, so we usually shouldn’t prompt ourselves if no passphrase was explicitly provided.
    pushOpts.SignPassphrase = passphrase
    pushOpts.SignSigstorePrivateKeyPassphrase = []byte(passphrase)
    return nil
    cleanup := signingCleanup{}
    if signBySigstoreParamFile != "" {
        signer, err := sigstore.NewSignerFromParameterFile(signBySigstoreParamFile, &sigstore.Options{
            PrivateKeyPassphrasePrompt: cli.ReadPassphraseFile,
            Stdin:                      os.Stdin,
            Stdout:                     os.Stdout,
        })
        if err != nil {
            return nil, err
        }
        pushOpts.Signers = append(pushOpts.Signers, signer)
        cleanup.signers = append(cleanup.signers, signer)
    }
    return cleanup.cleanup, nil
}

// signingCleanup carries state for cleanup after PrepareSigning
type signingCleanup struct {
    signers []*signer.Signer
}

func (c *signingCleanup) cleanup() {
    for _, s := range c.signers {
        s.Close()
    }
}
@@ -18,11 +18,12 @@ import (
// CLI-only fields into the API types.
type pushOptionsWrapper struct {
    entities.ImagePushOptions
    TLSVerifyCLI bool // CLI only
    CredentialsCLI string
    SignPassphraseFileCLI string
    EncryptionKeys []string
    EncryptLayers []int
    TLSVerifyCLI bool // CLI only
    CredentialsCLI string
    SignPassphraseFileCLI string
    SignBySigstoreParamFileCLI string
    EncryptionKeys []string
    EncryptLayers []int
}

var (

@@ -110,6 +111,10 @@ func pushFlags(cmd *cobra.Command) {
    flags.StringVar(&pushOptions.SignBy, signByFlagName, "", "Add a signature at the destination using the specified key")
    _ = cmd.RegisterFlagCompletionFunc(signByFlagName, completion.AutocompleteNone)

    signBySigstoreFlagName := "sign-by-sigstore"
    flags.StringVar(&pushOptions.SignBySigstoreParamFileCLI, signBySigstoreFlagName, "", "Sign the image using a sigstore parameter file at `PATH`")
    _ = cmd.RegisterFlagCompletionFunc(signBySigstoreFlagName, completion.AutocompleteDefault)

    signBySigstorePrivateKeyFlagName := "sign-by-sigstore-private-key"
    flags.StringVar(&pushOptions.SignBySigstorePrivateKeyFile, signBySigstorePrivateKeyFlagName, "", "Sign the image using a sigstore private key at `PATH`")
    _ = cmd.RegisterFlagCompletionFunc(signBySigstorePrivateKeyFlagName, completion.AutocompleteDefault)

@@ -138,6 +143,7 @@ func pushFlags(cmd *cobra.Command) {
        _ = flags.MarkHidden("digestfile")
        _ = flags.MarkHidden("quiet")
        _ = flags.MarkHidden(signByFlagName)
        _ = flags.MarkHidden(signBySigstoreFlagName)
        _ = flags.MarkHidden(signBySigstorePrivateKeyFlagName)
        _ = flags.MarkHidden(signPassphraseFileFlagName)
        _ = flags.MarkHidden(encryptionKeysFlagName)

@@ -181,9 +187,12 @@ func imagePush(cmd *cobra.Command, args []string) error {
        pushOptions.Writer = os.Stderr
    }

    if err := common.PrepareSigningPassphrase(&pushOptions.ImagePushOptions, pushOptions.SignPassphraseFileCLI); err != nil {
    signingCleanup, err := common.PrepareSigning(&pushOptions.ImagePushOptions,
        pushOptions.SignPassphraseFileCLI, pushOptions.SignBySigstoreParamFileCLI)
    if err != nil {
        return err
    }
    defer signingCleanup()

    encConfig, encLayers, err := util.EncryptConfig(pushOptions.EncryptionKeys, pushOptions.EncryptLayers)
    if err != nil {
@@ -21,9 +21,10 @@ import (
type manifestPushOptsWrapper struct {
    entities.ImagePushOptions

    TLSVerifyCLI, Insecure bool // CLI only
    CredentialsCLI string
    SignPassphraseFileCLI string
    TLSVerifyCLI, Insecure bool // CLI only
    CredentialsCLI string
    SignBySigstoreParamFileCLI string
    SignPassphraseFileCLI string
}

var (

@@ -76,6 +77,10 @@ func init() {
    flags.StringVar(&manifestPushOpts.SignBy, signByFlagName, "", "sign the image using a GPG key with the specified `FINGERPRINT`")
    _ = pushCmd.RegisterFlagCompletionFunc(signByFlagName, completion.AutocompleteNone)

    signBySigstoreFlagName := "sign-by-sigstore"
    flags.StringVar(&manifestPushOpts.SignBySigstoreParamFileCLI, signBySigstoreFlagName, "", "Sign the image using a sigstore parameter file at `PATH`")
    _ = pushCmd.RegisterFlagCompletionFunc(signBySigstoreFlagName, completion.AutocompleteDefault)

    signBySigstorePrivateKeyFlagName := "sign-by-sigstore-private-key"
    flags.StringVar(&manifestPushOpts.SignBySigstorePrivateKeyFile, signBySigstorePrivateKeyFlagName, "", "Sign the image using a sigstore private key at `PATH`")
    _ = pushCmd.RegisterFlagCompletionFunc(signBySigstorePrivateKeyFlagName, completion.AutocompleteDefault)

@@ -97,6 +102,7 @@ func init() {
    if registry.IsRemote() {
        _ = flags.MarkHidden("cert-dir")
        _ = flags.MarkHidden(signByFlagName)
        _ = flags.MarkHidden(signBySigstoreFlagName)
        _ = flags.MarkHidden(signBySigstorePrivateKeyFlagName)
        _ = flags.MarkHidden(signPassphraseFileFlagName)
    }

@@ -128,9 +134,12 @@ func push(cmd *cobra.Command, args []string) error {
        manifestPushOpts.Writer = os.Stderr
    }

    if err := common.PrepareSigningPassphrase(&manifestPushOpts.ImagePushOptions, manifestPushOpts.SignPassphraseFileCLI); err != nil {
    signingCleanup, err := common.PrepareSigning(&manifestPushOpts.ImagePushOptions,
        manifestPushOpts.SignPassphraseFileCLI, manifestPushOpts.SignBySigstoreParamFileCLI)
    if err != nil {
        return err
    }
    defer signingCleanup()

    // TLS verification in c/image is controlled via a `types.OptionalBool`
    // which allows for distinguishing among set-true, set-false, unspecified
@@ -49,6 +49,11 @@ Delete the manifest list or image index from local storage if pushing succeeds.

Sign the pushed images with a “simple signing” signature using the specified key. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines)

#### **--sign-by-sigstore**=*param-file*

Add a sigstore signature based on further options specified in a containers sigstore signing parameter file *param-file*.
See containers-sigstore-signing-params.yaml(5) for details about the file format.

#### **--sign-by-sigstore-private-key**=*path*

Sign the pushed images with a sigstore signature using a private key at the specified path. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines)

@@ -87,6 +87,11 @@ Discard any pre-existing signatures in the image.

Add a “simple signing” signature at the destination using the specified key. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines)

#### **--sign-by-sigstore**=*param-file*

Add a sigstore signature based on further options specified in a containers sigstore signing parameter file *param-file*.
See containers-sigstore-signing-params.yaml(5) for details about the file format.

#### **--sign-by-sigstore-private-key**=*path*

Add a sigstore signature at the destination using a private key at the specified path. (This option is not available with the remote Podman client, including Mac and Windows (excluding WSL2) machines)
go.mod (14 changed lines)

@@ -82,6 +82,7 @@ require (
    github.com/containerd/containerd v1.6.15 // indirect
    github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
    github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
    github.com/coreos/go-oidc/v3 v3.5.0 // indirect
    github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
    github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect

@@ -91,6 +92,7 @@ require (
    github.com/docker/docker-credential-helpers v0.7.0 // indirect
    github.com/felixge/httpsnoop v1.0.3 // indirect
    github.com/fsouza/go-dockerclient v1.9.3 // indirect
    github.com/go-jose/go-jose/v3 v3.0.0 // indirect
    github.com/go-openapi/analysis v0.21.4 // indirect
    github.com/go-openapi/errors v0.20.3 // indirect
    github.com/go-openapi/jsonpointer v0.19.5 // indirect

@@ -101,13 +103,19 @@ require (
    github.com/go-openapi/strfmt v0.21.3 // indirect
    github.com/go-openapi/swag v0.22.3 // indirect
    github.com/go-openapi/validate v0.22.0 // indirect
    github.com/go-playground/locales v0.14.0 // indirect
    github.com/go-playground/universal-translator v0.18.0 // indirect
    github.com/go-playground/validator/v10 v10.11.1 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    github.com/google/go-cmp v0.5.9 // indirect
    github.com/google/go-containerregistry v0.12.1 // indirect
    github.com/google/go-intervals v0.0.2 // indirect
    github.com/google/trillian v1.5.0 // indirect
    github.com/hashicorp/errwrap v1.1.0 // indirect
    github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
    github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
    github.com/imdario/mergo v0.3.13 // indirect
    github.com/inconshreveable/mousetrap v1.0.1 // indirect
    github.com/jinzhu/copier v0.3.5 // indirect

@@ -115,6 +123,7 @@ require (
    github.com/klauspost/compress v1.15.15 // indirect
    github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
    github.com/kr/fs v0.1.0 // indirect
    github.com/leodido/go-urn v1.2.1 // indirect
    github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf // indirect
    github.com/mailru/easyjson v0.7.7 // indirect
    github.com/manifoldco/promptui v0.9.0 // indirect

@@ -128,6 +137,7 @@ require (
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/morikuni/aec v1.0.0 // indirect
    github.com/oklog/ulid v1.3.1 // indirect
    github.com/opentracing/opentracing-go v1.2.0 // indirect
    github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pkg/sftp v1.13.5 // indirect

@@ -135,9 +145,11 @@ require (
    github.com/proglottis/gpgme v0.1.3 // indirect
    github.com/rivo/uniseg v0.4.3 // indirect
    github.com/seccomp/libseccomp-golang v0.10.0 // indirect
    github.com/segmentio/ksuid v1.0.4 // indirect
    github.com/sigstore/fulcio v1.0.0 // indirect
    github.com/sigstore/rekor v1.0.1 // indirect
    github.com/sigstore/sigstore v1.5.1 // indirect
    github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
    github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
    github.com/sylabs/sif/v2 v2.9.0 // indirect
    github.com/tchap/go-patricia v2.3.0+incompatible // indirect

@@ -153,7 +165,9 @@ require (
    go.opencensus.io v0.24.0 // indirect
    golang.org/x/crypto v0.5.0 // indirect
    golang.org/x/mod v0.7.0 // indirect
    golang.org/x/oauth2 v0.4.0 // indirect
    golang.org/x/tools v0.4.0 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
    google.golang.org/grpc v1.51.0 // indirect
    gopkg.in/square/go-jose.v2 v2.6.0 // indirect
go.sum (60 changed lines)
@ -25,6 +25,7 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
|
|||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
|
@ -291,6 +292,8 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc
|
|||
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
|
||||
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-oidc/v3 v3.5.0 h1:VxKtbccHZxs8juq7RdJntSqtXFtde9YpNpGn0yqgEHw=
|
||||
github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
|
@ -383,6 +386,7 @@ github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVB
|
|||
github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
|
||||
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
|
||||
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
|
@ -405,6 +409,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
|
|||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
|
||||
github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
|
@ -457,6 +463,15 @@ github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+
|
|||
github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
|
||||
github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y=
|
||||
github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
|
||||
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
|
||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
|
||||
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
|
||||
github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
|
||||
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
|
||||
github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
|
||||
github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
|
||||
github.com/go-rod/rod v0.112.3 h1:xbSaA9trZ8v/+eJRGOM6exK1RCsLPwwnzA78vpES0gk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
|
@ -503,6 +518,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
|||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
@ -552,6 +568,7 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
|
||||
|
@ -581,6 +598,8 @@ github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/trillian v1.5.0 h1:I5pIN18bKlXtlj1Tk919rQ3mWBU2BzNNR6JhLISGMB4=
|
||||
github.com/google/trillian v1.5.0/go.mod h1:2/gAIc+G1MUcErOPc+cSwHAQHZlGy+RYHjVGnhUQ3e8=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -617,12 +636,18 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv
|
|||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
|
||||
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
|
||||
github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo=
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
|
||||
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
|
||||
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
|
||||
github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
|
||||
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
|
||||
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
|
||||
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
|
||||
|
@ -705,6 +730,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf h1:ndns1qx/5dL43g16EQkPV/i8+b3l5bYQwLeoSBe7tS8=
|
||||
github.com/letsencrypt/boulder v0.0.0-20221109233200-85aa52084eaf/go.mod h1:aGkAgvWY/IUcVFfuly53REpfv5edu25oij+qHRFaraA=
|
||||
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
|
||||
|
@ -723,8 +750,10 @@ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsI
|
|||
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
|
||||
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
|
@ -855,6 +884,7 @@ github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP
|
|||
github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ=
|
||||
github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df h1:vf6pdI10F2Tim5a9JKiVVl4/dpNz1OEhz4EnfLdLtiA=
|
||||
github.com/openshift/imagebuilder v1.2.4-0.20220711175835-4151e43600df/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
|
||||
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
|
||||
|
@ -864,6 +894,7 @@ github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAv
|
|||
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
|
||||
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
|
@ -919,8 +950,9 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
|
|||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rootless-containers/rootlesskit v1.1.0 h1:cRaRIYxY8oce4eE/zeAUZhgKu/4tU1p9YHN4+suwV7M=
|
||||
github.com/rootless-containers/rootlesskit v1.1.0/go.mod h1:H+o9ndNe7tS91WqU0/+vpvc+VaCd7TCIWaJjnV0ujUo=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
|
@ -933,6 +965,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg
|
|||
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
|
||||
github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
|
||||
github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
||||
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
|
||||
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
|
||||
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sigstore/fulcio v1.0.0 h1:hBZW6qg9GXTtCX8jOg1hmyjYLrmsEKZGeMwAbW3XNEg=
|
||||
|
@ -952,6 +986,8 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic
|
|||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
|
@ -1064,11 +1100,15 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ
|
|||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
|
||||
github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ=
|
||||
github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
|
||||
github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
||||
|
@ -1118,6 +1158,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
|
|||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
|
@ -1125,6 +1166,7 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPh
|
|||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
|
@ -1164,6 +1206,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -1221,6 +1264,9 @@ golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
|
||||
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
|
||||
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
|
@ -1235,6 +1281,9 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
|
|||
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
|
||||
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -1247,6 +1296,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
|
@ -1339,6 +1389,7 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
@ -1346,16 +1397,19 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220909162455-aba9fc2a8ff2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
|
||||
golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
|
||||
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
@ -1367,6 +1421,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
|
||||
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
@ -1443,6 +1498,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
|
|||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -1478,6 +1534,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
|||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
|
@ -1579,6 +1636,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
|
|||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
|
|
|
@@ -7,6 +7,7 @@ import (

    "github.com/containers/common/pkg/config"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/signature/signer"
    "github.com/containers/image/v5/types"
    encconfig "github.com/containers/ocicrypt/config"
    "github.com/containers/podman/v4/pkg/inspect"

@@ -210,6 +211,10 @@ type ImagePushOptions struct {
    RemoveSignatures bool
    // SignaturePolicy to use when pulling. Ignored for remote calls.
    SignaturePolicy string
    // Signers, if non-empty, asks for signatures to be added during the copy
    // using the provided signers.
    // Rejected for remote calls.
    Signers []*signer.Signer
    // SignBy adds a signature at the destination using the specified key.
    // Ignored for remote calls.
    SignBy string
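Illustrative sketch only, not part of this commit: it shows how a Go caller of the podman
domain API (rather than the CLI path above) might populate the new Signers field, using the
vendored c/image helper from this commit; the parameter-file name is hypothetical.

package signingexample

import (
    "os"

    "github.com/containers/image/v5/pkg/cli"
    "github.com/containers/image/v5/pkg/cli/sigstore"
    "github.com/containers/podman/v4/pkg/domain/entities"
)

// preparePushOptions builds push options that sign with a sigstore parameter file,
// mirroring what PrepareSigning does in the CLI code above. The returned cleanup
// function must be called after the push completes.
func preparePushOptions(paramFile string) (*entities.ImagePushOptions, func(), error) {
    s, err := sigstore.NewSignerFromParameterFile(paramFile, &sigstore.Options{
        PrivateKeyPassphrasePrompt: cli.ReadPassphraseFile,
        Stdin:                      os.Stdin,
        Stdout:                     os.Stdout,
    })
    if err != nil {
        return nil, nil, err
    }
    opts := &entities.ImagePushOptions{}
    opts.Signers = append(opts.Signers, s) // rejected by the remote (tunnel) image engine
    return opts, func() { s.Close() }, nil
}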
@@ -306,6 +306,7 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri
    pushOptions.RemoveSignatures = options.RemoveSignatures
    pushOptions.PolicyAllowStorage = true
    pushOptions.SignaturePolicyPath = options.SignaturePolicy
    pushOptions.Signers = options.Signers
    pushOptions.SignBy = options.SignBy
    pushOptions.SignPassphrase = options.SignPassphrase
    pushOptions.SignBySigstorePrivateKeyFile = options.SignBySigstorePrivateKeyFile

@@ -333,6 +333,7 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin
    pushOptions.ImageListSelection = cp.CopySpecificImages
    pushOptions.ManifestMIMEType = manifestType
    pushOptions.RemoveSignatures = opts.RemoveSignatures
    pushOptions.Signers = opts.Signers
    pushOptions.SignBy = opts.SignBy
    pushOptions.SignPassphrase = opts.SignPassphrase
    pushOptions.SignBySigstorePrivateKeyFile = opts.SignBySigstorePrivateKeyFile

@@ -244,6 +244,9 @@ func (ir *ImageEngine) Import(ctx context.Context, opts entities.ImageImportOpti
}

func (ir *ImageEngine) Push(ctx context.Context, source string, destination string, opts entities.ImagePushOptions) error {
    if opts.Signers != nil {
        return fmt.Errorf("forwarding Signers is not supported for remote clients")
    }
    if opts.OciEncryptConfig != nil {
        return fmt.Errorf("encryption is not supported for remote clients")
    }

@@ -130,6 +130,10 @@ func (ir *ImageEngine) ManifestRm(ctx context.Context, names []string) (*entitie

// ManifestPush pushes a manifest list or image index to the destination
func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination string, opts entities.ImagePushOptions) (string, error) {
    if opts.Signers != nil {
        return "", fmt.Errorf("forwarding Signers is not supported for remote clients")
    }

    options := new(images.PushOptions)
    options.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithRemoveSignatures(opts.RemoveSignatures).WithAll(opts.All).WithFormat(opts.Format).WithCompressionFormat(opts.CompressionFormat).WithQuiet(opts.Quiet).WithProgressWriter(opts.Writer)
@@ -1018,6 +1018,12 @@ func generatePolicyFile(tempDir string) string {
                "type": "sigstoreSigned",
                "keyPath": "testdata/sigstore-key.pub"
            }
        ],
        "localhost:5000/sigstore-signed-params": [
            {
                "type": "sigstoreSigned",
                "keyPath": "testdata/sigstore-key.pub"
            }
        ]
    }
}

@@ -149,7 +149,7 @@ var _ = Describe("Podman push", func() {
        }

        if !IsRemote() { // Remote does not support signing
            By("pushing and pulling with sigstore signatures")
            By("pushing and pulling with --sign-by-sigstore-private-key")
            // Ideally, this should set SystemContext.RegistriesDirPath, but Podman currently doesn’t
            // expose that as an option. So, for now, modify /etc/ directly, and skip testing sigstore if
            // we don’t have permission to do so.

@@ -187,6 +187,28 @@ var _ = Describe("Podman push", func() {
            pull = podmanTest.Podman([]string{"pull", "-q", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5000/sigstore-signed"})
            pull.WaitWithDefaultTimeout()
            Expect(pull).Should(Exit(0))

            By("pushing and pulling with --sign-by-sigstore")
            // Verify that the policy rejects unsigned images
            push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", ALPINE, "localhost:5000/sigstore-signed-params"})
            push.WaitWithDefaultTimeout()
            Expect(push).Should(Exit(0))
            Expect(push.ErrorToString()).To(BeEmpty())

            pull = podmanTest.Podman([]string{"pull", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5000/sigstore-signed-params"})
            pull.WaitWithDefaultTimeout()
            Expect(pull).To(ExitWithError())
            Expect(pull.ErrorToString()).To(ContainSubstring("A signature was required, but no signature exists"))

            // Sign an image, and verify it is accepted.
            push = podmanTest.Podman([]string{"push", "-q", "--tls-verify=false", "--remove-signatures", "--sign-by-sigstore", "testdata/sigstore-signing-params.yaml", ALPINE, "localhost:5000/sigstore-signed-params"})
            push.WaitWithDefaultTimeout()
            Expect(push).Should(Exit(0))
            Expect(push.ErrorToString()).To(BeEmpty())

            pull = podmanTest.Podman([]string{"pull", "--tls-verify=false", "--signature-policy", policyPath, "localhost:5000/sigstore-signed-params"})
            pull.WaitWithDefaultTimeout()
            Expect(pull).Should(Exit(0))
        }
    }
})
@@ -1,3 +1,5 @@
docker:
    localhost:5000/sigstore-signed:
        use-sigstore-attachments: true
    localhost:5000/sigstore-signed-params:
        use-sigstore-attachments: true

@@ -0,0 +1,2 @@
privateKeyFile: "testdata/sigstore-key.key"
privateKeyPassphraseFile: "testdata/sigstore-key.key.pass"
vendor/github.com/containers/image/v5/pkg/cli/sigstore/params/sigstore.go (new file, 75 lines; generated, vendored)

@@ -0,0 +1,75 @@
package params

import (
    "bytes"
    "fmt"
    "os"

    "gopkg.in/yaml.v3"
)

// SigningParameterFile collects parameters used for creating sigstore signatures.
//
// To consume such a file, most callers should use c/image/pkg/cli/sigstore instead
// of dealing with this type explicitly using ParseFile.
//
// This type is exported primarily to allow creating parameter files programmatically
// (and eventually this subpackage should provide an API to convert this type into
// the appropriate file contents, so that callers don’t need to do that manually).
type SigningParameterFile struct {
    // Keep this in sync with docs/containers-sigstore-signing-params.yaml.5.md !

    PrivateKeyFile           string `yaml:"privateKeyFile,omitempty"`           // If set, sign using a private key stored in this file.
    PrivateKeyPassphraseFile string `yaml:"privateKeyPassphraseFile,omitempty"` // A file that contains the passphrase required for PrivateKeyFile.

    Fulcio *SigningParameterFileFulcio `yaml:"fulcio,omitempty"` // If set, sign using a short-lived key and a Fulcio-issued certificate.

    RekorURL string `yaml:"rekorURL,omitempty"` // If set, upload the signature to the specified Rekor server, and include a log inclusion proof in the signature.
}

// SigningParameterFileFulcio is a subset of SigningParameterFile dedicated to Fulcio parameters.
type SigningParameterFileFulcio struct {
    // Keep this in sync with docs/containers-sigstore-signing-params.yaml.5.md !

    FulcioURL string `yaml:"fulcioURL,omitempty"` // URL of the Fulcio server. Required.

    // How to obtain the OIDC ID token required by Fulcio. Required.
    OIDCMode OIDCMode `yaml:"oidcMode,omitempty"`

    // oidcMode = staticToken
    OIDCIDToken string `yaml:"oidcIDToken,omitempty"`

    // oidcMode = deviceGrant || interactive
    OIDCIssuerURL    string `yaml:"oidcIssuerURL,omitempty"` //
    OIDCClientID     string `yaml:"oidcClientID,omitempty"`
    OIDCClientSecret string `yaml:"oidcClientSecret,omitempty"`
}

type OIDCMode string

const (
    // OIDCModeStaticToken means the parameter file contains a user-provided OIDC ID token value.
    OIDCModeStaticToken OIDCMode = "staticToken"
    // OIDCModeDeviceGrant specifies the OIDC ID token should be obtained using a device authorization grant (RFC 8628).
    OIDCModeDeviceGrant OIDCMode = "deviceGrant"
    // OIDCModeInteractive specifies the OIDC ID token should be obtained interactively (automatically opening a browser,
    // or interactively prompting the user.)
    OIDCModeInteractive OIDCMode = "interactive"
)

// ParseFile parses a SigningParameterFile at the specified path.
//
// Most consumers of the parameter file should use c/image/pkg/cli/sigstore to obtain a *signer.Signer instead.
func ParseFile(path string) (*SigningParameterFile, error) {
    var res SigningParameterFile
    source, err := os.ReadFile(path)
    if err != nil {
        return nil, fmt.Errorf("reading %q: %w", path, err)
    }
    dec := yaml.NewDecoder(bytes.NewReader(source))
    dec.KnownFields(true)
    if err = dec.Decode(&res); err != nil {
        return nil, fmt.Errorf("parsing %q: %w", path, err)
    }
    return &res, nil
}
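Illustrative sketch only, not part of this commit: as the comment on SigningParameterFile notes,
the type can be used to create parameter files programmatically, which today means marshalling it
with gopkg.in/yaml.v3 directly. The server URLs, client ID, and output file name below are
hypothetical placeholders.

package main

import (
    "fmt"
    "os"

    "github.com/containers/image/v5/pkg/cli/sigstore/params"
    "gopkg.in/yaml.v3"
)

func main() {
    // Request Fulcio-based signing with interactive OIDC, plus Rekor upload.
    p := params.SigningParameterFile{
        Fulcio: &params.SigningParameterFileFulcio{
            FulcioURL:     "https://fulcio.example.com",       // hypothetical Fulcio server
            OIDCMode:      params.OIDCModeInteractive,
            OIDCIssuerURL: "https://oauth2.example.com/auth",  // hypothetical OIDC issuer
            OIDCClientID:  "sigstore",
        },
        RekorURL: "https://rekor.example.com", // hypothetical Rekor server
    }
    data, err := yaml.Marshal(&p)
    if err != nil {
        panic(err)
    }
    // The resulting file can then be passed to podman push --sign-by-sigstore=<file>.
    if err := os.WriteFile("sigstore-params.yaml", data, 0o600); err != nil {
        panic(err)
    }
    fmt.Println(string(data))
}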
117
vendor/github.com/containers/image/v5/pkg/cli/sigstore/sigstore.go
generated
vendored
Normal file
117
vendor/github.com/containers/image/v5/pkg/cli/sigstore/sigstore.go
generated
vendored
Normal file
|
@ -0,0 +1,117 @@
|
|||
package sigstore

import (
	"errors"
	"fmt"
	"io"
	"net/url"

	"github.com/containers/image/v5/pkg/cli"
	"github.com/containers/image/v5/pkg/cli/sigstore/params"
	"github.com/containers/image/v5/signature/signer"
	"github.com/containers/image/v5/signature/sigstore"
	"github.com/containers/image/v5/signature/sigstore/fulcio"
	"github.com/containers/image/v5/signature/sigstore/rekor"
)

// Options collects data that the caller should provide to NewSignerFromParameterFile.
// The caller should set all fields unless documented otherwise.
type Options struct {
	PrivateKeyPassphrasePrompt func(keyFile string) (string, error) // A function to call to interactively prompt for a passphrase
	Stdin                      io.Reader
	Stdout                     io.Writer
}

// NewSignerFromParameterFile returns a signature.Signer which creates sigstore signatures based on a parameter file at the specified path.
//
// The caller must call Close() on the returned Signer.
func NewSignerFromParameterFile(path string, options *Options) (*signer.Signer, error) {
	params, err := params.ParseFile(path)
	if err != nil {
		return nil, fmt.Errorf("setting up signing using parameter file %q: %w", path, err)
	}
	return newSignerFromParameterData(params, options)
}

// newSignerFromParameterData returns a signature.Signer which creates sigstore signatures based on parameter file contents.
//
// The caller must call Close() on the returned Signer.
func newSignerFromParameterData(params *params.SigningParameterFile, options *Options) (*signer.Signer, error) {
	opts := []sigstore.Option{}
	if params.PrivateKeyFile != "" {
		var getPassphrase func(keyFile string) (string, error)
		switch {
		case params.PrivateKeyPassphraseFile != "":
			getPassphrase = func(_ string) (string, error) {
				return cli.ReadPassphraseFile(params.PrivateKeyPassphraseFile)
			}
		case options.PrivateKeyPassphrasePrompt != nil:
			getPassphrase = options.PrivateKeyPassphrasePrompt
		default: // This shouldn’t happen, the caller is expected to set options.PrivateKeyPassphrasePrompt
			return nil, fmt.Errorf("private key %s specified, but no way to get a passphrase", params.PrivateKeyFile)
		}
		passphrase, err := getPassphrase(params.PrivateKeyFile)
		if err != nil {
			return nil, err
		}
		opts = append(opts, sigstore.WithPrivateKeyFile(params.PrivateKeyFile, []byte(passphrase)))
	}

	if params.Fulcio != nil {
		fulcioOpt, err := fulcioOption(params.Fulcio, options)
		if err != nil {
			return nil, err
		}
		opts = append(opts, fulcioOpt)
	}

	if params.RekorURL != "" {
		rekorURL, err := url.Parse(params.RekorURL)
		if err != nil {
			return nil, fmt.Errorf("parsing rekorURL %q: %w", params.RekorURL, err)
		}
		opts = append(opts, rekor.WithRekor(rekorURL))
	}

	return sigstore.NewSigner(opts...)
}

// fulcioOption returns a sigstore.Option for Fulcio use based on f.
func fulcioOption(f *params.SigningParameterFileFulcio, options *Options) (sigstore.Option, error) {
	if f.FulcioURL == "" {
		return nil, errors.New("missing fulcioURL")
	}
	fulcioURL, err := url.Parse(f.FulcioURL)
	if err != nil {
		return nil, fmt.Errorf("parsing fulcioURL %q: %w", f.FulcioURL, err)
	}

	if f.OIDCMode == params.OIDCModeStaticToken {
		if f.OIDCIDToken == "" {
			return nil, errors.New("missing oidcToken")
		}
		return fulcio.WithFulcioAndPreexistingOIDCIDToken(fulcioURL, f.OIDCIDToken), nil
	}

	if f.OIDCIssuerURL == "" {
		return nil, errors.New("missing oidcIssuerURL")
	}
	oidcIssuerURL, err := url.Parse(f.OIDCIssuerURL)
	if err != nil {
		return nil, fmt.Errorf("parsing oidcIssuerURL %q: %w", f.OIDCIssuerURL, err)
	}
	switch f.OIDCMode {
	case params.OIDCModeDeviceGrant:
		return fulcio.WithFulcioAndDeviceAuthorizationGrantOIDC(fulcioURL, oidcIssuerURL, f.OIDCClientID, f.OIDCClientSecret,
			options.Stdout), nil
	case params.OIDCModeInteractive:
		return fulcio.WithFulcioAndInteractiveOIDC(fulcioURL, oidcIssuerURL, f.OIDCClientID, f.OIDCClientSecret,
			options.Stdin, options.Stdout), nil
	case "":
		return nil, errors.New("missing oidcMode")
	case params.OIDCModeStaticToken:
		return nil, errors.New("internal inconsistency: SigningParameterFileOIDCModeStaticToken was supposed to already be handled")
	default:
		return nil, fmt.Errorf("unknown oidcMode value %q", f.OIDCMode)
	}
}
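A minimal sketch of how a caller might wire NewSignerFromParameterFile up (this is not code from the commit; the parameter-file path and the choice of passphrase callback are assumptions for illustration):

package main

import (
	"fmt"
	"os"

	"github.com/containers/image/v5/pkg/cli"
	"github.com/containers/image/v5/pkg/cli/sigstore"
)

func main() {
	paramFile := "param-file.yaml" // hypothetical path supplied by the user

	s, err := sigstore.NewSignerFromParameterFile(paramFile, &sigstore.Options{
		// For this sketch, reuse the passphrase-file reader as the "prompt";
		// an interactive CLI would read from the terminal instead.
		PrivateKeyPassphrasePrompt: cli.ReadPassphraseFile,
		Stdin:                      os.Stdin,
		Stdout:                     os.Stdout,
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer s.Close() // required by the API contract documented above

	// The *signer.Signer would now be handed to the image copy/push code
	// that actually creates and attaches the sigstore signature.
	fmt.Println("sigstore signer ready")
}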
vendor/github.com/containers/image/v5/signature/sigstore/fulcio/fulcio.go (new file, 155 lines, generated, vendored)

@@ -0,0 +1,155 @@
package fulcio
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
|
||||
"github.com/containers/image/v5/internal/useragent"
|
||||
"github.com/containers/image/v5/signature/sigstore/internal"
|
||||
"github.com/sigstore/fulcio/pkg/api"
|
||||
"github.com/sigstore/sigstore/pkg/oauth"
|
||||
"github.com/sigstore/sigstore/pkg/oauthflow"
|
||||
sigstoreSignature "github.com/sigstore/sigstore/pkg/signature"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// setupSignerWithFulcio updates s with a certificate generated by fulcioURL based on oidcIDToken
|
||||
func setupSignerWithFulcio(s *internal.SigstoreSigner, fulcioURL *url.URL, oidcIDToken *oauthflow.OIDCIDToken) error {
|
||||
// ECDSA-P256 is the only interoperable algorithm per
|
||||
// https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#signature-schemes .
|
||||
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("generating short-term private key: %w", err)
|
||||
}
|
||||
keyAlgorithm := "ecdsa"
|
||||
// SHA-256 is opencontainers/go-digest.Canonical, thus the algorithm to use here as well per
|
||||
// https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md#hashing-algorithms
|
||||
signer, err := sigstoreSignature.LoadECDSASigner(privateKey, crypto.SHA256)
|
||||
if err != nil {
|
||||
return fmt.Errorf("initializing short-term private key: %w", err)
|
||||
}
|
||||
s.PrivateKey = signer
|
||||
|
||||
logrus.Debugf("Requesting a certificate from Fulcio at %s", fulcioURL.Redacted())
|
||||
fulcioClient := api.NewClient(fulcioURL, api.WithUserAgent(useragent.DefaultUserAgent))
|
||||
// Sign the email address as part of the request
|
||||
h := sha256.Sum256([]byte(oidcIDToken.Subject))
|
||||
keyOwnershipProof, err := ecdsa.SignASN1(rand.Reader, privateKey, h[:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error signing key ownership proof: %w", err)
|
||||
}
|
||||
publicKeyBytes, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("converting public key to ASN.1: %w", err)
|
||||
}
|
||||
// Note that unlike most OAuth2 uses, this passes the ID token, not an access token.
|
||||
// This is only secure if every Fulcio server has an individual client ID value
|
||||
// = fulcioOIDCClientID, distinct from other Fulcio servers,
|
||||
// that is embedded into the ID token’s "aud" field.
|
||||
resp, err := fulcioClient.SigningCert(api.CertificateRequest{
|
||||
PublicKey: api.Key{
|
||||
Content: publicKeyBytes,
|
||||
Algorithm: keyAlgorithm,
|
||||
},
|
||||
SignedEmailAddress: keyOwnershipProof,
|
||||
}, oidcIDToken.RawString)
|
||||
if err != nil {
|
||||
return fmt.Errorf("obtaining certificate from Fulcio: %w", err)
|
||||
}
|
||||
s.FulcioGeneratedCertificate = resp.CertPEM
|
||||
s.FulcioGeneratedCertificateChain = resp.ChainPEM
|
||||
// Cosign goes through an unmarshal/marshal roundtrip for Fulcio-generated certificates, let’s not do that.
|
||||
s.SigningKeyOrCert = resp.CertPEM
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithFulcioAndPreexistingOIDCIDToken sets up signing to use a short-lived key and a Fulcio-issued certificate
|
||||
// based on a caller-provided OIDC ID token.
|
||||
func WithFulcioAndPreexistingOIDCIDToken(fulcioURL *url.URL, oidcIDToken string) internal.Option {
|
||||
return func(s *internal.SigstoreSigner) error {
|
||||
if s.PrivateKey != nil {
|
||||
return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
|
||||
}
|
||||
|
||||
// This adds dependencies even just to parse the token. We could possibly reimplement that, and split this variant
|
||||
// into a subpackage without the OIDC dependencies… but really, is this going to be used in significantly different situations
|
||||
// than the two interactive OIDC authentication workflows?
|
||||
//
|
||||
// Are there any widely used tools to manually obtain an ID token? Why would there be?
|
||||
// For long-term usage, users provisioning a static OIDC credential might just as well provision an already-generated certificate
|
||||
// or something like that.
|
||||
logrus.Debugf("Using a statically-provided OIDC token")
|
||||
staticTokenGetter := oauthflow.StaticTokenGetter{RawToken: oidcIDToken}
|
||||
oidcIDToken, err := staticTokenGetter.GetIDToken(nil, oauth2.Config{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing OIDC token: %w", err)
|
||||
}
|
||||
|
||||
return setupSignerWithFulcio(s, fulcioURL, oidcIDToken)
|
||||
}
|
||||
}
|
||||
|
||||
// WithFulcioAndDeviceAuthorizationGrantOIDC sets up signing to use a short-lived key and a Fulcio-issued certificate
|
||||
// based on an OIDC ID token obtained using a device authorization grant (RFC 8628).
|
||||
//
|
||||
// interactiveOutput must be directly accesible to a human user in real time (i.e. not be just a log file).
|
||||
func WithFulcioAndDeviceAuthorizationGrantOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oidcClientID, oidcClientSecret string,
|
||||
interactiveOutput io.Writer) internal.Option {
|
||||
return func(s *internal.SigstoreSigner) error {
|
||||
if s.PrivateKey != nil {
|
||||
return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
|
||||
}
|
||||
|
||||
logrus.Debugf("Starting OIDC device flow for issuer %s", oidcIssuerURL.Redacted())
|
||||
tokenGetter := oauthflow.NewDeviceFlowTokenGetterForIssuer(oidcIssuerURL.String())
|
||||
tokenGetter.MessagePrinter = func(s string) {
|
||||
fmt.Fprintln(interactiveOutput, s)
|
||||
}
|
||||
oidcIDToken, err := oauthflow.OIDConnect(oidcIssuerURL.String(), oidcClientID, oidcClientSecret, "", tokenGetter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error authenticating with OIDC: %w", err)
|
||||
}
|
||||
|
||||
return setupSignerWithFulcio(s, fulcioURL, oidcIDToken)
|
||||
}
|
||||
}
|
||||
|
||||
// WithFulcioAndInterativeOIDC sets up signing to use a short-lived key and a Fulcio-issued certificate
|
||||
// based on an interactively-obtained OIDC ID token.
|
||||
// The token is obtained
|
||||
// - directly using a browser, listening on localhost, automatically opening a browser to the OIDC issuer,
|
||||
// to be redirected on localhost. (I.e. the current environment must allow launching a browser that connect back to the current process;
|
||||
// either or both may be impossible in a container or a remote VM).
|
||||
// - or by instructing the user to manually open a browser, obtain the OIDC code, and interactively input it as text.
|
||||
//
|
||||
// interactiveInput and interactiveOutput must both be directly operable by a human user in real time (i.e. not be just a log file).
|
||||
func WithFulcioAndInteractiveOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oidcClientID, oidcClientSecret string,
|
||||
interactiveInput io.Reader, interactiveOutput io.Writer) internal.Option {
|
||||
return func(s *internal.SigstoreSigner) error {
|
||||
if s.PrivateKey != nil {
|
||||
return fmt.Errorf("multiple private key sources specified when preparing to create sigstore signatures")
|
||||
}
|
||||
|
||||
logrus.Debugf("Starting interactive OIDC authentication for issuer %s", oidcIssuerURL.Redacted())
|
||||
// This is intended to match oauthflow.DefaultIDTokenGetter, overriding only input/output
|
||||
tokenGetter := &oauthflow.InteractiveIDTokenGetter{
|
||||
HTMLPage: oauth.InteractiveSuccessHTML,
|
||||
Input: interactiveInput,
|
||||
Output: interactiveOutput,
|
||||
}
|
||||
oidcIDToken, err := oauthflow.OIDConnect(oidcIssuerURL.String(), oidcClientID, oidcClientSecret, "", tokenGetter)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error authenticating with OIDC: %w", err)
|
||||
}
|
||||
|
||||
return setupSignerWithFulcio(s, fulcioURL, oidcIDToken)
|
||||
}
|
||||
}
|
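The parameter file is the high-level entry point, but the options above can also be composed directly, mirroring what newSignerFromParameterData does. A hedged sketch (the URLs are placeholders, and the OIDC ID token is assumed to have been obtained out of band):

package main

import (
	"fmt"
	"net/url"
	"os"

	"github.com/containers/image/v5/signature/sigstore"
	"github.com/containers/image/v5/signature/sigstore/fulcio"
	"github.com/containers/image/v5/signature/sigstore/rekor"
)

func main() {
	fulcioURL, _ := url.Parse("https://fulcio.example.com") // placeholder Fulcio server
	rekorURL, _ := url.Parse("https://rekor.example.com")   // placeholder Rekor server
	idToken := os.Getenv("OIDC_ID_TOKEN")                   // assumed to be provided by the environment

	s, err := sigstore.NewSigner(
		fulcio.WithFulcioAndPreexistingOIDCIDToken(fulcioURL, idToken),
		rekor.WithRekor(rekorURL),
	)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer s.Close()
	fmt.Println("signer with Fulcio certificate and Rekor upload configured")
}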
vendor/github.com/containers/image/v5/signature/sigstore/rekor/leveled_logger.go (new file, 52 lines, generated, vendored)

@@ -0,0 +1,52 @@
package rekor

import (
	"fmt"

	"github.com/hashicorp/go-retryablehttp"
	"github.com/sirupsen/logrus"
)

// leveledLogger adapts our use of logrus to the expected go-retryablehttp.LeveledLogger interface.
type leveledLogger struct {
	logger *logrus.Logger
}

func leveledLoggerForLogrus(logger *logrus.Logger) retryablehttp.LeveledLogger {
	return &leveledLogger{logger: logger}
}

// log is the actual conversion implementation
func (l *leveledLogger) log(level logrus.Level, msg string, keysAndValues []interface{}) {
	fields := logrus.Fields{}
	for i := 0; i < len(keysAndValues)-1; i += 2 {
		key := keysAndValues[i]
		keyString, isString := key.(string)
		if !isString {
			// It seems attractive to panic() here, but we might already be in a failure state, so let’s not make it worse
			keyString = fmt.Sprintf("[Invalid LeveledLogger key %#v]", key)
		}
		fields[keyString] = keysAndValues[i+1]
	}
	l.logger.WithFields(fields).Log(level, msg)
}

// Debug implements retryablehttp.LeveledLogger
func (l *leveledLogger) Debug(msg string, keysAndValues ...interface{}) {
	l.log(logrus.DebugLevel, msg, keysAndValues)
}

// Error implements retryablehttp.LeveledLogger
func (l *leveledLogger) Error(msg string, keysAndValues ...interface{}) {
	l.log(logrus.ErrorLevel, msg, keysAndValues)
}

// Info implements retryablehttp.LeveledLogger
func (l *leveledLogger) Info(msg string, keysAndValues ...interface{}) {
	l.log(logrus.InfoLevel, msg, keysAndValues)
}

// Warn implements retryablehttp.LeveledLogger
func (l *leveledLogger) Warn(msg string, keysAndValues ...interface{}) {
	l.log(logrus.WarnLevel, msg, keysAndValues)
}
vendor/github.com/containers/image/v5/signature/sigstore/rekor/rekor.go (new file, 160 lines, generated, vendored)

@@ -0,0 +1,160 @@
package rekor

import (
	"context"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"net/url"
	"strings"

	"github.com/containers/image/v5/signature/internal"
	signerInternal "github.com/containers/image/v5/signature/sigstore/internal"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	rekor "github.com/sigstore/rekor/pkg/client"
	"github.com/sigstore/rekor/pkg/generated/client"
	"github.com/sigstore/rekor/pkg/generated/client/entries"
	"github.com/sigstore/rekor/pkg/generated/models"
	"github.com/sirupsen/logrus"
)

// WithRekor asks the generated signature to be uploaded to the specified Rekor server,
// and to include a log inclusion proof in the signature.
func WithRekor(rekorURL *url.URL) signerInternal.Option {
	return func(s *signerInternal.SigstoreSigner) error {
		logrus.Debugf("Using Rekor server at %s", rekorURL.Redacted())
		client, err := rekor.GetRekorClient(rekorURL.String(),
			rekor.WithLogger(leveledLoggerForLogrus(logrus.StandardLogger())))
		if err != nil {
			return fmt.Errorf("creating Rekor client: %w", err)
		}
		u := uploader{
			client: client,
		}
		s.RekorUploader = u.uploadKeyOrCert
		return nil
	}
}

// uploader wraps a Rekor client, basically so that we can set RekorUploader to a method instead of a one-off closure.
type uploader struct {
	client *client.Rekor
}

// rekorEntryToSET converts a Rekor log entry into a sigstore “signed entry timestamp”.
func rekorEntryToSET(entry *models.LogEntryAnon) (internal.UntrustedRekorSET, error) {
	// We could plausibly call entry.Validate() here; that mostly just uses unnecessary reflection instead of direct == nil checks.
	// Right now the only extra validation .Validate() does is *entry.LogIndex >= 0 and a regex check on *entry.LogID;
	// we don’t particularly care about either of these (notably signature verification only uses the Body value).
	if entry.Verification == nil || entry.IntegratedTime == nil || entry.LogIndex == nil || entry.LogID == nil {
		return internal.UntrustedRekorSET{}, fmt.Errorf("invalid Rekor entry (missing data): %#v", *entry)
	}
	bodyBase64, ok := entry.Body.(string)
	if !ok {
		return internal.UntrustedRekorSET{}, fmt.Errorf("unexpected Rekor entry body type: %#v", entry.Body)
	}
	body, err := base64.StdEncoding.DecodeString(bodyBase64)
	if err != nil {
		return internal.UntrustedRekorSET{}, fmt.Errorf("error parsing Rekor entry body: %w", err)
	}
	payloadJSON, err := internal.UntrustedRekorPayload{
		Body:           body,
		IntegratedTime: *entry.IntegratedTime,
		LogIndex:       *entry.LogIndex,
		LogID:          *entry.LogID,
	}.MarshalJSON()
	if err != nil {
		return internal.UntrustedRekorSET{}, err
	}

	return internal.UntrustedRekorSET{
		UntrustedSignedEntryTimestamp: entry.Verification.SignedEntryTimestamp,
		UntrustedPayload:              payloadJSON,
	}, nil
}

// uploadEntry ensures proposedEntry exists in Rekor (usually uploading it), and returns the resulting log entry.
func (u *uploader) uploadEntry(ctx context.Context, proposedEntry models.ProposedEntry) (models.LogEntry, error) {
	params := entries.NewCreateLogEntryParamsWithContext(ctx)
	params.SetProposedEntry(proposedEntry)
	logrus.Debugf("Calling Rekor's CreateLogEntry")
	resp, err := u.client.Entries.CreateLogEntry(params)
	if err != nil {
		// In ordinary operation, we should not get duplicate entries, because our payload contains a timestamp,
		// so it is supposed to be unique; and the default key format, ECDSA p256, also contains a nonce.
		// But conflicts can fairly easily happen during debugging and experimentation, so it pays to handle this.
		var conflictErr *entries.CreateLogEntryConflict
		if errors.As(err, &conflictErr) && conflictErr.Location != "" {
			location := conflictErr.Location.String()
			logrus.Debugf("CreateLogEntry reported a conflict, location = %s", location)
			// We might be able to just GET the returned Location, but let’s use the generated API client.
			// OTOH that requires us to hard-code the URI structure…
			uuidDelimiter := strings.LastIndexByte(location, '/')
			if uuidDelimiter != -1 { // Otherwise the URI is unexpected, and fall through to the bottom
				uuid := location[uuidDelimiter+1:]
				logrus.Debugf("Calling Rekor's NewGetLogEntryByUUIDParamsWithContext")
				params2 := entries.NewGetLogEntryByUUIDParamsWithContext(ctx)
				params2.SetEntryUUID(uuid)
				resp2, err := u.client.Entries.GetLogEntryByUUID(params2)
				if err != nil {
					return nil, fmt.Errorf("Error re-loading previously-created log entry with UUID %s: %w", uuid, err)
				}
				return resp2.GetPayload(), nil
			}
		}
		return nil, fmt.Errorf("Error uploading a log entry: %w", err)
	}
	return resp.GetPayload(), nil
}

// uploadKeyOrCert integrates this code into sigstore/internal.Signer.
// Given components of the created signature, it returns a SET that should be added to the signature.
func (u *uploader) uploadKeyOrCert(ctx context.Context, keyOrCertBytes []byte, signatureBytes []byte, payloadBytes []byte) ([]byte, error) {
	payloadHash := sha256.Sum256(payloadBytes) // HashedRecord only accepts SHA-256
	proposedEntry := models.Hashedrekord{
		APIVersion: swag.String(internal.HashedRekordV001APIVersion),
		Spec: models.HashedrekordV001Schema{
			Data: &models.HashedrekordV001SchemaData{
				Hash: &models.HashedrekordV001SchemaDataHash{
					Algorithm: swag.String(models.HashedrekordV001SchemaDataHashAlgorithmSha256),
					Value:     swag.String(hex.EncodeToString(payloadHash[:])),
				},
			},
			Signature: &models.HashedrekordV001SchemaSignature{
				Content: strfmt.Base64(signatureBytes),
				PublicKey: &models.HashedrekordV001SchemaSignaturePublicKey{
					Content: strfmt.Base64(keyOrCertBytes),
				},
			},
		},
	}

	uploadedPayload, err := u.uploadEntry(ctx, &proposedEntry)
	if err != nil {
		return nil, err
	}

	if len(uploadedPayload) != 1 {
		return nil, fmt.Errorf("expected 1 Rekor entry, got %d", len(uploadedPayload))
	}
	var storedEntry *models.LogEntryAnon
	// This “loop” extracts the single value from the uploadedPayload map.
	for _, p := range uploadedPayload {
		storedEntry = &p
		break
	}

	rekorBundle, err := rekorEntryToSET(storedEntry)
	if err != nil {
		return nil, err
	}
	rekorSETBytes, err := json.Marshal(rekorBundle)
	if err != nil {
		return nil, err
	}
	return rekorSETBytes, nil
}
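As the comment above notes, the hashedrekord entry only ever carries a SHA-256 digest of the signature payload. A tiny standalone sketch of that hashing step (the payload bytes are a stand-in, not a real signature payload); the resulting hex string is what uploadKeyOrCert places into Spec.Data.Hash.Value:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	payload := []byte(`{"critical":{}}`) // stand-in for the signature payload bytes
	sum := sha256.Sum256(payload)
	fmt.Println(hex.EncodeToString(sum[:])) // the value stored in the proposed hashedrekord entry
}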
@@ -0,0 +1,202 @@

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@@ -0,0 +1,5 @@

CoreOS Project
Copyright 2014 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
@@ -0,0 +1,16 @@

package oidc

// JOSE asymmetric signing algorithm values as defined by RFC 7518
//
// see: https://tools.ietf.org/html/rfc7518#section-3.1
const (
	RS256 = "RS256" // RSASSA-PKCS-v1.5 using SHA-256
	RS384 = "RS384" // RSASSA-PKCS-v1.5 using SHA-384
	RS512 = "RS512" // RSASSA-PKCS-v1.5 using SHA-512
	ES256 = "ES256" // ECDSA using P-256 and SHA-256
	ES384 = "ES384" // ECDSA using P-384 and SHA-384
	ES512 = "ES512" // ECDSA using P-521 and SHA-512
	PS256 = "PS256" // RSASSA-PSS using SHA256 and MGF1-SHA256
	PS384 = "PS384" // RSASSA-PSS using SHA384 and MGF1-SHA384
	PS512 = "PS512" // RSASSA-PSS using SHA512 and MGF1-SHA512
)
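These algorithm names also decide which hash is used when validating the OIDC at_hash claim (see VerifyAccessToken further down in this diff). A small sketch of that computation for the SHA-256 based algorithms (RS256, ES256, PS256); the token value is a placeholder:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// atHash mirrors VerifyAccessToken below for SHA-256 based algorithms:
// base64url-encode (without padding) the left half of the access token's hash.
func atHash(accessToken string) string {
	sum := sha256.Sum256([]byte(accessToken))
	return base64.RawURLEncoding.EncodeToString(sum[:len(sum)/2])
}

func main() {
	fmt.Println(atHash("example-access-token")) // placeholder token value
}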
@@ -0,0 +1,248 @@

package oidc

import (
	"context"
	"crypto"
	"crypto/ecdsa"
	"crypto/rsa"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"sync"
	"time"

	jose "github.com/go-jose/go-jose/v3"
)

// StaticKeySet is a verifier that validates JWT against a static set of public keys.
type StaticKeySet struct {
	// PublicKeys used to verify the JWT. Supported types are *rsa.PublicKey and
	// *ecdsa.PublicKey.
	PublicKeys []crypto.PublicKey
}

// VerifySignature compares the signature against a static set of public keys.
func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
	jws, err := jose.ParseSigned(jwt)
	if err != nil {
		return nil, fmt.Errorf("parsing jwt: %v", err)
	}
	for _, pub := range s.PublicKeys {
		switch pub.(type) {
		case *rsa.PublicKey:
		case *ecdsa.PublicKey:
		default:
			return nil, fmt.Errorf("invalid public key type provided: %T", pub)
		}
		payload, err := jws.Verify(pub)
		if err != nil {
			continue
		}
		return payload, nil
	}
	return nil, fmt.Errorf("no public keys able to verify jwt")
}

// NewRemoteKeySet returns a KeySet that can validate JSON web tokens by using HTTP
// GETs to fetch JSON web token sets hosted at a remote URL. This is automatically
// used by NewProvider using the URLs returned by OpenID Connect discovery, but is
// exposed for providers that don't support discovery or to prevent round trips to the
// discovery URL.
//
// The returned KeySet is a long lived verifier that caches keys based on any
// keys change. Reuse a common remote key set instead of creating new ones as needed.
func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet {
	return newRemoteKeySet(ctx, jwksURL, time.Now)
}

func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *RemoteKeySet {
	if now == nil {
		now = time.Now
	}
	return &RemoteKeySet{jwksURL: jwksURL, ctx: cloneContext(ctx), now: now}
}

// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
// a jwks_uri endpoint.
type RemoteKeySet struct {
	jwksURL string
	ctx     context.Context
	now     func() time.Time

	// guard all other fields
	mu sync.RWMutex

	// inflight suppresses parallel execution of updateKeys and allows
	// multiple goroutines to wait for its result.
	inflight *inflight

	// A set of cached keys.
	cachedKeys []jose.JSONWebKey
}

// inflight is used to wait on some in-flight request from multiple goroutines.
type inflight struct {
	doneCh chan struct{}

	keys []jose.JSONWebKey
	err  error
}

func newInflight() *inflight {
	return &inflight{doneCh: make(chan struct{})}
}

// wait returns a channel that multiple goroutines can receive on. Once it returns
// a value, the inflight request is done and result() can be inspected.
func (i *inflight) wait() <-chan struct{} {
	return i.doneCh
}

// done can only be called by a single goroutine. It records the result of the
// inflight request and signals other goroutines that the result is safe to
// inspect.
func (i *inflight) done(keys []jose.JSONWebKey, err error) {
	i.keys = keys
	i.err = err
	close(i.doneCh)
}

// result cannot be called until the wait() channel has returned a value.
func (i *inflight) result() ([]jose.JSONWebKey, error) {
	return i.keys, i.err
}

// parsedJWTKey is a context key that allows common setups to avoid parsing the
// JWT twice. It holds a *jose.JSONWebSignature value.
var parsedJWTKey contextKey

// VerifySignature validates a payload against a signature from the jwks_uri.
//
// Users MUST NOT call this method directly and should use an IDTokenVerifier
// instead. This method skips critical validations such as 'alg' values and is
// only exported to implement the KeySet interface.
func (r *RemoteKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte, error) {
	jws, ok := ctx.Value(parsedJWTKey).(*jose.JSONWebSignature)
	if !ok {
		var err error
		jws, err = jose.ParseSigned(jwt)
		if err != nil {
			return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
		}
	}
	return r.verify(ctx, jws)
}

func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) ([]byte, error) {
	// We don't support JWTs signed with multiple signatures.
	keyID := ""
	for _, sig := range jws.Signatures {
		keyID = sig.Header.KeyID
		break
	}

	keys := r.keysFromCache()
	for _, key := range keys {
		if keyID == "" || key.KeyID == keyID {
			if payload, err := jws.Verify(&key); err == nil {
				return payload, nil
			}
		}
	}

	// If the kid doesn't match, check for new keys from the remote. This is the
	// strategy recommended by the spec.
	//
	// https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
	keys, err := r.keysFromRemote(ctx)
	if err != nil {
		return nil, fmt.Errorf("fetching keys %v", err)
	}

	for _, key := range keys {
		if keyID == "" || key.KeyID == keyID {
			if payload, err := jws.Verify(&key); err == nil {
				return payload, nil
			}
		}
	}
	return nil, errors.New("failed to verify id token signature")
}

func (r *RemoteKeySet) keysFromCache() (keys []jose.JSONWebKey) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.cachedKeys
}

// keysFromRemote syncs the key set from the remote set, records the values in the
// cache, and returns the key set.
func (r *RemoteKeySet) keysFromRemote(ctx context.Context) ([]jose.JSONWebKey, error) {
	// Need to lock to inspect the inflight request field.
	r.mu.Lock()
	// If there's not a current inflight request, create one.
	if r.inflight == nil {
		r.inflight = newInflight()

		// This goroutine has exclusive ownership over the current inflight
		// request. It releases the resource by nil'ing the inflight field
		// once the goroutine is done.
		go func() {
			// Sync keys and finish inflight when that's done.
			keys, err := r.updateKeys()

			r.inflight.done(keys, err)

			// Lock to update the keys and indicate that there is no longer an
			// inflight request.
			r.mu.Lock()
			defer r.mu.Unlock()

			if err == nil {
				r.cachedKeys = keys
			}

			// Free inflight so a different request can run.
			r.inflight = nil
		}()
	}
	inflight := r.inflight
	r.mu.Unlock()

	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-inflight.wait():
		return inflight.result()
	}
}

func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) {
	req, err := http.NewRequest("GET", r.jwksURL, nil)
	if err != nil {
		return nil, fmt.Errorf("oidc: can't create request: %v", err)
	}

	resp, err := doRequest(r.ctx, req)
	if err != nil {
		return nil, fmt.Errorf("oidc: get keys failed %v", err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("unable to read response body: %v", err)
	}

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("oidc: get keys failed: %s %s", resp.Status, body)
	}

	var keySet jose.JSONWebKeySet
	err = unmarshalResp(resp, body, &keySet)
	if err != nil {
		return nil, fmt.Errorf("oidc: failed to decode keys: %v %s", err, body)
	}
	return keySet.Keys, nil
}
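A hedged sketch of StaticKeySet in use, which skips JWKS fetching entirely and verifies against fixed keys; it signs a throwaway payload with go-jose and then verifies it. The import path github.com/coreos/go-oidc/v3/oidc is assumed for this vendored package, and the payload content is illustrative only:

package main

import (
	"context"
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	oidc "github.com/coreos/go-oidc/v3/oidc" // assumed import path of the vendored package
	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	// Generate a throwaway ES256 key and sign a payload with go-jose.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: priv}, nil)
	if err != nil {
		panic(err)
	}
	jws, err := signer.Sign([]byte(`{"iss":"https://issuer.example.com"}`))
	if err != nil {
		panic(err)
	}
	token, err := jws.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// Verify the compact JWS against the matching public key.
	ks := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{&priv.PublicKey}}
	payload, err := ks.VerifySignature(context.Background(), token)
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified payload: %s\n", payload)
}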
@ -0,0 +1,522 @@
|
|||
// Package oidc implements OpenID Connect client logic for the golang.org/x/oauth2 package.
|
||||
package oidc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
// ScopeOpenID is the mandatory scope for all OpenID Connect OAuth2 requests.
|
||||
ScopeOpenID = "openid"
|
||||
|
||||
// ScopeOfflineAccess is an optional scope defined by OpenID Connect for requesting
|
||||
// OAuth2 refresh tokens.
|
||||
//
|
||||
// Support for this scope differs between OpenID Connect providers. For instance
|
||||
// Google rejects it, favoring appending "access_type=offline" as part of the
|
||||
// authorization request instead.
|
||||
//
|
||||
// See: https://openid.net/specs/openid-connect-core-1_0.html#OfflineAccess
|
||||
ScopeOfflineAccess = "offline_access"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoAtHash = errors.New("id token did not have an access token hash")
|
||||
errInvalidAtHash = errors.New("access token hash does not match value in ID token")
|
||||
)
|
||||
|
||||
type contextKey int
|
||||
|
||||
var issuerURLKey contextKey
|
||||
|
||||
// ClientContext returns a new Context that carries the provided HTTP client.
|
||||
//
|
||||
// This method sets the same context key used by the golang.org/x/oauth2 package,
|
||||
// so the returned context works for that package too.
|
||||
//
|
||||
// myClient := &http.Client{}
|
||||
// ctx := oidc.ClientContext(parentContext, myClient)
|
||||
//
|
||||
// // This will use the custom client
|
||||
// provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
|
||||
//
|
||||
func ClientContext(ctx context.Context, client *http.Client) context.Context {
|
||||
return context.WithValue(ctx, oauth2.HTTPClient, client)
|
||||
}
|
||||
|
||||
// cloneContext copies a context's bag-of-values into a new context that isn't
|
||||
// associated with its cancellation. This is used to initialize remote keys sets
|
||||
// which run in the background and aren't associated with the initial context.
|
||||
func cloneContext(ctx context.Context) context.Context {
|
||||
cp := context.Background()
|
||||
if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
|
||||
cp = ClientContext(cp, c)
|
||||
}
|
||||
return cp
|
||||
}
|
||||
|
||||
// InsecureIssuerURLContext allows discovery to work when the issuer_url reported
|
||||
// by upstream is mismatched with the discovery URL. This is meant for integration
|
||||
// with off-spec providers such as Azure.
|
||||
//
|
||||
// discoveryBaseURL := "https://login.microsoftonline.com/organizations/v2.0"
|
||||
// issuerURL := "https://login.microsoftonline.com/my-tenantid/v2.0"
|
||||
//
|
||||
// ctx := oidc.InsecureIssuerURLContext(parentContext, issuerURL)
|
||||
//
|
||||
// // Provider will be discovered with the discoveryBaseURL, but use issuerURL
|
||||
// // for future issuer validation.
|
||||
// provider, err := oidc.NewProvider(ctx, discoveryBaseURL)
|
||||
//
|
||||
// This is insecure because validating the correct issuer is critical for multi-tenant
|
||||
// proivders. Any overrides here MUST be carefully reviewed.
|
||||
func InsecureIssuerURLContext(ctx context.Context, issuerURL string) context.Context {
|
||||
return context.WithValue(ctx, issuerURLKey, issuerURL)
|
||||
}
|
||||
|
||||
func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
|
||||
client := http.DefaultClient
|
||||
if c, ok := ctx.Value(oauth2.HTTPClient).(*http.Client); ok {
|
||||
client = c
|
||||
}
|
||||
return client.Do(req.WithContext(ctx))
|
||||
}
|
||||
|
||||
// Provider represents an OpenID Connect server's configuration.
|
||||
type Provider struct {
|
||||
issuer string
|
||||
authURL string
|
||||
tokenURL string
|
||||
userInfoURL string
|
||||
algorithms []string
|
||||
|
||||
// Raw claims returned by the server.
|
||||
rawClaims []byte
|
||||
|
||||
remoteKeySet KeySet
|
||||
}
|
||||
|
||||
type providerJSON struct {
|
||||
Issuer string `json:"issuer"`
|
||||
AuthURL string `json:"authorization_endpoint"`
|
||||
TokenURL string `json:"token_endpoint"`
|
||||
JWKSURL string `json:"jwks_uri"`
|
||||
UserInfoURL string `json:"userinfo_endpoint"`
|
||||
Algorithms []string `json:"id_token_signing_alg_values_supported"`
|
||||
}
|
||||
|
||||
// supportedAlgorithms is a list of algorithms explicitly supported by this
|
||||
// package. If a provider supports other algorithms, such as HS256 or none,
|
||||
// those values won't be passed to the IDTokenVerifier.
|
||||
var supportedAlgorithms = map[string]bool{
|
||||
RS256: true,
|
||||
RS384: true,
|
||||
RS512: true,
|
||||
ES256: true,
|
||||
ES384: true,
|
||||
ES512: true,
|
||||
PS256: true,
|
||||
PS384: true,
|
||||
PS512: true,
|
||||
}
|
||||
|
||||
// ProviderConfig allows creating providers when discovery isn't supported. It's
|
||||
// generally easier to use NewProvider directly.
|
||||
type ProviderConfig struct {
|
||||
// IssuerURL is the identity of the provider, and the string it uses to sign
|
||||
// ID tokens with. For example "https://accounts.google.com". This value MUST
|
||||
// match ID tokens exactly.
|
||||
IssuerURL string
|
||||
// AuthURL is the endpoint used by the provider to support the OAuth 2.0
|
||||
// authorization endpoint.
|
||||
AuthURL string
|
||||
// TokenURL is the endpoint used by the provider to support the OAuth 2.0
|
||||
// token endpoint.
|
||||
TokenURL string
|
||||
// UserInfoURL is the endpoint used by the provider to support the OpenID
|
||||
// Connect UserInfo flow.
|
||||
//
|
||||
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
|
||||
UserInfoURL string
|
||||
// JWKSURL is the endpoint used by the provider to advertise public keys to
|
||||
// verify issued ID tokens. This endpoint is polled as new keys are made
|
||||
// available.
|
||||
JWKSURL string
|
||||
|
||||
// Algorithms, if provided, indicate a list of JWT algorithms allowed to sign
|
||||
// ID tokens. If not provided, this defaults to the algorithms advertised by
|
||||
// the JWK endpoint, then the set of algorithms supported by this package.
|
||||
Algorithms []string
|
||||
}
|
||||
|
||||
// NewProvider initializes a provider from a set of endpoints, rather than
|
||||
// through discovery.
|
||||
func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
|
||||
return &Provider{
|
||||
issuer: p.IssuerURL,
|
||||
authURL: p.AuthURL,
|
||||
tokenURL: p.TokenURL,
|
||||
userInfoURL: p.UserInfoURL,
|
||||
algorithms: p.Algorithms,
|
||||
remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL),
|
||||
}
|
||||
}
|
||||
|
||||
// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
|
||||
//
|
||||
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
|
||||
// or "https://login.salesforce.com".
|
||||
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
|
||||
wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
|
||||
req, err := http.NewRequest("GET", wellKnown, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := doRequest(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read response body: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("%s: %s", resp.Status, body)
|
||||
}
|
||||
|
||||
var p providerJSON
|
||||
err = unmarshalResp(resp, body, &p)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: failed to decode provider discovery object: %v", err)
|
||||
}
|
||||
|
||||
issuerURL, skipIssuerValidation := ctx.Value(issuerURLKey).(string)
|
||||
if !skipIssuerValidation {
|
||||
issuerURL = issuer
|
||||
}
|
||||
if p.Issuer != issuerURL && !skipIssuerValidation {
|
||||
return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
|
||||
}
|
||||
var algs []string
|
||||
for _, a := range p.Algorithms {
|
||||
if supportedAlgorithms[a] {
|
||||
algs = append(algs, a)
|
||||
}
|
||||
}
|
||||
return &Provider{
|
||||
issuer: issuerURL,
|
||||
authURL: p.AuthURL,
|
||||
tokenURL: p.TokenURL,
|
||||
userInfoURL: p.UserInfoURL,
|
||||
algorithms: algs,
|
||||
rawClaims: body,
|
||||
remoteKeySet: NewRemoteKeySet(cloneContext(ctx), p.JWKSURL),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Claims unmarshals raw fields returned by the server during discovery.
|
||||
//
|
||||
// var claims struct {
|
||||
// ScopesSupported []string `json:"scopes_supported"`
|
||||
// ClaimsSupported []string `json:"claims_supported"`
|
||||
// }
|
||||
//
|
||||
// if err := provider.Claims(&claims); err != nil {
|
||||
// // handle unmarshaling error
|
||||
// }
|
||||
//
|
||||
// For a list of fields defined by the OpenID Connect spec see:
|
||||
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
|
||||
func (p *Provider) Claims(v interface{}) error {
|
||||
if p.rawClaims == nil {
|
||||
return errors.New("oidc: claims not set")
|
||||
}
|
||||
return json.Unmarshal(p.rawClaims, v)
|
||||
}
|
||||
|
||||
// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
|
||||
func (p *Provider) Endpoint() oauth2.Endpoint {
|
||||
return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
|
||||
}
|
||||
|
||||
// UserInfo represents the OpenID Connect userinfo claims.
|
||||
type UserInfo struct {
|
||||
Subject string `json:"sub"`
|
||||
Profile string `json:"profile"`
|
||||
Email string `json:"email"`
|
||||
EmailVerified bool `json:"email_verified"`
|
||||
|
||||
claims []byte
|
||||
}
|
||||
|
||||
type userInfoRaw struct {
|
||||
Subject string `json:"sub"`
|
||||
Profile string `json:"profile"`
|
||||
Email string `json:"email"`
|
||||
// Handle providers that return email_verified as a string
|
||||
// https://forums.aws.amazon.com/thread.jspa?messageID=949441󧳁 and
|
||||
// https://discuss.elastic.co/t/openid-error-after-authenticating-against-aws-cognito/206018/11
|
||||
EmailVerified stringAsBool `json:"email_verified"`
|
||||
}
|
||||
|
||||
// Claims unmarshals the raw JSON object claims into the provided object.
|
||||
func (u *UserInfo) Claims(v interface{}) error {
|
||||
if u.claims == nil {
|
||||
return errors.New("oidc: claims not set")
|
||||
}
|
||||
return json.Unmarshal(u.claims, v)
|
||||
}
|
||||
|
||||
// UserInfo uses the token source to query the provider's user info endpoint.
|
||||
func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource) (*UserInfo, error) {
|
||||
if p.userInfoURL == "" {
|
||||
return nil, errors.New("oidc: user info endpoint is not supported by this provider")
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", p.userInfoURL, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: create GET request: %v", err)
|
||||
}
|
||||
|
||||
token, err := tokenSource.Token()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: get access token: %v", err)
|
||||
}
|
||||
token.SetAuthHeader(req)
|
||||
|
||||
resp, err := doRequest(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("%s: %s", resp.Status, body)
|
||||
}
|
||||
|
||||
ct := resp.Header.Get("Content-Type")
|
||||
mediaType, _, parseErr := mime.ParseMediaType(ct)
|
||||
if parseErr == nil && mediaType == "application/jwt" {
|
||||
payload, err := p.remoteKeySet.VerifySignature(ctx, string(body))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: invalid userinfo jwt signature %v", err)
|
||||
}
|
||||
body = payload
|
||||
}
|
||||
|
||||
var userInfo userInfoRaw
|
||||
if err := json.Unmarshal(body, &userInfo); err != nil {
|
||||
return nil, fmt.Errorf("oidc: failed to decode userinfo: %v", err)
|
||||
}
|
||||
return &UserInfo{
|
||||
Subject: userInfo.Subject,
|
||||
Profile: userInfo.Profile,
|
||||
Email: userInfo.Email,
|
||||
EmailVerified: bool(userInfo.EmailVerified),
|
||||
claims: body,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// IDToken is an OpenID Connect extension that provides a predictable representation
|
||||
// of an authorization event.
|
||||
//
|
||||
// The ID Token only holds fields OpenID Connect requires. To access additional
|
||||
// claims returned by the server, use the Claims method.
|
||||
type IDToken struct {
|
||||
// The URL of the server which issued this token. OpenID Connect
|
||||
// requires this value always be identical to the URL used for
|
||||
// initial discovery.
|
||||
//
|
||||
// Note: Because of a known issue with Google Accounts' implementation
|
||||
// this value may differ when using Google.
|
||||
//
|
||||
// See: https://developers.google.com/identity/protocols/OpenIDConnect#obtainuserinfo
|
||||
Issuer string
|
||||
|
||||
// The client ID, or set of client IDs, that this token is issued for. For
|
||||
// common uses, this is the client that initialized the auth flow.
|
||||
//
|
||||
// This package ensures the audience contains an expected value.
|
||||
Audience []string
|
||||
|
||||
// A unique string which identifies the end user.
|
||||
Subject string
|
||||
|
||||
// Expiry of the token. Ths package will not process tokens that have
|
||||
// expired unless that validation is explicitly turned off.
|
||||
Expiry time.Time
|
||||
// When the token was issued by the provider.
|
||||
IssuedAt time.Time
|
||||
|
||||
// Initial nonce provided during the authentication redirect.
|
||||
//
|
||||
// This package does NOT provided verification on the value of this field
|
||||
// and it's the user's responsibility to ensure it contains a valid value.
|
||||
Nonce string
|
||||
|
||||
// at_hash claim, if set in the ID token. Callers can verify an access token
|
||||
// that corresponds to the ID token using the VerifyAccessToken method.
|
||||
AccessTokenHash string
|
||||
|
||||
// signature algorithm used for ID token, needed to compute a verification hash of an
|
||||
// access token
|
||||
sigAlgorithm string
|
||||
|
||||
// Raw payload of the id_token.
|
||||
claims []byte
|
||||
|
||||
// Map of distributed claim names to claim sources
|
||||
distributedClaims map[string]claimSource
|
||||
}
|
||||
|
||||
// Claims unmarshals the raw JSON payload of the ID Token into a provided struct.
|
||||
//
|
||||
// idToken, err := idTokenVerifier.Verify(rawIDToken)
|
||||
// if err != nil {
|
||||
// // handle error
|
||||
// }
|
||||
// var claims struct {
|
||||
// Email string `json:"email"`
|
||||
// EmailVerified bool `json:"email_verified"`
|
||||
// }
|
||||
// if err := idToken.Claims(&claims); err != nil {
|
||||
// // handle error
|
||||
// }
|
||||
//
|
||||
func (i *IDToken) Claims(v interface{}) error {
|
||||
if i.claims == nil {
|
||||
return errors.New("oidc: claims not set")
|
||||
}
|
||||
return json.Unmarshal(i.claims, v)
|
||||
}
|
||||
|
||||
// VerifyAccessToken verifies that the hash of the access token that corresponds to the ID token
|
||||
// matches the hash in the id token. It returns an error if the hashes don't match.
|
||||
// It is the caller's responsibility to ensure that the optional access token hash is present for the ID token
|
||||
// before calling this method. See https://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
|
||||
func (i *IDToken) VerifyAccessToken(accessToken string) error {
|
||||
if i.AccessTokenHash == "" {
|
||||
return errNoAtHash
|
||||
}
|
||||
var h hash.Hash
|
||||
switch i.sigAlgorithm {
|
||||
case RS256, ES256, PS256:
|
||||
h = sha256.New()
|
||||
case RS384, ES384, PS384:
|
||||
h = sha512.New384()
|
||||
case RS512, ES512, PS512:
|
||||
h = sha512.New()
|
||||
default:
|
||||
return fmt.Errorf("oidc: unsupported signing algorithm %q", i.sigAlgorithm)
|
||||
}
|
||||
h.Write([]byte(accessToken)) // hash documents that Write will never return an error
|
||||
sum := h.Sum(nil)[:h.Size()/2]
|
||||
actual := base64.RawURLEncoding.EncodeToString(sum)
|
||||
if actual != i.AccessTokenHash {
|
||||
return errInvalidAtHash
|
||||
}
|
||||
return nil
|
||||
}
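// Illustrative usage sketch (not part of the upstream source): after a successful
// Verify, the at_hash check is typically run against the raw OAuth2 access token.
// The oauth2Token variable below is an assumed *oauth2.Token from the token exchange.
//
//	idToken, err := verifier.Verify(ctx, rawIDToken)
//	if err != nil {
//		// handle error
//	}
//	if err := idToken.VerifyAccessToken(oauth2Token.AccessToken); err != nil {
//		// at_hash is missing or does not match; reject the access token
//	}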
|
||||
|
||||
type idToken struct {
|
||||
Issuer string `json:"iss"`
|
||||
Subject string `json:"sub"`
|
||||
Audience audience `json:"aud"`
|
||||
Expiry jsonTime `json:"exp"`
|
||||
IssuedAt jsonTime `json:"iat"`
|
||||
NotBefore *jsonTime `json:"nbf"`
|
||||
Nonce string `json:"nonce"`
|
||||
AtHash string `json:"at_hash"`
|
||||
ClaimNames map[string]string `json:"_claim_names"`
|
||||
ClaimSources map[string]claimSource `json:"_claim_sources"`
|
||||
}
|
||||
|
||||
type claimSource struct {
|
||||
Endpoint string `json:"endpoint"`
|
||||
AccessToken string `json:"access_token"`
|
||||
}
|
||||
|
||||
type stringAsBool bool
|
||||
|
||||
func (sb *stringAsBool) UnmarshalJSON(b []byte) error {
|
||||
switch string(b) {
|
||||
case "true", `"true"`:
|
||||
*sb = true
|
||||
case "false", `"false"`:
|
||||
*sb = false
|
||||
default:
|
||||
return errors.New("invalid value for boolean")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type audience []string
|
||||
|
||||
func (a *audience) UnmarshalJSON(b []byte) error {
|
||||
var s string
|
||||
if json.Unmarshal(b, &s) == nil {
|
||||
*a = audience{s}
|
||||
return nil
|
||||
}
|
||||
var auds []string
|
||||
if err := json.Unmarshal(b, &auds); err != nil {
|
||||
return err
|
||||
}
|
||||
*a = auds
|
||||
return nil
|
||||
}
|
||||
|
||||
type jsonTime time.Time
|
||||
|
||||
func (j *jsonTime) UnmarshalJSON(b []byte) error {
|
||||
var n json.Number
|
||||
if err := json.Unmarshal(b, &n); err != nil {
|
||||
return err
|
||||
}
|
||||
var unix int64
|
||||
|
||||
if t, err := n.Int64(); err == nil {
|
||||
unix = t
|
||||
} else {
|
||||
f, err := n.Float64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
unix = int64(f)
|
||||
}
|
||||
*j = jsonTime(time.Unix(unix, 0))
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalResp(r *http.Response, body []byte, v interface{}) error {
|
||||
err := json.Unmarshal(body, &v)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
ct := r.Header.Get("Content-Type")
|
||||
mediaType, _, parseErr := mime.ParseMediaType(ct)
|
||||
if parseErr == nil && mediaType == "application/json" {
|
||||
return fmt.Errorf("got Content-Type = application/json, but could not unmarshal as JSON: %v", err)
|
||||
}
|
||||
return fmt.Errorf("expected Content-Type = application/json, got %q: %v", ct, err)
|
||||
}
|
|
@ -0,0 +1,344 @@
|
|||
package oidc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
jose "github.com/go-jose/go-jose/v3"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
issuerGoogleAccounts = "https://accounts.google.com"
|
||||
issuerGoogleAccountsNoScheme = "accounts.google.com"
|
||||
)
|
||||
|
||||
// TokenExpiredError indicates that Verify failed because the token was expired. This
|
||||
// error does NOT indicate that the token is not also invalid for other reasons. Other
|
||||
// checks might have failed if the expiration check had not failed.
|
||||
type TokenExpiredError struct {
|
||||
// Expiry is the time when the token expired.
|
||||
Expiry time.Time
|
||||
}
|
||||
|
||||
func (e *TokenExpiredError) Error() string {
|
||||
return fmt.Sprintf("oidc: token is expired (Token Expiry: %v)", e.Expiry)
|
||||
}
|
||||
|
||||
// KeySet is a set of public JSON Web Keys that can be used to validate the signature
|
||||
// of JSON web tokens. This is expected to be backed by a remote key set through
|
||||
// provider metadata discovery or an in-memory set of keys delivered out-of-band.
|
||||
type KeySet interface {
|
||||
// VerifySignature parses the JSON web token, verifies the signature, and returns
|
||||
// the raw payload. Header and claim fields are validated by other parts of the
|
||||
// package. For example, the KeySet does not need to check values such as signature
|
||||
// algorithm, issuer, and audience since the IDTokenVerifier validates these values
|
||||
// independently.
|
||||
//
|
||||
// If VerifySignature makes HTTP requests to verify the token, it's expected to
|
||||
// use any HTTP client associated with the context through ClientContext.
|
||||
VerifySignature(ctx context.Context, jwt string) (payload []byte, err error)
|
||||
}
|
||||
|
||||
// IDTokenVerifier provides verification for ID Tokens.
|
||||
type IDTokenVerifier struct {
|
||||
keySet KeySet
|
||||
config *Config
|
||||
issuer string
|
||||
}
|
||||
|
||||
// NewVerifier returns a verifier manually constructed from a key set and issuer URL.
|
||||
//
|
||||
// It's easier to use provider discovery to construct an IDTokenVerifier than creating
|
||||
// one directly. This method is intended to be used with providers that don't support
|
||||
// metadata discovery, or avoiding round trips when the key set URL is already known.
|
||||
//
|
||||
// This constructor can be used to create a verifier directly using the issuer URL and
|
||||
// JSON Web Key Set URL without using discovery:
|
||||
//
|
||||
// keySet := oidc.NewRemoteKeySet(ctx, "https://www.googleapis.com/oauth2/v3/certs")
|
||||
// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
|
||||
//
|
||||
// Or a static key set (e.g. for testing):
|
||||
//
|
||||
// keySet := &oidc.StaticKeySet{PublicKeys: []crypto.PublicKey{pub1, pub2}}
|
||||
// verifier := oidc.NewVerifier("https://accounts.google.com", keySet, config)
|
||||
//
|
||||
func NewVerifier(issuerURL string, keySet KeySet, config *Config) *IDTokenVerifier {
|
||||
return &IDTokenVerifier{keySet: keySet, config: config, issuer: issuerURL}
|
||||
}
|
||||
|
||||
// Config is the configuration for an IDTokenVerifier.
|
||||
type Config struct {
|
||||
// Expected audience of the token. For a majority of the cases this is expected to be
|
||||
// the ID of the client that initialized the login flow. It may occasionally differ if
|
||||
// the provider supports the authorizing party (azp) claim.
|
||||
//
|
||||
// If not provided, users must explicitly set SkipClientIDCheck.
|
||||
ClientID string
|
||||
// If specified, only this set of algorithms may be used to sign the JWT.
|
||||
//
|
||||
// If the IDTokenVerifier is created from a provider with (*Provider).Verifier, this
|
||||
// defaults to the set of algorithms the provider supports. Otherwise this value
|
||||
// defaults to RS256.
|
||||
SupportedSigningAlgs []string
|
||||
|
||||
// If true, no ClientID check is performed. Must be true if ClientID field is empty.
|
||||
SkipClientIDCheck bool
|
||||
// If true, token expiry is not checked.
|
||||
SkipExpiryCheck bool
|
||||
|
||||
// SkipIssuerCheck is intended for specialized cases where the caller wishes to
|
||||
// defer issuer validation. When enabled, callers MUST independently verify the Token's
|
||||
// Issuer is a known good value.
|
||||
//
|
||||
// Mismatched issuers often indicate client mis-configuration. If mismatches are
|
||||
// unexpected, evaluate if the provided issuer URL is incorrect instead of enabling
|
||||
// this option.
|
||||
SkipIssuerCheck bool
|
||||
|
||||
// Time function to check Token expiry. Defaults to time.Now
|
||||
Now func() time.Time
|
||||
|
||||
// InsecureSkipSignatureCheck causes this package to skip JWT signature validation.
|
||||
// It's intended for special cases where providers (such as Azure), use the "none"
|
||||
// algorithm.
|
||||
//
|
||||
// This option can only be enabled safely when the ID Token is received directly
|
||||
// from the provider after the token exchange.
|
||||
//
|
||||
// This option MUST NOT be used when receiving an ID Token from sources other
|
||||
// than the token endpoint.
|
||||
InsecureSkipSignatureCheck bool
|
||||
}
|
||||
|
||||
// Verifier returns an IDTokenVerifier that uses the provider's key set to verify JWTs.
|
||||
func (p *Provider) Verifier(config *Config) *IDTokenVerifier {
|
||||
if len(config.SupportedSigningAlgs) == 0 && len(p.algorithms) > 0 {
|
||||
// Make a copy so we don't modify the config values.
|
||||
cp := &Config{}
|
||||
*cp = *config
|
||||
cp.SupportedSigningAlgs = p.algorithms
|
||||
config = cp
|
||||
}
|
||||
return NewVerifier(p.issuer, p.remoteKeySet, config)
|
||||
}
|
||||
|
||||
func parseJWT(p string) ([]byte, error) {
|
||||
parts := strings.Split(p, ".")
|
||||
if len(parts) < 2 {
|
||||
return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
|
||||
}
|
||||
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
func contains(sli []string, ele string) bool {
|
||||
for _, s := range sli {
|
||||
if s == ele {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Returns the Claims from the distributed JWT token
|
||||
func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src claimSource) ([]byte, error) {
|
||||
req, err := http.NewRequest("GET", src.Endpoint, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed request: %v", err)
|
||||
}
|
||||
if src.AccessToken != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+src.AccessToken)
|
||||
}
|
||||
|
||||
resp, err := doRequest(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: Request to endpoint failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read response body: %v", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("oidc: request failed: %v", resp.StatusCode)
|
||||
}
|
||||
|
||||
token, err := verifier.Verify(ctx, string(body))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed response body: %v", err)
|
||||
}
|
||||
|
||||
return token.claims, nil
|
||||
}
|
||||
|
||||
// Verify parses a raw ID Token, verifies it's been signed by the provider, performs
|
||||
// any additional checks depending on the Config, and returns the payload.
|
||||
//
|
||||
// Verify does NOT do nonce validation, which is the caller's responsibility.
|
||||
//
|
||||
// See: https://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
|
||||
//
|
||||
// oauth2Token, err := oauth2Config.Exchange(ctx, r.URL.Query().Get("code"))
|
||||
// if err != nil {
|
||||
// // handle error
|
||||
// }
|
||||
//
|
||||
// // Extract the ID Token from oauth2 token.
|
||||
// rawIDToken, ok := oauth2Token.Extra("id_token").(string)
|
||||
// if !ok {
|
||||
// // handle error
|
||||
// }
|
||||
//
|
||||
// token, err := verifier.Verify(ctx, rawIDToken)
|
||||
//
|
||||
func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
|
||||
// Throw out tokens with invalid claims before trying to verify the token. This lets
|
||||
// us do cheap checks before possibly re-syncing keys.
|
||||
payload, err := parseJWT(rawIDToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
|
||||
}
|
||||
var token idToken
|
||||
if err := json.Unmarshal(payload, &token); err != nil {
|
||||
return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
|
||||
}
|
||||
|
||||
distributedClaims := make(map[string]claimSource)
|
||||
|
||||
// step through the token to map claim names to claim sources
|
||||
for cn, src := range token.ClaimNames {
|
||||
if src == "" {
|
||||
return nil, fmt.Errorf("oidc: failed to obtain source from claim name")
|
||||
}
|
||||
s, ok := token.ClaimSources[src]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("oidc: source does not exist")
|
||||
}
|
||||
distributedClaims[cn] = s
|
||||
}
|
||||
|
||||
t := &IDToken{
|
||||
Issuer: token.Issuer,
|
||||
Subject: token.Subject,
|
||||
Audience: []string(token.Audience),
|
||||
Expiry: time.Time(token.Expiry),
|
||||
IssuedAt: time.Time(token.IssuedAt),
|
||||
Nonce: token.Nonce,
|
||||
AccessTokenHash: token.AtHash,
|
||||
claims: payload,
|
||||
distributedClaims: distributedClaims,
|
||||
}
|
||||
|
||||
// Check issuer.
|
||||
if !v.config.SkipIssuerCheck && t.Issuer != v.issuer {
|
||||
// Google sometimes returns "accounts.google.com" as the issuer claim instead of
|
||||
// the required "https://accounts.google.com". Detect this case and allow it only
|
||||
// for Google.
|
||||
//
|
||||
// We will not add hooks to let other providers go off spec like this.
|
||||
if !(v.issuer == issuerGoogleAccounts && t.Issuer == issuerGoogleAccountsNoScheme) {
|
||||
return nil, fmt.Errorf("oidc: id token issued by a different provider, expected %q got %q", v.issuer, t.Issuer)
|
||||
}
|
||||
}
|
||||
|
||||
// If a client ID has been provided, make sure it's part of the audience. SkipClientIDCheck must be true if ClientID is empty.
|
||||
//
|
||||
// This check DOES NOT ensure that the ClientID is the party to which the ID Token was issued (i.e. Authorized party).
|
||||
if !v.config.SkipClientIDCheck {
|
||||
if v.config.ClientID != "" {
|
||||
if !contains(t.Audience, v.config.ClientID) {
|
||||
return nil, fmt.Errorf("oidc: expected audience %q got %q", v.config.ClientID, t.Audience)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("oidc: invalid configuration, clientID must be provided or SkipClientIDCheck must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If SkipExpiryCheck is false, make sure the token is not expired.
|
||||
if !v.config.SkipExpiryCheck {
|
||||
now := time.Now
|
||||
if v.config.Now != nil {
|
||||
now = v.config.Now
|
||||
}
|
||||
nowTime := now()
|
||||
|
||||
if t.Expiry.Before(nowTime) {
|
||||
return nil, &TokenExpiredError{Expiry: t.Expiry}
|
||||
}
|
||||
|
||||
// If nbf claim is provided in token, ensure that it is indeed in the past.
|
||||
if token.NotBefore != nil {
|
||||
nbfTime := time.Time(*token.NotBefore)
|
||||
// Set to 5 minutes since this is what other OpenID Connect providers do to deal with clock skew.
|
||||
// https://github.com/AzureAD/azure-activedirectory-identitymodel-extensions-for-dotnet/blob/6.12.2/src/Microsoft.IdentityModel.Tokens/TokenValidationParameters.cs#L149-L153
|
||||
leeway := 5 * time.Minute
|
||||
|
||||
if nowTime.Add(leeway).Before(nbfTime) {
|
||||
return nil, fmt.Errorf("oidc: current time %v before the nbf (not before) time: %v", nowTime, nbfTime)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if v.config.InsecureSkipSignatureCheck {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
jws, err := jose.ParseSigned(rawIDToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
|
||||
}
|
||||
|
||||
switch len(jws.Signatures) {
|
||||
case 0:
|
||||
return nil, fmt.Errorf("oidc: id token not signed")
|
||||
case 1:
|
||||
default:
|
||||
return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
|
||||
}
|
||||
|
||||
sig := jws.Signatures[0]
|
||||
supportedSigAlgs := v.config.SupportedSigningAlgs
|
||||
if len(supportedSigAlgs) == 0 {
|
||||
supportedSigAlgs = []string{RS256}
|
||||
}
|
||||
|
||||
if !contains(supportedSigAlgs, sig.Header.Algorithm) {
|
||||
return nil, fmt.Errorf("oidc: id token signed with unsupported algorithm, expected %q got %q", supportedSigAlgs, sig.Header.Algorithm)
|
||||
}
|
||||
|
||||
t.sigAlgorithm = sig.Header.Algorithm
|
||||
|
||||
ctx = context.WithValue(ctx, parsedJWTKey, jws)
|
||||
gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to verify signature: %v", err)
|
||||
}
|
||||
|
||||
// Ensure that the payload returned by the KeySet actually matches the payload parsed earlier.
|
||||
if !bytes.Equal(gotPayload, payload) {
|
||||
return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// Nonce returns an auth code option which requires the ID Token created by the
|
||||
// OpenID Connect provider to contain the specified nonce.
|
||||
func Nonce(nonce string) oauth2.AuthCodeOption {
|
||||
return oauth2.SetAuthURLParam("nonce", nonce)
|
||||
}
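// End-to-end sketch (not part of the upstream source): how discovery, the verifier,
// and claim extraction from this package are commonly combined. The issuer URL,
// client ID and rawIDToken below are placeholders.
//
//	provider, err := oidc.NewProvider(ctx, "https://accounts.example.com")
//	if err != nil {
//		// handle error
//	}
//	verifier := provider.Verifier(&oidc.Config{ClientID: "my-client-id"})
//	idToken, err := verifier.Verify(ctx, rawIDToken)
//	if err != nil {
//		// handle error
//	}
//	var claims struct {
//		Email string `json:"email"`
//	}
//	if err := idToken.Claims(&claims); err != nil {
//		// handle error
//	}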
|
|
@ -0,0 +1,2 @@
|
|||
jose-util/jose-util
|
||||
jose-util.t.err
|
|
@ -0,0 +1,53 @@
|
|||
# https://github.com/golangci/golangci-lint
|
||||
|
||||
run:
|
||||
skip-files:
|
||||
- doc_test.go
|
||||
modules-download-mode: readonly
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- gochecknoglobals
|
||||
- goconst
|
||||
- lll
|
||||
- maligned
|
||||
- nakedret
|
||||
- scopelint
|
||||
- unparam
|
||||
- funlen # added in 1.18 (requires go-jose changes before it can be enabled)
|
||||
|
||||
linters-settings:
|
||||
gocyclo:
|
||||
min-complexity: 35
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- text: "don't use ALL_CAPS in Go names"
|
||||
linters:
|
||||
- golint
|
||||
- text: "hardcoded credentials"
|
||||
linters:
|
||||
- gosec
|
||||
- text: "weak cryptographic primitive"
|
||||
linters:
|
||||
- gosec
|
||||
- path: json/
|
||||
linters:
|
||||
- dupl
|
||||
- errcheck
|
||||
- gocritic
|
||||
- gocyclo
|
||||
- golint
|
||||
- govet
|
||||
- ineffassign
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- stylecheck
|
||||
- unused
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- scopelint
|
||||
- path: jwk.go
|
||||
linters:
|
||||
- gocyclo
|
|
@ -0,0 +1,33 @@
|
|||
language: go
|
||||
|
||||
matrix:
|
||||
fast_finish: true
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
go:
|
||||
- "1.13.x"
|
||||
- "1.14.x"
|
||||
- tip
|
||||
|
||||
before_script:
|
||||
- export PATH=$HOME/.local/bin:$PATH
|
||||
|
||||
before_install:
|
||||
- go get -u github.com/mattn/goveralls github.com/wadey/gocovmerge
|
||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0
|
||||
- pip install cram --user
|
||||
|
||||
script:
|
||||
- go test -v -covermode=count -coverprofile=profile.cov .
|
||||
- go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner
|
||||
- go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher
|
||||
- go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt
|
||||
- go test -v ./json # no coverage for forked encoding/json package
|
||||
- golangci-lint run
|
||||
- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util
|
||||
- cd ..
|
||||
|
||||
after_success:
|
||||
- gocovmerge *.cov */*.cov > merged.coverprofile
|
||||
- goveralls -coverprofile merged.coverprofile -service=travis-ci
|
|
@ -0,0 +1,10 @@
|
|||
Serious about security
|
||||
======================
|
||||
|
||||
Square recognizes the important contributions the security research community
|
||||
can make. We therefore encourage reporting security issues with the code
|
||||
contained in this repository.
|
||||
|
||||
If you believe you have discovered a security vulnerability, please follow the
|
||||
guidelines at <https://bugcrowd.com/squareopensource>.
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
# Contributing
|
||||
|
||||
If you would like to contribute code to go-jose you can do so through GitHub by
|
||||
forking the repository and sending a pull request.
|
||||
|
||||
When submitting code, please make every effort to follow existing conventions
|
||||
and style in order to keep the code as readable as possible. Please also make
|
||||
sure all tests pass by running `go test`, and format your code with `go fmt`.
|
||||
We also recommend using `golint` and `errcheck`.
|
||||
|
||||
Before your code can be accepted into the project you must also sign the
|
||||
Individual Contributor License Agreement. We use [cla-assistant.io][1] and you
|
||||
will be prompted to sign once a pull request is opened.
|
||||
|
||||
[1]: https://cla-assistant.io/
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,122 @@
|
|||
# Go JOSE
|
||||
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
|
||||
[](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
|
||||
[](https://travis-ci.org/go-jose/go-jose)
|
||||
[](https://coveralls.io/r/go-jose/go-jose)
|
||||
|
||||
Package jose aims to provide an implementation of the Javascript Object Signing
|
||||
and Encryption set of standards. This includes support for JSON Web Encryption,
|
||||
JSON Web Signature, and JSON Web Token standards.
|
||||
|
||||
**Disclaimer**: This library contains encryption software that is subject to
|
||||
the U.S. Export Administration Regulations. You may not export, re-export,
|
||||
transfer or download this code or any part of it in violation of any United
|
||||
States law, directive or regulation. In particular this software may not be
|
||||
exported or re-exported in any form or on any media to Iran, North Sudan,
|
||||
Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any
|
||||
US maintained blocked list.
|
||||
|
||||
## Overview
|
||||
|
||||
The implementation follows the
|
||||
[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
|
||||
[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
|
||||
[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
|
||||
Tables of supported algorithms are shown below. The library supports both
|
||||
the compact and JWS/JWE JSON Serialization formats, and has optional support for
|
||||
multiple recipients. It also comes with a small command-line utility
|
||||
([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util))
|
||||
for dealing with JOSE messages in a shell.
|
||||
|
||||
**Note**: We use a forked version of the `encoding/json` package from the Go
|
||||
standard library which uses case-sensitive matching for member names (instead
|
||||
of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)).
|
||||
This is to avoid differences in interpretation of messages between go-jose and
|
||||
libraries in other languages.
|
||||
|
||||
### Versions
|
||||
|
||||
[Version 2](https://gopkg.in/go-jose/go-jose.v2)
|
||||
([branch](https://github.com/go-jose/go-jose/tree/v2),
|
||||
[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version:
|
||||
|
||||
import "gopkg.in/go-jose/go-jose.v2"
|
||||
|
||||
[Version 3](https://github.com/go-jose/go-jose)
|
||||
([branch](https://github.com/go-jose/go-jose/tree/master),
|
||||
[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet):
|
||||
|
||||
import "github.com/go-jose/go-jose/v3"
|
||||
|
||||
All new feature development takes place on the `master` branch, which we are
|
||||
preparing to release as version 3 soon. Version 2 will continue to receive
|
||||
critical bug and security fixes. Note that starting with version 3 we are
|
||||
using Go modules for versioning instead of `gopkg.in` as before. Version 3 will also require Go version 1.13 or higher.
|
||||
|
||||
Version 1 (on the `v1` branch) is frozen and not supported anymore.
|
||||
|
||||
### Supported algorithms
|
||||
|
||||
See below for a table of supported algorithms. Algorithm identifiers match
|
||||
the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
|
||||
standard where possible. The Godoc reference has a list of constants.
|
||||
|
||||
Key encryption | Algorithm identifier(s)
|
||||
:------------------------- | :------------------------------
|
||||
RSA-PKCS#1v1.5 | RSA1_5
|
||||
RSA-OAEP | RSA-OAEP, RSA-OAEP-256
|
||||
AES key wrap | A128KW, A192KW, A256KW
|
||||
AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
|
||||
ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
|
||||
ECDH-ES (direct) | ECDH-ES<sup>1</sup>
|
||||
Direct encryption | dir<sup>1</sup>
|
||||
|
||||
<sup>1. Not supported in multi-recipient mode</sup>
|
||||
|
||||
Signing / MAC | Algorithm identifier(s)
|
||||
:------------------------- | :------------------------------
|
||||
RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
|
||||
RSASSA-PSS | PS256, PS384, PS512
|
||||
HMAC | HS256, HS384, HS512
|
||||
ECDSA | ES256, ES384, ES512
|
||||
Ed25519 | EdDSA<sup>2</sup>
|
||||
|
||||
<sup>2. Only available in version 2 of the package</sup>
|
||||
|
||||
Content encryption | Algorithm identifier(s)
|
||||
:------------------------- | :------------------------------
|
||||
AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
|
||||
AES-GCM | A128GCM, A192GCM, A256GCM
|
||||
|
||||
Compression | Algorithm identifiers(s)
|
||||
:------------------------- | -------------------------------
|
||||
DEFLATE (RFC 1951) | DEF
|
||||
|
||||
### Supported key types
|
||||
|
||||
See below for a table of supported key types. These are understood by the
|
||||
library, and can be passed to corresponding functions such as `NewEncrypter` or
|
||||
`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
|
||||
allows attaching a key id.
|
||||
|
||||
Algorithm(s) | Corresponding types
|
||||
:------------------------- | -------------------------------
|
||||
RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
|
||||
ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
|
||||
EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
|
||||
AES, HMAC | []byte
|
||||
|
||||
<sup>1. Only available in version 2 or later of the package</sup>
|
||||
|
||||
## Examples
|
||||
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
|
||||
[](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
|
||||
|
||||
Examples can be found in the Godoc
|
||||
reference for this package. The
|
||||
[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
|
||||
subdirectory also contains a small command-line utility which might be useful
|
||||
as an example.
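For instance, a minimal sign-and-verify round trip might look like the sketch
below (based on the package's Godoc examples; `rsaPrivateKey` stands in for an
`*rsa.PrivateKey` you already hold, and errors are handled by panicking only to
keep the sketch short):

    signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: rsaPrivateKey}, nil)
    if err != nil {
        panic(err)
    }
    object, err := signer.Sign([]byte("Lorem ipsum dolor sit amet"))
    if err != nil {
        panic(err)
    }
    serialized, err := object.CompactSerialize()
    if err != nil {
        panic(err)
    }

    // Parse the serialized form and verify it with the corresponding public key.
    parsed, err := jose.ParseSigned(serialized)
    if err != nil {
        panic(err)
    }
    payload, err := parsed.Verify(&rsaPrivateKey.PublicKey)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(payload))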
|
|
@ -0,0 +1,592 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/aes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
josecipher "github.com/go-jose/go-jose/v3/cipher"
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// A generic RSA-based encrypter/verifier
|
||||
type rsaEncrypterVerifier struct {
|
||||
publicKey *rsa.PublicKey
|
||||
}
|
||||
|
||||
// A generic RSA-based decrypter/signer
|
||||
type rsaDecrypterSigner struct {
|
||||
privateKey *rsa.PrivateKey
|
||||
}
|
||||
|
||||
// A generic EC-based encrypter/verifier
|
||||
type ecEncrypterVerifier struct {
|
||||
publicKey *ecdsa.PublicKey
|
||||
}
|
||||
|
||||
type edEncrypterVerifier struct {
|
||||
publicKey ed25519.PublicKey
|
||||
}
|
||||
|
||||
// A key generator for ECDH-ES
|
||||
type ecKeyGenerator struct {
|
||||
size int
|
||||
algID string
|
||||
publicKey *ecdsa.PublicKey
|
||||
}
|
||||
|
||||
// A generic EC-based decrypter/signer
|
||||
type ecDecrypterSigner struct {
|
||||
privateKey *ecdsa.PrivateKey
|
||||
}
|
||||
|
||||
type edDecrypterSigner struct {
|
||||
privateKey ed25519.PrivateKey
|
||||
}
|
||||
|
||||
// newRSARecipient creates recipientKeyInfo based on the given key.
|
||||
func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch keyAlg {
|
||||
case RSA1_5, RSA_OAEP, RSA_OAEP_256:
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if publicKey == nil {
|
||||
return recipientKeyInfo{}, errors.New("invalid public key")
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyAlg: keyAlg,
|
||||
keyEncrypter: &rsaEncrypterVerifier{
|
||||
publicKey: publicKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newRSASigner creates a recipientSigInfo based on the given key.
|
||||
func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch sigAlg {
|
||||
case RS256, RS384, RS512, PS256, PS384, PS512:
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if privateKey == nil {
|
||||
return recipientSigInfo{}, errors.New("invalid private key")
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: staticPublicKey(&JSONWebKey{
|
||||
Key: privateKey.Public(),
|
||||
}),
|
||||
signer: &rsaDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) {
|
||||
if sigAlg != EdDSA {
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if privateKey == nil {
|
||||
return recipientSigInfo{}, errors.New("invalid private key")
|
||||
}
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: staticPublicKey(&JSONWebKey{
|
||||
Key: privateKey.Public(),
|
||||
}),
|
||||
signer: &edDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newECDHRecipient creates recipientKeyInfo based on the given key.
|
||||
func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch keyAlg {
|
||||
case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
|
||||
return recipientKeyInfo{}, errors.New("invalid public key")
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyAlg: keyAlg,
|
||||
keyEncrypter: &ecEncrypterVerifier{
|
||||
publicKey: publicKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newECDSASigner creates a recipientSigInfo based on the given key.
|
||||
func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch sigAlg {
|
||||
case ES256, ES384, ES512:
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if privateKey == nil {
|
||||
return recipientSigInfo{}, errors.New("invalid private key")
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
publicKey: staticPublicKey(&JSONWebKey{
|
||||
Key: privateKey.Public(),
|
||||
}),
|
||||
signer: &ecDecrypterSigner{
|
||||
privateKey: privateKey,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encrypt the given payload and update the object.
|
||||
func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
encryptedKey, err := ctx.encrypt(cek, alg)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: encryptedKey,
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Encrypt the given payload. Based on the key encryption algorithm,
|
||||
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
|
||||
func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) {
|
||||
switch alg {
|
||||
case RSA1_5:
|
||||
return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek)
|
||||
case RSA_OAEP:
|
||||
return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{})
|
||||
case RSA_OAEP_256:
|
||||
return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{})
|
||||
}
|
||||
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Decrypt the given payload and return the content encryption key.
|
||||
func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator)
|
||||
}
|
||||
|
||||
// Decrypt the given payload. Based on the key encryption algorithm,
|
||||
// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256).
|
||||
func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) {
|
||||
// Note: The random reader on decrypt operations is only used for blinding,
|
||||
// so stubbing is meaningless (hence the direct use of rand.Reader).
|
||||
switch alg {
|
||||
case RSA1_5:
|
||||
defer func() {
|
||||
// DecryptPKCS1v15SessionKey sometimes panics on an invalid payload
|
||||
// because of an index out of bounds error, which we want to ignore.
|
||||
// This has been fixed in Go 1.3.1 (released 2014/08/13), the recover()
|
||||
// only exists for preventing crashes with unpatched versions.
|
||||
// See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k
|
||||
// See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33
|
||||
_ = recover()
|
||||
}()
|
||||
|
||||
// Perform some input validation.
|
||||
keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8
|
||||
if keyBytes != len(jek) {
|
||||
// Input size is incorrect, the encrypted payload should always match
|
||||
// the size of the public modulus (e.g. using a 2048 bit key will
|
||||
// produce 256 bytes of output). Reject this since it's invalid input.
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
cek, _, err := generator.genKey()
|
||||
if err != nil {
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
// When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to
|
||||
// prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing
|
||||
// the Million Message Attack on Cryptographic Message Syntax". We are
|
||||
// therefore deliberately ignoring errors here.
|
||||
_ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek)
|
||||
|
||||
return cek, nil
|
||||
case RSA_OAEP:
|
||||
// Use rand.Reader for RSA blinding
|
||||
return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{})
|
||||
case RSA_OAEP_256:
|
||||
// Use rand.Reader for RSA blinding
|
||||
return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{})
|
||||
}
|
||||
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Sign the given payload
|
||||
func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case RS256, PS256:
|
||||
hash = crypto.SHA256
|
||||
case RS384, PS384:
|
||||
hash = crypto.SHA384
|
||||
case RS512, PS512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return Signature{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
var out []byte
|
||||
var err error
|
||||
|
||||
switch alg {
|
||||
case RS256, RS384, RS512:
|
||||
out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
|
||||
case PS256, PS384, PS512:
|
||||
out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthEqualsHash,
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
return Signature{
|
||||
Signature: out,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify the given payload
|
||||
func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case RS256, PS256:
|
||||
hash = crypto.SHA256
|
||||
case RS384, PS384:
|
||||
hash = crypto.SHA384
|
||||
case RS512, PS512:
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
switch alg {
|
||||
case RS256, RS384, RS512:
|
||||
return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, signature)
|
||||
case PS256, PS384, PS512:
|
||||
return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil)
|
||||
}
|
||||
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Encrypt the given payload and update the object.
|
||||
func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
switch alg {
|
||||
case ECDH_ES:
|
||||
// ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key.
|
||||
return recipientInfo{
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW:
|
||||
default:
|
||||
return recipientInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
generator := ecKeyGenerator{
|
||||
algID: string(alg),
|
||||
publicKey: ctx.publicKey,
|
||||
}
|
||||
|
||||
switch alg {
|
||||
case ECDH_ES_A128KW:
|
||||
generator.size = 16
|
||||
case ECDH_ES_A192KW:
|
||||
generator.size = 24
|
||||
case ECDH_ES_A256KW:
|
||||
generator.size = 32
|
||||
}
|
||||
|
||||
kek, header, err := generator.genKey()
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
block, err := aes.NewCipher(kek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
jek, err := josecipher.KeyWrap(block, cek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: jek,
|
||||
header: &header,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Get key size for EC key generator
|
||||
func (ctx ecKeyGenerator) keySize() int {
|
||||
return ctx.size
|
||||
}
|
||||
|
||||
// Get a content encryption key for ECDH-ES
|
||||
func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
||||
priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader)
|
||||
if err != nil {
|
||||
return nil, rawHeader{}, err
|
||||
}
|
||||
|
||||
out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size)
|
||||
|
||||
b, err := json.Marshal(&JSONWebKey{
|
||||
Key: &priv.PublicKey,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
headers := rawHeader{
|
||||
headerEPK: makeRawMessage(b),
|
||||
}
|
||||
|
||||
return out, headers, nil
|
||||
}
|
||||
|
||||
// Decrypt the given payload and return the content encryption key.
|
||||
func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
epk, err := headers.getEPK()
|
||||
if err != nil {
|
||||
return nil, errors.New("go-jose/go-jose: invalid epk header")
|
||||
}
|
||||
if epk == nil {
|
||||
return nil, errors.New("go-jose/go-jose: missing epk header")
|
||||
}
|
||||
|
||||
publicKey, ok := epk.Key.(*ecdsa.PublicKey)
|
||||
if publicKey == nil || !ok {
|
||||
return nil, errors.New("go-jose/go-jose: invalid epk header")
|
||||
}
|
||||
|
||||
if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) {
|
||||
return nil, errors.New("go-jose/go-jose: invalid public key in epk header")
|
||||
}
|
||||
|
||||
apuData, err := headers.getAPU()
|
||||
if err != nil {
|
||||
return nil, errors.New("go-jose/go-jose: invalid apu header")
|
||||
}
|
||||
apvData, err := headers.getAPV()
|
||||
if err != nil {
|
||||
return nil, errors.New("go-jose/go-jose: invalid apv header")
|
||||
}
|
||||
|
||||
deriveKey := func(algID string, size int) []byte {
|
||||
return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size)
|
||||
}
|
||||
|
||||
var keySize int
|
||||
|
||||
algorithm := headers.getAlgorithm()
|
||||
switch algorithm {
|
||||
case ECDH_ES:
|
||||
// ECDH-ES uses direct key agreement, no key unwrapping necessary.
|
||||
return deriveKey(string(headers.getEncryption()), generator.keySize()), nil
|
||||
case ECDH_ES_A128KW:
|
||||
keySize = 16
|
||||
case ECDH_ES_A192KW:
|
||||
keySize = 24
|
||||
case ECDH_ES_A256KW:
|
||||
keySize = 32
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
key := deriveKey(string(algorithm), keySize)
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return josecipher.KeyUnwrap(block, recipient.encryptedKey)
|
||||
}
|
||||
|
||||
func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
if alg != EdDSA {
|
||||
return Signature{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0))
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
return Signature{
|
||||
Signature: sig,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
if alg != EdDSA {
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
ok := ed25519.Verify(ctx.publicKey, payload, signature)
|
||||
if !ok {
|
||||
return errors.New("go-jose/go-jose: ed25519 signature failed to verify")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sign the given payload
|
||||
func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
var expectedBitSize int
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case ES256:
|
||||
expectedBitSize = 256
|
||||
hash = crypto.SHA256
|
||||
case ES384:
|
||||
expectedBitSize = 384
|
||||
hash = crypto.SHA384
|
||||
case ES512:
|
||||
expectedBitSize = 521
|
||||
hash = crypto.SHA512
|
||||
}
|
||||
|
||||
curveBits := ctx.privateKey.Curve.Params().BitSize
|
||||
if expectedBitSize != curveBits {
|
||||
return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits)
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed)
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
keyBytes := curveBits / 8
|
||||
if curveBits%8 > 0 {
|
||||
keyBytes++
|
||||
}
|
||||
|
||||
// We serialize the outputs (r and s) into big-endian byte arrays and pad
|
||||
// them with zeros on the left to make sure the sizes work out. Both arrays
|
||||
// must be keyBytes long, and the output must be 2*keyBytes long.
|
||||
rBytes := r.Bytes()
|
||||
rBytesPadded := make([]byte, keyBytes)
|
||||
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
|
||||
|
||||
sBytes := s.Bytes()
|
||||
sBytesPadded := make([]byte, keyBytes)
|
||||
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
|
||||
|
||||
out := append(rBytesPadded, sBytesPadded...)
|
||||
|
||||
return Signature{
|
||||
Signature: out,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify the given payload
|
||||
func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
var keySize int
|
||||
var hash crypto.Hash
|
||||
|
||||
switch alg {
|
||||
case ES256:
|
||||
keySize = 32
|
||||
hash = crypto.SHA256
|
||||
case ES384:
|
||||
keySize = 48
|
||||
hash = crypto.SHA384
|
||||
case ES512:
|
||||
keySize = 66
|
||||
hash = crypto.SHA512
|
||||
default:
|
||||
return ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
if len(signature) != 2*keySize {
|
||||
return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize)
|
||||
}
|
||||
|
||||
hasher := hash.New()
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hasher.Write(payload)
|
||||
hashed := hasher.Sum(nil)
|
||||
|
||||
r := big.NewInt(0).SetBytes(signature[:keySize])
|
||||
s := big.NewInt(0).SetBytes(signature[keySize:])
|
||||
|
||||
match := ecdsa.Verify(ctx.publicKey, hashed, r, s)
|
||||
if !match {
|
||||
return errors.New("go-jose/go-jose: ecdsa signature failed to verify")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
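
The left-padding of r and s above is what gives JWS ECDSA signatures their fixed-width raw layout (2*keyBytes, e.g. 64 bytes for ES256) rather than an ASN.1/DER encoding. A minimal stand-alone sketch of the same serialization, not part of this diff (serializeECDSASignature is a hypothetical helper), using big.Int.FillBytes:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
)

// serializeECDSASignature pads r and s to the curve's byte size and
// concatenates them, mirroring the layout produced by signPayload above.
func serializeECDSASignature(r, s *big.Int, curveBits int) []byte {
	keyBytes := (curveBits + 7) / 8
	out := make([]byte, 2*keyBytes)
	r.FillBytes(out[:keyBytes])
	s.FillBytes(out[keyBytes:])
	return out
}

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	digest := sha256.Sum256([]byte("payload"))
	r, s, _ := ecdsa.Sign(rand.Reader, key, digest[:])

	sig := serializeECDSASignature(r, s, key.Curve.Params().BitSize)
	fmt.Println(len(sig)) // 64: two 32-byte big-endian halves for P-256
}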
|
|
@ -0,0 +1,196 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/cipher"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/subtle"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"hash"
|
||||
)
|
||||
|
||||
const (
|
||||
nonceBytes = 16
|
||||
)
|
||||
|
||||
// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC.
|
||||
func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) {
|
||||
keySize := len(key) / 2
|
||||
integrityKey := key[:keySize]
|
||||
encryptionKey := key[keySize:]
|
||||
|
||||
blockCipher, err := newBlockCipher(encryptionKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var hash func() hash.Hash
|
||||
switch keySize {
|
||||
case 16:
|
||||
hash = sha256.New
|
||||
case 24:
|
||||
hash = sha512.New384
|
||||
case 32:
|
||||
hash = sha512.New
|
||||
}
|
||||
|
||||
return &cbcAEAD{
|
||||
hash: hash,
|
||||
blockCipher: blockCipher,
|
||||
authtagBytes: keySize,
|
||||
integrityKey: integrityKey,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// An AEAD based on CBC+HMAC
|
||||
type cbcAEAD struct {
|
||||
hash func() hash.Hash
|
||||
authtagBytes int
|
||||
integrityKey []byte
|
||||
blockCipher cipher.Block
|
||||
}
|
||||
|
||||
func (ctx *cbcAEAD) NonceSize() int {
|
||||
return nonceBytes
|
||||
}
|
||||
|
||||
func (ctx *cbcAEAD) Overhead() int {
|
||||
// Maximum overhead is block size (for padding) plus auth tag length, where
|
||||
// the length of the auth tag is equivalent to the key size.
|
||||
return ctx.blockCipher.BlockSize() + ctx.authtagBytes
|
||||
}
|
||||
|
||||
// Seal encrypts and authenticates the plaintext.
|
||||
func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte {
|
||||
// Output buffer -- must take care not to mangle plaintext input.
|
||||
ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)]
|
||||
copy(ciphertext, plaintext)
|
||||
ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize())
|
||||
|
||||
cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce)
|
||||
|
||||
cbc.CryptBlocks(ciphertext, ciphertext)
|
||||
authtag := ctx.computeAuthTag(data, nonce, ciphertext)
|
||||
|
||||
ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag)))
|
||||
copy(out, ciphertext)
|
||||
copy(out[len(ciphertext):], authtag)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// Open decrypts and authenticates the ciphertext.
|
||||
func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
|
||||
if len(ciphertext) < ctx.authtagBytes {
|
||||
return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)")
|
||||
}
|
||||
|
||||
offset := len(ciphertext) - ctx.authtagBytes
|
||||
expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset])
|
||||
match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:])
|
||||
if match != 1 {
|
||||
return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)")
|
||||
}
|
||||
|
||||
cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce)
|
||||
|
||||
// Make copy of ciphertext buffer, don't want to modify in place
|
||||
buffer := append([]byte{}, ciphertext[:offset]...)
|
||||
|
||||
if len(buffer)%ctx.blockCipher.BlockSize() > 0 {
|
||||
return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)")
|
||||
}
|
||||
|
||||
cbc.CryptBlocks(buffer, buffer)
|
||||
|
||||
// Remove padding
|
||||
plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext)))
|
||||
copy(out, plaintext)
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Compute an authentication tag
|
||||
func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte {
|
||||
buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8)
|
||||
n := 0
|
||||
n += copy(buffer, aad)
|
||||
n += copy(buffer[n:], nonce)
|
||||
n += copy(buffer[n:], ciphertext)
|
||||
binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8)
|
||||
|
||||
// According to documentation, Write() on hash.Hash never fails.
|
||||
hmac := hmac.New(ctx.hash, ctx.integrityKey)
|
||||
_, _ = hmac.Write(buffer)
|
||||
|
||||
return hmac.Sum(nil)[:ctx.authtagBytes]
|
||||
}
|
||||
|
||||
// resize ensures that the given slice has a capacity of at least n bytes.
|
||||
// If the capacity of the slice is less than n, a new slice is allocated
|
||||
// and the existing data will be copied.
|
||||
func resize(in []byte, n uint64) (head, tail []byte) {
|
||||
if uint64(cap(in)) >= n {
|
||||
head = in[:n]
|
||||
} else {
|
||||
head = make([]byte, n)
|
||||
copy(head, in)
|
||||
}
|
||||
|
||||
tail = head[len(in):]
|
||||
return
|
||||
}
|
||||
|
||||
// Apply padding
|
||||
func padBuffer(buffer []byte, blockSize int) []byte {
|
||||
missing := blockSize - (len(buffer) % blockSize)
|
||||
ret, out := resize(buffer, uint64(len(buffer))+uint64(missing))
|
||||
padding := bytes.Repeat([]byte{byte(missing)}, missing)
|
||||
copy(out, padding)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Remove padding
|
||||
func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) {
|
||||
if len(buffer)%blockSize != 0 {
|
||||
return nil, errors.New("go-jose/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
last := buffer[len(buffer)-1]
|
||||
count := int(last)
|
||||
|
||||
if count == 0 || count > blockSize || count > len(buffer) {
|
||||
return nil, errors.New("go-jose/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
padding := bytes.Repeat([]byte{last}, count)
|
||||
if !bytes.HasSuffix(buffer, padding) {
|
||||
return nil, errors.New("go-jose/go-jose: invalid padding")
|
||||
}
|
||||
|
||||
return buffer[:len(buffer)-count], nil
|
||||
}
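
NewCBCHMAC splits its key in half: the first half keys the HMAC (SHA-256/384/512 chosen by key size) and the second half keys AES-CBC, with a fixed 16-byte nonce. A minimal usage sketch, not part of this diff; it assumes the vendored import path github.com/go-jose/go-jose/v3/cipher for this josecipher package:

package main

import (
	"crypto/aes"
	"crypto/rand"
	"fmt"

	josecipher "github.com/go-jose/go-jose/v3/cipher"
)

func main() {
	// 32-byte key => A128CBC-HS256: a 16-byte HMAC-SHA-256 integrity key
	// followed by a 16-byte AES-128-CBC encryption key.
	key := make([]byte, 32)
	nonce := make([]byte, 16) // nonceBytes above
	_, _ = rand.Read(key)
	_, _ = rand.Read(nonce)

	aead, err := josecipher.NewCBCHMAC(key, aes.NewCipher)
	if err != nil {
		panic(err)
	}

	ciphertext := aead.Seal(nil, nonce, []byte("hello"), []byte("aad"))
	plaintext, err := aead.Open(nil, nonce, ciphertext, []byte("aad"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext)) // hello
}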
|
|
@ -0,0 +1,75 @@
/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package josecipher

import (
	"crypto"
	"encoding/binary"
	"hash"
	"io"
)

type concatKDF struct {
	z, info []byte
	i       uint32
	cache   []byte
	hasher  hash.Hash
}

// NewConcatKDF builds a KDF reader based on the given inputs.
func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
	buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
	n := 0
	n += copy(buffer, algID)
	n += copy(buffer[n:], ptyUInfo)
	n += copy(buffer[n:], ptyVInfo)
	n += copy(buffer[n:], supPubInfo)
	copy(buffer[n:], supPrivInfo)

	hasher := hash.New()

	return &concatKDF{
		z:      z,
		info:   buffer,
		hasher: hasher,
		cache:  []byte{},
		i:      1,
	}
}

func (ctx *concatKDF) Read(out []byte) (int, error) {
	copied := copy(out, ctx.cache)
	ctx.cache = ctx.cache[copied:]

	for copied < len(out) {
		ctx.hasher.Reset()

		// Write on a hash.Hash never fails
		_ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
		_, _ = ctx.hasher.Write(ctx.z)
		_, _ = ctx.hasher.Write(ctx.info)

		hash := ctx.hasher.Sum(nil)
		chunkCopied := copy(out[copied:], hash)
		copied += chunkCopied
		ctx.cache = hash[chunkCopied:]

		ctx.i++
	}

	return copied, nil
}
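
Read never returns an error; it keeps hashing counter || z || otherInfo until the requested number of bytes is produced, caching any leftover hash output for the next call. A short sketch, not part of this diff, with illustrative inputs (in the real JWE path, DeriveECDHES below length-prefixes the party info for you); it assumes the vendored import path github.com/go-jose/go-jose/v3/cipher:

package main

import (
	"crypto"
	_ "crypto/sha256" // registers crypto.SHA256
	"fmt"
	"io"

	josecipher "github.com/go-jose/go-jose/v3/cipher"
)

func main() {
	z := []byte("shared-secret-from-ecdh")       // illustrative value
	suppPubInfo := []byte{0x00, 0x00, 0x00, 0x80} // 128 bits, big-endian

	reader := josecipher.NewConcatKDF(crypto.SHA256, z,
		[]byte("A128GCM"), nil, nil, suppPubInfo, nil)

	key := make([]byte, 16)
	_, _ = io.ReadFull(reader, key)
	fmt.Printf("%x\n", key)
}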
|
|
@ -0,0 +1,86 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA.
|
||||
// It is an error to call this function with a private/public key that are not on the same
|
||||
// curve. Callers must ensure that the keys are valid before calling this function. Output
|
||||
// size may be at most 1<<16 bytes (64 KiB).
|
||||
func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte {
|
||||
if size > 1<<16 {
|
||||
panic("ECDH-ES output size too large, must be less than or equal to 1<<16")
|
||||
}
|
||||
|
||||
// algId, partyUInfo, partyVInfo inputs must be prefixed with the length
|
||||
algID := lengthPrefixed([]byte(alg))
|
||||
ptyUInfo := lengthPrefixed(apuData)
|
||||
ptyVInfo := lengthPrefixed(apvData)
|
||||
|
||||
// suppPubInfo is the encoded length of the output size in bits
|
||||
supPubInfo := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8)
|
||||
|
||||
if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) {
|
||||
panic("public key not on same curve as private key")
|
||||
}
|
||||
|
||||
z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes())
|
||||
zBytes := z.Bytes()
|
||||
|
||||
// Note that calling z.Bytes() on a big.Int may strip leading zero bytes from
|
||||
// the returned byte array. This can lead to a problem where zBytes will be
|
||||
// shorter than expected which breaks the key derivation. Therefore we must pad
|
||||
// to the full length of the expected coordinate here before calling the KDF.
|
||||
octSize := dSize(priv.Curve)
|
||||
if len(zBytes) != octSize {
|
||||
zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...)
|
||||
}
|
||||
|
||||
reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{})
|
||||
key := make([]byte, size)
|
||||
|
||||
// Read on the KDF will never fail
|
||||
_, _ = reader.Read(key)
|
||||
|
||||
return key
|
||||
}
|
||||
|
||||
// dSize returns the size in octets for a coordinate on a elliptic curve.
|
||||
func dSize(curve elliptic.Curve) int {
|
||||
order := curve.Params().P
|
||||
bitLen := order.BitLen()
|
||||
size := bitLen / 8
|
||||
if bitLen%8 != 0 {
|
||||
size++
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func lengthPrefixed(data []byte) []byte {
|
||||
out := make([]byte, len(data)+4)
|
||||
binary.BigEndian.PutUint32(out, uint32(len(data)))
|
||||
copy(out[4:], data)
|
||||
return out
|
||||
}
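
Because DeriveECDHES runs plain ECDH followed by the ConcatKDF above, both parties derive the same content-encryption key from their own private key and the peer's public key. A minimal sketch, not part of this diff, again assuming the vendored import path github.com/go-jose/go-jose/v3/cipher:

package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	josecipher "github.com/go-jose/go-jose/v3/cipher"
)

func main() {
	alice, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	bob, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	// Both sides derive the same 16-byte key for ECDH-ES with A128GCM.
	k1 := josecipher.DeriveECDHES("A128GCM", nil, nil, alice, &bob.PublicKey, 16)
	k2 := josecipher.DeriveECDHES("A128GCM", nil, nil, bob, &alice.PublicKey, 16)

	fmt.Println(bytes.Equal(k1, k2)) // true
}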
|
|
@ -0,0 +1,109 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package josecipher
|
||||
|
||||
import (
|
||||
"crypto/cipher"
|
||||
"crypto/subtle"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6}
|
||||
|
||||
// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher.
|
||||
func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) {
|
||||
if len(cek)%8 != 0 {
|
||||
return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
|
||||
}
|
||||
|
||||
n := len(cek) / 8
|
||||
r := make([][]byte, n)
|
||||
|
||||
for i := range r {
|
||||
r[i] = make([]byte, 8)
|
||||
copy(r[i], cek[i*8:])
|
||||
}
|
||||
|
||||
buffer := make([]byte, 16)
|
||||
tBytes := make([]byte, 8)
|
||||
copy(buffer, defaultIV)
|
||||
|
||||
for t := 0; t < 6*n; t++ {
|
||||
copy(buffer[8:], r[t%n])
|
||||
|
||||
block.Encrypt(buffer, buffer)
|
||||
|
||||
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
|
||||
|
||||
for i := 0; i < 8; i++ {
|
||||
buffer[i] ^= tBytes[i]
|
||||
}
|
||||
copy(r[t%n], buffer[8:])
|
||||
}
|
||||
|
||||
out := make([]byte, (n+1)*8)
|
||||
copy(out, buffer[:8])
|
||||
for i := range r {
|
||||
copy(out[(i+1)*8:], r[i])
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher.
|
||||
func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) {
|
||||
if len(ciphertext)%8 != 0 {
|
||||
return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks")
|
||||
}
|
||||
|
||||
n := (len(ciphertext) / 8) - 1
|
||||
r := make([][]byte, n)
|
||||
|
||||
for i := range r {
|
||||
r[i] = make([]byte, 8)
|
||||
copy(r[i], ciphertext[(i+1)*8:])
|
||||
}
|
||||
|
||||
buffer := make([]byte, 16)
|
||||
tBytes := make([]byte, 8)
|
||||
copy(buffer[:8], ciphertext[:8])
|
||||
|
||||
for t := 6*n - 1; t >= 0; t-- {
|
||||
binary.BigEndian.PutUint64(tBytes, uint64(t+1))
|
||||
|
||||
for i := 0; i < 8; i++ {
|
||||
buffer[i] ^= tBytes[i]
|
||||
}
|
||||
copy(buffer[8:], r[t%n])
|
||||
|
||||
block.Decrypt(buffer, buffer)
|
||||
|
||||
copy(r[t%n], buffer[8:])
|
||||
}
|
||||
|
||||
if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 {
|
||||
return nil, errors.New("go-jose/go-jose: failed to unwrap key")
|
||||
}
|
||||
|
||||
out := make([]byte, n*8)
|
||||
for i := range r {
|
||||
copy(out[i*8:], r[i])
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
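
KeyWrap and KeyUnwrap implement NIST/RFC 3394-style AES key wrapping: the wrapped output is always 8 bytes longer than the input, and unwrapping fails unless the recovered integrity value matches the default IV. A roundtrip sketch, not part of this diff, assuming the vendored import path github.com/go-jose/go-jose/v3/cipher:

package main

import (
	"bytes"
	"crypto/aes"
	"crypto/rand"
	"fmt"

	josecipher "github.com/go-jose/go-jose/v3/cipher"
)

func main() {
	kek := make([]byte, 16) // AES-128 key-encryption key
	cek := make([]byte, 32) // content-encryption key; must be a multiple of 8 bytes
	_, _ = rand.Read(kek)
	_, _ = rand.Read(cek)

	block, err := aes.NewCipher(kek)
	if err != nil {
		panic(err)
	}

	wrapped, err := josecipher.KeyWrap(block, cek)
	if err != nil {
		panic(err)
	}
	unwrapped, err := josecipher.KeyUnwrap(block, wrapped)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(wrapped), bytes.Equal(cek, unwrapped)) // 40 true
}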
|
|
@ -0,0 +1,544 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// Encrypter represents an encrypter which produces an encrypted JWE object.
|
||||
type Encrypter interface {
|
||||
Encrypt(plaintext []byte) (*JSONWebEncryption, error)
|
||||
EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error)
|
||||
Options() EncrypterOptions
|
||||
}
|
||||
|
||||
// A generic content cipher
|
||||
type contentCipher interface {
|
||||
keySize() int
|
||||
encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error)
|
||||
decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error)
|
||||
}
|
||||
|
||||
// A key generator (for generating/getting a CEK)
|
||||
type keyGenerator interface {
|
||||
keySize() int
|
||||
genKey() ([]byte, rawHeader, error)
|
||||
}
|
||||
|
||||
// A generic key encrypter
|
||||
type keyEncrypter interface {
|
||||
encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key
|
||||
}
|
||||
|
||||
// A generic key decrypter
|
||||
type keyDecrypter interface {
|
||||
decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key
|
||||
}
|
||||
|
||||
// A generic encrypter based on the given key encrypter and content cipher.
|
||||
type genericEncrypter struct {
|
||||
contentAlg ContentEncryption
|
||||
compressionAlg CompressionAlgorithm
|
||||
cipher contentCipher
|
||||
recipients []recipientKeyInfo
|
||||
keyGenerator keyGenerator
|
||||
extraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
type recipientKeyInfo struct {
|
||||
keyID string
|
||||
keyAlg KeyAlgorithm
|
||||
keyEncrypter keyEncrypter
|
||||
}
|
||||
|
||||
// EncrypterOptions represents options that can be set on new encrypters.
|
||||
type EncrypterOptions struct {
|
||||
Compression CompressionAlgorithm
|
||||
|
||||
// Optional map of additional keys to be inserted into the protected header
|
||||
// of a JWS object. Some specifications which make use of JWS like to insert
|
||||
// additional values here. All values must be JSON-serializable.
|
||||
ExtraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
|
||||
// if necessary. It returns itself and so can be used in a fluent style.
|
||||
func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
|
||||
if eo.ExtraHeaders == nil {
|
||||
eo.ExtraHeaders = map[HeaderKey]interface{}{}
|
||||
}
|
||||
eo.ExtraHeaders[k] = v
|
||||
return eo
|
||||
}
|
||||
|
||||
// WithContentType adds a content type ("cty") header and returns the updated
|
||||
// EncrypterOptions.
|
||||
func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions {
|
||||
return eo.WithHeader(HeaderContentType, contentType)
|
||||
}
|
||||
|
||||
// WithType adds a type ("typ") header and returns the updated EncrypterOptions.
|
||||
func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
|
||||
return eo.WithHeader(HeaderType, typ)
|
||||
}
|
||||
|
||||
// Recipient represents an algorithm/key to encrypt messages to.
|
||||
//
|
||||
// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used
|
||||
// on the password-based encryption algorithms PBES2-HS256+A128KW,
|
||||
// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe
|
||||
// default of 100000 will be used for the count and a 128-bit random salt will
|
||||
// be generated.
|
||||
type Recipient struct {
|
||||
Algorithm KeyAlgorithm
|
||||
Key interface{}
|
||||
KeyID string
|
||||
PBES2Count int
|
||||
PBES2Salt []byte
|
||||
}
|
||||
|
||||
// NewEncrypter creates an appropriate encrypter based on the key type
|
||||
func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) {
|
||||
encrypter := &genericEncrypter{
|
||||
contentAlg: enc,
|
||||
recipients: []recipientKeyInfo{},
|
||||
cipher: getContentCipher(enc),
|
||||
}
|
||||
if opts != nil {
|
||||
encrypter.compressionAlg = opts.Compression
|
||||
encrypter.extraHeaders = opts.ExtraHeaders
|
||||
}
|
||||
|
||||
if encrypter.cipher == nil {
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
var keyID string
|
||||
var rawKey interface{}
|
||||
switch encryptionKey := rcpt.Key.(type) {
|
||||
case JSONWebKey:
|
||||
keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
|
||||
case *JSONWebKey:
|
||||
keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key
|
||||
case OpaqueKeyEncrypter:
|
||||
keyID, rawKey = encryptionKey.KeyID(), encryptionKey
|
||||
default:
|
||||
rawKey = encryptionKey
|
||||
}
|
||||
|
||||
switch rcpt.Algorithm {
|
||||
case DIRECT:
|
||||
// Direct encryption mode must be treated differently
|
||||
if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
if encrypter.cipher.keySize() != len(rawKey.([]byte)) {
|
||||
return nil, ErrInvalidKeySize
|
||||
}
|
||||
encrypter.keyGenerator = staticKeyGenerator{
|
||||
key: rawKey.([]byte),
|
||||
}
|
||||
recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte))
|
||||
recipientInfo.keyID = keyID
|
||||
if rcpt.KeyID != "" {
|
||||
recipientInfo.keyID = rcpt.KeyID
|
||||
}
|
||||
encrypter.recipients = []recipientKeyInfo{recipientInfo}
|
||||
return encrypter, nil
|
||||
case ECDH_ES:
|
||||
// ECDH-ES (w/o key wrapping) is similar to DIRECT mode
|
||||
typeOf := reflect.TypeOf(rawKey)
|
||||
if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
encrypter.keyGenerator = ecKeyGenerator{
|
||||
size: encrypter.cipher.keySize(),
|
||||
algID: string(enc),
|
||||
publicKey: rawKey.(*ecdsa.PublicKey),
|
||||
}
|
||||
recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey))
|
||||
recipientInfo.keyID = keyID
|
||||
if rcpt.KeyID != "" {
|
||||
recipientInfo.keyID = rcpt.KeyID
|
||||
}
|
||||
encrypter.recipients = []recipientKeyInfo{recipientInfo}
|
||||
return encrypter, nil
|
||||
default:
|
||||
// Can just add a standard recipient
|
||||
encrypter.keyGenerator = randomKeyGenerator{
|
||||
size: encrypter.cipher.keySize(),
|
||||
}
|
||||
err := encrypter.addRecipient(rcpt)
|
||||
return encrypter, err
|
||||
}
|
||||
}
|
||||
|
||||
// NewMultiEncrypter creates a multi-encrypter based on the given parameters
|
||||
func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) {
|
||||
cipher := getContentCipher(enc)
|
||||
|
||||
if cipher == nil {
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
if len(rcpts) == 0 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty")
|
||||
}
|
||||
|
||||
encrypter := &genericEncrypter{
|
||||
contentAlg: enc,
|
||||
recipients: []recipientKeyInfo{},
|
||||
cipher: cipher,
|
||||
keyGenerator: randomKeyGenerator{
|
||||
size: cipher.keySize(),
|
||||
},
|
||||
}
|
||||
|
||||
if opts != nil {
|
||||
encrypter.compressionAlg = opts.Compression
|
||||
encrypter.extraHeaders = opts.ExtraHeaders
|
||||
}
|
||||
|
||||
for _, recipient := range rcpts {
|
||||
err := encrypter.addRecipient(recipient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return encrypter, nil
|
||||
}
|
||||
|
||||
func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) {
|
||||
var recipientInfo recipientKeyInfo
|
||||
|
||||
switch recipient.Algorithm {
|
||||
case DIRECT, ECDH_ES:
|
||||
return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm)
|
||||
}
|
||||
|
||||
recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key)
|
||||
if recipient.KeyID != "" {
|
||||
recipientInfo.keyID = recipient.KeyID
|
||||
}
|
||||
|
||||
switch recipient.Algorithm {
|
||||
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
|
||||
if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok {
|
||||
sr.p2c = recipient.PBES2Count
|
||||
sr.p2s = recipient.PBES2Salt
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
ctx.recipients = append(ctx.recipients, recipientInfo)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) {
|
||||
switch encryptionKey := encryptionKey.(type) {
|
||||
case *rsa.PublicKey:
|
||||
return newRSARecipient(alg, encryptionKey)
|
||||
case *ecdsa.PublicKey:
|
||||
return newECDHRecipient(alg, encryptionKey)
|
||||
case []byte:
|
||||
return newSymmetricRecipient(alg, encryptionKey)
|
||||
case string:
|
||||
return newSymmetricRecipient(alg, []byte(encryptionKey))
|
||||
case *JSONWebKey:
|
||||
recipient, err := makeJWERecipient(alg, encryptionKey.Key)
|
||||
recipient.keyID = encryptionKey.KeyID
|
||||
return recipient, err
|
||||
}
|
||||
if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok {
|
||||
return newOpaqueKeyEncrypter(alg, encrypter)
|
||||
}
|
||||
return recipientKeyInfo{}, ErrUnsupportedKeyType
|
||||
}
|
||||
|
||||
// newDecrypter creates an appropriate decrypter based on the key type
|
||||
func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
|
||||
switch decryptionKey := decryptionKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
return &rsaDecrypterSigner{
|
||||
privateKey: decryptionKey,
|
||||
}, nil
|
||||
case *ecdsa.PrivateKey:
|
||||
return &ecDecrypterSigner{
|
||||
privateKey: decryptionKey,
|
||||
}, nil
|
||||
case []byte:
|
||||
return &symmetricKeyCipher{
|
||||
key: decryptionKey,
|
||||
}, nil
|
||||
case string:
|
||||
return &symmetricKeyCipher{
|
||||
key: []byte(decryptionKey),
|
||||
}, nil
|
||||
case JSONWebKey:
|
||||
return newDecrypter(decryptionKey.Key)
|
||||
case *JSONWebKey:
|
||||
return newDecrypter(decryptionKey.Key)
|
||||
}
|
||||
if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok {
|
||||
return &opaqueKeyDecrypter{decrypter: okd}, nil
|
||||
}
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
|
||||
// Implementation of encrypt method producing a JWE object.
|
||||
func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) {
|
||||
return ctx.EncryptWithAuthData(plaintext, nil)
|
||||
}
|
||||
|
||||
// Implementation of encrypt method producing a JWE object.
|
||||
func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) {
|
||||
obj := &JSONWebEncryption{}
|
||||
obj.aad = aad
|
||||
|
||||
obj.protected = &rawHeader{}
|
||||
err := obj.protected.set(headerEncryption, ctx.contentAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj.recipients = make([]recipientInfo, len(ctx.recipients))
|
||||
|
||||
if len(ctx.recipients) == 0 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to")
|
||||
}
|
||||
|
||||
cek, headers, err := ctx.keyGenerator.genKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj.protected.merge(&headers)
|
||||
|
||||
for i, info := range ctx.recipients {
|
||||
recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = recipient.header.set(headerAlgorithm, info.keyAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if info.keyID != "" {
|
||||
err = recipient.header.set(headerKeyID, info.keyID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
obj.recipients[i] = recipient
|
||||
}
|
||||
|
||||
if len(ctx.recipients) == 1 {
|
||||
// Move per-recipient headers into main protected header if there's
|
||||
// only a single recipient.
|
||||
obj.protected.merge(obj.recipients[0].header)
|
||||
obj.recipients[0].header = nil
|
||||
}
|
||||
|
||||
if ctx.compressionAlg != NONE {
|
||||
plaintext, err = compress(ctx.compressionAlg, plaintext)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = obj.protected.set(headerCompression, ctx.compressionAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range ctx.extraHeaders {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
(*obj.protected)[k] = makeRawMessage(b)
|
||||
}
|
||||
|
||||
authData := obj.computeAuthData()
|
||||
parts, err := ctx.cipher.encrypt(cek, authData, plaintext)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj.iv = parts.iv
|
||||
obj.ciphertext = parts.ciphertext
|
||||
obj.tag = parts.tag
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func (ctx *genericEncrypter) Options() EncrypterOptions {
|
||||
return EncrypterOptions{
|
||||
Compression: ctx.compressionAlg,
|
||||
ExtraHeaders: ctx.extraHeaders,
|
||||
}
|
||||
}
|
||||
|
||||
// Decrypt and validate the object and return the plaintext. Note that this
|
||||
// function does not support multi-recipient, if you desire multi-recipient
|
||||
// decryption use DecryptMulti instead.
|
||||
func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
|
||||
headers := obj.mergedHeaders(nil)
|
||||
|
||||
if len(obj.recipients) > 1 {
|
||||
return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one")
|
||||
}
|
||||
|
||||
critical, err := headers.getCritical()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
|
||||
}
|
||||
|
||||
if len(critical) > 0 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
|
||||
}
|
||||
|
||||
key := tryJWKS(decryptionKey, obj.Header)
|
||||
decrypter, err := newDecrypter(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cipher := getContentCipher(headers.getEncryption())
|
||||
if cipher == nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption()))
|
||||
}
|
||||
|
||||
generator := randomKeyGenerator{
|
||||
size: cipher.keySize(),
|
||||
}
|
||||
|
||||
parts := &aeadParts{
|
||||
iv: obj.iv,
|
||||
ciphertext: obj.ciphertext,
|
||||
tag: obj.tag,
|
||||
}
|
||||
|
||||
authData := obj.computeAuthData()
|
||||
|
||||
var plaintext []byte
|
||||
recipient := obj.recipients[0]
|
||||
recipientHeaders := obj.mergedHeaders(&recipient)
|
||||
|
||||
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
|
||||
if err == nil {
|
||||
// Found a valid CEK -- let's try to decrypt.
|
||||
plaintext, err = cipher.decrypt(cek, authData, parts)
|
||||
}
|
||||
|
||||
if plaintext == nil {
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
// The "zip" header parameter may only be present in the protected header.
|
||||
if comp := obj.protected.getCompression(); comp != "" {
|
||||
plaintext, err = decompress(comp, plaintext)
|
||||
}
|
||||
|
||||
return plaintext, err
|
||||
}
|
||||
|
||||
// DecryptMulti decrypts and validates the object and returns the plaintexts,
|
||||
// with support for multiple recipients. It returns the index of the recipient
|
||||
// for which the decryption was successful, the merged headers for that recipient,
|
||||
// and the plaintext.
|
||||
func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
|
||||
globalHeaders := obj.mergedHeaders(nil)
|
||||
|
||||
critical, err := globalHeaders.getCritical()
|
||||
if err != nil {
|
||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
|
||||
}
|
||||
|
||||
if len(critical) > 0 {
|
||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
|
||||
}
|
||||
|
||||
key := tryJWKS(decryptionKey, obj.Header)
|
||||
decrypter, err := newDecrypter(key)
|
||||
if err != nil {
|
||||
return -1, Header{}, nil, err
|
||||
}
|
||||
|
||||
encryption := globalHeaders.getEncryption()
|
||||
cipher := getContentCipher(encryption)
|
||||
if cipher == nil {
|
||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption))
|
||||
}
|
||||
|
||||
generator := randomKeyGenerator{
|
||||
size: cipher.keySize(),
|
||||
}
|
||||
|
||||
parts := &aeadParts{
|
||||
iv: obj.iv,
|
||||
ciphertext: obj.ciphertext,
|
||||
tag: obj.tag,
|
||||
}
|
||||
|
||||
authData := obj.computeAuthData()
|
||||
|
||||
index := -1
|
||||
var plaintext []byte
|
||||
var headers rawHeader
|
||||
|
||||
for i, recipient := range obj.recipients {
|
||||
recipientHeaders := obj.mergedHeaders(&recipient)
|
||||
|
||||
cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator)
|
||||
if err == nil {
|
||||
// Found a valid CEK -- let's try to decrypt.
|
||||
plaintext, err = cipher.decrypt(cek, authData, parts)
|
||||
if err == nil {
|
||||
index = i
|
||||
headers = recipientHeaders
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if plaintext == nil {
|
||||
return -1, Header{}, nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
// The "zip" header parameter may only be present in the protected header.
|
||||
if comp := obj.protected.getCompression(); comp != "" {
|
||||
plaintext, _ = decompress(comp, plaintext)
|
||||
}
|
||||
|
||||
sanitized, err := headers.sanitized()
|
||||
if err != nil {
|
||||
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err)
|
||||
}
|
||||
|
||||
return index, sanitized, plaintext, err
|
||||
}
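
Putting the pieces of this file together, NewEncrypter, Encrypt and Decrypt give a complete single-recipient JWE roundtrip. A minimal usage sketch, not part of this diff; it assumes the vendored module path github.com/go-jose/go-jose/v3 and the RSA_OAEP and A128GCM constants defined elsewhere in the package:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"github.com/go-jose/go-jose/v3"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Encrypt the content-encryption key to the RSA public key,
	// and the payload with AES-128-GCM.
	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &priv.PublicKey},
		nil,
	)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("attack at dawn"))
	if err != nil {
		panic(err)
	}

	plaintext, err := obj.Decrypt(priv)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext)) // attack at dawn
}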
|
|
@ -0,0 +1,27 @@
/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*

Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. It implements encryption and signing based on
the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web
Token support available in a sub-package. The library supports both the compact
and JWS/JWE JSON Serialization formats, and has optional support for multiple
recipients.

*/
package jose
|
@ -0,0 +1,191 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"math/big"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// Helper function to serialize known-good objects.
|
||||
// Precondition: value is not a nil pointer.
|
||||
func mustSerializeJSON(value interface{}) []byte {
|
||||
out, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// We never want to serialize the top-level value "null," since it's not a
|
||||
// valid JOSE message. But if a caller passes in a nil pointer to this method,
|
||||
// MarshalJSON will happily serialize it as the top-level value "null". If
|
||||
// that value is then embedded in another operation, for instance by being
|
||||
// base64-encoded and fed as input to a signing algorithm
|
||||
// (https://github.com/go-jose/go-jose/issues/22), the result will be
|
||||
// incorrect. Because this method is intended for known-good objects, and a nil
|
||||
// pointer is not a known-good object, we are free to panic in this case.
|
||||
// Note: It's not possible to directly check whether the data pointed at by an
|
||||
// interface is a nil pointer, so we do this hacky workaround.
|
||||
// https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I
|
||||
if string(out) == "null" {
|
||||
panic("Tried to serialize a nil pointer.")
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Strip all newlines and whitespace
|
||||
func stripWhitespace(data string) string {
|
||||
buf := strings.Builder{}
|
||||
buf.Grow(len(data))
|
||||
for _, r := range data {
|
||||
if !unicode.IsSpace(r) {
|
||||
buf.WriteRune(r)
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Perform compression based on algorithm
|
||||
func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
|
||||
switch algorithm {
|
||||
case DEFLATE:
|
||||
return deflate(input)
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
}
|
||||
|
||||
// Perform decompression based on algorithm
|
||||
func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) {
|
||||
switch algorithm {
|
||||
case DEFLATE:
|
||||
return inflate(input)
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
}
|
||||
|
||||
// Compress with DEFLATE
|
||||
func deflate(input []byte) ([]byte, error) {
|
||||
output := new(bytes.Buffer)
|
||||
|
||||
// Writing to byte buffer, err is always nil
|
||||
writer, _ := flate.NewWriter(output, 1)
|
||||
_, _ = io.Copy(writer, bytes.NewBuffer(input))
|
||||
|
||||
err := writer.Close()
|
||||
return output.Bytes(), err
|
||||
}
|
||||
|
||||
// Decompress with DEFLATE
|
||||
func inflate(input []byte) ([]byte, error) {
|
||||
output := new(bytes.Buffer)
|
||||
reader := flate.NewReader(bytes.NewBuffer(input))
|
||||
|
||||
_, err := io.Copy(output, reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = reader.Close()
|
||||
return output.Bytes(), err
|
||||
}
|
||||
|
||||
// byteBuffer represents a slice of bytes that can be serialized to url-safe base64.
|
||||
type byteBuffer struct {
|
||||
data []byte
|
||||
}
|
||||
|
||||
func newBuffer(data []byte) *byteBuffer {
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
return &byteBuffer{
|
||||
data: data,
|
||||
}
|
||||
}
|
||||
|
||||
func newFixedSizeBuffer(data []byte, length int) *byteBuffer {
|
||||
if len(data) > length {
|
||||
panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)")
|
||||
}
|
||||
pad := make([]byte, length-len(data))
|
||||
return newBuffer(append(pad, data...))
|
||||
}
|
||||
|
||||
func newBufferFromInt(num uint64) *byteBuffer {
|
||||
data := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(data, num)
|
||||
return newBuffer(bytes.TrimLeft(data, "\x00"))
|
||||
}
|
||||
|
||||
func (b *byteBuffer) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(b.base64())
|
||||
}
|
||||
|
||||
func (b *byteBuffer) UnmarshalJSON(data []byte) error {
|
||||
var encoded string
|
||||
err := json.Unmarshal(data, &encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if encoded == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
decoded, err := base64URLDecode(encoded)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*b = *newBuffer(decoded)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *byteBuffer) base64() string {
|
||||
return base64.RawURLEncoding.EncodeToString(b.data)
|
||||
}
|
||||
|
||||
func (b *byteBuffer) bytes() []byte {
|
||||
// Handling nil here allows us to transparently handle nil slices when serializing.
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
return b.data
|
||||
}
|
||||
|
||||
func (b byteBuffer) bigInt() *big.Int {
|
||||
return new(big.Int).SetBytes(b.data)
|
||||
}
|
||||
|
||||
func (b byteBuffer) toInt() int {
|
||||
return int(b.bigInt().Int64())
|
||||
}
|
||||
|
||||
// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C
|
||||
func base64URLDecode(value string) ([]byte, error) {
|
||||
value = strings.TrimRight(value, "=")
|
||||
return base64.RawURLEncoding.DecodeString(value)
|
||||
}
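
base64URLDecode strips any trailing padding before decoding, so it accepts both padded and unpadded base64url input. The helper is unexported; this stand-alone mirror, not part of this diff, shows the same behavior with only the standard library:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeB64URL mirrors the unexported base64URLDecode above.
func decodeB64URL(value string) ([]byte, error) {
	value = strings.TrimRight(value, "=")
	return base64.RawURLEncoding.DecodeString(value)
}

func main() {
	a, _ := decodeB64URL("aGVsbG8")   // unpadded
	b, _ := decodeB64URL("aGVsbG8=")  // padded form of the same data
	fmt.Println(string(a), string(b)) // hello hello
}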
|
|
@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -0,0 +1,13 @@
# Safe JSON

This repository contains a fork of the `encoding/json` package from Go 1.6.

The following changes were made:

* Object deserialization uses case-sensitive member name matching instead of
  [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html).
  This is to avoid differences in the interpretation of JOSE messages between
  go-jose and libraries written in other languages.
* When deserializing a JSON object, we check for duplicate keys and reject the
  input whenever we detect a duplicate. Rather than trying to work with malformed
  data, we prefer to reject it right away.
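
A quick illustration of the duplicate-key rejection described above, not part of this diff; it assumes the vendored import path github.com/go-jose/go-jose/v3/json used elsewhere in this commit:

package main

import (
	"fmt"

	"github.com/go-jose/go-jose/v3/json"
)

func main() {
	var v map[string]interface{}

	// encoding/json would silently keep the last "alg" value;
	// the fork rejects the input instead.
	err := json.Unmarshal([]byte(`{"alg":"RS256","alg":"none"}`), &v)
	fmt.Println(err != nil) // true
}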
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -0,0 +1,141 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
import "bytes"
|
||||
|
||||
// Compact appends to dst the JSON-encoded src with
|
||||
// insignificant space characters elided.
|
||||
func Compact(dst *bytes.Buffer, src []byte) error {
|
||||
return compact(dst, src, false)
|
||||
}
|
||||
|
||||
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
|
||||
origLen := dst.Len()
|
||||
var scan scanner
|
||||
scan.reset()
|
||||
start := 0
|
||||
for i, c := range src {
|
||||
if escape && (c == '<' || c == '>' || c == '&') {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u00`)
|
||||
dst.WriteByte(hex[c>>4])
|
||||
dst.WriteByte(hex[c&0xF])
|
||||
start = i + 1
|
||||
}
|
||||
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
|
||||
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
dst.WriteString(`\u202`)
|
||||
dst.WriteByte(hex[src[i+2]&0xF])
|
||||
start = i + 3
|
||||
}
|
||||
v := scan.step(&scan, c)
|
||||
if v >= scanSkipSpace {
|
||||
if v == scanError {
|
||||
break
|
||||
}
|
||||
if start < i {
|
||||
dst.Write(src[start:i])
|
||||
}
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
dst.Truncate(origLen)
|
||||
return scan.err
|
||||
}
|
||||
if start < len(src) {
|
||||
dst.Write(src[start:])
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
|
||||
dst.WriteByte('\n')
|
||||
dst.WriteString(prefix)
|
||||
for i := 0; i < depth; i++ {
|
||||
dst.WriteString(indent)
|
||||
}
|
||||
}
|
||||
|
||||
// Indent appends to dst an indented form of the JSON-encoded src.
|
||||
// Each element in a JSON object or array begins on a new,
|
||||
// indented line beginning with prefix followed by one or more
|
||||
// copies of indent according to the indentation nesting.
|
||||
// The data appended to dst does not begin with the prefix nor
|
||||
// any indentation, to make it easier to embed inside other formatted JSON data.
|
||||
// Although leading space characters (space, tab, carriage return, newline)
|
||||
// at the beginning of src are dropped, trailing space characters
|
||||
// at the end of src are preserved and copied to dst.
|
||||
// For example, if src has no trailing spaces, neither will dst;
|
||||
// if src ends in a trailing newline, so will dst.
|
||||
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
|
||||
origLen := dst.Len()
|
||||
var scan scanner
|
||||
scan.reset()
|
||||
needIndent := false
|
||||
depth := 0
|
||||
for _, c := range src {
|
||||
scan.bytes++
|
||||
v := scan.step(&scan, c)
|
||||
if v == scanSkipSpace {
|
||||
continue
|
||||
}
|
||||
if v == scanError {
|
||||
break
|
||||
}
|
||||
if needIndent && v != scanEndObject && v != scanEndArray {
|
||||
needIndent = false
|
||||
depth++
|
||||
newline(dst, prefix, indent, depth)
|
||||
}
|
||||
|
||||
// Emit semantically uninteresting bytes
|
||||
// (in particular, punctuation in strings) unmodified.
|
||||
if v == scanContinue {
|
||||
dst.WriteByte(c)
|
||||
continue
|
||||
}
|
||||
|
||||
// Add spacing around real punctuation.
|
||||
switch c {
|
||||
case '{', '[':
|
||||
// delay indent so that empty object and array are formatted as {} and [].
|
||||
needIndent = true
|
||||
dst.WriteByte(c)
|
||||
|
||||
case ',':
|
||||
dst.WriteByte(c)
|
||||
newline(dst, prefix, indent, depth)
|
||||
|
||||
case ':':
|
||||
dst.WriteByte(c)
|
||||
dst.WriteByte(' ')
|
||||
|
||||
case '}', ']':
|
||||
if needIndent {
|
||||
// suppress indent in empty object/array
|
||||
needIndent = false
|
||||
} else {
|
||||
depth--
|
||||
newline(dst, prefix, indent, depth)
|
||||
}
|
||||
dst.WriteByte(c)
|
||||
|
||||
default:
|
||||
dst.WriteByte(c)
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
dst.Truncate(origLen)
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
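
Compact and Indent behave like their encoding/json counterparts, writing into a bytes.Buffer and truncating it back to its original length on scan errors. A short usage sketch, not part of this diff, assuming the vendored import path github.com/go-jose/go-jose/v3/json:

package main

import (
	"bytes"
	"fmt"

	"github.com/go-jose/go-jose/v3/json"
)

func main() {
	src := []byte(`{ "alg" : "ES256", "kid" : "key-1" }`)

	var compacted bytes.Buffer
	if err := json.Compact(&compacted, src); err != nil {
		panic(err)
	}
	fmt.Println(compacted.String()) // {"alg":"ES256","kid":"key-1"}

	var indented bytes.Buffer
	if err := json.Indent(&indented, compacted.Bytes(), "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(indented.String())
}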
|
|
@ -0,0 +1,623 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package json
|
||||
|
||||
// JSON value parser state machine.
|
||||
// Just about at the limit of what is reasonable to write by hand.
|
||||
// Some parts are a bit tedious, but overall it nicely factors out the
|
||||
// otherwise common code from the multiple scanning functions
|
||||
// in this package (Compact, Indent, checkValid, nextValue, etc).
|
||||
//
|
||||
// This file starts with two simple examples using the scanner
|
||||
// before diving into the scanner itself.
|
||||
|
||||
import "strconv"
|
||||
|
||||
// checkValid verifies that data is valid JSON-encoded data.
|
||||
// scan is passed in for use by checkValid to avoid an allocation.
|
||||
func checkValid(data []byte, scan *scanner) error {
|
||||
scan.reset()
|
||||
for _, c := range data {
|
||||
scan.bytes++
|
||||
if scan.step(scan, c) == scanError {
|
||||
return scan.err
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
return scan.err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextValue splits data after the next whole JSON value,
|
||||
// returning that value and the bytes that follow it as separate slices.
|
||||
// scan is passed in for use by nextValue to avoid an allocation.
|
||||
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
|
||||
scan.reset()
|
||||
for i, c := range data {
|
||||
v := scan.step(scan, c)
|
||||
if v >= scanEndObject {
|
||||
switch v {
|
||||
// probe the scanner with a space to determine whether we will
|
||||
// get scanEnd on the next character. Otherwise, if the next character
|
||||
// is not a space, scanEndTop allocates a needless error.
|
||||
case scanEndObject, scanEndArray:
|
||||
if scan.step(scan, ' ') == scanEnd {
|
||||
return data[:i+1], data[i+1:], nil
|
||||
}
|
||||
case scanError:
|
||||
return nil, nil, scan.err
|
||||
case scanEnd:
|
||||
return data[:i], data[i:], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if scan.eof() == scanError {
|
||||
return nil, nil, scan.err
|
||||
}
|
||||
return data, nil, nil
|
||||
}
|
||||
|
||||
// A SyntaxError is a description of a JSON syntax error.
|
||||
type SyntaxError struct {
|
||||
msg string // description of error
|
||||
Offset int64 // error occurred after reading Offset bytes
|
||||
}
|
||||
|
||||
func (e *SyntaxError) Error() string { return e.msg }
|
||||
|
||||
// A scanner is a JSON scanning state machine.
|
||||
// Callers call scan.reset() and then pass bytes in one at a time
|
||||
// by calling scan.step(&scan, c) for each byte.
|
||||
// The return value, referred to as an opcode, tells the
|
||||
// caller about significant parsing events like beginning
|
||||
// and ending literals, objects, and arrays, so that the
|
||||
// caller can follow along if it wishes.
|
||||
// The return value scanEnd indicates that a single top-level
|
||||
// JSON value has been completed, *before* the byte that
|
||||
// just got passed in. (The indication must be delayed in order
|
||||
// to recognize the end of numbers: is 123 a whole value or
|
||||
// the beginning of 12345e+6?).
|
||||
type scanner struct {
|
||||
// The step is a func to be called to execute the next transition.
|
||||
// Also tried using an integer constant and a single func
|
||||
// with a switch, but using the func directly was 10% faster
|
||||
// on a 64-bit Mac Mini, and it's nicer to read.
|
||||
step func(*scanner, byte) int
|
||||
|
||||
// Reached end of top-level value.
|
||||
endTop bool
|
||||
|
||||
// Stack of what we're in the middle of - array values, object keys, object values.
|
||||
parseState []int
|
||||
|
||||
// Error that happened, if any.
|
||||
err error
|
||||
|
||||
// 1-byte redo (see undo method)
|
||||
redo bool
|
||||
redoCode int
|
||||
redoState func(*scanner, byte) int
|
||||
|
||||
// total bytes consumed, updated by decoder.Decode
|
||||
bytes int64
|
||||
}
|
||||
|
||||
// These values are returned by the state transition functions
|
||||
// assigned to scanner.state and the method scanner.eof.
|
||||
// They give details about the current state of the scan that
|
||||
// callers might be interested to know about.
|
||||
// It is okay to ignore the return value of any particular
|
||||
// call to scanner.state: if one call returns scanError,
|
||||
// every subsequent call will return scanError too.
|
||||
const (
|
||||
// Continue.
|
||||
scanContinue = iota // uninteresting byte
|
||||
scanBeginLiteral // end implied by next result != scanContinue
|
||||
scanBeginObject // begin object
|
||||
scanObjectKey // just finished object key (string)
|
||||
scanObjectValue // just finished non-last object value
|
||||
scanEndObject // end object (implies scanObjectValue if possible)
|
||||
scanBeginArray // begin array
|
||||
scanArrayValue // just finished array value
|
||||
scanEndArray // end array (implies scanArrayValue if possible)
|
||||
scanSkipSpace // space byte; can skip; known to be last "continue" result
|
||||
|
||||
// Stop.
|
||||
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
|
||||
scanError // hit an error, scanner.err.
|
||||
)
|
||||
|
||||
// These values are stored in the parseState stack.
|
||||
// They give the current state of a composite value
|
||||
// being scanned. If the parser is inside a nested value
|
||||
// the parseState describes the nested state, outermost at entry 0.
|
||||
const (
|
||||
parseObjectKey = iota // parsing object key (before colon)
|
||||
parseObjectValue // parsing object value (after colon)
|
||||
parseArrayValue // parsing array value
|
||||
)
|
||||

// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
	s.step = stateBeginValue
	s.parseState = s.parseState[0:0]
	s.err = nil
	s.redo = false
	s.endTop = false
}

// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
	if s.err != nil {
		return scanError
	}
	if s.endTop {
		return scanEnd
	}
	s.step(s, ' ')
	if s.endTop {
		return scanEnd
	}
	if s.err == nil {
		s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
	}
	return scanError
}

// pushParseState pushes a new parse state p onto the parse stack.
func (s *scanner) pushParseState(p int) {
	s.parseState = append(s.parseState, p)
}

// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
	n := len(s.parseState) - 1
	s.parseState = s.parseState[0:n]
	s.redo = false
	if n == 0 {
		s.step = stateEndTop
		s.endTop = true
	} else {
		s.step = stateEndValue
	}
}

func isSpace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}

// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
	if c <= ' ' && isSpace(c) {
		return scanSkipSpace
	}
	if c == ']' {
		return stateEndValue(s, c)
	}
	return stateBeginValue(s, c)
}

// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
	if c <= ' ' && isSpace(c) {
		return scanSkipSpace
	}
	switch c {
	case '{':
		s.step = stateBeginStringOrEmpty
		s.pushParseState(parseObjectKey)
		return scanBeginObject
	case '[':
		s.step = stateBeginValueOrEmpty
		s.pushParseState(parseArrayValue)
		return scanBeginArray
	case '"':
		s.step = stateInString
		return scanBeginLiteral
	case '-':
		s.step = stateNeg
		return scanBeginLiteral
	case '0': // beginning of 0.123
		s.step = state0
		return scanBeginLiteral
	case 't': // beginning of true
		s.step = stateT
		return scanBeginLiteral
	case 'f': // beginning of false
		s.step = stateF
		return scanBeginLiteral
	case 'n': // beginning of null
		s.step = stateN
		return scanBeginLiteral
	}
	if '1' <= c && c <= '9' { // beginning of 1234.5
		s.step = state1
		return scanBeginLiteral
	}
	return s.error(c, "looking for beginning of value")
}
|
||||
|
||||
// stateBeginStringOrEmpty is the state after reading `{`.
|
||||
func stateBeginStringOrEmpty(s *scanner, c byte) int {
|
||||
if c <= ' ' && isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == '}' {
|
||||
n := len(s.parseState)
|
||||
s.parseState[n-1] = parseObjectValue
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
return stateBeginString(s, c)
|
||||
}
|
||||
|
||||
// stateBeginString is the state after reading `{"key": value,`.
|
||||
func stateBeginString(s *scanner, c byte) int {
|
||||
if c <= ' ' && isSpace(c) {
|
||||
return scanSkipSpace
|
||||
}
|
||||
if c == '"' {
|
||||
s.step = stateInString
|
||||
return scanBeginLiteral
|
||||
}
|
||||
return s.error(c, "looking for beginning of object key string")
|
||||
}
|
||||
|
||||
// stateEndValue is the state after completing a value,
|
||||
// such as after reading `{}` or `true` or `["x"`.
|
||||
func stateEndValue(s *scanner, c byte) int {
|
||||
n := len(s.parseState)
|
||||
if n == 0 {
|
||||
// Completed top-level before the current byte.
|
||||
s.step = stateEndTop
|
||||
s.endTop = true
|
||||
return stateEndTop(s, c)
|
||||
}
|
||||
if c <= ' ' && isSpace(c) {
|
||||
s.step = stateEndValue
|
||||
return scanSkipSpace
|
||||
}
|
||||
ps := s.parseState[n-1]
|
||||
switch ps {
|
||||
case parseObjectKey:
|
||||
if c == ':' {
|
||||
s.parseState[n-1] = parseObjectValue
|
||||
s.step = stateBeginValue
|
||||
return scanObjectKey
|
||||
}
|
||||
return s.error(c, "after object key")
|
||||
case parseObjectValue:
|
||||
if c == ',' {
|
||||
s.parseState[n-1] = parseObjectKey
|
||||
s.step = stateBeginString
|
||||
return scanObjectValue
|
||||
}
|
||||
if c == '}' {
|
||||
s.popParseState()
|
||||
return scanEndObject
|
||||
}
|
||||
return s.error(c, "after object key:value pair")
|
||||
case parseArrayValue:
|
||||
if c == ',' {
|
||||
s.step = stateBeginValue
|
||||
return scanArrayValue
|
||||
}
|
||||
if c == ']' {
|
||||
s.popParseState()
|
||||
return scanEndArray
|
||||
}
|
||||
return s.error(c, "after array element")
|
||||
}
|
||||
return s.error(c, "")
|
||||
}
|
||||
|
||||
// stateEndTop is the state after finishing the top-level value,
|
||||
// such as after reading `{}` or `[1,2,3]`.
|
||||
// Only space characters should be seen now.
|
||||
func stateEndTop(s *scanner, c byte) int {
|
||||
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
|
||||
// Complain about non-space byte on next call.
|
||||
s.error(c, "after top-level value")
|
||||
}
|
||||
return scanEnd
|
||||
}
|
||||
|
||||
// stateInString is the state after reading `"`.
|
||||
func stateInString(s *scanner, c byte) int {
|
||||
if c == '"' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
if c == '\\' {
|
||||
s.step = stateInStringEsc
|
||||
return scanContinue
|
||||
}
|
||||
if c < 0x20 {
|
||||
return s.error(c, "in string literal")
|
||||
}
|
||||
return scanContinue
|
||||
}
|
||||
|
||||
// stateInStringEsc is the state after reading `"\` during a quoted string.
|
||||
func stateInStringEsc(s *scanner, c byte) int {
|
||||
switch c {
|
||||
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
|
||||
s.step = stateInString
|
||||
return scanContinue
|
||||
case 'u':
|
||||
s.step = stateInStringEscU
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in string escape code")
|
||||
}
|
||||
|
||||
// stateInStringEscU is the state after reading `"\u` during a quoted string.
|
||||
func stateInStringEscU(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU1
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
|
||||
func stateInStringEscU1(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU12
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
|
||||
func stateInStringEscU12(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInStringEscU123
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
|
||||
func stateInStringEscU123(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
|
||||
s.step = stateInString
|
||||
return scanContinue
|
||||
}
|
||||
// numbers
|
||||
return s.error(c, "in \\u hexadecimal character escape")
|
||||
}
|
||||
|
||||
// stateNeg is the state after reading `-` during a number.
|
||||
func stateNeg(s *scanner, c byte) int {
|
||||
if c == '0' {
|
||||
s.step = state0
|
||||
return scanContinue
|
||||
}
|
||||
if '1' <= c && c <= '9' {
|
||||
s.step = state1
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in numeric literal")
|
||||
}
|
||||
|
||||
// state1 is the state after reading a non-zero integer during a number,
|
||||
// such as after reading `1` or `100` but not `0`.
|
||||
func state1(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = state1
|
||||
return scanContinue
|
||||
}
|
||||
return state0(s, c)
|
||||
}
|
||||
|
||||
// state0 is the state after reading `0` during a number.
|
||||
func state0(s *scanner, c byte) int {
|
||||
if c == '.' {
|
||||
s.step = stateDot
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'e' || c == 'E' {
|
||||
s.step = stateE
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateDot is the state after reading the integer and decimal point in a number,
|
||||
// such as after reading `1.`.
|
||||
func stateDot(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = stateDot0
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "after decimal point in numeric literal")
|
||||
}
|
||||
|
||||
// stateDot0 is the state after reading the integer, decimal point, and subsequent
|
||||
// digits of a number, such as after reading `3.14`.
|
||||
func stateDot0(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
return scanContinue
|
||||
}
|
||||
if c == 'e' || c == 'E' {
|
||||
s.step = stateE
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateE is the state after reading the mantissa and e in a number,
|
||||
// such as after reading `314e` or `0.314e`.
|
||||
func stateE(s *scanner, c byte) int {
|
||||
if c == '+' || c == '-' {
|
||||
s.step = stateESign
|
||||
return scanContinue
|
||||
}
|
||||
return stateESign(s, c)
|
||||
}
|
||||
|
||||
// stateESign is the state after reading the mantissa, e, and sign in a number,
|
||||
// such as after reading `314e-` or `0.314e+`.
|
||||
func stateESign(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
s.step = stateE0
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in exponent of numeric literal")
|
||||
}
|
||||
|
||||
// stateE0 is the state after reading the mantissa, e, optional sign,
|
||||
// and at least one digit of the exponent in a number,
|
||||
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
|
||||
func stateE0(s *scanner, c byte) int {
|
||||
if '0' <= c && c <= '9' {
|
||||
return scanContinue
|
||||
}
|
||||
return stateEndValue(s, c)
|
||||
}
|
||||
|
||||
// stateT is the state after reading `t`.
|
||||
func stateT(s *scanner, c byte) int {
|
||||
if c == 'r' {
|
||||
s.step = stateTr
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'r')")
|
||||
}
|
||||
|
||||
// stateTr is the state after reading `tr`.
|
||||
func stateTr(s *scanner, c byte) int {
|
||||
if c == 'u' {
|
||||
s.step = stateTru
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'u')")
|
||||
}
|
||||
|
||||
// stateTru is the state after reading `tru`.
|
||||
func stateTru(s *scanner, c byte) int {
|
||||
if c == 'e' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal true (expecting 'e')")
|
||||
}
|
||||
|
||||
// stateF is the state after reading `f`.
|
||||
func stateF(s *scanner, c byte) int {
|
||||
if c == 'a' {
|
||||
s.step = stateFa
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'a')")
|
||||
}
|
||||
|
||||
// stateFa is the state after reading `fa`.
|
||||
func stateFa(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateFal
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateFal is the state after reading `fal`.
|
||||
func stateFal(s *scanner, c byte) int {
|
||||
if c == 's' {
|
||||
s.step = stateFals
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 's')")
|
||||
}
|
||||
|
||||
// stateFals is the state after reading `fals`.
|
||||
func stateFals(s *scanner, c byte) int {
|
||||
if c == 'e' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal false (expecting 'e')")
|
||||
}
|
||||
|
||||
// stateN is the state after reading `n`.
|
||||
func stateN(s *scanner, c byte) int {
|
||||
if c == 'u' {
|
||||
s.step = stateNu
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'u')")
|
||||
}
|
||||
|
||||
// stateNu is the state after reading `nu`.
|
||||
func stateNu(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateNul
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateNul is the state after reading `nul`.
|
||||
func stateNul(s *scanner, c byte) int {
|
||||
if c == 'l' {
|
||||
s.step = stateEndValue
|
||||
return scanContinue
|
||||
}
|
||||
return s.error(c, "in literal null (expecting 'l')")
|
||||
}
|
||||
|
||||
// stateError is the state after reaching a syntax error,
|
||||
// such as after reading `[1}` or `5.1.2`.
|
||||
func stateError(s *scanner, c byte) int {
|
||||
return scanError
|
||||
}
|
||||
|
||||
// error records an error and switches to the error state.
|
||||
func (s *scanner) error(c byte, context string) int {
|
||||
s.step = stateError
|
||||
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
|
||||
return scanError
|
||||
}
|
||||
|
||||
// quoteChar formats c as a quoted character literal
|
||||
func quoteChar(c byte) string {
|
||||
// special cases - different from quoted strings
|
||||
if c == '\'' {
|
||||
return `'\''`
|
||||
}
|
||||
if c == '"' {
|
||||
return `'"'`
|
||||
}
|
||||
|
||||
// use quoted string with different quotation marks
|
||||
s := strconv.Quote(string(c))
|
||||
return "'" + s[1:len(s)-1] + "'"
|
||||
}
|
||||
|
||||
// undo causes the scanner to return scanCode from the next state transition.
|
||||
// This gives callers a simple 1-byte undo mechanism.
|
||||
func (s *scanner) undo(scanCode int) {
|
||||
if s.redo {
|
||||
panic("json: invalid use of scanner")
|
||||
}
|
||||
s.redoCode = scanCode
|
||||
s.redoState = s.step
|
||||
s.step = stateRedo
|
||||
s.redo = true
|
||||
}
|
||||
|
||||
// stateRedo helps implement the scanner's 1-byte undo.
|
||||
func stateRedo(s *scanner, c byte) int {
|
||||
s.redo = false
|
||||
s.step = s.redoState
|
||||
return s.redoCode
|
||||
}

@ -0,0 +1,485 @@

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"bytes"
	"errors"
	"io"
)

// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
	r     io.Reader
	buf   []byte
	d     decodeState
	scanp int // start of unread data in buf
	scan  scanner
	err   error

	tokenState int
	tokenStack []int
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{r: r}
}

// Deprecated: Use `SetNumberType` instead
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.numberType = UnmarshalJSONNumber }

// SetNumberType causes the Decoder to unmarshal a number into an interface{} as a
// Number, float64 or int64 depending on `t` enum value.
func (dec *Decoder) SetNumberType(t NumberUnmarshalType) { dec.d.numberType = t }

// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
	if dec.err != nil {
		return dec.err
	}

	if err := dec.tokenPrepareForDecode(); err != nil {
		return err
	}

	if !dec.tokenValueAllowed() {
		return &SyntaxError{msg: "not at beginning of value"}
	}

	// Read whole value into buffer.
	n, err := dec.readValue()
	if err != nil {
		return err
	}
	dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
	dec.scanp += n

	// Don't save err from unmarshal into dec.err:
	// the connection is still usable since we read a complete JSON
	// object from it before the error happened.
	err = dec.d.unmarshal(v)

	// fixup token streaming state
	dec.tokenValueEnd()

	return err
}
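
// Illustrative sketch (not part of the original file): typical use of this
// Decoder to drain a stream of concatenated JSON values until EOF.
// decodeStreamSketch and its onValue callback are hypothetical names.
func decodeStreamSketch(r io.Reader, onValue func(interface{})) error {
	dec := NewDecoder(r)
	for {
		var v interface{}
		err := dec.Decode(&v)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		onValue(v)
	}
}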
|
||||
|
||||
// Buffered returns a reader of the data remaining in the Decoder's
|
||||
// buffer. The reader is valid until the next call to Decode.
|
||||
func (dec *Decoder) Buffered() io.Reader {
|
||||
return bytes.NewReader(dec.buf[dec.scanp:])
|
||||
}
|
||||
|
||||
// readValue reads a JSON value into dec.buf.
|
||||
// It returns the length of the encoding.
|
||||
func (dec *Decoder) readValue() (int, error) {
|
||||
dec.scan.reset()
|
||||
|
||||
scanp := dec.scanp
|
||||
var err error
|
||||
Input:
|
||||
for {
|
||||
// Look in the buffer for a new value.
|
||||
for i, c := range dec.buf[scanp:] {
|
||||
dec.scan.bytes++
|
||||
v := dec.scan.step(&dec.scan, c)
|
||||
if v == scanEnd {
|
||||
scanp += i
|
||||
break Input
|
||||
}
|
||||
// scanEnd is delayed one byte.
|
||||
// We might block trying to get that byte from src,
|
||||
// so instead invent a space byte.
|
||||
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
|
||||
scanp += i + 1
|
||||
break Input
|
||||
}
|
||||
if v == scanError {
|
||||
dec.err = dec.scan.err
|
||||
return 0, dec.scan.err
|
||||
}
|
||||
}
|
||||
scanp = len(dec.buf)
|
||||
|
||||
// Did the last read have an error?
|
||||
// Delayed until now to allow buffer scan.
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
if dec.scan.step(&dec.scan, ' ') == scanEnd {
|
||||
break Input
|
||||
}
|
||||
if nonSpace(dec.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
}
|
||||
dec.err = err
|
||||
return 0, err
|
||||
}
|
||||
|
||||
n := scanp - dec.scanp
|
||||
err = dec.refill()
|
||||
scanp = dec.scanp + n
|
||||
}
|
||||
return scanp - dec.scanp, nil
|
||||
}
|
||||
|
||||
func (dec *Decoder) refill() error {
|
||||
// Make room to read more into the buffer.
|
||||
// First slide down data already consumed.
|
||||
if dec.scanp > 0 {
|
||||
n := copy(dec.buf, dec.buf[dec.scanp:])
|
||||
dec.buf = dec.buf[:n]
|
||||
dec.scanp = 0
|
||||
}
|
||||
|
||||
// Grow buffer if not large enough.
|
||||
const minRead = 512
|
||||
if cap(dec.buf)-len(dec.buf) < minRead {
|
||||
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
|
||||
copy(newBuf, dec.buf)
|
||||
dec.buf = newBuf
|
||||
}
|
||||
|
||||
// Read. Delay error for next iteration (after scan).
|
||||
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
|
||||
dec.buf = dec.buf[0 : len(dec.buf)+n]
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func nonSpace(b []byte) bool {
|
||||
for _, c := range b {
|
||||
if !isSpace(c) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// An Encoder writes JSON objects to an output stream.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
err error
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{w: w}
|
||||
}
|
||||
|
||||
// Encode writes the JSON encoding of v to the stream,
|
||||
// followed by a newline character.
|
||||
//
|
||||
// See the documentation for Marshal for details about the
|
||||
// conversion of Go values to JSON.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
if enc.err != nil {
|
||||
return enc.err
|
||||
}
|
||||
e := newEncodeState()
|
||||
err := e.marshal(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Terminate each value with a newline.
|
||||
// This makes the output look a little nicer
|
||||
// when debugging, and some kind of space
|
||||
// is required if the encoded value was a number,
|
||||
// so that the reader knows there aren't more
|
||||
// digits coming.
|
||||
e.WriteByte('\n')
|
||||
|
||||
if _, err = enc.w.Write(e.Bytes()); err != nil {
|
||||
enc.err = err
|
||||
}
|
||||
encodeStatePool.Put(e)
|
||||
return err
|
||||
}
|
||||
|
||||
// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte

// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
	return *m, nil
}

// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
	if m == nil {
		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
	}
	*m = append((*m)[0:0], data...)
	return nil
}

var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
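
// Illustrative sketch (not part of the original file): RawMessage lets a caller
// defer decoding part of a document, for example to dispatch on a type tag
// before decoding the payload. envelopeSketch and decodeEnvelopeSketch are
// hypothetical names; Unmarshal is defined elsewhere in this package.
type envelopeSketch struct {
	Type string     `json:"type"`
	Data RawMessage `json:"data"`
}

func decodeEnvelopeSketch(b []byte) (string, []byte, error) {
	var env envelopeSketch
	if err := Unmarshal(b, &env); err != nil {
		return "", nil, err
	}
	// env.Data still holds the raw bytes of the "data" field; the caller can
	// decode it later into a concrete type chosen from env.Type.
	return env.Type, []byte(env.Data), nil
}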
// A Token holds a value of one of these types:
|
||||
//
|
||||
// Delim, for the four JSON delimiters [ ] { }
|
||||
// bool, for JSON booleans
|
||||
// float64, for JSON numbers
|
||||
// Number, for JSON numbers
|
||||
// string, for JSON string literals
|
||||
// nil, for JSON null
|
||||
//
|
||||
type Token interface{}
|
||||
|
||||
const (
|
||||
tokenTopValue = iota
|
||||
tokenArrayStart
|
||||
tokenArrayValue
|
||||
tokenArrayComma
|
||||
tokenObjectStart
|
||||
tokenObjectKey
|
||||
tokenObjectColon
|
||||
tokenObjectValue
|
||||
tokenObjectComma
|
||||
)
|
||||
|
||||
// advance tokenstate from a separator state to a value state
|
||||
func (dec *Decoder) tokenPrepareForDecode() error {
|
||||
// Note: Not calling peek before switch, to avoid
|
||||
// putting peek into the standard Decode path.
|
||||
// peek is only called when using the Token API.
|
||||
switch dec.tokenState {
|
||||
case tokenArrayComma:
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c != ',' {
|
||||
return &SyntaxError{"expected comma after array element", 0}
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenArrayValue
|
||||
case tokenObjectColon:
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c != ':' {
|
||||
return &SyntaxError{"expected colon after object key", 0}
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenValueAllowed() bool {
|
||||
switch dec.tokenState {
|
||||
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenValueEnd() {
|
||||
switch dec.tokenState {
|
||||
case tokenArrayStart, tokenArrayValue:
|
||||
dec.tokenState = tokenArrayComma
|
||||
case tokenObjectValue:
|
||||
dec.tokenState = tokenObjectComma
|
||||
}
|
||||
}
|
||||
|
||||
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
|
||||
type Delim rune
|
||||
|
||||
func (d Delim) String() string {
|
||||
return string(d)
|
||||
}
|
||||
|
||||
// Token returns the next JSON token in the input stream.
|
||||
// At the end of the input stream, Token returns nil, io.EOF.
|
||||
//
|
||||
// Token guarantees that the delimiters [ ] { } it returns are
|
||||
// properly nested and matched: if Token encounters an unexpected
|
||||
// delimiter in the input, it will return an error.
|
||||
//
|
||||
// The input stream consists of basic JSON values—bool, string,
|
||||
// number, and null—along with delimiters [ ] { } of type Delim
|
||||
// to mark the start and end of arrays and objects.
|
||||
// Commas and colons are elided.
|
||||
func (dec *Decoder) Token() (Token, error) {
|
||||
for {
|
||||
c, err := dec.peek()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch c {
|
||||
case '[':
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
|
||||
dec.tokenState = tokenArrayStart
|
||||
return Delim('['), nil
|
||||
|
||||
case ']':
|
||||
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
|
||||
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
|
||||
dec.tokenValueEnd()
|
||||
return Delim(']'), nil
|
||||
|
||||
case '{':
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
|
||||
dec.tokenState = tokenObjectStart
|
||||
return Delim('{'), nil
|
||||
|
||||
case '}':
|
||||
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
|
||||
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
|
||||
dec.tokenValueEnd()
|
||||
return Delim('}'), nil
|
||||
|
||||
case ':':
|
||||
if dec.tokenState != tokenObjectColon {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectValue
|
||||
continue
|
||||
|
||||
case ',':
|
||||
if dec.tokenState == tokenArrayComma {
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenArrayValue
|
||||
continue
|
||||
}
|
||||
if dec.tokenState == tokenObjectComma {
|
||||
dec.scanp++
|
||||
dec.tokenState = tokenObjectKey
|
||||
continue
|
||||
}
|
||||
return dec.tokenError(c)
|
||||
|
||||
case '"':
|
||||
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
|
||||
var x string
|
||||
old := dec.tokenState
|
||||
dec.tokenState = tokenTopValue
|
||||
err := dec.Decode(&x)
|
||||
dec.tokenState = old
|
||||
if err != nil {
|
||||
clearOffset(err)
|
||||
return nil, err
|
||||
}
|
||||
dec.tokenState = tokenObjectColon
|
||||
return x, nil
|
||||
}
|
||||
fallthrough
|
||||
|
||||
default:
|
||||
if !dec.tokenValueAllowed() {
|
||||
return dec.tokenError(c)
|
||||
}
|
||||
var x interface{}
|
||||
if err := dec.Decode(&x); err != nil {
|
||||
clearOffset(err)
|
||||
return nil, err
|
||||
}
|
||||
return x, nil
|
||||
}
|
||||
}
|
||||
}
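
// Illustrative sketch (not part of the original file): streaming over a large
// JSON array with Token and More instead of decoding it all at once.
// streamArraySketch is a hypothetical name.
func streamArraySketch(r io.Reader, onElem func(interface{}) error) error {
	dec := NewDecoder(r)
	if _, err := dec.Token(); err != nil { // consume the opening '['
		return err
	}
	for dec.More() {
		var v interface{}
		if err := dec.Decode(&v); err != nil {
			return err
		}
		if err := onElem(v); err != nil {
			return err
		}
	}
	_, err := dec.Token() // consume the closing ']'
	return err
}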
|
||||
|
||||
func clearOffset(err error) {
|
||||
if s, ok := err.(*SyntaxError); ok {
|
||||
s.Offset = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (dec *Decoder) tokenError(c byte) (Token, error) {
|
||||
var context string
|
||||
switch dec.tokenState {
|
||||
case tokenTopValue:
|
||||
context = " looking for beginning of value"
|
||||
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
|
||||
context = " looking for beginning of value"
|
||||
case tokenArrayComma:
|
||||
context = " after array element"
|
||||
case tokenObjectKey:
|
||||
context = " looking for beginning of object key string"
|
||||
case tokenObjectColon:
|
||||
context = " after object key"
|
||||
case tokenObjectComma:
|
||||
context = " after object key:value pair"
|
||||
}
|
||||
return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
|
||||
}
|
||||
|
||||
// More reports whether there is another element in the
|
||||
// current array or object being parsed.
|
||||
func (dec *Decoder) More() bool {
|
||||
c, err := dec.peek()
|
||||
return err == nil && c != ']' && c != '}'
|
||||
}
|
||||
|
||||
func (dec *Decoder) peek() (byte, error) {
|
||||
var err error
|
||||
for {
|
||||
for i := dec.scanp; i < len(dec.buf); i++ {
|
||||
c := dec.buf[i]
|
||||
if isSpace(c) {
|
||||
continue
|
||||
}
|
||||
dec.scanp = i
|
||||
return c, nil
|
||||
}
|
||||
// buffer has been scanned, now report any error
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = dec.refill()
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
TODO
|
||||
|
||||
// EncodeToken writes the given JSON token to the stream.
|
||||
// It returns an error if the delimiters [ ] { } are not properly used.
|
||||
//
|
||||
// EncodeToken does not call Flush, because usually it is part of
|
||||
// a larger operation such as Encode, and those will call Flush when finished.
|
||||
// Callers that create an Encoder and then invoke EncodeToken directly,
|
||||
// without using Encode, need to call Flush when finished to ensure that
|
||||
// the JSON is written to the underlying writer.
|
||||
func (e *Encoder) EncodeToken(t Token) error {
|
||||
...
|
||||
}
|
||||
|
||||
*/

@ -0,0 +1,44 @@

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package json

import (
	"strings"
)

// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	if idx := strings.Index(tag, ","); idx != -1 {
		return tag[:idx], tagOptions(tag[idx+1:])
	}
	return tag, tagOptions("")
}

// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	s := string(o)
	for s != "" {
		var next string
		i := strings.Index(s, ",")
		if i >= 0 {
			s, next = s[:i], s[i+1:]
		}
		if s == optionName {
			return true
		}
		s = next
	}
	return false
}
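
// Illustrative sketch (not part of the original file): how the encoder/decoder
// use these helpers when reading a struct tag such as `json:"name,omitempty"`.
// tagNameAndOmitEmptySketch is a hypothetical name.
func tagNameAndOmitEmptySketch(tag string) (name string, omitEmpty bool) {
	name, opts := parseTag(tag) // e.g. "name", tagOptions("omitempty")
	return name, opts.Contains("omitempty")
}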

@ -0,0 +1,295 @@

/*-
 * Copyright 2014 Square Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package jose

import (
	"encoding/base64"
	"fmt"
	"strings"

	"github.com/go-jose/go-jose/v3/json"
)

// rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing.
type rawJSONWebEncryption struct {
	Protected    *byteBuffer        `json:"protected,omitempty"`
	Unprotected  *rawHeader         `json:"unprotected,omitempty"`
	Header       *rawHeader         `json:"header,omitempty"`
	Recipients   []rawRecipientInfo `json:"recipients,omitempty"`
	Aad          *byteBuffer        `json:"aad,omitempty"`
	EncryptedKey *byteBuffer        `json:"encrypted_key,omitempty"`
	Iv           *byteBuffer        `json:"iv,omitempty"`
	Ciphertext   *byteBuffer        `json:"ciphertext,omitempty"`
	Tag          *byteBuffer        `json:"tag,omitempty"`
}

// rawRecipientInfo represents a raw JWE Per-Recipient header JSON object. Used for parsing/serializing.
type rawRecipientInfo struct {
	Header       *rawHeader `json:"header,omitempty"`
	EncryptedKey string     `json:"encrypted_key,omitempty"`
}

// JSONWebEncryption represents an encrypted JWE object after parsing.
type JSONWebEncryption struct {
	Header                   Header
	protected, unprotected   *rawHeader
	recipients               []recipientInfo
	aad, iv, ciphertext, tag []byte
	original                 *rawJSONWebEncryption
}

// recipientInfo represents a raw JWE Per-Recipient header JSON object after parsing.
type recipientInfo struct {
	header       *rawHeader
	encryptedKey []byte
}
|
||||
// GetAuthData retrieves the (optional) authenticated data attached to the object.
|
||||
func (obj JSONWebEncryption) GetAuthData() []byte {
|
||||
if obj.aad != nil {
|
||||
out := make([]byte, len(obj.aad))
|
||||
copy(out, obj.aad)
|
||||
return out
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the merged header values
|
||||
func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader {
|
||||
out := rawHeader{}
|
||||
out.merge(obj.protected)
|
||||
out.merge(obj.unprotected)
|
||||
|
||||
if recipient != nil {
|
||||
out.merge(recipient.header)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// Get the additional authenticated data from a JWE object.
|
||||
func (obj JSONWebEncryption) computeAuthData() []byte {
|
||||
var protected string
|
||||
|
||||
switch {
|
||||
case obj.original != nil && obj.original.Protected != nil:
|
||||
protected = obj.original.Protected.base64()
|
||||
case obj.protected != nil:
|
||||
protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON((obj.protected)))
|
||||
default:
|
||||
protected = ""
|
||||
}
|
||||
|
||||
output := []byte(protected)
|
||||
if obj.aad != nil {
|
||||
output = append(output, '.')
|
||||
output = append(output, []byte(base64.RawURLEncoding.EncodeToString(obj.aad))...)
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// ParseEncrypted parses an encrypted message in compact or JWE JSON Serialization format.
|
||||
func ParseEncrypted(input string) (*JSONWebEncryption, error) {
|
||||
input = stripWhitespace(input)
|
||||
if strings.HasPrefix(input, "{") {
|
||||
return parseEncryptedFull(input)
|
||||
}
|
||||
|
||||
return parseEncryptedCompact(input)
|
||||
}
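
// Illustrative sketch (not part of the original file): round-tripping a compact
// JWE through this package. Decrypt is defined elsewhere in the package;
// decryptCompactSketch is a hypothetical name and key is the recipient's
// private or symmetric key.
func decryptCompactSketch(serialized string, key interface{}) ([]byte, error) {
	obj, err := ParseEncrypted(serialized)
	if err != nil {
		return nil, err
	}
	return obj.Decrypt(key)
}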
|
||||
|
||||
// parseEncryptedFull parses a message in compact format.
|
||||
func parseEncryptedFull(input string) (*JSONWebEncryption, error) {
|
||||
var parsed rawJSONWebEncryption
|
||||
err := json.Unmarshal([]byte(input), &parsed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parsed.sanitized()
|
||||
}
|
||||
|
||||
// sanitized produces a cleaned-up JWE object from the raw JSON.
|
||||
func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) {
|
||||
obj := &JSONWebEncryption{
|
||||
original: parsed,
|
||||
unprotected: parsed.Unprotected,
|
||||
}
|
||||
|
||||
// Check that there is not a nonce in the unprotected headers
|
||||
if parsed.Unprotected != nil {
|
||||
if nonce := parsed.Unprotected.getNonce(); nonce != "" {
|
||||
return nil, ErrUnprotectedNonce
|
||||
}
|
||||
}
|
||||
if parsed.Header != nil {
|
||||
if nonce := parsed.Header.getNonce(); nonce != "" {
|
||||
return nil, ErrUnprotectedNonce
|
||||
}
|
||||
}
|
||||
|
||||
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
|
||||
err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64())
|
||||
}
|
||||
}
|
||||
|
||||
// Note: this must be called _after_ we parse the protected header,
|
||||
// otherwise fields from the protected header will not get picked up.
|
||||
var err error
|
||||
mergedHeaders := obj.mergedHeaders(nil)
|
||||
obj.Header, err = mergedHeaders.sanitized()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders)
|
||||
}
|
||||
|
||||
if len(parsed.Recipients) == 0 {
|
||||
obj.recipients = []recipientInfo{
|
||||
{
|
||||
header: parsed.Header,
|
||||
encryptedKey: parsed.EncryptedKey.bytes(),
|
||||
},
|
||||
}
|
||||
} else {
|
||||
obj.recipients = make([]recipientInfo, len(parsed.Recipients))
|
||||
for r := range parsed.Recipients {
|
||||
encryptedKey, err := base64URLDecode(parsed.Recipients[r].EncryptedKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check that there is not a nonce in the unprotected header
|
||||
if parsed.Recipients[r].Header != nil && parsed.Recipients[r].Header.getNonce() != "" {
|
||||
return nil, ErrUnprotectedNonce
|
||||
}
|
||||
|
||||
obj.recipients[r].header = parsed.Recipients[r].Header
|
||||
obj.recipients[r].encryptedKey = encryptedKey
|
||||
}
|
||||
}
|
||||
|
||||
for _, recipient := range obj.recipients {
|
||||
headers := obj.mergedHeaders(&recipient)
|
||||
if headers.getAlgorithm() == "" || headers.getEncryption() == "" {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: message is missing alg/enc headers")
|
||||
}
|
||||
}
|
||||
|
||||
obj.iv = parsed.Iv.bytes()
|
||||
obj.ciphertext = parsed.Ciphertext.bytes()
|
||||
obj.tag = parsed.Tag.bytes()
|
||||
obj.aad = parsed.Aad.bytes()
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// parseEncryptedCompact parses a message in compact format.
|
||||
func parseEncryptedCompact(input string) (*JSONWebEncryption, error) {
|
||||
parts := strings.Split(input, ".")
|
||||
if len(parts) != 5 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
|
||||
}
|
||||
|
||||
rawProtected, err := base64URLDecode(parts[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encryptedKey, err := base64URLDecode(parts[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
iv, err := base64URLDecode(parts[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ciphertext, err := base64URLDecode(parts[3])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tag, err := base64URLDecode(parts[4])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raw := &rawJSONWebEncryption{
|
||||
Protected: newBuffer(rawProtected),
|
||||
EncryptedKey: newBuffer(encryptedKey),
|
||||
Iv: newBuffer(iv),
|
||||
Ciphertext: newBuffer(ciphertext),
|
||||
Tag: newBuffer(tag),
|
||||
}
|
||||
|
||||
return raw.sanitized()
|
||||
}
|
||||
|
||||
// CompactSerialize serializes an object using the compact serialization format.
|
||||
func (obj JSONWebEncryption) CompactSerialize() (string, error) {
|
||||
if len(obj.recipients) != 1 || obj.unprotected != nil ||
|
||||
obj.protected == nil || obj.recipients[0].header != nil {
|
||||
return "", ErrNotSupported
|
||||
}
|
||||
|
||||
serializedProtected := mustSerializeJSON(obj.protected)
|
||||
|
||||
return fmt.Sprintf(
|
||||
"%s.%s.%s.%s.%s",
|
||||
base64.RawURLEncoding.EncodeToString(serializedProtected),
|
||||
base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey),
|
||||
base64.RawURLEncoding.EncodeToString(obj.iv),
|
||||
base64.RawURLEncoding.EncodeToString(obj.ciphertext),
|
||||
base64.RawURLEncoding.EncodeToString(obj.tag)), nil
|
||||
}
|
||||
|
||||
// FullSerialize serializes an object using the full JSON serialization format.
|
||||
func (obj JSONWebEncryption) FullSerialize() string {
|
||||
raw := rawJSONWebEncryption{
|
||||
Unprotected: obj.unprotected,
|
||||
Iv: newBuffer(obj.iv),
|
||||
Ciphertext: newBuffer(obj.ciphertext),
|
||||
EncryptedKey: newBuffer(obj.recipients[0].encryptedKey),
|
||||
Tag: newBuffer(obj.tag),
|
||||
Aad: newBuffer(obj.aad),
|
||||
Recipients: []rawRecipientInfo{},
|
||||
}
|
||||
|
||||
if len(obj.recipients) > 1 {
|
||||
for _, recipient := range obj.recipients {
|
||||
info := rawRecipientInfo{
|
||||
Header: recipient.header,
|
||||
EncryptedKey: base64.RawURLEncoding.EncodeToString(recipient.encryptedKey),
|
||||
}
|
||||
raw.Recipients = append(raw.Recipients, info)
|
||||
}
|
||||
} else {
|
||||
// Use flattened serialization
|
||||
raw.Header = obj.recipients[0].header
|
||||
raw.EncryptedKey = newBuffer(obj.recipients[0].encryptedKey)
|
||||
}
|
||||
|
||||
if obj.protected != nil {
|
||||
raw.Protected = newBuffer(mustSerializeJSON(obj.protected))
|
||||
}
|
||||
|
||||
return string(mustSerializeJSON(raw))
|
||||
}
|
|
@ -0,0 +1,798 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing.
|
||||
type rawJSONWebKey struct {
|
||||
Use string `json:"use,omitempty"`
|
||||
Kty string `json:"kty,omitempty"`
|
||||
Kid string `json:"kid,omitempty"`
|
||||
Crv string `json:"crv,omitempty"`
|
||||
Alg string `json:"alg,omitempty"`
|
||||
K *byteBuffer `json:"k,omitempty"`
|
||||
X *byteBuffer `json:"x,omitempty"`
|
||||
Y *byteBuffer `json:"y,omitempty"`
|
||||
N *byteBuffer `json:"n,omitempty"`
|
||||
E *byteBuffer `json:"e,omitempty"`
|
||||
// -- Following fields are only used for private keys --
|
||||
// RSA uses D, P and Q, while ECDSA uses only D. Fields Dp, Dq, and Qi are
|
||||
// completely optional. Therefore for RSA/ECDSA, D != nil is a contract that
|
||||
// we have a private key whereas D == nil means we have only a public key.
|
||||
D *byteBuffer `json:"d,omitempty"`
|
||||
P *byteBuffer `json:"p,omitempty"`
|
||||
Q *byteBuffer `json:"q,omitempty"`
|
||||
Dp *byteBuffer `json:"dp,omitempty"`
|
||||
Dq *byteBuffer `json:"dq,omitempty"`
|
||||
Qi *byteBuffer `json:"qi,omitempty"`
|
||||
// Certificates
|
||||
X5c []string `json:"x5c,omitempty"`
|
||||
X5u string `json:"x5u,omitempty"`
|
||||
X5tSHA1 string `json:"x5t,omitempty"`
|
||||
X5tSHA256 string `json:"x5t#S256,omitempty"`
|
||||
}
|
||||
|
||||
// JSONWebKey represents a public or private key in JWK format.
|
||||
type JSONWebKey struct {
|
||||
// Cryptographic key, can be a symmetric or asymmetric key.
|
||||
Key interface{}
|
||||
// Key identifier, parsed from `kid` header.
|
||||
KeyID string
|
||||
// Key algorithm, parsed from `alg` header.
|
||||
Algorithm string
|
||||
// Key use, parsed from `use` header.
|
||||
Use string
|
||||
|
||||
// X.509 certificate chain, parsed from `x5c` header.
|
||||
Certificates []*x509.Certificate
|
||||
// X.509 certificate URL, parsed from `x5u` header.
|
||||
CertificatesURL *url.URL
|
||||
// X.509 certificate thumbprint (SHA-1), parsed from `x5t` header.
|
||||
CertificateThumbprintSHA1 []byte
|
||||
// X.509 certificate thumbprint (SHA-256), parsed from `x5t#S256` header.
|
||||
CertificateThumbprintSHA256 []byte
|
||||
}
|
||||
|
||||
// MarshalJSON serializes the given key to its JSON representation.
|
||||
func (k JSONWebKey) MarshalJSON() ([]byte, error) {
|
||||
var raw *rawJSONWebKey
|
||||
var err error
|
||||
|
||||
switch key := k.Key.(type) {
|
||||
case ed25519.PublicKey:
|
||||
raw = fromEdPublicKey(key)
|
||||
case *ecdsa.PublicKey:
|
||||
raw, err = fromEcPublicKey(key)
|
||||
case *rsa.PublicKey:
|
||||
raw = fromRsaPublicKey(key)
|
||||
case ed25519.PrivateKey:
|
||||
raw, err = fromEdPrivateKey(key)
|
||||
case *ecdsa.PrivateKey:
|
||||
raw, err = fromEcPrivateKey(key)
|
||||
case *rsa.PrivateKey:
|
||||
raw, err = fromRsaPrivateKey(key)
|
||||
case []byte:
|
||||
raw, err = fromSymmetricKey(key)
|
||||
default:
|
||||
return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raw.Kid = k.KeyID
|
||||
raw.Alg = k.Algorithm
|
||||
raw.Use = k.Use
|
||||
|
||||
for _, cert := range k.Certificates {
|
||||
raw.X5c = append(raw.X5c, base64.StdEncoding.EncodeToString(cert.Raw))
|
||||
}
|
||||
|
||||
x5tSHA1Len := len(k.CertificateThumbprintSHA1)
|
||||
x5tSHA256Len := len(k.CertificateThumbprintSHA256)
|
||||
if x5tSHA1Len > 0 {
|
||||
if x5tSHA1Len != sha1.Size {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid SHA-1 thumbprint (must be %d bytes, not %d)", sha1.Size, x5tSHA1Len)
|
||||
}
|
||||
raw.X5tSHA1 = base64.RawURLEncoding.EncodeToString(k.CertificateThumbprintSHA1)
|
||||
}
|
||||
if x5tSHA256Len > 0 {
|
||||
if x5tSHA256Len != sha256.Size {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid SHA-256 thumbprint (must be %d bytes, not %d)", sha256.Size, x5tSHA256Len)
|
||||
}
|
||||
raw.X5tSHA256 = base64.RawURLEncoding.EncodeToString(k.CertificateThumbprintSHA256)
|
||||
}
|
||||
|
||||
// If cert chain is attached (as opposed to being behind a URL), check the
|
||||
// keys thumbprints to make sure they match what is expected. This is to
|
||||
// ensure we don't accidentally produce a JWK with semantically inconsistent
|
||||
// data in the headers.
|
||||
if len(k.Certificates) > 0 {
|
||||
expectedSHA1 := sha1.Sum(k.Certificates[0].Raw)
|
||||
expectedSHA256 := sha256.Sum256(k.Certificates[0].Raw)
|
||||
|
||||
if len(k.CertificateThumbprintSHA1) > 0 && !bytes.Equal(k.CertificateThumbprintSHA1, expectedSHA1[:]) {
|
||||
return nil, errors.New("go-jose/go-jose: invalid SHA-1 thumbprint, does not match cert chain")
|
||||
}
|
||||
if len(k.CertificateThumbprintSHA256) > 0 && !bytes.Equal(k.CertificateThumbprintSHA256, expectedSHA256[:]) {
|
||||
return nil, errors.New("go-jose/go-jose: invalid SHA-256 thumbprint, does not match cert chain")
|
||||
}
|
||||
}
|
||||
|
||||
if k.CertificatesURL != nil {
|
||||
raw.X5u = k.CertificatesURL.String()
|
||||
}
|
||||
|
||||
return json.Marshal(raw)
|
||||
}
|
||||
|
||||
// UnmarshalJSON reads a key from its JSON representation.
|
||||
func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
|
||||
var raw rawJSONWebKey
|
||||
err = json.Unmarshal(data, &raw)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
certs, err := parseCertificateChain(raw.X5c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("go-jose/go-jose: failed to unmarshal x5c field: %s", err)
|
||||
}
|
||||
|
||||
var key interface{}
|
||||
var certPub interface{}
|
||||
var keyPub interface{}
|
||||
|
||||
if len(certs) > 0 {
|
||||
// We need to check that leaf public key matches the key embedded in this
|
||||
// JWK, as required by the standard (see RFC 7517, Section 4.7). Otherwise
|
||||
// the JWK parsed could be semantically invalid. Technically, should also
|
||||
// check key usage fields and other extensions on the cert here, but the
|
||||
// standard doesn't exactly explain how they're supposed to map from the
|
||||
// JWK representation to the X.509 extensions.
|
||||
certPub = certs[0].PublicKey
|
||||
}
|
||||
|
||||
switch raw.Kty {
|
||||
case "EC":
|
||||
if raw.D != nil {
|
||||
key, err = raw.ecPrivateKey()
|
||||
if err == nil {
|
||||
keyPub = key.(*ecdsa.PrivateKey).Public()
|
||||
}
|
||||
} else {
|
||||
key, err = raw.ecPublicKey()
|
||||
keyPub = key
|
||||
}
|
||||
case "RSA":
|
||||
if raw.D != nil {
|
||||
key, err = raw.rsaPrivateKey()
|
||||
if err == nil {
|
||||
keyPub = key.(*rsa.PrivateKey).Public()
|
||||
}
|
||||
} else {
|
||||
key, err = raw.rsaPublicKey()
|
||||
keyPub = key
|
||||
}
|
||||
case "oct":
|
||||
if certPub != nil {
|
||||
return errors.New("go-jose/go-jose: invalid JWK, found 'oct' (symmetric) key with cert chain")
|
||||
}
|
||||
key, err = raw.symmetricKey()
|
||||
case "OKP":
|
||||
if raw.Crv == "Ed25519" && raw.X != nil {
|
||||
if raw.D != nil {
|
||||
key, err = raw.edPrivateKey()
|
||||
if err == nil {
|
||||
keyPub = key.(ed25519.PrivateKey).Public()
|
||||
}
|
||||
} else {
|
||||
key, err = raw.edPublicKey()
|
||||
keyPub = key
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("go-jose/go-jose: unknown curve '%s'", raw.Crv)
|
||||
}
|
||||
default:
|
||||
err = fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if certPub != nil && keyPub != nil {
|
||||
if !reflect.DeepEqual(certPub, keyPub) {
|
||||
return errors.New("go-jose/go-jose: invalid JWK, public keys in key and x5c fields do not match")
|
||||
}
|
||||
}
|
||||
|
||||
*k = JSONWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use, Certificates: certs}
|
||||
|
||||
if raw.X5u != "" {
|
||||
k.CertificatesURL, err = url.Parse(raw.X5u)
|
||||
if err != nil {
|
||||
return fmt.Errorf("go-jose/go-jose: invalid JWK, x5u header is invalid URL: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// x5t parameters are base64url-encoded SHA thumbprints
|
||||
// See RFC 7517, Section 4.8, https://tools.ietf.org/html/rfc7517#section-4.8
|
||||
x5tSHA1bytes, err := base64URLDecode(raw.X5tSHA1)
|
||||
if err != nil {
|
||||
return errors.New("go-jose/go-jose: invalid JWK, x5t header has invalid encoding")
|
||||
}
|
||||
|
||||
// RFC 7517, Section 4.8 is ambiguous as to whether the digest output should be byte or hex,
|
||||
// for this reason, after base64 decoding, if the size is sha1.Size it's likely that the value is a byte encoded
|
||||
// checksum so we skip this. Otherwise if the checksum was hex encoded we expect a 40 byte sized array so we'll
|
||||
// try to hex decode it. When Marshalling this value we'll always use a base64 encoded version of byte format checksum.
|
||||
if len(x5tSHA1bytes) == 2*sha1.Size {
|
||||
hx, err := hex.DecodeString(string(x5tSHA1bytes))
|
||||
if err != nil {
|
||||
return fmt.Errorf("go-jose/go-jose: invalid JWK, unable to hex decode x5t: %v", err)
|
||||
|
||||
}
|
||||
x5tSHA1bytes = hx
|
||||
}
|
||||
|
||||
k.CertificateThumbprintSHA1 = x5tSHA1bytes
|
||||
|
||||
x5tSHA256bytes, err := base64URLDecode(raw.X5tSHA256)
|
||||
if err != nil {
|
||||
return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header has invalid encoding")
|
||||
}
|
||||
|
||||
if len(x5tSHA256bytes) == 2*sha256.Size {
|
||||
hx256, err := hex.DecodeString(string(x5tSHA256bytes))
|
||||
if err != nil {
|
||||
return fmt.Errorf("go-jose/go-jose: invalid JWK, unable to hex decode x5t#S256: %v", err)
|
||||
}
|
||||
x5tSHA256bytes = hx256
|
||||
}
|
||||
|
||||
k.CertificateThumbprintSHA256 = x5tSHA256bytes
|
||||
|
||||
x5tSHA1Len := len(k.CertificateThumbprintSHA1)
|
||||
x5tSHA256Len := len(k.CertificateThumbprintSHA256)
|
||||
if x5tSHA1Len > 0 && x5tSHA1Len != sha1.Size {
|
||||
return errors.New("go-jose/go-jose: invalid JWK, x5t header is of incorrect size")
|
||||
}
|
||||
if x5tSHA256Len > 0 && x5tSHA256Len != sha256.Size {
|
||||
return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header is of incorrect size")
|
||||
}
|
||||
|
||||
// If certificate chain *and* thumbprints are set, verify correctness.
|
||||
if len(k.Certificates) > 0 {
|
||||
leaf := k.Certificates[0]
|
||||
sha1sum := sha1.Sum(leaf.Raw)
|
||||
sha256sum := sha256.Sum256(leaf.Raw)
|
||||
|
||||
if len(k.CertificateThumbprintSHA1) > 0 && !bytes.Equal(sha1sum[:], k.CertificateThumbprintSHA1) {
|
||||
return errors.New("go-jose/go-jose: invalid JWK, x5c thumbprint does not match x5t value")
|
||||
}
|
||||
|
||||
if len(k.CertificateThumbprintSHA256) > 0 && !bytes.Equal(sha256sum[:], k.CertificateThumbprintSHA256) {
|
||||
return errors.New("go-jose/go-jose: invalid JWK, x5c thumbprint does not match x5t#S256 value")
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// JSONWebKeySet represents a JWK Set object.
type JSONWebKeySet struct {
	Keys []JSONWebKey `json:"keys"`
}

// Key convenience method returns keys by key ID. Specification states
// that a JWK Set "SHOULD" use distinct key IDs, but allows for some
// cases where they are not distinct. Hence method returns a slice
// of JSONWebKeys.
func (s *JSONWebKeySet) Key(kid string) []JSONWebKey {
	var keys []JSONWebKey
	for _, key := range s.Keys {
		if key.KeyID == kid {
			keys = append(keys, key)
		}
	}

	return keys
}
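
// Illustrative sketch (not part of the original file): picking a verification
// key out of a JWK Set by the "kid" from a token header. lookupKeySketch is a
// hypothetical name.
func lookupKeySketch(set *JSONWebKeySet, kid string) (JSONWebKey, bool) {
	keys := set.Key(kid)
	if len(keys) == 0 {
		return JSONWebKey{}, false
	}
	// Key IDs SHOULD be unique; if they are not, the caller must decide which
	// match to use. This sketch simply takes the first one.
	return keys[0], true
}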
|
||||
|
||||
const rsaThumbprintTemplate = `{"e":"%s","kty":"RSA","n":"%s"}`
|
||||
const ecThumbprintTemplate = `{"crv":"%s","kty":"EC","x":"%s","y":"%s"}`
|
||||
const edThumbprintTemplate = `{"crv":"%s","kty":"OKP","x":"%s"}`
|
||||
|
||||
func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) {
|
||||
coordLength := curveSize(curve)
|
||||
crv, err := curveName(curve)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(x.Bytes()) > coordLength || len(y.Bytes()) > coordLength {
|
||||
return "", errors.New("go-jose/go-jose: invalid elliptic key (too large)")
|
||||
}
|
||||
|
||||
return fmt.Sprintf(ecThumbprintTemplate, crv,
|
||||
newFixedSizeBuffer(x.Bytes(), coordLength).base64(),
|
||||
newFixedSizeBuffer(y.Bytes(), coordLength).base64()), nil
|
||||
}
|
||||
|
||||
func rsaThumbprintInput(n *big.Int, e int) (string, error) {
|
||||
return fmt.Sprintf(rsaThumbprintTemplate,
|
||||
newBufferFromInt(uint64(e)).base64(),
|
||||
newBuffer(n.Bytes()).base64()), nil
|
||||
}
|
||||
|
||||
func edThumbprintInput(ed ed25519.PublicKey) (string, error) {
|
||||
crv := "Ed25519"
|
||||
if len(ed) > 32 {
|
||||
return "", errors.New("go-jose/go-jose: invalid elliptic key (too large)")
|
||||
}
|
||||
return fmt.Sprintf(edThumbprintTemplate, crv,
|
||||
newFixedSizeBuffer(ed, 32).base64()), nil
|
||||
}
|
||||
|
||||
// Thumbprint computes the JWK Thumbprint of a key using the
// indicated hash algorithm.
func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
	var input string
	var err error
	switch key := k.Key.(type) {
	case ed25519.PublicKey:
		input, err = edThumbprintInput(key)
	case *ecdsa.PublicKey:
		input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
	case *ecdsa.PrivateKey:
		input, err = ecThumbprintInput(key.Curve, key.X, key.Y)
	case *rsa.PublicKey:
		input, err = rsaThumbprintInput(key.N, key.E)
	case *rsa.PrivateKey:
		input, err = rsaThumbprintInput(key.N, key.E)
	case ed25519.PrivateKey:
		input, err = edThumbprintInput(ed25519.PublicKey(key[32:]))
	default:
		return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
	}

	if err != nil {
		return nil, err
	}

	h := hash.New()
	_, _ = h.Write([]byte(input))
	return h.Sum(nil), nil
}
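
// Illustrative sketch (not part of the original file): computing the JWK
// thumbprint (RFC 7638) of a key as a base64url string, the form commonly used
// as a key ID. thumbprintKeyIDSketch is a hypothetical name; the chosen hash
// must be linked in (crypto/sha256 is already imported by this file).
func thumbprintKeyIDSketch(k *JSONWebKey) (string, error) {
	tp, err := k.Thumbprint(crypto.SHA256)
	if err != nil {
		return "", err
	}
	return base64.RawURLEncoding.EncodeToString(tp), nil
}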
|
||||
|
||||
// IsPublic returns true if the JWK represents a public key (not symmetric, not private).
|
||||
func (k *JSONWebKey) IsPublic() bool {
|
||||
switch k.Key.(type) {
|
||||
case *ecdsa.PublicKey, *rsa.PublicKey, ed25519.PublicKey:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Public creates JSONWebKey with corresponding public key if JWK represents asymmetric private key.
|
||||
func (k *JSONWebKey) Public() JSONWebKey {
|
||||
if k.IsPublic() {
|
||||
return *k
|
||||
}
|
||||
ret := *k
|
||||
switch key := k.Key.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
ret.Key = key.Public()
|
||||
case *rsa.PrivateKey:
|
||||
ret.Key = key.Public()
|
||||
case ed25519.PrivateKey:
|
||||
ret.Key = key.Public()
|
||||
default:
|
||||
return JSONWebKey{} // returning invalid key
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Valid checks that the key contains the expected parameters.
|
||||
func (k *JSONWebKey) Valid() bool {
|
||||
if k.Key == nil {
|
||||
return false
|
||||
}
|
||||
switch key := k.Key.(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
if key.Curve == nil || key.X == nil || key.Y == nil {
|
||||
return false
|
||||
}
|
||||
case *ecdsa.PrivateKey:
|
||||
if key.Curve == nil || key.X == nil || key.Y == nil || key.D == nil {
|
||||
return false
|
||||
}
|
||||
case *rsa.PublicKey:
|
||||
if key.N == nil || key.E == 0 {
|
||||
return false
|
||||
}
|
||||
case *rsa.PrivateKey:
|
||||
if key.N == nil || key.E == 0 || key.D == nil || len(key.Primes) < 2 {
|
||||
return false
|
||||
}
|
||||
case ed25519.PublicKey:
|
||||
if len(key) != 32 {
|
||||
return false
|
||||
}
|
||||
case ed25519.PrivateKey:
|
||||
if len(key) != 64 {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) rsaPublicKey() (*rsa.PublicKey, error) {
|
||||
if key.N == nil || key.E == nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid RSA key, missing n/e values")
|
||||
}
|
||||
|
||||
return &rsa.PublicKey{
|
||||
N: key.N.bigInt(),
|
||||
E: key.E.toInt(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func fromEdPublicKey(pub ed25519.PublicKey) *rawJSONWebKey {
|
||||
return &rawJSONWebKey{
|
||||
Kty: "OKP",
|
||||
Crv: "Ed25519",
|
||||
X: newBuffer(pub),
|
||||
}
|
||||
}
|
||||
|
||||
func fromRsaPublicKey(pub *rsa.PublicKey) *rawJSONWebKey {
|
||||
return &rawJSONWebKey{
|
||||
Kty: "RSA",
|
||||
N: newBuffer(pub.N.Bytes()),
|
||||
E: newBufferFromInt(uint64(pub.E)),
|
||||
}
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) {
|
||||
var curve elliptic.Curve
|
||||
switch key.Crv {
|
||||
case "P-256":
|
||||
curve = elliptic.P256()
|
||||
case "P-384":
|
||||
curve = elliptic.P384()
|
||||
case "P-521":
|
||||
curve = elliptic.P521()
|
||||
default:
|
||||
return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv)
|
||||
}
|
||||
|
||||
if key.X == nil || key.Y == nil {
|
||||
return nil, errors.New("go-jose/go-jose: invalid EC key, missing x/y values")
|
||||
}
|
||||
|
||||
// The length of this octet string MUST be the full size of a coordinate for
|
||||
// the curve specified in the "crv" parameter.
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.2.1.2
|
||||
if curveSize(curve) != len(key.X.data) {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC public key, wrong length for x")
|
||||
}
|
||||
|
||||
if curveSize(curve) != len(key.Y.data) {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC public key, wrong length for y")
|
||||
}
|
||||
|
||||
x := key.X.bigInt()
|
||||
y := key.Y.bigInt()
|
||||
|
||||
if !curve.IsOnCurve(x, y) {
|
||||
return nil, errors.New("go-jose/go-jose: invalid EC key, X/Y are not on declared curve")
|
||||
}
|
||||
|
||||
return &ecdsa.PublicKey{
|
||||
Curve: curve,
|
||||
X: x,
|
||||
Y: y,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) {
|
||||
if pub == nil || pub.X == nil || pub.Y == nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC key (nil, or X/Y missing)")
|
||||
}
|
||||
|
||||
name, err := curveName(pub.Curve)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
size := curveSize(pub.Curve)
|
||||
|
||||
xBytes := pub.X.Bytes()
|
||||
yBytes := pub.Y.Bytes()
|
||||
|
||||
if len(xBytes) > size || len(yBytes) > size {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC key (X/Y too large)")
|
||||
}
|
||||
|
||||
key := &rawJSONWebKey{
|
||||
Kty: "EC",
|
||||
Crv: name,
|
||||
X: newFixedSizeBuffer(xBytes, size),
|
||||
Y: newFixedSizeBuffer(yBytes, size),
|
||||
}
|
||||
|
||||
return key, nil
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) {
|
||||
var missing []string
|
||||
switch {
|
||||
case key.D == nil:
|
||||
missing = append(missing, "D")
|
||||
case key.X == nil:
|
||||
missing = append(missing, "X")
|
||||
}
|
||||
|
||||
if len(missing) > 0 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", "))
|
||||
}
|
||||
|
||||
privateKey := make([]byte, ed25519.PrivateKeySize)
|
||||
copy(privateKey[0:32], key.D.bytes())
|
||||
copy(privateKey[32:], key.X.bytes())
|
||||
rv := ed25519.PrivateKey(privateKey)
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) {
|
||||
if key.X == nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid Ed key, missing x value")
|
||||
}
|
||||
publicKey := make([]byte, ed25519.PublicKeySize)
|
||||
copy(publicKey[0:32], key.X.bytes())
|
||||
rv := ed25519.PublicKey(publicKey)
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
|
||||
var missing []string
|
||||
switch {
|
||||
case key.N == nil:
|
||||
missing = append(missing, "N")
|
||||
case key.E == nil:
|
||||
missing = append(missing, "E")
|
||||
case key.D == nil:
|
||||
missing = append(missing, "D")
|
||||
case key.P == nil:
|
||||
missing = append(missing, "P")
|
||||
case key.Q == nil:
|
||||
missing = append(missing, "Q")
|
||||
}
|
||||
|
||||
if len(missing) > 0 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
|
||||
}
|
||||
|
||||
rv := &rsa.PrivateKey{
|
||||
PublicKey: rsa.PublicKey{
|
||||
N: key.N.bigInt(),
|
||||
E: key.E.toInt(),
|
||||
},
|
||||
D: key.D.bigInt(),
|
||||
Primes: []*big.Int{
|
||||
key.P.bigInt(),
|
||||
key.Q.bigInt(),
|
||||
},
|
||||
}
|
||||
|
||||
if key.Dp != nil {
|
||||
rv.Precomputed.Dp = key.Dp.bigInt()
|
||||
}
|
||||
if key.Dq != nil {
|
||||
rv.Precomputed.Dq = key.Dq.bigInt()
|
||||
}
|
||||
if key.Qi != nil {
|
||||
rv.Precomputed.Qinv = key.Qi.bigInt()
|
||||
}
|
||||
|
||||
err := rv.Validate()
|
||||
return rv, err
|
||||
}
|
||||
|
||||
func fromEdPrivateKey(ed ed25519.PrivateKey) (*rawJSONWebKey, error) {
|
||||
raw := fromEdPublicKey(ed25519.PublicKey(ed[32:]))
|
||||
|
||||
raw.D = newBuffer(ed[0:32])
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
func fromRsaPrivateKey(rsa *rsa.PrivateKey) (*rawJSONWebKey, error) {
|
||||
if len(rsa.Primes) != 2 {
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
|
||||
raw := fromRsaPublicKey(&rsa.PublicKey)
|
||||
|
||||
raw.D = newBuffer(rsa.D.Bytes())
|
||||
raw.P = newBuffer(rsa.Primes[0].Bytes())
|
||||
raw.Q = newBuffer(rsa.Primes[1].Bytes())
|
||||
|
||||
if rsa.Precomputed.Dp != nil {
|
||||
raw.Dp = newBuffer(rsa.Precomputed.Dp.Bytes())
|
||||
}
|
||||
if rsa.Precomputed.Dq != nil {
|
||||
raw.Dq = newBuffer(rsa.Precomputed.Dq.Bytes())
|
||||
}
|
||||
if rsa.Precomputed.Qinv != nil {
|
||||
raw.Qi = newBuffer(rsa.Precomputed.Qinv.Bytes())
|
||||
}
|
||||
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
|
||||
var curve elliptic.Curve
|
||||
switch key.Crv {
|
||||
case "P-256":
|
||||
curve = elliptic.P256()
|
||||
case "P-384":
|
||||
curve = elliptic.P384()
|
||||
case "P-521":
|
||||
curve = elliptic.P521()
|
||||
default:
|
||||
return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv)
|
||||
}
|
||||
|
||||
if key.X == nil || key.Y == nil || key.D == nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing x/y/d values")
|
||||
}
|
||||
|
||||
// The length of this octet string MUST be the full size of a coordinate for
|
||||
// the curve specified in the "crv" parameter.
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.2.1.2
|
||||
if curveSize(curve) != len(key.X.data) {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for x")
|
||||
}
|
||||
|
||||
if curveSize(curve) != len(key.Y.data) {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for y")
|
||||
}
|
||||
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.2.2.1
|
||||
if dSize(curve) != len(key.D.data) {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for d")
|
||||
}
|
||||
|
||||
x := key.X.bigInt()
|
||||
y := key.Y.bigInt()
|
||||
|
||||
if !curve.IsOnCurve(x, y) {
|
||||
return nil, errors.New("go-jose/go-jose: invalid EC key, X/Y are not on declared curve")
|
||||
}
|
||||
|
||||
return &ecdsa.PrivateKey{
|
||||
PublicKey: ecdsa.PublicKey{
|
||||
Curve: curve,
|
||||
X: x,
|
||||
Y: y,
|
||||
},
|
||||
D: key.D.bigInt(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJSONWebKey, error) {
|
||||
raw, err := fromEcPublicKey(&ec.PublicKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ec.D == nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key")
|
||||
}
|
||||
|
||||
raw.D = newFixedSizeBuffer(ec.D.Bytes(), dSize(ec.PublicKey.Curve))
|
||||
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
// dSize returns the size in octets for the "d" member of an elliptic curve
|
||||
// private key.
|
||||
// The length of this octet string MUST be ceiling(log-base-2(n)/8)
|
||||
// octets (where n is the order of the curve).
|
||||
// https://tools.ietf.org/html/rfc7518#section-6.2.2.1
|
||||
func dSize(curve elliptic.Curve) int {
|
||||
order := curve.Params().P
|
||||
bitLen := order.BitLen()
|
||||
size := bitLen / 8
|
||||
if bitLen%8 != 0 {
|
||||
size++
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func fromSymmetricKey(key []byte) (*rawJSONWebKey, error) {
|
||||
return &rawJSONWebKey{
|
||||
Kty: "oct",
|
||||
K: newBuffer(key),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (key rawJSONWebKey) symmetricKey() ([]byte, error) {
|
||||
if key.K == nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid OCT (symmetric) key, missing k value")
|
||||
}
|
||||
return key.K.bytes(), nil
|
||||
}
|
||||
|
||||
func tryJWKS(key interface{}, headers ...Header) interface{} {
|
||||
var jwks JSONWebKeySet
|
||||
|
||||
switch jwksType := key.(type) {
|
||||
case *JSONWebKeySet:
|
||||
jwks = *jwksType
|
||||
case JSONWebKeySet:
|
||||
jwks = jwksType
|
||||
default:
|
||||
return key
|
||||
}
|
||||
|
||||
var kid string
|
||||
for _, header := range headers {
|
||||
if header.KeyID != "" {
|
||||
kid = header.KeyID
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if kid == "" {
|
||||
return key
|
||||
}
|
||||
|
||||
keys := jwks.Key(kid)
|
||||
if len(keys) == 0 {
|
||||
return key
|
||||
}
|
||||
|
||||
return keys[0].Key
|
||||
}
@@ -0,0 +1,366 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing.
|
||||
type rawJSONWebSignature struct {
|
||||
Payload *byteBuffer `json:"payload,omitempty"`
|
||||
Signatures []rawSignatureInfo `json:"signatures,omitempty"`
|
||||
Protected *byteBuffer `json:"protected,omitempty"`
|
||||
Header *rawHeader `json:"header,omitempty"`
|
||||
Signature *byteBuffer `json:"signature,omitempty"`
|
||||
}
|
||||
|
||||
// rawSignatureInfo represents a single JWS signature over the JWS payload and protected header.
|
||||
type rawSignatureInfo struct {
|
||||
Protected *byteBuffer `json:"protected,omitempty"`
|
||||
Header *rawHeader `json:"header,omitempty"`
|
||||
Signature *byteBuffer `json:"signature,omitempty"`
|
||||
}
|
||||
|
||||
// JSONWebSignature represents a signed JWS object after parsing.
|
||||
type JSONWebSignature struct {
|
||||
payload []byte
|
||||
// Signatures attached to this object (may be more than one for multi-sig).
|
||||
// Be careful about accessing these directly, prefer to use Verify() or
|
||||
// VerifyMulti() to ensure that the data you're getting is verified.
|
||||
Signatures []Signature
|
||||
}
|
||||
|
||||
// Signature represents a single signature over the JWS payload and protected header.
|
||||
type Signature struct {
|
||||
// Merged header fields. Contains both protected and unprotected header
|
||||
// values. Prefer using Protected and Unprotected fields instead of this.
|
||||
// Values in this header may or may not have been signed and in general
|
||||
// should not be trusted.
|
||||
Header Header
|
||||
|
||||
// Protected header. Values in this header were signed and
|
||||
// will be verified as part of the signature verification process.
|
||||
Protected Header
|
||||
|
||||
// Unprotected header. Values in this header were not signed
|
||||
// and in general should not be trusted.
|
||||
Unprotected Header
|
||||
|
||||
// The actual signature value
|
||||
Signature []byte
|
||||
|
||||
protected *rawHeader
|
||||
header *rawHeader
|
||||
original *rawSignatureInfo
|
||||
}
|
||||
|
||||
// ParseSigned parses a signed message in compact or JWS JSON Serialization format.
func ParseSigned(signature string) (*JSONWebSignature, error) {
	signature = stripWhitespace(signature)
	if strings.HasPrefix(signature, "{") {
		return parseSignedFull(signature)
	}

	return parseSignedCompact(signature, nil)
}

// ParseDetached parses a signed message in compact serialization format with detached payload.
func ParseDetached(signature string, payload []byte) (*JSONWebSignature, error) {
	if payload == nil {
		return nil, errors.New("go-jose/go-jose: nil payload")
	}
	return parseSignedCompact(stripWhitespace(signature), payload)
}
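
// Illustrative usage sketch (not part of the upstream go-jose sources):
// parsing a compact JWS and then verifying it. Parsing alone authenticates
// nothing; only the payload returned by Verify (or VerifyMulti) should be
// trusted. The variables token and publicKey are hypothetical; assumes a
// caller that imports this package as jose:
//
//	object, err := jose.ParseSigned(token)
//	if err != nil {
//		// handle parse error
//	}
//	payload, err := object.Verify(publicKey)
//	if err != nil {
//		// signature did not verify
//	}
//	_ = payload // verified bytes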
|
||||
|
||||
// Get a header value
|
||||
func (sig Signature) mergedHeaders() rawHeader {
|
||||
out := rawHeader{}
|
||||
out.merge(sig.protected)
|
||||
out.merge(sig.header)
|
||||
return out
|
||||
}
|
||||
|
||||
// Compute data to be signed
|
||||
func (obj JSONWebSignature) computeAuthData(payload []byte, signature *Signature) ([]byte, error) {
|
||||
var authData bytes.Buffer
|
||||
|
||||
protectedHeader := new(rawHeader)
|
||||
|
||||
if signature.original != nil && signature.original.Protected != nil {
|
||||
if err := json.Unmarshal(signature.original.Protected.bytes(), protectedHeader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
authData.WriteString(signature.original.Protected.base64())
|
||||
} else if signature.protected != nil {
|
||||
protectedHeader = signature.protected
|
||||
authData.WriteString(base64.RawURLEncoding.EncodeToString(mustSerializeJSON(protectedHeader)))
|
||||
}
|
||||
|
||||
needsBase64 := true
|
||||
|
||||
if protectedHeader != nil {
|
||||
var err error
|
||||
if needsBase64, err = protectedHeader.getB64(); err != nil {
|
||||
needsBase64 = true
|
||||
}
|
||||
}
|
||||
|
||||
authData.WriteByte('.')
|
||||
|
||||
if needsBase64 {
|
||||
authData.WriteString(base64.RawURLEncoding.EncodeToString(payload))
|
||||
} else {
|
||||
authData.Write(payload)
|
||||
}
|
||||
|
||||
return authData.Bytes(), nil
|
||||
}
|
||||
|
||||
// parseSignedFull parses a message in full format.
|
||||
func parseSignedFull(input string) (*JSONWebSignature, error) {
|
||||
var parsed rawJSONWebSignature
|
||||
err := json.Unmarshal([]byte(input), &parsed)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return parsed.sanitized()
|
||||
}
|
||||
|
||||
// sanitized produces a cleaned-up JWS object from the raw JSON.
|
||||
func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) {
|
||||
if parsed.Payload == nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: missing payload in JWS message")
|
||||
}
|
||||
|
||||
obj := &JSONWebSignature{
|
||||
payload: parsed.Payload.bytes(),
|
||||
Signatures: make([]Signature, len(parsed.Signatures)),
|
||||
}
|
||||
|
||||
if len(parsed.Signatures) == 0 {
|
||||
// No signatures array, must be flattened serialization
|
||||
signature := Signature{}
|
||||
if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 {
|
||||
signature.protected = &rawHeader{}
|
||||
err := json.Unmarshal(parsed.Protected.bytes(), signature.protected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Check that there is not a nonce in the unprotected header
|
||||
if parsed.Header != nil && parsed.Header.getNonce() != "" {
|
||||
return nil, ErrUnprotectedNonce
|
||||
}
|
||||
|
||||
signature.header = parsed.Header
|
||||
signature.Signature = parsed.Signature.bytes()
|
||||
// Make a fake "original" rawSignatureInfo to store the unprocessed
|
||||
// Protected header. This is necessary because the Protected header can
|
||||
// contain arbitrary fields not registered as part of the spec. See
|
||||
// https://tools.ietf.org/html/draft-ietf-jose-json-web-signature-41#section-4
|
||||
// If we unmarshal Protected into a rawHeader with its explicit list of fields,
|
||||
// we cannot marshal losslessly. So we have to keep around the original bytes.
|
||||
// This is used in computeAuthData, which will first attempt to use
|
||||
// the original bytes of a protected header, and fall back on marshaling the
|
||||
// header struct only if those bytes are not available.
|
||||
signature.original = &rawSignatureInfo{
|
||||
Protected: parsed.Protected,
|
||||
Header: parsed.Header,
|
||||
Signature: parsed.Signature,
|
||||
}
|
||||
|
||||
var err error
|
||||
signature.Header, err = signature.mergedHeaders().sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if signature.header != nil {
|
||||
signature.Unprotected, err = signature.header.sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if signature.protected != nil {
|
||||
signature.Protected, err = signature.protected.sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
|
||||
jwk := signature.Header.JSONWebKey
|
||||
if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
|
||||
return nil, errors.New("go-jose/go-jose: invalid embedded jwk, must be public key")
|
||||
}
|
||||
|
||||
obj.Signatures = append(obj.Signatures, signature)
|
||||
}
|
||||
|
||||
for i, sig := range parsed.Signatures {
|
||||
if sig.Protected != nil && len(sig.Protected.bytes()) > 0 {
|
||||
obj.Signatures[i].protected = &rawHeader{}
|
||||
err := json.Unmarshal(sig.Protected.bytes(), obj.Signatures[i].protected)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Check that there is not a nonce in the unprotected header
|
||||
if sig.Header != nil && sig.Header.getNonce() != "" {
|
||||
return nil, ErrUnprotectedNonce
|
||||
}
|
||||
|
||||
var err error
|
||||
obj.Signatures[i].Header, err = obj.Signatures[i].mergedHeaders().sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if obj.Signatures[i].header != nil {
|
||||
obj.Signatures[i].Unprotected, err = obj.Signatures[i].header.sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if obj.Signatures[i].protected != nil {
|
||||
obj.Signatures[i].Protected, err = obj.Signatures[i].protected.sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
obj.Signatures[i].Signature = sig.Signature.bytes()
|
||||
|
||||
// As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded.
|
||||
jwk := obj.Signatures[i].Header.JSONWebKey
|
||||
if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) {
|
||||
return nil, errors.New("go-jose/go-jose: invalid embedded jwk, must be public key")
|
||||
}
|
||||
|
||||
// Copy value of sig
|
||||
original := sig
|
||||
|
||||
obj.Signatures[i].header = sig.Header
|
||||
obj.Signatures[i].original = &original
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// parseSignedCompact parses a message in compact format.
|
||||
func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) {
|
||||
parts := strings.Split(input, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
|
||||
}
|
||||
|
||||
if parts[1] != "" && payload != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")
|
||||
}
|
||||
|
||||
rawProtected, err := base64URLDecode(parts[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if payload == nil {
|
||||
payload, err = base64URLDecode(parts[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
signature, err := base64URLDecode(parts[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raw := &rawJSONWebSignature{
|
||||
Payload: newBuffer(payload),
|
||||
Protected: newBuffer(rawProtected),
|
||||
Signature: newBuffer(signature),
|
||||
}
|
||||
return raw.sanitized()
|
||||
}
|
||||
|
||||
func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) {
|
||||
if len(obj.Signatures) != 1 || obj.Signatures[0].header != nil || obj.Signatures[0].protected == nil {
|
||||
return "", ErrNotSupported
|
||||
}
|
||||
|
||||
serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected))
|
||||
payload := ""
|
||||
signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)
|
||||
|
||||
if !detached {
|
||||
payload = base64.RawURLEncoding.EncodeToString(obj.payload)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil
|
||||
}
|
||||
|
||||
// CompactSerialize serializes an object using the compact serialization format.
|
||||
func (obj JSONWebSignature) CompactSerialize() (string, error) {
|
||||
return obj.compactSerialize(false)
|
||||
}
|
||||
|
||||
// DetachedCompactSerialize serializes an object using the compact serialization format with detached payload.
|
||||
func (obj JSONWebSignature) DetachedCompactSerialize() (string, error) {
|
||||
return obj.compactSerialize(true)
|
||||
}
|
||||
|
||||
// FullSerialize serializes an object using the full JSON serialization format.
|
||||
func (obj JSONWebSignature) FullSerialize() string {
|
||||
raw := rawJSONWebSignature{
|
||||
Payload: newBuffer(obj.payload),
|
||||
}
|
||||
|
||||
if len(obj.Signatures) == 1 {
|
||||
if obj.Signatures[0].protected != nil {
|
||||
serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)
|
||||
raw.Protected = newBuffer(serializedProtected)
|
||||
}
|
||||
raw.Header = obj.Signatures[0].header
|
||||
raw.Signature = newBuffer(obj.Signatures[0].Signature)
|
||||
} else {
|
||||
raw.Signatures = make([]rawSignatureInfo, len(obj.Signatures))
|
||||
for i, signature := range obj.Signatures {
|
||||
raw.Signatures[i] = rawSignatureInfo{
|
||||
Header: signature.header,
|
||||
Signature: newBuffer(signature.Signature),
|
||||
}
|
||||
|
||||
if signature.protected != nil {
|
||||
raw.Signatures[i].Protected = newBuffer(mustSerializeJSON(signature.protected))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return string(mustSerializeJSON(raw))
|
||||
}
@@ -0,0 +1,144 @@
/*-
|
||||
* Copyright 2018 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
// OpaqueSigner is an interface that supports signing payloads with opaque
// private key(s). Private key operations performed by implementers may, for
// example, occur in a hardware module. An OpaqueSigner may rotate signing keys
// transparently to the user of this interface.
type OpaqueSigner interface {
	// Public returns the public key of the current signing key.
	Public() *JSONWebKey
	// Algs returns a list of supported signing algorithms.
	Algs() []SignatureAlgorithm
	// SignPayload signs a payload with the current signing key using the given
	// algorithm.
	SignPayload(payload []byte, alg SignatureAlgorithm) ([]byte, error)
}
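
// Illustrative sketch (not part of the upstream go-jose sources): a minimal
// OpaqueSigner backed by an in-memory Ed25519 key. A real implementation
// would typically delegate SignPayload to an HSM or a remote signing service.
// The type and field names below are hypothetical; assumes crypto/ed25519:
//
//	type ed25519OpaqueSigner struct {
//		priv ed25519.PrivateKey
//		jwk  *JSONWebKey // e.g. &JSONWebKey{Key: priv.Public()}
//	}
//
//	func (s *ed25519OpaqueSigner) Public() *JSONWebKey        { return s.jwk }
//	func (s *ed25519OpaqueSigner) Algs() []SignatureAlgorithm { return []SignatureAlgorithm{EdDSA} }
//
//	func (s *ed25519OpaqueSigner) SignPayload(payload []byte, alg SignatureAlgorithm) ([]byte, error) {
//		if alg != EdDSA {
//			return nil, ErrUnsupportedAlgorithm
//		}
//		return ed25519.Sign(s.priv, payload), nil
//	}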
|
||||
|
||||
type opaqueSigner struct {
|
||||
signer OpaqueSigner
|
||||
}
|
||||
|
||||
func newOpaqueSigner(alg SignatureAlgorithm, signer OpaqueSigner) (recipientSigInfo, error) {
|
||||
var algSupported bool
|
||||
for _, salg := range signer.Algs() {
|
||||
if alg == salg {
|
||||
algSupported = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !algSupported {
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: alg,
|
||||
publicKey: signer.Public,
|
||||
signer: &opaqueSigner{
|
||||
signer: signer,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (o *opaqueSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
out, err := o.signer.SignPayload(payload, alg)
|
||||
if err != nil {
|
||||
return Signature{}, err
|
||||
}
|
||||
|
||||
return Signature{
|
||||
Signature: out,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// OpaqueVerifier is an interface that supports verifying payloads with opaque
|
||||
// public key(s). An OpaqueSigner may rotate signing keys transparently to the
|
||||
// user of this interface.
|
||||
type OpaqueVerifier interface {
|
||||
VerifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
|
||||
}
|
||||
|
||||
type opaqueVerifier struct {
|
||||
verifier OpaqueVerifier
|
||||
}
|
||||
|
||||
func (o *opaqueVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error {
|
||||
return o.verifier.VerifyPayload(payload, signature, alg)
|
||||
}
|
||||
|
||||
// OpaqueKeyEncrypter is an interface that supports encrypting keys with an opaque key.
|
||||
type OpaqueKeyEncrypter interface {
|
||||
// KeyID returns the kid
|
||||
KeyID() string
|
||||
// Algs returns a list of supported key encryption algorithms.
|
||||
Algs() []KeyAlgorithm
|
||||
// encryptKey encrypts the CEK using the given algorithm.
|
||||
encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error)
|
||||
}
|
||||
|
||||
type opaqueKeyEncrypter struct {
|
||||
encrypter OpaqueKeyEncrypter
|
||||
}
|
||||
|
||||
func newOpaqueKeyEncrypter(alg KeyAlgorithm, encrypter OpaqueKeyEncrypter) (recipientKeyInfo, error) {
|
||||
var algSupported bool
|
||||
for _, salg := range encrypter.Algs() {
|
||||
if alg == salg {
|
||||
algSupported = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !algSupported {
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyID: encrypter.KeyID(),
|
||||
keyAlg: alg,
|
||||
keyEncrypter: &opaqueKeyEncrypter{
|
||||
encrypter: encrypter,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (oke *opaqueKeyEncrypter) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
return oke.encrypter.encryptKey(cek, alg)
|
||||
}
|
||||
|
||||
// OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key.
|
||||
type OpaqueKeyDecrypter interface {
|
||||
DecryptKey(encryptedKey []byte, header Header) ([]byte, error)
|
||||
}
|
||||
|
||||
type opaqueKeyDecrypter struct {
|
||||
decrypter OpaqueKeyDecrypter
|
||||
}
|
||||
|
||||
func (okd *opaqueKeyDecrypter) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
mergedHeaders := rawHeader{}
|
||||
mergedHeaders.merge(&headers)
|
||||
mergedHeaders.merge(recipient.header)
|
||||
|
||||
header, err := mergedHeaders.sanitized()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return okd.decrypter.DecryptKey(recipient.encryptedKey, header)
|
||||
}
@@ -0,0 +1,520 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"crypto/elliptic"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// KeyAlgorithm represents a key management algorithm.
|
||||
type KeyAlgorithm string
|
||||
|
||||
// SignatureAlgorithm represents a signature (or MAC) algorithm.
|
||||
type SignatureAlgorithm string
|
||||
|
||||
// ContentEncryption represents a content encryption algorithm.
|
||||
type ContentEncryption string
|
||||
|
||||
// CompressionAlgorithm represents an algorithm used for plaintext compression.
|
||||
type CompressionAlgorithm string
|
||||
|
||||
// ContentType represents type of the contained data.
|
||||
type ContentType string
|
||||
|
||||
var (
|
||||
// ErrCryptoFailure represents an error in cryptographic primitive. This
|
||||
// occurs when, for example, a message had an invalid authentication tag or
|
||||
// could not be decrypted.
|
||||
ErrCryptoFailure = errors.New("go-jose/go-jose: error in cryptographic primitive")
|
||||
|
||||
// ErrUnsupportedAlgorithm indicates that a selected algorithm is not
|
||||
// supported. This occurs when trying to instantiate an encrypter for an
|
||||
// algorithm that is not yet implemented.
|
||||
ErrUnsupportedAlgorithm = errors.New("go-jose/go-jose: unknown/unsupported algorithm")
|
||||
|
||||
// ErrUnsupportedKeyType indicates that the given key type/format is not
|
||||
// supported. This occurs when trying to instantiate an encrypter and passing
|
||||
// it a key of an unrecognized type or with unsupported parameters, such as
|
||||
// an RSA private key with more than two primes.
|
||||
ErrUnsupportedKeyType = errors.New("go-jose/go-jose: unsupported key type/format")
|
||||
|
||||
// ErrInvalidKeySize indicates that the given key is not the correct size
|
||||
// for the selected algorithm. This can occur, for example, when trying to
|
||||
// encrypt with AES-256 but passing only a 128-bit key as input.
|
||||
ErrInvalidKeySize = errors.New("go-jose/go-jose: invalid key size for algorithm")
|
||||
|
||||
// ErrNotSupported serialization of object is not supported. This occurs when
|
||||
// trying to compact-serialize an object which can't be represented in
|
||||
// compact form.
|
||||
ErrNotSupported = errors.New("go-jose/go-jose: compact serialization not supported for object")
|
||||
|
||||
// ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a
|
||||
// nonce header parameter was included in an unprotected header object.
|
||||
ErrUnprotectedNonce = errors.New("go-jose/go-jose: Nonce parameter included in unprotected header")
|
||||
)
|
||||
|
||||
// Key management algorithms
|
||||
const (
|
||||
ED25519 = KeyAlgorithm("ED25519")
|
||||
RSA1_5 = KeyAlgorithm("RSA1_5") // RSA-PKCS1v1.5
|
||||
RSA_OAEP = KeyAlgorithm("RSA-OAEP") // RSA-OAEP-SHA1
|
||||
RSA_OAEP_256 = KeyAlgorithm("RSA-OAEP-256") // RSA-OAEP-SHA256
|
||||
A128KW = KeyAlgorithm("A128KW") // AES key wrap (128)
|
||||
A192KW = KeyAlgorithm("A192KW") // AES key wrap (192)
|
||||
A256KW = KeyAlgorithm("A256KW") // AES key wrap (256)
|
||||
DIRECT = KeyAlgorithm("dir") // Direct encryption
|
||||
ECDH_ES = KeyAlgorithm("ECDH-ES") // ECDH-ES
|
||||
ECDH_ES_A128KW = KeyAlgorithm("ECDH-ES+A128KW") // ECDH-ES + AES key wrap (128)
|
||||
ECDH_ES_A192KW = KeyAlgorithm("ECDH-ES+A192KW") // ECDH-ES + AES key wrap (192)
|
||||
ECDH_ES_A256KW = KeyAlgorithm("ECDH-ES+A256KW") // ECDH-ES + AES key wrap (256)
|
||||
A128GCMKW = KeyAlgorithm("A128GCMKW") // AES-GCM key wrap (128)
|
||||
A192GCMKW = KeyAlgorithm("A192GCMKW") // AES-GCM key wrap (192)
|
||||
A256GCMKW = KeyAlgorithm("A256GCMKW") // AES-GCM key wrap (256)
|
||||
PBES2_HS256_A128KW = KeyAlgorithm("PBES2-HS256+A128KW") // PBES2 + HMAC-SHA256 + AES key wrap (128)
|
||||
PBES2_HS384_A192KW = KeyAlgorithm("PBES2-HS384+A192KW") // PBES2 + HMAC-SHA384 + AES key wrap (192)
|
||||
PBES2_HS512_A256KW = KeyAlgorithm("PBES2-HS512+A256KW") // PBES2 + HMAC-SHA512 + AES key wrap (256)
|
||||
)
|
||||
|
||||
// Signature algorithms
|
||||
const (
|
||||
EdDSA = SignatureAlgorithm("EdDSA")
|
||||
HS256 = SignatureAlgorithm("HS256") // HMAC using SHA-256
|
||||
HS384 = SignatureAlgorithm("HS384") // HMAC using SHA-384
|
||||
HS512 = SignatureAlgorithm("HS512") // HMAC using SHA-512
|
||||
RS256 = SignatureAlgorithm("RS256") // RSASSA-PKCS-v1.5 using SHA-256
|
||||
RS384 = SignatureAlgorithm("RS384") // RSASSA-PKCS-v1.5 using SHA-384
|
||||
RS512 = SignatureAlgorithm("RS512") // RSASSA-PKCS-v1.5 using SHA-512
|
||||
ES256 = SignatureAlgorithm("ES256") // ECDSA using P-256 and SHA-256
|
||||
ES384 = SignatureAlgorithm("ES384") // ECDSA using P-384 and SHA-384
|
||||
ES512 = SignatureAlgorithm("ES512") // ECDSA using P-521 and SHA-512
|
||||
PS256 = SignatureAlgorithm("PS256") // RSASSA-PSS using SHA256 and MGF1-SHA256
|
||||
PS384 = SignatureAlgorithm("PS384") // RSASSA-PSS using SHA384 and MGF1-SHA384
|
||||
PS512 = SignatureAlgorithm("PS512") // RSASSA-PSS using SHA512 and MGF1-SHA512
|
||||
)
|
||||
|
||||
// Content encryption algorithms
|
||||
const (
|
||||
A128CBC_HS256 = ContentEncryption("A128CBC-HS256") // AES-CBC + HMAC-SHA256 (128)
|
||||
A192CBC_HS384 = ContentEncryption("A192CBC-HS384") // AES-CBC + HMAC-SHA384 (192)
|
||||
A256CBC_HS512 = ContentEncryption("A256CBC-HS512") // AES-CBC + HMAC-SHA512 (256)
|
||||
A128GCM = ContentEncryption("A128GCM") // AES-GCM (128)
|
||||
A192GCM = ContentEncryption("A192GCM") // AES-GCM (192)
|
||||
A256GCM = ContentEncryption("A256GCM") // AES-GCM (256)
|
||||
)
|
||||
|
||||
// Compression algorithms
|
||||
const (
|
||||
NONE = CompressionAlgorithm("") // No compression
|
||||
DEFLATE = CompressionAlgorithm("DEF") // DEFLATE (RFC 1951)
|
||||
)
|
||||
|
||||
// A key in the protected header of a JWS object. Use of the Header...
|
||||
// constants is preferred to enhance type safety.
|
||||
type HeaderKey string
|
||||
|
||||
const (
|
||||
HeaderType = "typ" // string
|
||||
HeaderContentType = "cty" // string
|
||||
|
||||
// These are set by go-jose and shouldn't need to be set by consumers of the
|
||||
// library.
|
||||
headerAlgorithm = "alg" // string
|
||||
headerEncryption = "enc" // ContentEncryption
|
||||
headerCompression = "zip" // CompressionAlgorithm
|
||||
headerCritical = "crit" // []string
|
||||
|
||||
headerAPU = "apu" // *byteBuffer
|
||||
headerAPV = "apv" // *byteBuffer
|
||||
headerEPK = "epk" // *JSONWebKey
|
||||
headerIV = "iv" // *byteBuffer
|
||||
headerTag = "tag" // *byteBuffer
|
||||
headerX5c = "x5c" // []*x509.Certificate
|
||||
|
||||
headerJWK = "jwk" // *JSONWebKey
|
||||
headerKeyID = "kid" // string
|
||||
headerNonce = "nonce" // string
|
||||
headerB64 = "b64" // bool
|
||||
|
||||
headerP2C = "p2c" // *byteBuffer (int)
|
||||
headerP2S = "p2s" // *byteBuffer ([]byte)
|
||||
|
||||
)
|
||||
|
||||
// supportedCritical is the set of supported extensions that are understood and processed.
|
||||
var supportedCritical = map[string]bool{
|
||||
headerB64: true,
|
||||
}
|
||||
|
||||
// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
|
||||
//
|
||||
// The decoding of the constituent items is deferred because we want to marshal
|
||||
// some members into particular structs rather than generic maps, but at the
|
||||
// same time we need to receive any extra fields unhandled by this library to
|
||||
// pass through to consuming code in case it wants to examine them.
|
||||
type rawHeader map[HeaderKey]*json.RawMessage
|
||||
|
||||
// Header represents the read-only JOSE header for JWE/JWS objects.
|
||||
type Header struct {
|
||||
KeyID string
|
||||
JSONWebKey *JSONWebKey
|
||||
Algorithm string
|
||||
Nonce string
|
||||
|
||||
// Unverified certificate chain parsed from x5c header.
|
||||
certificates []*x509.Certificate
|
||||
|
||||
// Any headers not recognised above get unmarshalled
|
||||
// from JSON in a generic manner and placed in this map.
|
||||
ExtraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
// Certificates verifies and returns the certificate chain present
// in the x5c header field of a message, if one is present. Returns
// an error if there was no x5c header present or the chain could
// not be validated with the given verify options.
func (h Header) Certificates(opts x509.VerifyOptions) ([][]*x509.Certificate, error) {
	if len(h.certificates) == 0 {
		return nil, errors.New("go-jose/go-jose: no x5c header present in message")
	}

	leaf := h.certificates[0]
	if opts.Intermediates == nil {
		opts.Intermediates = x509.NewCertPool()
		for _, intermediate := range h.certificates[1:] {
			opts.Intermediates.AddCert(intermediate)
		}
	}

	return leaf.Verify(opts)
}
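
// Illustrative usage sketch (not part of the upstream go-jose sources):
// validating an embedded x5c chain against a trusted root pool before using
// the leaf key for signature verification. The variables sig (a Signature
// from a parsed JSONWebSignature) and rootPool (*x509.CertPool) are
// hypothetical and prepared by the caller:
//
//	chains, err := sig.Header.Certificates(x509.VerifyOptions{
//		Roots: rootPool,
//	})
//	if err != nil {
//		// no x5c header, or the chain does not chain up to a trusted root
//	}
//	leaf := chains[0][0] // verified leaf certificate; its public key may be passed to Verify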
|
||||
|
||||
func (parsed rawHeader) set(k HeaderKey, v interface{}) error {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parsed[k] = makeRawMessage(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getString gets a string from the raw JSON, defaulting to "".
|
||||
func (parsed rawHeader) getString(k HeaderKey) string {
|
||||
v, ok := parsed[k]
|
||||
if !ok || v == nil {
|
||||
return ""
|
||||
}
|
||||
var s string
|
||||
err := json.Unmarshal(*v, &s)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// getByteBuffer gets a byte buffer from the raw JSON. Returns (nil, nil) if
|
||||
// not specified.
|
||||
func (parsed rawHeader) getByteBuffer(k HeaderKey) (*byteBuffer, error) {
|
||||
v := parsed[k]
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var bb *byteBuffer
|
||||
err := json.Unmarshal(*v, &bb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bb, nil
|
||||
}
|
||||
|
||||
// getAlgorithm extracts parsed "alg" from the raw JSON as a KeyAlgorithm.
|
||||
func (parsed rawHeader) getAlgorithm() KeyAlgorithm {
|
||||
return KeyAlgorithm(parsed.getString(headerAlgorithm))
|
||||
}
|
||||
|
||||
// getSignatureAlgorithm extracts parsed "alg" from the raw JSON as a SignatureAlgorithm.
|
||||
func (parsed rawHeader) getSignatureAlgorithm() SignatureAlgorithm {
|
||||
return SignatureAlgorithm(parsed.getString(headerAlgorithm))
|
||||
}
|
||||
|
||||
// getEncryption extracts parsed "enc" from the raw JSON.
|
||||
func (parsed rawHeader) getEncryption() ContentEncryption {
|
||||
return ContentEncryption(parsed.getString(headerEncryption))
|
||||
}
|
||||
|
||||
// getCompression extracts parsed "zip" from the raw JSON.
|
||||
func (parsed rawHeader) getCompression() CompressionAlgorithm {
|
||||
return CompressionAlgorithm(parsed.getString(headerCompression))
|
||||
}
|
||||
|
||||
func (parsed rawHeader) getNonce() string {
|
||||
return parsed.getString(headerNonce)
|
||||
}
|
||||
|
||||
// getEPK extracts parsed "epk" from the raw JSON.
|
||||
func (parsed rawHeader) getEPK() (*JSONWebKey, error) {
|
||||
v := parsed[headerEPK]
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var epk *JSONWebKey
|
||||
err := json.Unmarshal(*v, &epk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return epk, nil
|
||||
}
|
||||
|
||||
// getAPU extracts parsed "apu" from the raw JSON.
|
||||
func (parsed rawHeader) getAPU() (*byteBuffer, error) {
|
||||
return parsed.getByteBuffer(headerAPU)
|
||||
}
|
||||
|
||||
// getAPV extracts parsed "apv" from the raw JSON.
|
||||
func (parsed rawHeader) getAPV() (*byteBuffer, error) {
|
||||
return parsed.getByteBuffer(headerAPV)
|
||||
}
|
||||
|
||||
// getIV extracts parsed "iv" from the raw JSON.
|
||||
func (parsed rawHeader) getIV() (*byteBuffer, error) {
|
||||
return parsed.getByteBuffer(headerIV)
|
||||
}
|
||||
|
||||
// getTag extracts parsed "tag" from the raw JSON.
|
||||
func (parsed rawHeader) getTag() (*byteBuffer, error) {
|
||||
return parsed.getByteBuffer(headerTag)
|
||||
}
|
||||
|
||||
// getJWK extracts parsed "jwk" from the raw JSON.
|
||||
func (parsed rawHeader) getJWK() (*JSONWebKey, error) {
|
||||
v := parsed[headerJWK]
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
var jwk *JSONWebKey
|
||||
err := json.Unmarshal(*v, &jwk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return jwk, nil
|
||||
}
|
||||
|
||||
// getCritical extracts parsed "crit" from the raw JSON. If omitted, it
|
||||
// returns an empty slice.
|
||||
func (parsed rawHeader) getCritical() ([]string, error) {
|
||||
v := parsed[headerCritical]
|
||||
if v == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var q []string
|
||||
err := json.Unmarshal(*v, &q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return q, nil
|
||||
}
|
||||
|
||||
// getP2C extracts parsed "p2c" from the raw JSON.
|
||||
func (parsed rawHeader) getP2C() (int, error) {
|
||||
v := parsed[headerP2C]
|
||||
if v == nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
var p2c int
|
||||
err := json.Unmarshal(*v, &p2c)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return p2c, nil
|
||||
}
|
||||
|
||||
// getP2S extracts parsed "p2s" from the raw JSON.
|
||||
func (parsed rawHeader) getP2S() (*byteBuffer, error) {
|
||||
return parsed.getByteBuffer(headerP2S)
|
||||
}
|
||||
|
||||
// getB64 extracts parsed "b64" from the raw JSON, defaulting to true.
|
||||
func (parsed rawHeader) getB64() (bool, error) {
|
||||
v := parsed[headerB64]
|
||||
if v == nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
var b64 bool
|
||||
err := json.Unmarshal(*v, &b64)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
return b64, nil
|
||||
}
|
||||
|
||||
// sanitized produces a cleaned-up header object from the raw JSON.
|
||||
func (parsed rawHeader) sanitized() (h Header, err error) {
|
||||
for k, v := range parsed {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
switch k {
|
||||
case headerJWK:
|
||||
var jwk *JSONWebKey
|
||||
err = json.Unmarshal(*v, &jwk)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal JWK: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.JSONWebKey = jwk
|
||||
case headerKeyID:
|
||||
var s string
|
||||
err = json.Unmarshal(*v, &s)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal key ID: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.KeyID = s
|
||||
case headerAlgorithm:
|
||||
var s string
|
||||
err = json.Unmarshal(*v, &s)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal algorithm: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.Algorithm = s
|
||||
case headerNonce:
|
||||
var s string
|
||||
err = json.Unmarshal(*v, &s)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal nonce: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.Nonce = s
|
||||
case headerX5c:
|
||||
c := []string{}
|
||||
err = json.Unmarshal(*v, &c)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal x5c header: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.certificates, err = parseCertificateChain(c)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal x5c header: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
default:
|
||||
if h.ExtraHeaders == nil {
|
||||
h.ExtraHeaders = map[HeaderKey]interface{}{}
|
||||
}
|
||||
var v2 interface{}
|
||||
err = json.Unmarshal(*v, &v2)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to unmarshal value: %v: %#v", err, string(*v))
|
||||
return
|
||||
}
|
||||
h.ExtraHeaders[k] = v2
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func parseCertificateChain(chain []string) ([]*x509.Certificate, error) {
|
||||
out := make([]*x509.Certificate, len(chain))
|
||||
for i, cert := range chain {
|
||||
raw, err := base64.StdEncoding.DecodeString(cert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out[i], err = x509.ParseCertificate(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (parsed rawHeader) isSet(k HeaderKey) bool {
|
||||
dvr := parsed[k]
|
||||
if dvr == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
var dv interface{}
|
||||
err := json.Unmarshal(*dvr, &dv)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
if dvStr, ok := dv.(string); ok {
|
||||
return dvStr != ""
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Merge headers from src into dst, giving precedence to headers already set in dst.
|
||||
func (parsed rawHeader) merge(src *rawHeader) {
|
||||
if src == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for k, v := range *src {
|
||||
if parsed.isSet(k) {
|
||||
continue
|
||||
}
|
||||
|
||||
parsed[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// Get JOSE name of curve
|
||||
func curveName(crv elliptic.Curve) (string, error) {
|
||||
switch crv {
|
||||
case elliptic.P256():
|
||||
return "P-256", nil
|
||||
case elliptic.P384():
|
||||
return "P-384", nil
|
||||
case elliptic.P521():
|
||||
return "P-521", nil
|
||||
default:
|
||||
return "", fmt.Errorf("go-jose/go-jose: unsupported/unknown elliptic curve")
|
||||
}
|
||||
}
|
||||
|
||||
// Get size of curve in bytes
|
||||
func curveSize(crv elliptic.Curve) int {
|
||||
bits := crv.Params().BitSize
|
||||
|
||||
div := bits / 8
|
||||
mod := bits % 8
|
||||
|
||||
if mod == 0 {
|
||||
return div
|
||||
}
|
||||
|
||||
return div + 1
|
||||
}
|
||||
|
||||
func makeRawMessage(b []byte) *json.RawMessage {
|
||||
rm := json.RawMessage(b)
|
||||
return &rm
|
||||
}
@@ -0,0 +1,450 @@
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/rsa"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-jose/go-jose/v3/json"
|
||||
)
|
||||
|
||||
// NonceSource represents a source of random nonces to go into JWS objects
|
||||
type NonceSource interface {
|
||||
Nonce() (string, error)
|
||||
}
|
||||
|
||||
// Signer represents a signer which takes a payload and produces a signed JWS object.
|
||||
type Signer interface {
|
||||
Sign(payload []byte) (*JSONWebSignature, error)
|
||||
Options() SignerOptions
|
||||
}
|
||||
|
||||
// SigningKey represents an algorithm/key used to sign a message.
|
||||
type SigningKey struct {
|
||||
Algorithm SignatureAlgorithm
|
||||
Key interface{}
|
||||
}
|
||||
|
||||
// SignerOptions represents options that can be set when creating signers.
|
||||
type SignerOptions struct {
|
||||
NonceSource NonceSource
|
||||
EmbedJWK bool
|
||||
|
||||
// Optional map of additional keys to be inserted into the protected header
|
||||
// of a JWS object. Some specifications which make use of JWS like to insert
|
||||
// additional values here. All values must be JSON-serializable.
|
||||
ExtraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
|
||||
// if necessary. It returns itself and so can be used in a fluent style.
|
||||
func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
|
||||
if so.ExtraHeaders == nil {
|
||||
so.ExtraHeaders = map[HeaderKey]interface{}{}
|
||||
}
|
||||
so.ExtraHeaders[k] = v
|
||||
return so
|
||||
}
|
||||
|
||||
// WithContentType adds a content type ("cty") header and returns the updated
|
||||
// SignerOptions.
|
||||
func (so *SignerOptions) WithContentType(contentType ContentType) *SignerOptions {
|
||||
return so.WithHeader(HeaderContentType, contentType)
|
||||
}
|
||||
|
||||
// WithType adds a type ("typ") header and returns the updated SignerOptions.
|
||||
func (so *SignerOptions) WithType(typ ContentType) *SignerOptions {
|
||||
return so.WithHeader(HeaderType, typ)
|
||||
}
|
||||
|
||||
// WithCritical adds the given names to the critical ("crit") header and returns
|
||||
// the updated SignerOptions.
|
||||
func (so *SignerOptions) WithCritical(names ...string) *SignerOptions {
|
||||
if so.ExtraHeaders[headerCritical] == nil {
|
||||
so.WithHeader(headerCritical, make([]string, 0, len(names)))
|
||||
}
|
||||
crit := so.ExtraHeaders[headerCritical].([]string)
|
||||
so.ExtraHeaders[headerCritical] = append(crit, names...)
|
||||
return so
|
||||
}
|
||||
|
||||
// WithBase64 adds a base64url-encode payload ("b64") header and returns the updated
|
||||
// SignerOptions. When the "b64" value is "false", the payload is not base64 encoded.
|
||||
func (so *SignerOptions) WithBase64(b64 bool) *SignerOptions {
|
||||
if !b64 {
|
||||
so.WithHeader(headerB64, b64)
|
||||
so.WithCritical(headerB64)
|
||||
}
|
||||
return so
|
||||
}
|
||||
|
||||
type payloadSigner interface {
|
||||
signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error)
|
||||
}
|
||||
|
||||
type payloadVerifier interface {
|
||||
verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error
|
||||
}
|
||||
|
||||
type genericSigner struct {
|
||||
recipients []recipientSigInfo
|
||||
nonceSource NonceSource
|
||||
embedJWK bool
|
||||
extraHeaders map[HeaderKey]interface{}
|
||||
}
|
||||
|
||||
type recipientSigInfo struct {
|
||||
sigAlg SignatureAlgorithm
|
||||
publicKey func() *JSONWebKey
|
||||
signer payloadSigner
|
||||
}
|
||||
|
||||
func staticPublicKey(jwk *JSONWebKey) func() *JSONWebKey {
|
||||
return func() *JSONWebKey {
|
||||
return jwk
|
||||
}
|
||||
}
|
||||
|
||||
// NewSigner creates an appropriate signer based on the key type
func NewSigner(sig SigningKey, opts *SignerOptions) (Signer, error) {
	return NewMultiSigner([]SigningKey{sig}, opts)
}

// NewMultiSigner creates a signer for multiple recipients
func NewMultiSigner(sigs []SigningKey, opts *SignerOptions) (Signer, error) {
	signer := &genericSigner{recipients: []recipientSigInfo{}}

	if opts != nil {
		signer.nonceSource = opts.NonceSource
		signer.embedJWK = opts.EmbedJWK
		signer.extraHeaders = opts.ExtraHeaders
	}

	for _, sig := range sigs {
		err := signer.addRecipient(sig.Algorithm, sig.Key)
		if err != nil {
			return nil, err
		}
	}

	return signer, nil
}
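
// Illustrative usage sketch (not part of the upstream go-jose sources):
// creating a single-recipient signer and producing a compact JWS. The
// variable ecdsaPrivateKey is hypothetical; assumes a caller that imports
// this package as jose and holds a P-256 key suitable for ES256:
//
//	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: ecdsaPrivateKey}, nil)
//	if err != nil {
//		// handle error
//	}
//	object, err := signer.Sign([]byte(`{"hello":"world"}`))
//	if err != nil {
//		// handle error
//	}
//	token, err := object.CompactSerialize()
//	if err != nil {
//		// handle error
//	}
//	_ = token // header.payload.signature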
|
||||
|
||||
// newVerifier creates a verifier based on the key type
|
||||
func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
|
||||
switch verificationKey := verificationKey.(type) {
|
||||
case ed25519.PublicKey:
|
||||
return &edEncrypterVerifier{
|
||||
publicKey: verificationKey,
|
||||
}, nil
|
||||
case *rsa.PublicKey:
|
||||
return &rsaEncrypterVerifier{
|
||||
publicKey: verificationKey,
|
||||
}, nil
|
||||
case *ecdsa.PublicKey:
|
||||
return &ecEncrypterVerifier{
|
||||
publicKey: verificationKey,
|
||||
}, nil
|
||||
case []byte:
|
||||
return &symmetricMac{
|
||||
key: verificationKey,
|
||||
}, nil
|
||||
case JSONWebKey:
|
||||
return newVerifier(verificationKey.Key)
|
||||
case *JSONWebKey:
|
||||
return newVerifier(verificationKey.Key)
|
||||
}
|
||||
if ov, ok := verificationKey.(OpaqueVerifier); ok {
|
||||
return &opaqueVerifier{verifier: ov}, nil
|
||||
}
|
||||
return nil, ErrUnsupportedKeyType
|
||||
}
|
||||
|
||||
func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
|
||||
recipient, err := makeJWSRecipient(alg, signingKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx.recipients = append(ctx.recipients, recipient)
|
||||
return nil
|
||||
}
|
||||
|
||||
func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipientSigInfo, error) {
|
||||
switch signingKey := signingKey.(type) {
|
||||
case ed25519.PrivateKey:
|
||||
return newEd25519Signer(alg, signingKey)
|
||||
case *rsa.PrivateKey:
|
||||
return newRSASigner(alg, signingKey)
|
||||
case *ecdsa.PrivateKey:
|
||||
return newECDSASigner(alg, signingKey)
|
||||
case []byte:
|
||||
return newSymmetricSigner(alg, signingKey)
|
||||
case JSONWebKey:
|
||||
return newJWKSigner(alg, signingKey)
|
||||
case *JSONWebKey:
|
||||
return newJWKSigner(alg, *signingKey)
|
||||
}
|
||||
if signer, ok := signingKey.(OpaqueSigner); ok {
|
||||
return newOpaqueSigner(alg, signer)
|
||||
}
|
||||
return recipientSigInfo{}, ErrUnsupportedKeyType
|
||||
}
|
||||
|
||||
func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
|
||||
recipient, err := makeJWSRecipient(alg, signingKey.Key)
|
||||
if err != nil {
|
||||
return recipientSigInfo{}, err
|
||||
}
|
||||
if recipient.publicKey != nil && recipient.publicKey() != nil {
|
||||
// recipient.publicKey is a JWK synthesized for embedding when recipientSigInfo
|
||||
// was created for the inner key (such as a RSA or ECDSA public key). It contains
|
||||
// the pub key for embedding, but doesn't have extra params like key id.
|
||||
publicKey := signingKey
|
||||
publicKey.Key = recipient.publicKey().Key
|
||||
recipient.publicKey = staticPublicKey(&publicKey)
|
||||
|
||||
// This should be impossible, but let's check anyway.
|
||||
if !recipient.publicKey().IsPublic() {
|
||||
return recipientSigInfo{}, errors.New("go-jose/go-jose: public key was unexpectedly not public")
|
||||
}
|
||||
}
|
||||
return recipient, nil
|
||||
}
|
||||
|
||||
func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) {
|
||||
obj := &JSONWebSignature{}
|
||||
obj.payload = payload
|
||||
obj.Signatures = make([]Signature, len(ctx.recipients))
|
||||
|
||||
for i, recipient := range ctx.recipients {
|
||||
protected := map[HeaderKey]interface{}{
|
||||
headerAlgorithm: string(recipient.sigAlg),
|
||||
}
|
||||
|
||||
if recipient.publicKey != nil && recipient.publicKey() != nil {
|
||||
// We want to embed the JWK or set the kid header, but not both. Having a protected
|
||||
// header that contains an embedded JWK while also simultaneously containing the kid
|
||||
// header is confusing, and at least in ACME the two are considered to be mutually
|
||||
// exclusive. The fact that both can exist at the same time is a somewhat unfortunate
|
||||
// result of the JOSE spec. We've decided that this library will only include one or
|
||||
// the other to avoid this confusion.
|
||||
//
|
||||
// See https://github.com/go-jose/go-jose/issues/157 for more context.
|
||||
if ctx.embedJWK {
|
||||
protected[headerJWK] = recipient.publicKey()
|
||||
} else {
|
||||
keyID := recipient.publicKey().KeyID
|
||||
if keyID != "" {
|
||||
protected[headerKeyID] = keyID
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ctx.nonceSource != nil {
|
||||
nonce, err := ctx.nonceSource.Nonce()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: Error generating nonce: %v", err)
|
||||
}
|
||||
protected[headerNonce] = nonce
|
||||
}
|
||||
|
||||
for k, v := range ctx.extraHeaders {
|
||||
protected[k] = v
|
||||
}
|
||||
|
||||
serializedProtected := mustSerializeJSON(protected)
|
||||
needsBase64 := true
|
||||
|
||||
if b64, ok := protected[headerB64]; ok {
|
||||
if needsBase64, ok = b64.(bool); !ok {
|
||||
return nil, errors.New("go-jose/go-jose: Invalid b64 header parameter")
|
||||
}
|
||||
}
|
||||
|
||||
var input bytes.Buffer
|
||||
|
||||
input.WriteString(base64.RawURLEncoding.EncodeToString(serializedProtected))
|
||||
input.WriteByte('.')
|
||||
|
||||
if needsBase64 {
|
||||
input.WriteString(base64.RawURLEncoding.EncodeToString(payload))
|
||||
} else {
|
||||
input.Write(payload)
|
||||
}
|
||||
|
||||
signatureInfo, err := recipient.signer.signPayload(input.Bytes(), recipient.sigAlg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
signatureInfo.protected = &rawHeader{}
|
||||
for k, v := range protected {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: Error marshalling item %#v: %v", k, err)
|
||||
}
|
||||
(*signatureInfo.protected)[k] = makeRawMessage(b)
|
||||
}
|
||||
obj.Signatures[i] = signatureInfo
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func (ctx *genericSigner) Options() SignerOptions {
|
||||
return SignerOptions{
|
||||
NonceSource: ctx.nonceSource,
|
||||
EmbedJWK: ctx.embedJWK,
|
||||
ExtraHeaders: ctx.extraHeaders,
|
||||
}
|
||||
}
|
||||
|
||||
// Verify validates the signature on the object and returns the payload.
|
||||
// This function does not support multi-signature; if you desire multi-sig
// verification, use VerifyMulti instead.
|
||||
//
|
||||
// Be careful when verifying signatures based on embedded JWKs inside the
|
||||
// payload header. You cannot assume that the key received in a payload is
|
||||
// trusted.
|
||||
func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
|
||||
err := obj.DetachedVerify(obj.payload, verificationKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.payload, nil
|
||||
}
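A Signer produced by NewSigner pairs with Verify above: sign, serialize, parse, verify. The following is a minimal usage sketch from outside the package, assuming the github.com/go-jose/go-jose/v3 import path used by this vendor tree and a hypothetical HS256 shared secret:

package main

import (
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	// Illustrative sketch only; the key value is hypothetical.
	key := []byte("0123456789abcdef0123456789abcdef")

	// nil SignerOptions: no embedded JWK, nonce source, or extra headers.
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := signer.Sign([]byte(`{"hello":"world"}`))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	// Verify returns the payload only if the single signature checks out.
	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}
	payload, err := parsed.Verify(key)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}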
|
||||
|
||||
// UnsafePayloadWithoutVerification returns the payload without
|
||||
// verifying it. The content returned from this function cannot be
|
||||
// trusted.
|
||||
func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
|
||||
return obj.payload
|
||||
}
|
||||
|
||||
// DetachedVerify validates a detached signature on the given payload. In
|
||||
// most cases, you will probably want to use Verify instead. DetachedVerify
|
||||
// is only useful if you have a payload and signature that are separated from
|
||||
// each other.
|
||||
func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
|
||||
key := tryJWKS(verificationKey, obj.headers()...)
|
||||
verifier, err := newVerifier(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(obj.Signatures) > 1 {
|
||||
return errors.New("go-jose/go-jose: too many signatures in payload; expecting only one")
|
||||
}
|
||||
|
||||
signature := obj.Signatures[0]
|
||||
headers := signature.mergedHeaders()
|
||||
critical, err := headers.getCritical()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, name := range critical {
|
||||
if !supportedCritical[name] {
|
||||
return ErrCryptoFailure
|
||||
}
|
||||
}
|
||||
|
||||
input, err := obj.computeAuthData(payload, &signature)
|
||||
if err != nil {
|
||||
return ErrCryptoFailure
|
||||
}
|
||||
|
||||
alg := headers.getSignatureAlgorithm()
|
||||
err = verifier.verifyPayload(input, signature.Signature, alg)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return ErrCryptoFailure
|
||||
}
|
||||
|
||||
// VerifyMulti validates (one of the multiple) signatures on the object and
|
||||
// returns the index of the signature that was verified, along with the signature
|
||||
// object and the payload. We return the signature and index to guarantee that
|
||||
// callers are getting the verified value.
|
||||
func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
|
||||
idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey)
|
||||
if err != nil {
|
||||
return -1, Signature{}, nil, err
|
||||
}
|
||||
return idx, sig, obj.payload, nil
|
||||
}
|
||||
|
||||
// DetachedVerifyMulti validates a detached signature on the given payload with
|
||||
// a signature/object that has potentially multiple signers. This returns the index
|
||||
// of the signature that was verified, along with the signature object. We return
|
||||
// the signature and index to guarantee that callers are getting the verified value.
|
||||
//
|
||||
// In most cases, you will probably want to use Verify or VerifyMulti instead.
|
||||
// DetachedVerifyMulti is only useful if you have a payload and signature that are
|
||||
// separated from each other, and the signature can have multiple signers at the
|
||||
// same time.
|
||||
func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
|
||||
key := tryJWKS(verificationKey, obj.headers()...)
|
||||
verifier, err := newVerifier(key)
|
||||
if err != nil {
|
||||
return -1, Signature{}, err
|
||||
}
|
||||
|
||||
outer:
|
||||
for i, signature := range obj.Signatures {
|
||||
headers := signature.mergedHeaders()
|
||||
critical, err := headers.getCritical()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, name := range critical {
|
||||
if !supportedCritical[name] {
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
|
||||
input, err := obj.computeAuthData(payload, &signature)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
alg := headers.getSignatureAlgorithm()
|
||||
err = verifier.verifyPayload(input, signature.Signature, alg)
|
||||
if err == nil {
|
||||
return i, signature, nil
|
||||
}
|
||||
}
|
||||
|
||||
return -1, Signature{}, ErrCryptoFailure
|
||||
}
|
||||
|
||||
func (obj JSONWebSignature) headers() []Header {
|
||||
headers := make([]Header, len(obj.Signatures))
|
||||
for i, sig := range obj.Signatures {
|
||||
headers[i] = sig.Header
|
||||
}
|
||||
return headers
|
||||
}
|
|
@@ -0,0 +1,495 @@
|
|||
/*-
|
||||
* Copyright 2014 Square Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package jose
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/sha512"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
|
||||
josecipher "github.com/go-jose/go-jose/v3/cipher"
|
||||
)
|
||||
|
||||
// RandReader is a cryptographically secure random number generator (stubbed out in tests).
|
||||
var RandReader = rand.Reader
|
||||
|
||||
const (
|
||||
// RFC7518 recommends a minimum of 1,000 iterations:
|
||||
// https://tools.ietf.org/html/rfc7518#section-4.8.1.2
|
||||
// NIST recommends a minimum of 10,000:
|
||||
// https://pages.nist.gov/800-63-3/sp800-63b.html
|
||||
// 1Password uses 100,000:
|
||||
// https://support.1password.com/pbkdf2/
|
||||
defaultP2C = 100000
|
||||
// Default salt size: 128 bits
|
||||
defaultP2SSize = 16
|
||||
)
|
||||
|
||||
// Dummy key cipher for shared symmetric key mode
|
||||
type symmetricKeyCipher struct {
|
||||
key []byte // Pre-shared content-encryption key
|
||||
p2c int // PBES2 Count
|
||||
p2s []byte // PBES2 Salt Input
|
||||
}
|
||||
|
||||
// Signer/verifier for MAC modes
|
||||
type symmetricMac struct {
|
||||
key []byte
|
||||
}
|
||||
|
||||
// Input/output from an AEAD operation
|
||||
type aeadParts struct {
|
||||
iv, ciphertext, tag []byte
|
||||
}
|
||||
|
||||
// A content cipher based on an AEAD construction
|
||||
type aeadContentCipher struct {
|
||||
keyBytes int
|
||||
authtagBytes int
|
||||
getAead func(key []byte) (cipher.AEAD, error)
|
||||
}
|
||||
|
||||
// Random key generator
|
||||
type randomKeyGenerator struct {
|
||||
size int
|
||||
}
|
||||
|
||||
// Static key generator
|
||||
type staticKeyGenerator struct {
|
||||
key []byte
|
||||
}
|
||||
|
||||
// Create a new content cipher based on AES-GCM
|
||||
func newAESGCM(keySize int) contentCipher {
|
||||
return &aeadContentCipher{
|
||||
keyBytes: keySize,
|
||||
authtagBytes: 16,
|
||||
getAead: func(key []byte) (cipher.AEAD, error) {
|
||||
aes, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cipher.NewGCM(aes)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Create a new content cipher based on AES-CBC+HMAC
|
||||
func newAESCBC(keySize int) contentCipher {
|
||||
return &aeadContentCipher{
|
||||
keyBytes: keySize * 2,
|
||||
authtagBytes: keySize,
|
||||
getAead: func(key []byte) (cipher.AEAD, error) {
|
||||
return josecipher.NewCBCHMAC(key, aes.NewCipher)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Get an AEAD cipher object for the given content encryption algorithm
|
||||
func getContentCipher(alg ContentEncryption) contentCipher {
|
||||
switch alg {
|
||||
case A128GCM:
|
||||
return newAESGCM(16)
|
||||
case A192GCM:
|
||||
return newAESGCM(24)
|
||||
case A256GCM:
|
||||
return newAESGCM(32)
|
||||
case A128CBC_HS256:
|
||||
return newAESCBC(16)
|
||||
case A192CBC_HS384:
|
||||
return newAESCBC(24)
|
||||
case A256CBC_HS512:
|
||||
return newAESCBC(32)
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
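The switch above is what the encrypter uses to pick an AEAD for the requested "enc" value. A usage sketch from outside the package exercising A128GCM in direct key mode (hypothetical 16-byte key; go-jose/v3 import path assumed):

package main

import (
	"crypto/rand"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	// A128GCM needs a 16-byte content-encryption key.
	cek := make([]byte, 16)
	if _, err := rand.Read(cek); err != nil {
		panic(err)
	}

	// DIRECT: the shared key is used as the content-encryption key itself.
	enc, err := jose.NewEncrypter(jose.A128GCM,
		jose.Recipient{Algorithm: jose.DIRECT, Key: cek}, nil)
	if err != nil {
		panic(err)
	}

	obj, err := enc.Encrypt([]byte("secret payload"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseEncrypted(compact)
	if err != nil {
		panic(err)
	}
	plaintext, err := parsed.Decrypt(cek)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(plaintext))
}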
|
||||
|
||||
// getPbkdf2Params returns the key length and hash function used in
|
||||
// pbkdf2.Key.
|
||||
func getPbkdf2Params(alg KeyAlgorithm) (int, func() hash.Hash) {
|
||||
switch alg {
|
||||
case PBES2_HS256_A128KW:
|
||||
return 16, sha256.New
|
||||
case PBES2_HS384_A192KW:
|
||||
return 24, sha512.New384
|
||||
case PBES2_HS512_A256KW:
|
||||
return 32, sha512.New
|
||||
default:
|
||||
panic("invalid algorithm")
|
||||
}
|
||||
}
|
||||
|
||||
// getRandomSalt generates a new salt of the given size.
|
||||
func getRandomSalt(size int) ([]byte, error) {
|
||||
salt := make([]byte, size)
|
||||
_, err := io.ReadFull(RandReader, salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return salt, nil
|
||||
}
|
||||
|
||||
// newSymmetricRecipient creates a JWE encrypter based on AES-GCM key wrap.
|
||||
func newSymmetricRecipient(keyAlg KeyAlgorithm, key []byte) (recipientKeyInfo, error) {
|
||||
switch keyAlg {
|
||||
case DIRECT, A128GCMKW, A192GCMKW, A256GCMKW, A128KW, A192KW, A256KW:
|
||||
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
|
||||
default:
|
||||
return recipientKeyInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
return recipientKeyInfo{
|
||||
keyAlg: keyAlg,
|
||||
keyEncrypter: &symmetricKeyCipher{
|
||||
key: key,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newSymmetricSigner creates a recipientSigInfo based on the given key.
|
||||
func newSymmetricSigner(sigAlg SignatureAlgorithm, key []byte) (recipientSigInfo, error) {
|
||||
// Verify that key management algorithm is supported by this encrypter
|
||||
switch sigAlg {
|
||||
case HS256, HS384, HS512:
|
||||
default:
|
||||
return recipientSigInfo{}, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
return recipientSigInfo{
|
||||
sigAlg: sigAlg,
|
||||
signer: &symmetricMac{
|
||||
key: key,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Generate a random key for the given content cipher
|
||||
func (ctx randomKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
||||
key := make([]byte, ctx.size)
|
||||
_, err := io.ReadFull(RandReader, key)
|
||||
if err != nil {
|
||||
return nil, rawHeader{}, err
|
||||
}
|
||||
|
||||
return key, rawHeader{}, nil
|
||||
}
|
||||
|
||||
// Key size for random generator
|
||||
func (ctx randomKeyGenerator) keySize() int {
|
||||
return ctx.size
|
||||
}
|
||||
|
||||
// Generate a static key (for direct mode)
|
||||
func (ctx staticKeyGenerator) genKey() ([]byte, rawHeader, error) {
|
||||
cek := make([]byte, len(ctx.key))
|
||||
copy(cek, ctx.key)
|
||||
return cek, rawHeader{}, nil
|
||||
}
|
||||
|
||||
// Key size for static generator
|
||||
func (ctx staticKeyGenerator) keySize() int {
|
||||
return len(ctx.key)
|
||||
}
|
||||
|
||||
// Get key size for this cipher
|
||||
func (ctx aeadContentCipher) keySize() int {
|
||||
return ctx.keyBytes
|
||||
}
|
||||
|
||||
// Encrypt some data
|
||||
func (ctx aeadContentCipher) encrypt(key, aad, pt []byte) (*aeadParts, error) {
|
||||
// Get a new AEAD instance
|
||||
aead, err := ctx.getAead(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize a new nonce
|
||||
iv := make([]byte, aead.NonceSize())
|
||||
_, err = io.ReadFull(RandReader, iv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ciphertextAndTag := aead.Seal(nil, iv, pt, aad)
|
||||
offset := len(ciphertextAndTag) - ctx.authtagBytes
|
||||
|
||||
return &aeadParts{
|
||||
iv: iv,
|
||||
ciphertext: ciphertextAndTag[:offset],
|
||||
tag: ciphertextAndTag[offset:],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Decrypt some data
|
||||
func (ctx aeadContentCipher) decrypt(key, aad []byte, parts *aeadParts) ([]byte, error) {
|
||||
aead, err := ctx.getAead(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(parts.iv) != aead.NonceSize() || len(parts.tag) < ctx.authtagBytes {
|
||||
return nil, ErrCryptoFailure
|
||||
}
|
||||
|
||||
return aead.Open(nil, parts.iv, append(parts.ciphertext, parts.tag...), aad)
|
||||
}
|
||||
|
||||
// Encrypt the content encryption key.
|
||||
func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) {
|
||||
switch alg {
|
||||
case DIRECT:
|
||||
return recipientInfo{
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
case A128GCMKW, A192GCMKW, A256GCMKW:
|
||||
aead := newAESGCM(len(ctx.key))
|
||||
|
||||
parts, err := aead.encrypt(ctx.key, []byte{}, cek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
header := &rawHeader{}
|
||||
|
||||
if err = header.set(headerIV, newBuffer(parts.iv)); err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
if err = header.set(headerTag, newBuffer(parts.tag)); err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
header: header,
|
||||
encryptedKey: parts.ciphertext,
|
||||
}, nil
|
||||
case A128KW, A192KW, A256KW:
|
||||
block, err := aes.NewCipher(ctx.key)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
jek, err := josecipher.KeyWrap(block, cek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: jek,
|
||||
header: &rawHeader{},
|
||||
}, nil
|
||||
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
|
||||
if len(ctx.p2s) == 0 {
|
||||
salt, err := getRandomSalt(defaultP2SSize)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
ctx.p2s = salt
|
||||
}
|
||||
|
||||
if ctx.p2c <= 0 {
|
||||
ctx.p2c = defaultP2C
|
||||
}
|
||||
|
||||
// salt is UTF8(Alg) || 0x00 || Salt Input
|
||||
salt := bytes.Join([][]byte{[]byte(alg), ctx.p2s}, []byte{0x00})
|
||||
|
||||
// derive key
|
||||
keyLen, h := getPbkdf2Params(alg)
|
||||
key := pbkdf2.Key(ctx.key, salt, ctx.p2c, keyLen, h)
|
||||
|
||||
// use AES cipher with derived key
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
jek, err := josecipher.KeyWrap(block, cek)
|
||||
if err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
header := &rawHeader{}
|
||||
|
||||
if err = header.set(headerP2C, ctx.p2c); err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
if err = header.set(headerP2S, newBuffer(ctx.p2s)); err != nil {
|
||||
return recipientInfo{}, err
|
||||
}
|
||||
|
||||
return recipientInfo{
|
||||
encryptedKey: jek,
|
||||
header: header,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return recipientInfo{}, ErrUnsupportedAlgorithm
|
||||
}
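The PBES2 branch above derives the AES key-wrapping key with PBKDF2, using the salt layout UTF8(alg) || 0x00 || p2s required by RFC 7518. A standalone sketch of just that derivation, with hypothetical password, salt input, and iteration count:

package main

import (
	"crypto/aes"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	password := []byte("correct horse battery staple") // hypothetical
	saltInput := []byte("0123456789abcdef")            // the raw p2s value, hypothetical
	iterations := 100000                               // the p2c value

	// RFC 7518 §4.8.1.1: salt = UTF8(alg) || 0x00 || Salt Input.
	salt := append(append([]byte("PBES2-HS256+A128KW"), 0x00), saltInput...)

	// PBES2-HS256+A128KW: 16-byte key derived with HMAC-SHA-256.
	kek := pbkdf2.Key(password, salt, iterations, 16, sha256.New)

	// The derived key then wraps the content-encryption key with AES-KW (not shown).
	block, err := aes.NewCipher(kek)
	if err != nil {
		panic(err)
	}
	fmt.Println("derived key-wrapping cipher, block size:", block.BlockSize())
}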
|
||||
|
||||
// Decrypt the content encryption key.
|
||||
func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) {
|
||||
switch headers.getAlgorithm() {
|
||||
case DIRECT:
|
||||
cek := make([]byte, len(ctx.key))
|
||||
copy(cek, ctx.key)
|
||||
return cek, nil
|
||||
case A128GCMKW, A192GCMKW, A256GCMKW:
|
||||
aead := newAESGCM(len(ctx.key))
|
||||
|
||||
iv, err := headers.getIV()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid IV: %v", err)
|
||||
}
|
||||
tag, err := headers.getTag()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid tag: %v", err)
|
||||
}
|
||||
|
||||
parts := &aeadParts{
|
||||
iv: iv.bytes(),
|
||||
ciphertext: recipient.encryptedKey,
|
||||
tag: tag.bytes(),
|
||||
}
|
||||
|
||||
cek, err := aead.decrypt(ctx.key, []byte{}, parts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cek, nil
|
||||
case A128KW, A192KW, A256KW:
|
||||
block, err := aes.NewCipher(ctx.key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cek, nil
|
||||
case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW:
|
||||
p2s, err := headers.getP2S()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid P2S: %v", err)
|
||||
}
|
||||
if p2s == nil || len(p2s.data) == 0 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid P2S: must be present")
|
||||
}
|
||||
|
||||
p2c, err := headers.getP2C()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: %v", err)
|
||||
}
|
||||
if p2c <= 0 {
|
||||
return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: must be a positive integer")
|
||||
}
|
||||
|
||||
// salt is UTF8(Alg) || 0x00 || Salt Input
|
||||
alg := headers.getAlgorithm()
|
||||
salt := bytes.Join([][]byte{[]byte(alg), p2s.bytes()}, []byte{0x00})
|
||||
|
||||
// derive key
|
||||
keyLen, h := getPbkdf2Params(alg)
|
||||
key := pbkdf2.Key(ctx.key, salt, p2c, keyLen, h)
|
||||
|
||||
// use AES cipher with derived key
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cek, err := josecipher.KeyUnwrap(block, recipient.encryptedKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cek, nil
|
||||
}
|
||||
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
// Sign the given payload
|
||||
func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) {
|
||||
mac, err := ctx.hmac(payload, alg)
|
||||
if err != nil {
|
||||
return Signature{}, errors.New("go-jose/go-jose: failed to compute hmac")
|
||||
}
|
||||
|
||||
return Signature{
|
||||
Signature: mac,
|
||||
protected: &rawHeader{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify the given payload
|
||||
func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error {
|
||||
expected, err := ctx.hmac(payload, alg)
|
||||
if err != nil {
|
||||
return errors.New("go-jose/go-jose: failed to compute hmac")
|
||||
}
|
||||
|
||||
if len(mac) != len(expected) {
|
||||
return errors.New("go-jose/go-jose: invalid hmac")
|
||||
}
|
||||
|
||||
match := subtle.ConstantTimeCompare(mac, expected)
|
||||
if match != 1 {
|
||||
return errors.New("go-jose/go-jose: invalid hmac")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compute the HMAC based on the given alg value
|
||||
func (ctx symmetricMac) hmac(payload []byte, alg SignatureAlgorithm) ([]byte, error) {
|
||||
var hash func() hash.Hash
|
||||
|
||||
switch alg {
|
||||
case HS256:
|
||||
hash = sha256.New
|
||||
case HS384:
|
||||
hash = sha512.New384
|
||||
case HS512:
|
||||
hash = sha512.New
|
||||
default:
|
||||
return nil, ErrUnsupportedAlgorithm
|
||||
}
|
||||
|
||||
hmac := hmac.New(hash, ctx.key)
|
||||
|
||||
// According to documentation, Write() on hash never fails
|
||||
_, _ = hmac.Write(payload)
|
||||
return hmac.Sum(nil), nil
|
||||
}
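The HS256/384/512 signature paths are plain HMACs over the signing input, with a constant-time comparison on the verify side. A standard-library sketch of the HS256 case, with hypothetical key and payload:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

func main() {
	key := []byte("shared-secret")      // hypothetical
	payload := []byte("header.payload") // the JWS signing input

	// Signing: compute the HS256 tag, as symmetricMac.hmac does.
	mac := hmac.New(sha256.New, key)
	_, _ = mac.Write(payload) // Write on a hash never returns an error
	tag := mac.Sum(nil)

	// Verification: recompute and compare in constant time,
	// mirroring verifyPayload's use of subtle.ConstantTimeCompare.
	check := hmac.New(sha256.New, key)
	_, _ = check.Write(payload)
	fmt.Println("valid:", hmac.Equal(tag, check.Sum(nil)))
}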
|
|
@@ -0,0 +1,77 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
|
||||
"github.com/go-openapi/strfmt"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
)
|
||||
|
||||
// PassThroughAuth never manipulates the request
|
||||
var PassThroughAuth runtime.ClientAuthInfoWriter
|
||||
|
||||
func init() {
|
||||
PassThroughAuth = runtime.ClientAuthInfoWriterFunc(func(_ runtime.ClientRequest, _ strfmt.Registry) error { return nil })
|
||||
}
|
||||
|
||||
// BasicAuth provides a basic auth info writer
|
||||
func BasicAuth(username, password string) runtime.ClientAuthInfoWriter {
|
||||
return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
|
||||
encoded := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
|
||||
return r.SetHeaderParam(runtime.HeaderAuthorization, "Basic "+encoded)
|
||||
})
|
||||
}
|
||||
|
||||
// APIKeyAuth provides an API key auth info writer
|
||||
func APIKeyAuth(name, in, value string) runtime.ClientAuthInfoWriter {
|
||||
if in == "query" {
|
||||
return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
|
||||
return r.SetQueryParam(name, value)
|
||||
})
|
||||
}
|
||||
|
||||
if in == "header" {
|
||||
return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
|
||||
return r.SetHeaderParam(name, value)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BearerToken provides a header based oauth2 bearer access token auth info writer
|
||||
func BearerToken(token string) runtime.ClientAuthInfoWriter {
|
||||
return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
|
||||
return r.SetHeaderParam(runtime.HeaderAuthorization, "Bearer "+token)
|
||||
})
|
||||
}
|
||||
|
||||
// Compose combines multiple ClientAuthInfoWriters into a single one.
|
||||
// Useful when multiple auth headers are needed.
|
||||
func Compose(auths ...runtime.ClientAuthInfoWriter) runtime.ClientAuthInfoWriter {
|
||||
return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error {
|
||||
for _, auth := range auths {
|
||||
if auth == nil {
|
||||
continue
|
||||
}
|
||||
if err := auth.AuthenticateRequest(r, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
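Compose lets several of the writers above run against a single request. A sketch combining a bearer token with an extra header-based API key (the token, header name, and value are hypothetical); the result can be used wherever a ClientAuthInfoWriter is expected:

package main

import (
	httptransport "github.com/go-openapi/runtime/client"
)

func main() {
	// Both writers are applied in order to every outgoing request.
	auth := httptransport.Compose(
		httptransport.BearerToken("my-oauth2-token"),
		httptransport.APIKeyAuth("X-Tenant", "header", "acme"),
	)

	_ = auth // e.g. assign to Runtime.DefaultAuthentication or an operation's AuthInfo
}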
|
|
@@ -0,0 +1,56 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// KeepAliveTransport drains the remaining body from a response
// so that Go will reuse the TCP connections.
// This is not enabled by default because there are servers where
// the response never gets closed and that would make the code hang forever.
// So instead it's provided as an http client middleware that can be used to override
// any request.
|
||||
func KeepAliveTransport(rt http.RoundTripper) http.RoundTripper {
|
||||
return &keepAliveTransport{wrapped: rt}
|
||||
}
|
||||
|
||||
type keepAliveTransport struct {
|
||||
wrapped http.RoundTripper
|
||||
}
|
||||
|
||||
func (k *keepAliveTransport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
resp, err := k.wrapped.RoundTrip(r)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
resp.Body = &drainingReadCloser{rdr: resp.Body}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
type drainingReadCloser struct {
|
||||
rdr io.ReadCloser
|
||||
seenEOF uint32
|
||||
}
|
||||
|
||||
func (d *drainingReadCloser) Read(p []byte) (n int, err error) {
|
||||
n, err = d.rdr.Read(p)
|
||||
if err == io.EOF || n == 0 {
|
||||
atomic.StoreUint32(&d.seenEOF, 1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *drainingReadCloser) Close() error {
|
||||
// drain buffer
|
||||
if atomic.LoadUint32(&d.seenEOF) != 1 {
|
||||
// If the reader side (an HTTP server) is misbehaving, it may still send
// some bytes, but the closer ignores them to keep the underlying
// connection open.
|
||||
//nolint:errcheck
|
||||
io.Copy(ioutil.Discard, d.rdr)
|
||||
}
|
||||
return d.rdr.Close()
|
||||
}
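To opt in, a caller wraps an existing RoundTripper and hands the result to an http.Client; each response body is then drained on Close so the underlying connection can be reused. A minimal sketch:

package main

import (
	"net/http"

	httptransport "github.com/go-openapi/runtime/client"
)

func main() {
	// Wrap the default transport; any RoundTripper works here.
	client := &http.Client{
		Transport: httptransport.KeepAliveTransport(http.DefaultTransport),
	}

	_ = client // e.g. pass to NewWithClient, or use directly for plain requests
}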
|
|
@@ -0,0 +1,99 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"github.com/opentracing/opentracing-go/ext"
|
||||
"github.com/opentracing/opentracing-go/log"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
)
|
||||
|
||||
type tracingTransport struct {
|
||||
transport runtime.ClientTransport
|
||||
host string
|
||||
opts []opentracing.StartSpanOption
|
||||
}
|
||||
|
||||
func newOpenTracingTransport(transport runtime.ClientTransport, host string, opts []opentracing.StartSpanOption,
|
||||
) runtime.ClientTransport {
|
||||
return &tracingTransport{
|
||||
transport: transport,
|
||||
host: host,
|
||||
opts: opts,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tracingTransport) Submit(op *runtime.ClientOperation) (interface{}, error) {
|
||||
if op.Context == nil {
|
||||
return t.transport.Submit(op)
|
||||
}
|
||||
|
||||
params := op.Params
|
||||
reader := op.Reader
|
||||
|
||||
var span opentracing.Span
|
||||
defer func() {
|
||||
if span != nil {
|
||||
span.Finish()
|
||||
}
|
||||
}()
|
||||
|
||||
op.Params = runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error {
|
||||
span = createClientSpan(op, req.GetHeaderParams(), t.host, t.opts)
|
||||
return params.WriteToRequest(req, reg)
|
||||
})
|
||||
|
||||
op.Reader = runtime.ClientResponseReaderFunc(func(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
|
||||
if span != nil {
|
||||
code := response.Code()
|
||||
ext.HTTPStatusCode.Set(span, uint16(code))
|
||||
if code >= 400 {
|
||||
ext.Error.Set(span, true)
|
||||
}
|
||||
}
|
||||
return reader.ReadResponse(response, consumer)
|
||||
})
|
||||
|
||||
submit, err := t.transport.Submit(op)
|
||||
if err != nil && span != nil {
|
||||
ext.Error.Set(span, true)
|
||||
span.LogFields(log.Error(err))
|
||||
}
|
||||
return submit, err
|
||||
}
|
||||
|
||||
func createClientSpan(op *runtime.ClientOperation, header http.Header, host string,
|
||||
opts []opentracing.StartSpanOption) opentracing.Span {
|
||||
ctx := op.Context
|
||||
span := opentracing.SpanFromContext(ctx)
|
||||
|
||||
if span != nil {
|
||||
opts = append(opts, ext.SpanKindRPCClient)
|
||||
span, _ = opentracing.StartSpanFromContextWithTracer(
|
||||
ctx, span.Tracer(), operationName(op), opts...)
|
||||
|
||||
ext.Component.Set(span, "go-openapi")
|
||||
ext.PeerHostname.Set(span, host)
|
||||
span.SetTag("http.path", op.PathPattern)
|
||||
ext.HTTPMethod.Set(span, op.Method)
|
||||
|
||||
_ = span.Tracer().Inject(
|
||||
span.Context(),
|
||||
opentracing.HTTPHeaders,
|
||||
opentracing.HTTPHeadersCarrier(header))
|
||||
|
||||
return span
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func operationName(op *runtime.ClientOperation) string {
|
||||
if op.ID != "" {
|
||||
return op.ID
|
||||
}
|
||||
return fmt.Sprintf("%s_%s", op.Method, op.PathPattern)
|
||||
}
|
|
@@ -0,0 +1,474 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-openapi/strfmt"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
)
|
||||
|
||||
// NewRequest creates a new swagger http client request
|
||||
func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) (*request, error) {
|
||||
return &request{
|
||||
pathPattern: pathPattern,
|
||||
method: method,
|
||||
writer: writer,
|
||||
header: make(http.Header),
|
||||
query: make(url.Values),
|
||||
timeout: DefaultTimeout,
|
||||
getBody: getRequestBuffer,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Request represents a swagger client request.
|
||||
//
|
||||
// This Request struct converts to an HTTP request.
// There might be others that convert to other transports.
// There is no error checking here; it is assumed to be used after a spec has been validated,
// so impossible combinations should not arise (hopefully).
|
||||
//
|
||||
// The main purpose of this struct is to hide the machinery of adding params to a transport request.
|
||||
// The generated code only implements what is necessary to turn a param into a valid value for these methods.
|
||||
type request struct {
|
||||
pathPattern string
|
||||
method string
|
||||
writer runtime.ClientRequestWriter
|
||||
|
||||
pathParams map[string]string
|
||||
header http.Header
|
||||
query url.Values
|
||||
formFields url.Values
|
||||
fileFields map[string][]runtime.NamedReadCloser
|
||||
payload interface{}
|
||||
timeout time.Duration
|
||||
buf *bytes.Buffer
|
||||
|
||||
getBody func(r *request) []byte
|
||||
}
|
||||
|
||||
var (
|
||||
// ensure interface compliance
|
||||
_ runtime.ClientRequest = new(request)
|
||||
)
|
||||
|
||||
func (r *request) isMultipart(mediaType string) bool {
|
||||
if len(r.fileFields) > 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
return runtime.MultipartFormMime == mediaType
|
||||
}
|
||||
|
||||
// BuildHTTP creates a new http request based on the data from the params
|
||||
func (r *request) BuildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry) (*http.Request, error) {
|
||||
return r.buildHTTP(mediaType, basePath, producers, registry, nil)
|
||||
}
|
||||
func escapeQuotes(s string) string {
|
||||
return strings.NewReplacer("\\", "\\\\", `"`, "\\\"").Replace(s)
|
||||
}
|
||||
|
||||
func logClose(err error, pw *io.PipeWriter) {
|
||||
log.Println(err)
|
||||
closeErr := pw.CloseWithError(err)
|
||||
if closeErr != nil {
|
||||
log.Println(closeErr)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *request) buildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry, auth runtime.ClientAuthInfoWriter) (*http.Request, error) {
|
||||
// build the data
|
||||
if err := r.writer.WriteToRequest(r, registry); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Our body must be an io.Reader.
|
||||
// When we create the http.Request, if we pass it a
|
||||
// bytes.Buffer then it will wrap it in an io.ReadCloser
|
||||
// and set the content length automatically.
|
||||
var body io.Reader
|
||||
var pr *io.PipeReader
|
||||
var pw *io.PipeWriter
|
||||
|
||||
r.buf = bytes.NewBuffer(nil)
|
||||
if r.payload != nil || len(r.formFields) > 0 || len(r.fileFields) > 0 {
|
||||
body = r.buf
|
||||
if r.isMultipart(mediaType) {
|
||||
pr, pw = io.Pipe()
|
||||
body = pr
|
||||
}
|
||||
}
|
||||
|
||||
// check if this is a form type request
|
||||
if len(r.formFields) > 0 || len(r.fileFields) > 0 {
|
||||
if !r.isMultipart(mediaType) {
|
||||
r.header.Set(runtime.HeaderContentType, mediaType)
|
||||
formString := r.formFields.Encode()
|
||||
r.buf.WriteString(formString)
|
||||
goto DoneChoosingBodySource
|
||||
}
|
||||
|
||||
mp := multipart.NewWriter(pw)
|
||||
r.header.Set(runtime.HeaderContentType, mangleContentType(mediaType, mp.Boundary()))
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
mp.Close()
|
||||
pw.Close()
|
||||
}()
|
||||
|
||||
for fn, v := range r.formFields {
|
||||
for _, vi := range v {
|
||||
if err := mp.WriteField(fn, vi); err != nil {
|
||||
logClose(err, pw)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
for _, ff := range r.fileFields {
|
||||
for _, ffi := range ff {
|
||||
ffi.Close()
|
||||
}
|
||||
}
|
||||
}()
|
||||
for fn, f := range r.fileFields {
|
||||
for _, fi := range f {
|
||||
// Need to read the data so that we can detect the content type
|
||||
buf := make([]byte, 512)
|
||||
size, err := fi.Read(buf)
|
||||
if err != nil {
|
||||
logClose(err, pw)
|
||||
return
|
||||
}
|
||||
fileContentType := http.DetectContentType(buf)
|
||||
newFi := runtime.NamedReader(fi.Name(), io.MultiReader(bytes.NewReader(buf[:size]), fi))
|
||||
|
||||
// Create the MIME headers for the new part
|
||||
h := make(textproto.MIMEHeader)
|
||||
h.Set("Content-Disposition",
|
||||
fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
|
||||
escapeQuotes(fn), escapeQuotes(filepath.Base(fi.Name()))))
|
||||
h.Set("Content-Type", fileContentType)
|
||||
|
||||
wrtr, err := mp.CreatePart(h)
|
||||
if err != nil {
|
||||
logClose(err, pw)
|
||||
return
|
||||
}
|
||||
if _, err := io.Copy(wrtr, newFi); err != nil {
|
||||
logClose(err, pw)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
goto DoneChoosingBodySource
|
||||
}
|
||||
|
||||
// if there is payload, use the producer to write the payload, and then
|
||||
// set the header to the content-type appropriate for the payload produced
|
||||
if r.payload != nil {
|
||||
// TODO: infer most appropriate content type based on the producer used,
|
||||
// and the `consumers` section of the spec/operation
|
||||
r.header.Set(runtime.HeaderContentType, mediaType)
|
||||
if rdr, ok := r.payload.(io.ReadCloser); ok {
|
||||
body = rdr
|
||||
goto DoneChoosingBodySource
|
||||
}
|
||||
|
||||
if rdr, ok := r.payload.(io.Reader); ok {
|
||||
body = rdr
|
||||
goto DoneChoosingBodySource
|
||||
}
|
||||
|
||||
producer := producers[mediaType]
|
||||
if err := producer.Produce(r.buf, r.payload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
DoneChoosingBodySource:
|
||||
|
||||
if runtime.CanHaveBody(r.method) && body != nil && r.header.Get(runtime.HeaderContentType) == "" {
|
||||
r.header.Set(runtime.HeaderContentType, mediaType)
|
||||
}
|
||||
|
||||
if auth != nil {
|
||||
// If we're not using r.buf as our http.Request's body,
|
||||
// either the payload is an io.Reader or io.ReadCloser,
|
||||
// or we're doing a multipart form/file.
|
||||
//
|
||||
// In those cases, if the AuthenticateRequest call asks for the body,
|
||||
// we must read it into a buffer and provide that, then use that buffer
|
||||
// as the body of our http.Request.
|
||||
//
|
||||
// This is done in-line with the GetBody() request rather than ahead
|
||||
// of time, because there's no way to know if the AuthenticateRequest
|
||||
// will even ask for the body of the request.
|
||||
//
|
||||
// If for some reason the copy fails, there's no way to return that
|
||||
// error to the GetBody() call, so return it afterwards.
|
||||
//
|
||||
// An error from the copy action is prioritized over any error
|
||||
// from the AuthenticateRequest call, because the mis-read
|
||||
// body may have interfered with the auth.
|
||||
//
|
||||
var copyErr error
|
||||
if buf, ok := body.(*bytes.Buffer); body != nil && (!ok || buf != r.buf) {
|
||||
var copied bool
|
||||
r.getBody = func(r *request) []byte {
|
||||
if copied {
|
||||
return getRequestBuffer(r)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
copied = true
|
||||
}()
|
||||
|
||||
if _, copyErr = io.Copy(r.buf, body); copyErr != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if closer, ok := body.(io.ReadCloser); ok {
|
||||
if copyErr = closer.Close(); copyErr != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
body = r.buf
|
||||
return getRequestBuffer(r)
|
||||
}
|
||||
}
|
||||
|
||||
authErr := auth.AuthenticateRequest(r, registry)
|
||||
|
||||
if copyErr != nil {
|
||||
return nil, fmt.Errorf("error retrieving the response body: %v", copyErr)
|
||||
}
|
||||
|
||||
if authErr != nil {
|
||||
return nil, authErr
|
||||
}
|
||||
}
|
||||
|
||||
// In case the basePath or the request pathPattern include static query parameters,
|
||||
// parse those out before constructing the final path. The parameters themselves
|
||||
// will be merged with the ones set by the client, with the priority given first to
|
||||
// the ones set by the client, then the path pattern, and lastly the base path.
|
||||
basePathURL, err := url.Parse(basePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
staticQueryParams := basePathURL.Query()
|
||||
|
||||
pathPatternURL, err := url.Parse(r.pathPattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for name, values := range pathPatternURL.Query() {
|
||||
if _, present := staticQueryParams[name]; present {
|
||||
staticQueryParams.Del(name)
|
||||
}
|
||||
for _, value := range values {
|
||||
staticQueryParams.Add(name, value)
|
||||
}
|
||||
}
|
||||
|
||||
// create http request
|
||||
var reinstateSlash bool
|
||||
if pathPatternURL.Path != "" && pathPatternURL.Path != "/" && pathPatternURL.Path[len(pathPatternURL.Path)-1] == '/' {
|
||||
reinstateSlash = true
|
||||
}
|
||||
|
||||
urlPath := path.Join(basePathURL.Path, pathPatternURL.Path)
|
||||
for k, v := range r.pathParams {
|
||||
urlPath = strings.Replace(urlPath, "{"+k+"}", url.PathEscape(v), -1)
|
||||
}
|
||||
if reinstateSlash {
|
||||
urlPath = urlPath + "/"
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(r.method, urlPath, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
originalParams := r.GetQueryParams()
|
||||
|
||||
// Merge the query parameters extracted from the basePath with the ones set by
|
||||
// the client in this struct. In case of conflict, the client wins.
|
||||
for k, v := range staticQueryParams {
|
||||
_, present := originalParams[k]
|
||||
if !present {
|
||||
if err = r.SetQueryParam(k, v...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
req.URL.RawQuery = r.query.Encode()
|
||||
req.Header = r.header
|
||||
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func mangleContentType(mediaType, boundary string) string {
|
||||
if strings.ToLower(mediaType) == runtime.URLencodedFormMime {
|
||||
return fmt.Sprintf("%s; boundary=%s", mediaType, boundary)
|
||||
}
|
||||
return "multipart/form-data; boundary=" + boundary
|
||||
}
|
||||
|
||||
func (r *request) GetMethod() string {
|
||||
return r.method
|
||||
}
|
||||
|
||||
func (r *request) GetPath() string {
|
||||
path := r.pathPattern
|
||||
for k, v := range r.pathParams {
|
||||
path = strings.Replace(path, "{"+k+"}", v, -1)
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func (r *request) GetBody() []byte {
|
||||
return r.getBody(r)
|
||||
}
|
||||
|
||||
func getRequestBuffer(r *request) []byte {
|
||||
if r.buf == nil {
|
||||
return nil
|
||||
}
|
||||
return r.buf.Bytes()
|
||||
}
|
||||
|
||||
// SetHeaderParam adds a header param to the request
|
||||
// when there is only 1 value provided for the varargs, it will set it.
|
||||
// when there are several values provided for the varargs it will add it (no overriding)
|
||||
func (r *request) SetHeaderParam(name string, values ...string) error {
|
||||
if r.header == nil {
|
||||
r.header = make(http.Header)
|
||||
}
|
||||
r.header[http.CanonicalHeaderKey(name)] = values
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetHeaderParams returns all the headers currently set for the request
|
||||
func (r *request) GetHeaderParams() http.Header {
|
||||
return r.header
|
||||
}
|
||||
|
||||
// SetQueryParam adds a query param to the request
|
||||
// when there is only 1 value provided for the varargs, it will set it.
|
||||
// when there are several values provided for the varargs it will add it (no overriding)
|
||||
func (r *request) SetQueryParam(name string, values ...string) error {
|
||||
if r.query == nil {
|
||||
r.query = make(url.Values)
|
||||
}
|
||||
r.query[name] = values
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetQueryParams returns a copy of all query params currently set for the request
|
||||
func (r *request) GetQueryParams() url.Values {
|
||||
var result = make(url.Values)
|
||||
for key, value := range r.query {
|
||||
result[key] = append([]string{}, value...)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// SetFormParam adds a form param to the request
|
||||
// when there is only 1 value provided for the varargs, it will set it.
|
||||
// when there are several values provided for the varargs it will add it (no overriding)
|
||||
func (r *request) SetFormParam(name string, values ...string) error {
|
||||
if r.formFields == nil {
|
||||
r.formFields = make(url.Values)
|
||||
}
|
||||
r.formFields[name] = values
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPathParam adds a path param to the request
|
||||
func (r *request) SetPathParam(name string, value string) error {
|
||||
if r.pathParams == nil {
|
||||
r.pathParams = make(map[string]string)
|
||||
}
|
||||
|
||||
r.pathParams[name] = value
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetFileParam adds a file param to the request
|
||||
func (r *request) SetFileParam(name string, files ...runtime.NamedReadCloser) error {
|
||||
for _, file := range files {
|
||||
if actualFile, ok := file.(*os.File); ok {
|
||||
fi, err := os.Stat(actualFile.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fi.IsDir() {
|
||||
return fmt.Errorf("%q is a directory, only files are supported", file.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if r.fileFields == nil {
|
||||
r.fileFields = make(map[string][]runtime.NamedReadCloser)
|
||||
}
|
||||
if r.formFields == nil {
|
||||
r.formFields = make(url.Values)
|
||||
}
|
||||
|
||||
r.fileFields[name] = files
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *request) GetFileParam() map[string][]runtime.NamedReadCloser {
|
||||
return r.fileFields
|
||||
}
|
||||
|
||||
// SetBodyParam sets a body parameter on the request.
|
||||
// This does not yet serialize the object; that happens as late as possible.
|
||||
func (r *request) SetBodyParam(payload interface{}) error {
|
||||
r.payload = payload
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *request) GetBodyParam() interface{} {
|
||||
return r.payload
|
||||
}
|
||||
|
||||
// SetTimeout sets the timeout for a request
|
||||
func (r *request) SetTimeout(timeout time.Duration) error {
|
||||
r.timeout = timeout
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,48 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
)
|
||||
|
||||
var _ runtime.ClientResponse = response{}
|
||||
|
||||
type response struct {
|
||||
resp *http.Response
|
||||
}
|
||||
|
||||
func (r response) Code() int {
|
||||
return r.resp.StatusCode
|
||||
}
|
||||
|
||||
func (r response) Message() string {
|
||||
return r.resp.Status
|
||||
}
|
||||
|
||||
func (r response) GetHeader(name string) string {
|
||||
return r.resp.Header.Get(name)
|
||||
}
|
||||
|
||||
func (r response) GetHeaders(name string) []string {
|
||||
return r.resp.Header.Values(name)
|
||||
}
|
||||
|
||||
func (r response) Body() io.ReadCloser {
|
||||
return r.resp.Body
|
||||
}
|
|
@@ -0,0 +1,518 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/opentracing/opentracing-go"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
"github.com/go-openapi/runtime/logger"
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/runtime/yamlpc"
|
||||
)
|
||||
|
||||
// TLSClientOptions to configure client authentication with mutual TLS
|
||||
type TLSClientOptions struct {
|
||||
// Certificate is the path to a PEM-encoded certificate to be used for
|
||||
// client authentication. If set then Key must also be set.
|
||||
Certificate string
|
||||
|
||||
// LoadedCertificate is the certificate to be used for client authentication.
|
||||
// This field is ignored if Certificate is set. If this field is set, LoadedKey
|
||||
// is also required.
|
||||
LoadedCertificate *x509.Certificate
|
||||
|
||||
// Key is the path to an unencrypted PEM-encoded private key for client
|
||||
// authentication. This field is required if Certificate is set.
|
||||
Key string
|
||||
|
||||
// LoadedKey is the key for client authentication. This field is required if
|
||||
// LoadedCertificate is set.
|
||||
LoadedKey crypto.PrivateKey
|
||||
|
||||
// CA is a path to a PEM-encoded certificate that specifies the root certificate
|
||||
// to use when validating the TLS certificate presented by the server. If this field
|
||||
// (and LoadedCA) is not set, the system certificate pool is used. This field is ignored if LoadedCA
|
||||
// is set.
|
||||
CA string
|
||||
|
||||
// LoadedCA specifies the root certificate to use when validating the server's TLS certificate.
|
||||
// If this field (and CA) is not set, the system certificate pool is used.
|
||||
LoadedCA *x509.Certificate
|
||||
|
||||
// LoadedCAPool specifies a pool of RootCAs to use when validating the server's TLS certificate.
|
||||
// If set, it will be combined with the other loaded certificates (see LoadedCA and CA).
// If neither LoadedCA nor CA is set, the provided pool will override the system
|
||||
// certificate pool.
|
||||
// The caller must not use the supplied pool after calling TLSClientAuth.
|
||||
LoadedCAPool *x509.CertPool
|
||||
|
||||
// ServerName specifies the hostname to use when verifying the server certificate.
|
||||
// If this field is set then InsecureSkipVerify will be ignored and treated as
|
||||
// false.
|
||||
ServerName string
|
||||
|
||||
// InsecureSkipVerify controls whether the certificate chain and hostname presented
|
||||
// by the server are validated. If true, any certificate is accepted.
|
||||
InsecureSkipVerify bool
|
||||
|
||||
// VerifyPeerCertificate, if not nil, is called after normal
|
||||
// certificate verification. It receives the raw ASN.1 certificates
|
||||
// provided by the peer and also any verified chains that normal processing found.
|
||||
// If it returns a non-nil error, the handshake is aborted and that error results.
|
||||
//
|
||||
// If normal verification fails then the handshake will abort before
|
||||
// considering this callback. If normal verification is disabled by
|
||||
// setting InsecureSkipVerify then this callback will be considered but
|
||||
// the verifiedChains argument will always be nil.
|
||||
VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
|
||||
|
||||
// SessionTicketsDisabled may be set to true to disable session ticket and
|
||||
// PSK (resumption) support. Note that on clients, session ticket support is
|
||||
// also disabled if ClientSessionCache is nil.
|
||||
SessionTicketsDisabled bool
|
||||
|
||||
// ClientSessionCache is a cache of ClientSessionState entries for TLS
|
||||
// session resumption. It is only used by clients.
|
||||
ClientSessionCache tls.ClientSessionCache
|
||||
|
||||
// Prevents callers using unkeyed fields.
|
||||
_ struct{}
|
||||
}
|
||||
|
||||
// TLSClientAuth creates a tls.Config for mutual auth
|
||||
func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) {
|
||||
// create client tls config
|
||||
cfg := &tls.Config{}
|
||||
|
||||
// load client cert if specified
|
||||
if opts.Certificate != "" {
|
||||
cert, err := tls.LoadX509KeyPair(opts.Certificate, opts.Key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tls client cert: %v", err)
|
||||
}
|
||||
cfg.Certificates = []tls.Certificate{cert}
|
||||
} else if opts.LoadedCertificate != nil {
|
||||
block := pem.Block{Type: "CERTIFICATE", Bytes: opts.LoadedCertificate.Raw}
|
||||
certPem := pem.EncodeToMemory(&block)
|
||||
|
||||
var keyBytes []byte
|
||||
switch k := opts.LoadedKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
keyBytes = x509.MarshalPKCS1PrivateKey(k)
|
||||
case *ecdsa.PrivateKey:
|
||||
var err error
|
||||
keyBytes, err = x509.MarshalECPrivateKey(k)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tls client priv key: %v", err)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("tls client priv key: unsupported key type")
|
||||
}
|
||||
|
||||
block = pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes}
|
||||
keyPem := pem.EncodeToMemory(&block)
|
||||
|
||||
cert, err := tls.X509KeyPair(certPem, keyPem)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tls client cert: %v", err)
|
||||
}
|
||||
cfg.Certificates = []tls.Certificate{cert}
|
||||
}
|
||||
|
||||
cfg.InsecureSkipVerify = opts.InsecureSkipVerify
|
||||
|
||||
cfg.VerifyPeerCertificate = opts.VerifyPeerCertificate
|
||||
cfg.SessionTicketsDisabled = opts.SessionTicketsDisabled
|
||||
cfg.ClientSessionCache = opts.ClientSessionCache
|
||||
|
||||
// When no CA certificate is provided, default to the system cert pool;
// that way, when a request is made to a server known by the system trust store,
// the name is still verified.
|
||||
if opts.LoadedCA != nil {
|
||||
caCertPool := basePool(opts.LoadedCAPool)
|
||||
caCertPool.AddCert(opts.LoadedCA)
|
||||
cfg.RootCAs = caCertPool
|
||||
} else if opts.CA != "" {
|
||||
// load ca cert
|
||||
caCert, err := ioutil.ReadFile(opts.CA)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("tls client ca: %v", err)
|
||||
}
|
||||
caCertPool := basePool(opts.LoadedCAPool)
|
||||
caCertPool.AppendCertsFromPEM(caCert)
|
||||
cfg.RootCAs = caCertPool
|
||||
} else if opts.LoadedCAPool != nil {
|
||||
cfg.RootCAs = opts.LoadedCAPool
|
||||
}
|
||||
|
||||
// apply server name override
|
||||
if opts.ServerName != "" {
|
||||
cfg.InsecureSkipVerify = false
|
||||
cfg.ServerName = opts.ServerName
|
||||
}
|
||||
|
||||
cfg.BuildNameToCertificate()
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
func basePool(pool *x509.CertPool) *x509.CertPool {
|
||||
if pool == nil {
|
||||
return x509.NewCertPool()
|
||||
}
|
||||
return pool
|
||||
}
|
||||
|
||||
// TLSTransport creates an http client transport suitable for mutual TLS auth
|
||||
func TLSTransport(opts TLSClientOptions) (http.RoundTripper, error) {
|
||||
cfg, err := TLSClientAuth(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &http.Transport{TLSClientConfig: cfg}, nil
|
||||
}
|
||||
|
||||
// TLSClient creates an http.Client for mutual auth
|
||||
func TLSClient(opts TLSClientOptions) (*http.Client, error) {
|
||||
transport, err := TLSTransport(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &http.Client{Transport: transport}, nil
|
||||
}
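Putting the TLS helpers together, a caller typically fills TLSClientOptions with certificate paths (or pre-loaded keys) and asks for a ready-made client. A sketch with hypothetical file paths and server name:

package main

import (
	httptransport "github.com/go-openapi/runtime/client"
)

func main() {
	// Hypothetical paths; leaving CA empty falls back to the system cert pool.
	opts := httptransport.TLSClientOptions{
		Certificate: "/etc/pki/client.crt",
		Key:         "/etc/pki/client.key",
		CA:          "/etc/pki/ca.crt",
		ServerName:  "registry.example.com",
	}

	client, err := httptransport.TLSClient(opts)
	if err != nil {
		panic(err)
	}

	_ = client // e.g. pass to NewWithClient for a mutual-TLS runtime
}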
|
||||
|
||||
// DefaultTimeout the default request timeout
|
||||
var DefaultTimeout = 30 * time.Second
|
||||
|
||||
// Runtime represents an API client that uses the transport
|
||||
// to make http requests based on a swagger specification.
|
||||
type Runtime struct {
|
||||
DefaultMediaType string
|
||||
DefaultAuthentication runtime.ClientAuthInfoWriter
|
||||
Consumers map[string]runtime.Consumer
|
||||
Producers map[string]runtime.Producer
|
||||
|
||||
Transport http.RoundTripper
|
||||
Jar http.CookieJar
|
||||
//Spec *spec.Document
|
||||
Host string
|
||||
BasePath string
|
||||
Formats strfmt.Registry
|
||||
Context context.Context
|
||||
|
||||
Debug bool
|
||||
logger logger.Logger
|
||||
|
||||
clientOnce *sync.Once
|
||||
client *http.Client
|
||||
schemes []string
|
||||
}
|
||||
|
||||
// New creates a new default runtime for a swagger api runtime.Client
|
||||
func New(host, basePath string, schemes []string) *Runtime {
|
||||
var rt Runtime
|
||||
rt.DefaultMediaType = runtime.JSONMime
|
||||
|
||||
// TODO: actually infer this stuff from the spec
|
||||
rt.Consumers = map[string]runtime.Consumer{
|
||||
runtime.YAMLMime: yamlpc.YAMLConsumer(),
|
||||
runtime.JSONMime: runtime.JSONConsumer(),
|
||||
runtime.XMLMime: runtime.XMLConsumer(),
|
||||
runtime.TextMime: runtime.TextConsumer(),
|
||||
runtime.HTMLMime: runtime.TextConsumer(),
|
||||
runtime.CSVMime: runtime.CSVConsumer(),
|
||||
runtime.DefaultMime: runtime.ByteStreamConsumer(),
|
||||
}
|
||||
rt.Producers = map[string]runtime.Producer{
|
||||
runtime.YAMLMime: yamlpc.YAMLProducer(),
|
||||
runtime.JSONMime: runtime.JSONProducer(),
|
||||
runtime.XMLMime: runtime.XMLProducer(),
|
||||
runtime.TextMime: runtime.TextProducer(),
|
||||
runtime.HTMLMime: runtime.TextProducer(),
|
||||
runtime.CSVMime: runtime.CSVProducer(),
|
||||
runtime.DefaultMime: runtime.ByteStreamProducer(),
|
||||
}
|
||||
rt.Transport = http.DefaultTransport
|
||||
rt.Jar = nil
|
||||
rt.Host = host
|
||||
rt.BasePath = basePath
|
||||
rt.Context = context.Background()
|
||||
rt.clientOnce = new(sync.Once)
|
||||
if !strings.HasPrefix(rt.BasePath, "/") {
|
||||
rt.BasePath = "/" + rt.BasePath
|
||||
}
|
||||
|
||||
rt.Debug = logger.DebugEnabled()
|
||||
rt.logger = logger.StandardLogger{}
|
||||
|
||||
if len(schemes) > 0 {
|
||||
rt.schemes = schemes
|
||||
}
|
||||
return &rt
|
||||
}
|
||||
|
||||
// NewWithClient allows you to create a new transport with a configured http.Client
|
||||
func NewWithClient(host, basePath string, schemes []string, client *http.Client) *Runtime {
|
||||
rt := New(host, basePath, schemes)
|
||||
if client != nil {
|
||||
rt.clientOnce.Do(func() {
|
||||
rt.client = client
|
||||
})
|
||||
}
|
||||
return rt
|
||||
}
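New and NewWithClient are the usual entry points for generated clients: pick a host, base path, and schemes, then optionally attach a default auth writer and a format registry. A sketch with hypothetical values:

package main

import (
	"github.com/go-openapi/strfmt"

	httptransport "github.com/go-openapi/runtime/client"
)

func main() {
	// Hypothetical host and base path; an empty scheme list defaults to "http".
	rt := httptransport.New("api.example.com", "/v1", []string{"https"})
	rt.DefaultAuthentication = httptransport.BearerToken("my-token")
	rt.Formats = strfmt.Default

	_ = rt // hand to a generated client, or use NewWithClient to supply a custom *http.Client
}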
|
||||
|
||||
// WithOpenTracing adds opentracing support to the provided runtime.
|
||||
// A new client span is created for each request.
|
||||
// If the context of the client operation does not contain an active span, no span is created.
|
||||
// The provided opts are applied to each span, for example to add global tags.
|
||||
func (r *Runtime) WithOpenTracing(opts ...opentracing.StartSpanOption) runtime.ClientTransport {
|
||||
return newOpenTracingTransport(r, r.Host, opts)
|
||||
}
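WithOpenTracing wraps the runtime so that any operation whose context already carries a span gets a child client span around the HTTP call. A sketch that adds a hypothetical global tag:

package main

import (
	opentracing "github.com/opentracing/opentracing-go"

	httptransport "github.com/go-openapi/runtime/client"
)

func main() {
	rt := httptransport.New("api.example.com", "/v1", []string{"https"})

	// Each client span created by the wrapper carries this tag.
	traced := rt.WithOpenTracing(opentracing.Tag{Key: "component", Value: "example-client"})

	_ = traced // use this runtime.ClientTransport when constructing the generated client
}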
|
||||
|
||||
func (r *Runtime) pickScheme(schemes []string) string {
|
||||
if v := r.selectScheme(r.schemes); v != "" {
|
||||
return v
|
||||
}
|
||||
if v := r.selectScheme(schemes); v != "" {
|
||||
return v
|
||||
}
|
||||
return "http"
|
||||
}
|
||||
|
||||
func (r *Runtime) selectScheme(schemes []string) string {
|
||||
schLen := len(schemes)
|
||||
if schLen == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
scheme := schemes[0]
|
||||
// prefer https, but skip when not possible
|
||||
if scheme != "https" && schLen > 1 {
|
||||
for _, sch := range schemes {
|
||||
if sch == "https" {
|
||||
scheme = sch
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return scheme
|
||||
}
|
||||
func transportOrDefault(left, right http.RoundTripper) http.RoundTripper {
|
||||
if left == nil {
|
||||
return right
|
||||
}
|
||||
return left
|
||||
}
|
||||
|
||||
// EnableConnectionReuse drains the remaining body from a response
|
||||
// so that go will reuse the TCP connections.
|
||||
//
|
||||
// This is not enabled by default because there are servers where
|
||||
// the response never gets closed and that would make the code hang forever.
|
||||
// So instead it's provided as an http client middleware that can be used to override
|
||||
// any request.
|
||||
func (r *Runtime) EnableConnectionReuse() {
|
||||
if r.client == nil {
|
||||
r.Transport = KeepAliveTransport(
|
||||
transportOrDefault(r.Transport, http.DefaultTransport),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
r.client.Transport = KeepAliveTransport(
|
||||
transportOrDefault(r.client.Transport,
|
||||
transportOrDefault(r.Transport, http.DefaultTransport),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// createHttpRequest takes a client operation and creates the equivalent http.Request
|
||||
func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*request, *http.Request, error) {
|
||||
params, _, auth := operation.Params, operation.Reader, operation.AuthInfo
|
||||
|
||||
request, err := newRequest(operation.Method, operation.PathPattern, params)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var accept []string
|
||||
accept = append(accept, operation.ProducesMediaTypes...)
|
||||
if err = request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if auth == nil && r.DefaultAuthentication != nil {
|
||||
auth = runtime.ClientAuthInfoWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error {
|
||||
if req.GetHeaderParams().Get(runtime.HeaderAuthorization) != "" {
|
||||
return nil
|
||||
}
|
||||
return r.DefaultAuthentication.AuthenticateRequest(req, reg)
|
||||
})
|
||||
}
|
||||
//if auth != nil {
|
||||
// if err := auth.AuthenticateRequest(request, r.Formats); err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
//}
|
||||
|
||||
// TODO: pick appropriate media type
|
||||
cmt := r.DefaultMediaType
|
||||
for _, mediaType := range operation.ConsumesMediaTypes {
|
||||
// Pick first non-empty media type
|
||||
if mediaType != "" {
|
||||
cmt = mediaType
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := r.Producers[cmt]; !ok && cmt != runtime.MultipartFormMime && cmt != runtime.URLencodedFormMime {
|
||||
return nil, nil, fmt.Errorf("none of producers: %v registered. try %s", r.Producers, cmt)
|
||||
}
|
||||
|
||||
req, err := request.buildHTTP(cmt, r.BasePath, r.Producers, r.Formats, auth)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
req.URL.Scheme = r.pickScheme(operation.Schemes)
|
||||
req.URL.Host = r.Host
|
||||
req.Host = r.Host
|
||||
return request, req, nil
|
||||
}
|
||||
|
||||
func (r *Runtime) CreateHttpRequest(operation *runtime.ClientOperation) (req *http.Request, err error) {
|
||||
_, req, err = r.createHttpRequest(operation)
|
||||
return
|
||||
}
|
||||
|
||||
// Submit sends a request and, when the successful response has a body, turns that body into the result;
|
||||
// everything else is turned into a swagger API error that retains the status code
|
||||
func (r *Runtime) Submit(operation *runtime.ClientOperation) (interface{}, error) {
|
||||
_, readResponse, _ := operation.Params, operation.Reader, operation.AuthInfo
|
||||
|
||||
request, req, err := r.createHttpRequest(operation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r.clientOnce.Do(func() {
|
||||
r.client = &http.Client{
|
||||
Transport: r.Transport,
|
||||
Jar: r.Jar,
|
||||
}
|
||||
})
|
||||
|
||||
if r.Debug {
|
||||
b, err2 := httputil.DumpRequestOut(req, true)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
r.logger.Debugf("%s\n", string(b))
|
||||
}
|
||||
|
||||
var hasTimeout bool
|
||||
pctx := operation.Context
|
||||
if pctx == nil {
|
||||
pctx = r.Context
|
||||
} else {
|
||||
hasTimeout = true
|
||||
}
|
||||
if pctx == nil {
|
||||
pctx = context.Background()
|
||||
}
|
||||
var ctx context.Context
|
||||
var cancel context.CancelFunc
|
||||
if hasTimeout {
|
||||
ctx, cancel = context.WithCancel(pctx)
|
||||
} else {
|
||||
ctx, cancel = context.WithTimeout(pctx, request.timeout)
|
||||
}
|
||||
defer cancel()
|
||||
|
||||
client := operation.Client
|
||||
if client == nil {
|
||||
client = r.client
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
res, err := client.Do(req) // make requests, by default follows 10 redirects before failing
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
ct := res.Header.Get(runtime.HeaderContentType)
|
||||
if ct == "" { // this should really really never occur
|
||||
ct = r.DefaultMediaType
|
||||
}
|
||||
|
||||
if r.Debug {
|
||||
printBody := true
|
||||
if ct == runtime.DefaultMime {
|
||||
printBody = false // Spare the terminal from a binary blob.
|
||||
}
|
||||
b, err2 := httputil.DumpResponse(res, printBody)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
r.logger.Debugf("%s\n", string(b))
|
||||
}
|
||||
|
||||
mt, _, err := mime.ParseMediaType(ct)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parse content type: %s", err)
|
||||
}
|
||||
|
||||
cons, ok := r.Consumers[mt]
|
||||
if !ok {
|
||||
if cons, ok = r.Consumers["*/*"]; !ok {
|
||||
// scream about not knowing what to do
|
||||
return nil, fmt.Errorf("no consumer: %q", ct)
|
||||
}
|
||||
}
|
||||
return readResponse.ReadResponse(response{res}, cons)
|
||||
}
|
||||
|
||||
// SetDebug changes the debug flag.
|
||||
// It ensures that client and middlewares have the set debug level.
|
||||
func (r *Runtime) SetDebug(debug bool) {
|
||||
r.Debug = debug
|
||||
middleware.Debug = debug
|
||||
}
|
||||
|
||||
// SetLogger changes the logger stream.
|
||||
// It ensures that client and middlewares use the same logger.
|
||||
func (r *Runtime) SetLogger(logger logger.Logger) {
|
||||
r.logger = logger
|
||||
middleware.Logger = logger
|
||||
}
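As an illustration of the constructors and knobs in this file, here is a minimal, hedged sketch of wiring up the client runtime; the host, base path, schemes and timeout are placeholders, and the import path is assumed to be the usual location of this vendored package rather than something this diff establishes.

```go
package main

import (
	"net/http"
	"time"

	httptransport "github.com/go-openapi/runtime/client"
)

func main() {
	// Plain constructor: host, base path and URL schemes.
	rt := httptransport.New("api.example.com", "/v1", []string{"https"})

	// Or hand over a pre-configured *http.Client instead.
	rt = httptransport.NewWithClient("api.example.com", "/v1", []string{"https"},
		&http.Client{Timeout: 30 * time.Second})

	rt.SetDebug(true)          // dump requests and responses via the configured logger
	rt.EnableConnectionReuse() // drain response bodies so keep-alive connections are reused
	_ = rt                     // rt.Submit(...) would then be called with a runtime.ClientOperation
}
```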
|
|
@ -0,0 +1,20 @@
|
|||
package logger
|
||||
|
||||
import "os"
|
||||
|
||||
type Logger interface {
|
||||
Printf(format string, args ...interface{})
|
||||
Debugf(format string, args ...interface{})
|
||||
}
|
||||
|
||||
func DebugEnabled() bool {
|
||||
d := os.Getenv("SWAGGER_DEBUG")
|
||||
if d != "" && d != "false" && d != "0" {
|
||||
return true
|
||||
}
|
||||
d = os.Getenv("DEBUG")
|
||||
if d != "" && d != "false" && d != "0" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
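DebugEnabled drives the Debug default of the runtime above. A small sketch of how the environment switches it on (the import path is assumed to be this vendored logger package):

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-openapi/runtime/logger"
)

func main() {
	fmt.Println(logger.DebugEnabled()) // false while neither variable is set

	// Any value other than "", "false" and "0" enables debug logging.
	os.Setenv("SWAGGER_DEBUG", "1") // DEBUG=1 would work as well
	fmt.Println(logger.DebugEnabled()) // true
}
```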
|
|
@ -0,0 +1,22 @@
|
|||
package logger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
type StandardLogger struct{}
|
||||
|
||||
func (StandardLogger) Printf(format string, args ...interface{}) {
|
||||
if len(format) == 0 || format[len(format)-1] != '\n' {
|
||||
format += "\n"
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, format, args...)
|
||||
}
|
||||
|
||||
func (StandardLogger) Debugf(format string, args ...interface{}) {
|
||||
if len(format) == 0 || format[len(format)-1] != '\n' {
|
||||
format += "\n"
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, format, args...)
|
||||
}
|
|
@ -0,0 +1,622 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package middleware
|
||||
|
||||
import (
|
||||
stdContext "context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/go-openapi/analysis"
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/loads"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/strfmt"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
"github.com/go-openapi/runtime/logger"
|
||||
"github.com/go-openapi/runtime/middleware/untyped"
|
||||
"github.com/go-openapi/runtime/security"
|
||||
)
|
||||
|
||||
// Debug when true turns on verbose logging
|
||||
var Debug = logger.DebugEnabled()
|
||||
var Logger logger.Logger = logger.StandardLogger{}
|
||||
|
||||
func debugLog(format string, args ...interface{}) {
|
||||
if Debug {
|
||||
Logger.Printf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// A Builder can create middlewares
|
||||
type Builder func(http.Handler) http.Handler
|
||||
|
||||
// PassthroughBuilder returns the handler, aka the builder identity function
|
||||
func PassthroughBuilder(handler http.Handler) http.Handler { return handler }
|
||||
|
||||
// RequestBinder is an interface for types to implement
|
||||
// when they want to be able to bind from a request
|
||||
type RequestBinder interface {
|
||||
BindRequest(*http.Request, *MatchedRoute) error
|
||||
}
|
||||
|
||||
// Responder is an interface for types to implement
|
||||
// when they want to be considered for writing HTTP responses
|
||||
type Responder interface {
|
||||
WriteResponse(http.ResponseWriter, runtime.Producer)
|
||||
}
|
||||
|
||||
// ResponderFunc wraps a func as a Responder interface
|
||||
type ResponderFunc func(http.ResponseWriter, runtime.Producer)
|
||||
|
||||
// WriteResponse writes to the response
|
||||
func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Producer) {
|
||||
fn(rw, pr)
|
||||
}
|
||||
|
||||
// Context is a type safe wrapper around an untyped request context
|
||||
// used throughout to store request context with the standard context attached
|
||||
// to the http.Request
|
||||
type Context struct {
|
||||
spec *loads.Document
|
||||
analyzer *analysis.Spec
|
||||
api RoutableAPI
|
||||
router Router
|
||||
}
|
||||
|
||||
type routableUntypedAPI struct {
|
||||
api *untyped.API
|
||||
hlock *sync.Mutex
|
||||
handlers map[string]map[string]http.Handler
|
||||
defaultConsumes string
|
||||
defaultProduces string
|
||||
}
|
||||
|
||||
func newRoutableUntypedAPI(spec *loads.Document, api *untyped.API, context *Context) *routableUntypedAPI {
|
||||
var handlers map[string]map[string]http.Handler
|
||||
if spec == nil || api == nil {
|
||||
return nil
|
||||
}
|
||||
analyzer := analysis.New(spec.Spec())
|
||||
for method, hls := range analyzer.Operations() {
|
||||
um := strings.ToUpper(method)
|
||||
for path, op := range hls {
|
||||
schemes := analyzer.SecurityRequirementsFor(op)
|
||||
|
||||
if oh, ok := api.OperationHandlerFor(method, path); ok {
|
||||
if handlers == nil {
|
||||
handlers = make(map[string]map[string]http.Handler)
|
||||
}
|
||||
if b, ok := handlers[um]; !ok || b == nil {
|
||||
handlers[um] = make(map[string]http.Handler)
|
||||
}
|
||||
|
||||
var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// lookup route info in the context
|
||||
route, rCtx, _ := context.RouteInfo(r)
|
||||
if rCtx != nil {
|
||||
r = rCtx
|
||||
}
|
||||
|
||||
// bind and validate the request using reflection
|
||||
var bound interface{}
|
||||
var validation error
|
||||
bound, r, validation = context.BindAndValidate(r, route)
|
||||
if validation != nil {
|
||||
context.Respond(w, r, route.Produces, route, validation)
|
||||
return
|
||||
}
|
||||
|
||||
// actually handle the request
|
||||
result, err := oh.Handle(bound)
|
||||
if err != nil {
|
||||
// respond with failure
|
||||
context.Respond(w, r, route.Produces, route, err)
|
||||
return
|
||||
}
|
||||
|
||||
// respond with success
|
||||
context.Respond(w, r, route.Produces, route, result)
|
||||
})
|
||||
|
||||
if len(schemes) > 0 {
|
||||
handler = newSecureAPI(context, handler)
|
||||
}
|
||||
handlers[um][path] = handler
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &routableUntypedAPI{
|
||||
api: api,
|
||||
hlock: new(sync.Mutex),
|
||||
handlers: handlers,
|
||||
defaultProduces: api.DefaultProduces,
|
||||
defaultConsumes: api.DefaultConsumes,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool) {
|
||||
r.hlock.Lock()
|
||||
paths, ok := r.handlers[strings.ToUpper(method)]
|
||||
if !ok {
|
||||
r.hlock.Unlock()
|
||||
return nil, false
|
||||
}
|
||||
handler, ok := paths[path]
|
||||
r.hlock.Unlock()
|
||||
return handler, ok
|
||||
}
|
||||
func (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) {
|
||||
return r.api.ServeError
|
||||
}
|
||||
func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer {
|
||||
return r.api.ConsumersFor(mediaTypes)
|
||||
}
|
||||
func (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer {
|
||||
return r.api.ProducersFor(mediaTypes)
|
||||
}
|
||||
func (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator {
|
||||
return r.api.AuthenticatorsFor(schemes)
|
||||
}
|
||||
func (r *routableUntypedAPI) Authorizer() runtime.Authorizer {
|
||||
return r.api.Authorizer()
|
||||
}
|
||||
func (r *routableUntypedAPI) Formats() strfmt.Registry {
|
||||
return r.api.Formats()
|
||||
}
|
||||
|
||||
func (r *routableUntypedAPI) DefaultProduces() string {
|
||||
return r.defaultProduces
|
||||
}
|
||||
|
||||
func (r *routableUntypedAPI) DefaultConsumes() string {
|
||||
return r.defaultConsumes
|
||||
}
|
||||
|
||||
// NewRoutableContext creates a new context for a routable API
|
||||
func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context {
|
||||
var an *analysis.Spec
|
||||
if spec != nil {
|
||||
an = analysis.New(spec.Spec())
|
||||
}
|
||||
ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes}
|
||||
return ctx
|
||||
}
|
||||
|
||||
// NewContext creates a new context wrapper
|
||||
func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context {
|
||||
var an *analysis.Spec
|
||||
if spec != nil {
|
||||
an = analysis.New(spec.Spec())
|
||||
}
|
||||
ctx := &Context{spec: spec, analyzer: an}
|
||||
ctx.api = newRoutableUntypedAPI(spec, api, ctx)
|
||||
ctx.router = routes
|
||||
return ctx
|
||||
}
|
||||
|
||||
// Serve serves the specified spec with the specified api registrations as a http.Handler
|
||||
func Serve(spec *loads.Document, api *untyped.API) http.Handler {
|
||||
return ServeWithBuilder(spec, api, PassthroughBuilder)
|
||||
}
|
||||
|
||||
// ServeWithBuilder serves the specified spec with the specified api registrations as a http.Handler that is decorated
|
||||
// by the Builder
|
||||
func ServeWithBuilder(spec *loads.Document, api *untyped.API, builder Builder) http.Handler {
|
||||
context := NewContext(spec, api, nil)
|
||||
return context.APIHandler(builder)
|
||||
}
|
||||
|
||||
type contextKey int8
|
||||
|
||||
const (
|
||||
_ contextKey = iota
|
||||
ctxContentType
|
||||
ctxResponseFormat
|
||||
ctxMatchedRoute
|
||||
ctxBoundParams
|
||||
ctxSecurityPrincipal
|
||||
ctxSecurityScopes
|
||||
)
|
||||
|
||||
// MatchedRouteFrom request context value.
|
||||
func MatchedRouteFrom(req *http.Request) *MatchedRoute {
|
||||
mr := req.Context().Value(ctxMatchedRoute)
|
||||
if mr == nil {
|
||||
return nil
|
||||
}
|
||||
if res, ok := mr.(*MatchedRoute); ok {
|
||||
return res
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SecurityPrincipalFrom request context value.
|
||||
func SecurityPrincipalFrom(req *http.Request) interface{} {
|
||||
return req.Context().Value(ctxSecurityPrincipal)
|
||||
}
|
||||
|
||||
// SecurityScopesFrom request context value.
|
||||
func SecurityScopesFrom(req *http.Request) []string {
|
||||
rs := req.Context().Value(ctxSecurityScopes)
|
||||
if res, ok := rs.([]string); ok {
|
||||
return res
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type contentTypeValue struct {
|
||||
MediaType string
|
||||
Charset string
|
||||
}
|
||||
|
||||
// BasePath returns the base path for this API
|
||||
func (c *Context) BasePath() string {
|
||||
return c.spec.BasePath()
|
||||
}
|
||||
|
||||
// RequiredProduces returns the accepted content types for responses
|
||||
func (c *Context) RequiredProduces() []string {
|
||||
return c.analyzer.RequiredProduces()
|
||||
}
|
||||
|
||||
// BindValidRequest binds a params object to a request but only when the request is valid
|
||||
// if the request is not valid an error will be returned
|
||||
func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error {
|
||||
var res []error
|
||||
var requestContentType string
|
||||
|
||||
// check and validate content type, select consumer
|
||||
if runtime.HasBody(request) {
|
||||
ct, _, err := runtime.ContentType(request.Header)
|
||||
if err != nil {
|
||||
res = append(res, err)
|
||||
} else {
|
||||
if err := validateContentType(route.Consumes, ct); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
if len(res) == 0 {
|
||||
cons, ok := route.Consumers[ct]
|
||||
if !ok {
|
||||
res = append(res, errors.New(500, "no consumer registered for %s", ct))
|
||||
} else {
|
||||
route.Consumer = cons
|
||||
requestContentType = ct
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check and validate the response format
|
||||
if len(res) == 0 {
|
||||
// if the route does not provide Produces and a default contentType could not be identified
|
||||
// based on a body, as is typical for GET and DELETE requests, then default the contentType to */*.
|
||||
if len(route.Produces) == 0 && requestContentType == "" {
|
||||
requestContentType = "*/*"
|
||||
}
|
||||
|
||||
if str := NegotiateContentType(request, route.Produces, requestContentType); str == "" {
|
||||
res = append(res, errors.InvalidResponseFormat(request.Header.Get(runtime.HeaderAccept), route.Produces))
|
||||
}
|
||||
}
|
||||
|
||||
// now bind the request with the provided binder
|
||||
// it's assumed the binder will also validate the request and return an error if the
|
||||
// request is invalid
|
||||
if binder != nil && len(res) == 0 {
|
||||
if err := binder.BindRequest(request, route); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ContentType gets the parsed value of a content type
|
||||
// Returns the media type, its charset and a shallow copy of the request
|
||||
// when its context doesn't contain the content type value, otherwise it returns
|
||||
// the same request
|
||||
// Returns any error that runtime.ContentType may return.
|
||||
func (c *Context) ContentType(request *http.Request) (string, string, *http.Request, error) {
|
||||
var rCtx = request.Context()
|
||||
|
||||
if v, ok := rCtx.Value(ctxContentType).(*contentTypeValue); ok {
|
||||
return v.MediaType, v.Charset, request, nil
|
||||
}
|
||||
|
||||
mt, cs, err := runtime.ContentType(request.Header)
|
||||
if err != nil {
|
||||
return "", "", nil, err
|
||||
}
|
||||
rCtx = stdContext.WithValue(rCtx, ctxContentType, &contentTypeValue{mt, cs})
|
||||
return mt, cs, request.WithContext(rCtx), nil
|
||||
}
|
||||
|
||||
// LookupRoute looks a route up and returns true when it is found
|
||||
func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) {
|
||||
if route, ok := c.router.Lookup(request.Method, request.URL.EscapedPath()); ok {
|
||||
return route, ok
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// RouteInfo tries to match a route for this request
|
||||
// Returns the matched route, a shallow copy of the request if its context
|
||||
// contains the matched route, otherwise the same request, and a bool to
|
||||
// indicate whether the request matches one of the routes; if it doesn't,
|
||||
// then it returns false and nil for the other two return values
|
||||
func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request, bool) {
|
||||
var rCtx = request.Context()
|
||||
|
||||
if v, ok := rCtx.Value(ctxMatchedRoute).(*MatchedRoute); ok {
|
||||
return v, request, ok
|
||||
}
|
||||
|
||||
if route, ok := c.LookupRoute(request); ok {
|
||||
rCtx = stdContext.WithValue(rCtx, ctxMatchedRoute, route)
|
||||
return route, request.WithContext(rCtx), ok
|
||||
}
|
||||
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// ResponseFormat negotiates the response content type
|
||||
// Returns the response format and a shallow copy of the request if its context
|
||||
// doesn't contain the response format, otherwise the same request
|
||||
func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *http.Request) {
|
||||
var rCtx = r.Context()
|
||||
|
||||
if v, ok := rCtx.Value(ctxResponseFormat).(string); ok {
|
||||
debugLog("[%s %s] found response format %q in context", r.Method, r.URL.Path, v)
|
||||
return v, r
|
||||
}
|
||||
|
||||
format := NegotiateContentType(r, offers, "")
|
||||
if format != "" {
|
||||
debugLog("[%s %s] set response format %q in context", r.Method, r.URL.Path, format)
|
||||
r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format))
|
||||
}
|
||||
debugLog("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format)
|
||||
return format, r
|
||||
}
|
||||
|
||||
// AllowedMethods gets the allowed methods for the path of this request
|
||||
func (c *Context) AllowedMethods(request *http.Request) []string {
|
||||
return c.router.OtherMethods(request.Method, request.URL.EscapedPath())
|
||||
}
|
||||
|
||||
// ResetAuth removes the current principal from the request context
|
||||
func (c *Context) ResetAuth(request *http.Request) *http.Request {
|
||||
rctx := request.Context()
|
||||
rctx = stdContext.WithValue(rctx, ctxSecurityPrincipal, nil)
|
||||
rctx = stdContext.WithValue(rctx, ctxSecurityScopes, nil)
|
||||
return request.WithContext(rctx)
|
||||
}
|
||||
|
||||
// Authorize authorizes the request
|
||||
// Returns the principal object and a shallow copy of the request when its
|
||||
// context doesn't contain the principal, otherwise the same request or an error
|
||||
// (as the last return value) if one of the authenticators returns one, or an Unauthenticated error
|
||||
func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, *http.Request, error) {
|
||||
if route == nil || !route.HasAuth() {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
var rCtx = request.Context()
|
||||
if v := rCtx.Value(ctxSecurityPrincipal); v != nil {
|
||||
return v, request, nil
|
||||
}
|
||||
|
||||
applies, usr, err := route.Authenticators.Authenticate(request, route)
|
||||
if !applies || err != nil || !route.Authenticators.AllowsAnonymous() && usr == nil {
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return nil, nil, errors.Unauthenticated("invalid credentials")
|
||||
}
|
||||
if route.Authorizer != nil {
|
||||
if err := route.Authorizer.Authorize(request, usr); err != nil {
|
||||
if _, ok := err.(errors.Error); ok {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return nil, nil, errors.New(http.StatusForbidden, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
rCtx = request.Context()
|
||||
|
||||
rCtx = stdContext.WithValue(rCtx, ctxSecurityPrincipal, usr)
|
||||
rCtx = stdContext.WithValue(rCtx, ctxSecurityScopes, route.Authenticator.AllScopes())
|
||||
return usr, request.WithContext(rCtx), nil
|
||||
}
|
||||
|
||||
// BindAndValidate binds and validates the request
|
||||
// Returns the validation map and a shallow copy of the request when its context
|
||||
// doesn't contain the validation, otherwise it returns the same request or a
|
||||
// CompositeValidationError.
|
||||
func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (interface{}, *http.Request, error) {
|
||||
var rCtx = request.Context()
|
||||
|
||||
if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok {
|
||||
debugLog("got cached validation (valid: %t)", len(v.result) == 0)
|
||||
if len(v.result) > 0 {
|
||||
return v.bound, request, errors.CompositeValidationError(v.result...)
|
||||
}
|
||||
return v.bound, request, nil
|
||||
}
|
||||
result := validateRequest(c, request, matched)
|
||||
rCtx = stdContext.WithValue(rCtx, ctxBoundParams, result)
|
||||
request = request.WithContext(rCtx)
|
||||
if len(result.result) > 0 {
|
||||
return result.bound, request, errors.CompositeValidationError(result.result...)
|
||||
}
|
||||
debugLog("no validation errors found")
|
||||
return result.bound, request, nil
|
||||
}
|
||||
|
||||
// NotFound is the default not-found responder for when no route has been matched yet
|
||||
func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) {
|
||||
c.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound("not found"))
|
||||
}
|
||||
|
||||
// Respond renders the response after doing some content negotiation
|
||||
func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) {
|
||||
debugLog("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces)
|
||||
offers := []string{}
|
||||
for _, mt := range produces {
|
||||
if mt != c.api.DefaultProduces() {
|
||||
offers = append(offers, mt)
|
||||
}
|
||||
}
|
||||
// the default producer is last so more specific producers take precedence
|
||||
offers = append(offers, c.api.DefaultProduces())
|
||||
debugLog("offers: %v", offers)
|
||||
|
||||
var format string
|
||||
format, r = c.ResponseFormat(r, offers)
|
||||
rw.Header().Set(runtime.HeaderContentType, format)
|
||||
|
||||
if resp, ok := data.(Responder); ok {
|
||||
producers := route.Producers
|
||||
prod, ok := producers[format]
|
||||
if !ok {
|
||||
prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
|
||||
pr, ok := prods[c.api.DefaultProduces()]
|
||||
if !ok {
|
||||
panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
|
||||
}
|
||||
prod = pr
|
||||
}
|
||||
resp.WriteResponse(rw, prod)
|
||||
return
|
||||
}
|
||||
|
||||
if err, ok := data.(error); ok {
|
||||
if format == "" {
|
||||
rw.Header().Set(runtime.HeaderContentType, runtime.JSONMime)
|
||||
}
|
||||
|
||||
if realm := security.FailedBasicAuth(r); realm != "" {
|
||||
rw.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", realm))
|
||||
}
|
||||
|
||||
if route == nil || route.Operation == nil {
|
||||
c.api.ServeErrorFor("")(rw, r, err)
|
||||
return
|
||||
}
|
||||
c.api.ServeErrorFor(route.Operation.ID)(rw, r, err)
|
||||
return
|
||||
}
|
||||
|
||||
if route == nil || route.Operation == nil {
|
||||
rw.WriteHeader(200)
|
||||
if r.Method == "HEAD" {
|
||||
return
|
||||
}
|
||||
producers := c.api.ProducersFor(normalizeOffers(offers))
|
||||
prod, ok := producers[format]
|
||||
if !ok {
|
||||
panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
|
||||
}
|
||||
if err := prod.Produce(rw, data); err != nil {
|
||||
panic(err) // let the recovery middleware deal with this
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if _, code, ok := route.Operation.SuccessResponse(); ok {
|
||||
rw.WriteHeader(code)
|
||||
if code == 204 || r.Method == "HEAD" {
|
||||
return
|
||||
}
|
||||
|
||||
producers := route.Producers
|
||||
prod, ok := producers[format]
|
||||
if !ok {
|
||||
if !ok {
|
||||
prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
|
||||
pr, ok := prods[c.api.DefaultProduces()]
|
||||
if !ok {
|
||||
panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format))
|
||||
}
|
||||
prod = pr
|
||||
}
|
||||
}
|
||||
if err := prod.Produce(rw, data); err != nil {
|
||||
panic(err) // let the recovery middleware deal with this
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response"))
|
||||
}
|
||||
|
||||
func (c *Context) APIHandlerSwaggerUI(builder Builder) http.Handler {
|
||||
b := builder
|
||||
if b == nil {
|
||||
b = PassthroughBuilder
|
||||
}
|
||||
|
||||
var title string
|
||||
sp := c.spec.Spec()
|
||||
if sp != nil && sp.Info != nil && sp.Info.Title != "" {
|
||||
title = sp.Info.Title
|
||||
}
|
||||
|
||||
swaggerUIOpts := SwaggerUIOpts{
|
||||
BasePath: c.BasePath(),
|
||||
Title: title,
|
||||
}
|
||||
|
||||
return Spec("", c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b)))
|
||||
}
|
||||
|
||||
// APIHandler returns a handler to serve the API; this includes a swagger spec, a router and the contract defined in the swagger spec
|
||||
func (c *Context) APIHandler(builder Builder) http.Handler {
|
||||
b := builder
|
||||
if b == nil {
|
||||
b = PassthroughBuilder
|
||||
}
|
||||
|
||||
var title string
|
||||
sp := c.spec.Spec()
|
||||
if sp != nil && sp.Info != nil && sp.Info.Title != "" {
|
||||
title = sp.Info.Title
|
||||
}
|
||||
|
||||
redocOpts := RedocOpts{
|
||||
BasePath: c.BasePath(),
|
||||
Title: title,
|
||||
}
|
||||
|
||||
return Spec("", c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b)))
|
||||
}
|
||||
|
||||
// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec
|
||||
func (c *Context) RoutesHandler(builder Builder) http.Handler {
|
||||
b := builder
|
||||
if b == nil {
|
||||
b = PassthroughBuilder
|
||||
}
|
||||
return NewRouter(c, b(NewOperationExecutor(c)))
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
Copyright (c) 2014 Naoya Inada <naoina@kuune.org>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -0,0 +1,180 @@
|
|||
# Denco
|
||||
|
||||
The fast and flexible HTTP request router for [Go](http://golang.org).
|
||||
|
||||
Denco is based on Double-Array implementation of [Kocha-urlrouter](https://github.com/naoina/kocha-urlrouter).
|
||||
However, Denco is optimized and adds some features.
|
||||
|
||||
## Features
|
||||
|
||||
* Fast (See [go-http-routing-benchmark](https://github.com/naoina/go-http-routing-benchmark))
|
||||
* [URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`)
|
||||
* Small (but enough) URL router API
|
||||
* HTTP request multiplexer like `http.ServeMux`
|
||||
|
||||
## Installation
|
||||
|
||||
go get -u github.com/go-openapi/runtime/middleware/denco
|
||||
|
||||
## Using as HTTP request multiplexer
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/go-openapi/runtime/middleware/denco"
|
||||
)
|
||||
|
||||
func Index(w http.ResponseWriter, r *http.Request, params denco.Params) {
|
||||
fmt.Fprintf(w, "Welcome to Denco!\n")
|
||||
}
|
||||
|
||||
func User(w http.ResponseWriter, r *http.Request, params denco.Params) {
|
||||
fmt.Fprintf(w, "Hello %s!\n", params.Get("name"))
|
||||
}
|
||||
|
||||
func main() {
|
||||
mux := denco.NewMux()
|
||||
handler, err := mux.Build([]denco.Handler{
|
||||
mux.GET("/", Index),
|
||||
mux.GET("/user/:name", User),
|
||||
mux.POST("/user/:name", User),
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
log.Fatal(http.ListenAndServe(":8080", handler))
|
||||
}
|
||||
```
|
||||
|
||||
## Using as URL router
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/go-openapi/runtime/middleware/denco"
|
||||
)
|
||||
|
||||
type route struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func main() {
|
||||
router := denco.New()
|
||||
router.Build([]denco.Record{
|
||||
{"/", &route{"root"}},
|
||||
{"/user/:id", &route{"user"}},
|
||||
{"/user/:name/:id", &route{"username"}},
|
||||
{"/static/*filepath", &route{"static"}},
|
||||
})
|
||||
|
||||
data, params, found := router.Lookup("/")
|
||||
// print `&main.route{name:"root"}, denco.Params(nil), true`.
|
||||
fmt.Printf("%#v, %#v, %#v\n", data, params, found)
|
||||
|
||||
data, params, found = router.Lookup("/user/hoge")
|
||||
// print `&main.route{name:"user"}, denco.Params{denco.Param{Name:"id", Value:"hoge"}}, true`.
|
||||
fmt.Printf("%#v, %#v, %#v\n", data, params, found)
|
||||
|
||||
data, params, found = router.Lookup("/user/hoge/7")
|
||||
// print `&main.route{name:"username"}, denco.Params{denco.Param{Name:"name", Value:"hoge"}, denco.Param{Name:"id", Value:"7"}}, true`.
|
||||
fmt.Printf("%#v, %#v, %#v\n", data, params, found)
|
||||
|
||||
data, params, found = router.Lookup("/static/path/to/file")
|
||||
// print `&main.route{name:"static"}, denco.Params{denco.Param{Name:"filepath", Value:"path/to/file"}}, true`.
|
||||
fmt.Printf("%#v, %#v, %#v\n", data, params, found)
|
||||
}
|
||||
```
|
||||
|
||||
See [Godoc](http://godoc.org/github.com/go-openapi/runtime/middleware/denco) for more details.
|
||||
|
||||
## Getting the value of path parameter
|
||||
|
||||
You can get the value of a path parameter in two ways:
|
||||
|
||||
1. Using the [`denco.Params.Get`](http://godoc.org/github.com/go-openapi/runtime/middleware/denco#Params.Get) method
|
||||
2. Finding it with a loop
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/go-openapi/runtime/middleware/denco"
|
||||
)
|
||||
|
||||
func main() {
|
||||
router := denco.New()
|
||||
if err := router.Build([]denco.Record{
|
||||
{"/user/:name/:id", "route1"},
|
||||
}); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// 1. Using denco.Params.Get method.
|
||||
_, params, _ := router.Lookup("/user/alice/1")
|
||||
name := params.Get("name")
|
||||
if name != "" {
|
||||
fmt.Printf("Hello %s.\n", name) // prints "Hello alice.".
|
||||
}
|
||||
|
||||
// 2. Find by loop.
|
||||
for _, param := range params {
|
||||
if param.Name == "name" {
|
||||
fmt.Printf("Hello %s.\n", name) // prints "Hello alice.".
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## URL patterns
|
||||
|
||||
Denco's route matching strategy is "most nearly matching".
|
||||
|
||||
When routes `/:name` and `/alice` have been built, URI `/alice` matches the route `/alice`, not `/:name`.
|
||||
This is because the URI `/alice` matches the route `/alice` more closely than `/:name`.
|
||||
|
||||
For a fuller example, when the routes below have been built:
|
||||
|
||||
```
|
||||
/user/alice
|
||||
/user/:name
|
||||
/user/:name/:id
|
||||
/user/alice/:id
|
||||
/user/:id/bob
|
||||
```
|
||||
|
||||
The routes match as follows:
|
||||
|
||||
```
|
||||
/user/alice => "/user/alice" (no match with "/user/:name")
|
||||
/user/bob => "/user/:name"
|
||||
/user/naoina/1 => "/user/:name/1"
|
||||
/user/alice/1 => "/user/alice/:id" (no match with "/user/:name/:id")
|
||||
/user/1/bob => "/user/:id/bob" (no match with "/user/:name/:id")
|
||||
/user/alice/bob => "/user/alice/:id" (no match with "/user/:name/:id" and "/user/:id/bob")
|
||||
```
|
||||
|
||||
## Limitation
|
||||
|
||||
Denco has the following limitations:
|
||||
|
||||
* Number of param records (such as `/:name`) must be less than 2^22
|
||||
* Number of elements of internal slice must be less than 2^22
|
||||
|
||||
## Benchmarks
|
||||
|
||||
cd $GOPATH/src/github.com/go-openapi/runtime/middleware/denco
|
||||
go test -bench . -benchmem
|
||||
|
||||
## License
|
||||
|
||||
Denco is licensed under the MIT License.
|
|
@ -0,0 +1,460 @@
|
|||
// Package denco provides fast URL router.
|
||||
package denco
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// ParamCharacter is a special character for path parameter.
|
||||
ParamCharacter = ':'
|
||||
|
||||
// WildcardCharacter is a special character for wildcard path parameter.
|
||||
WildcardCharacter = '*'
|
||||
|
||||
// TerminationCharacter is a special character for end of path.
|
||||
TerminationCharacter = '#'
|
||||
|
||||
// SeparatorCharacter separates path segments.
|
||||
SeparatorCharacter = '/'
|
||||
|
||||
// PathParamCharacter indicates a RESTCONF path param
|
||||
PathParamCharacter = '='
|
||||
|
||||
// MaxSize is max size of records and internal slice.
|
||||
MaxSize = (1 << 22) - 1
|
||||
)
|
||||
|
||||
// Router represents a URL router.
|
||||
type Router struct {
|
||||
// SizeHint is the expected maximum number of path parameters in the records passed to Build.
|
||||
// SizeHint will be used to determine the capacity of the memory to allocate.
|
||||
// By default, SizeHint is determined from the records given to Build.
|
||||
SizeHint int
|
||||
|
||||
static map[string]interface{}
|
||||
param *doubleArray
|
||||
}
|
||||
|
||||
// New returns a new Router.
|
||||
func New() *Router {
|
||||
return &Router{
|
||||
SizeHint: -1,
|
||||
static: make(map[string]interface{}),
|
||||
param: newDoubleArray(),
|
||||
}
|
||||
}
|
||||
|
||||
// Lookup returns the data and path parameters associated with path.
|
||||
// params is a slice of Param arranged in the order in which the parameters appeared.
|
||||
// e.g. when the built routing path is "/path/to/:id/:name" and the given path is "/path/to/1/alice", the params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}].
|
||||
func (rt *Router) Lookup(path string) (data interface{}, params Params, found bool) {
|
||||
if data, found := rt.static[path]; found {
|
||||
return data, nil, true
|
||||
}
|
||||
if len(rt.param.node) == 1 {
|
||||
return nil, nil, false
|
||||
}
|
||||
nd, params, found := rt.param.lookup(path, make([]Param, 0, rt.SizeHint), 1)
|
||||
if !found {
|
||||
return nil, nil, false
|
||||
}
|
||||
for i := 0; i < len(params); i++ {
|
||||
params[i].Name = nd.paramNames[i]
|
||||
}
|
||||
return nd.data, params, true
|
||||
}
|
||||
|
||||
// Build builds URL router from records.
|
||||
func (rt *Router) Build(records []Record) error {
|
||||
statics, params := makeRecords(records)
|
||||
if len(params) > MaxSize {
|
||||
return fmt.Errorf("denco: too many records")
|
||||
}
|
||||
if rt.SizeHint < 0 {
|
||||
rt.SizeHint = 0
|
||||
for _, p := range params {
|
||||
size := 0
|
||||
for _, k := range p.Key {
|
||||
if k == ParamCharacter || k == WildcardCharacter {
|
||||
size++
|
||||
}
|
||||
}
|
||||
if size > rt.SizeHint {
|
||||
rt.SizeHint = size
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, r := range statics {
|
||||
rt.static[r.Key] = r.Value
|
||||
}
|
||||
if err := rt.param.build(params, 1, 0, make(map[int]struct{})); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Param represents name and value of path parameter.
|
||||
type Param struct {
|
||||
Name string
|
||||
Value string
|
||||
}
|
||||
|
||||
// Params represents the name and value of path parameters.
|
||||
type Params []Param
|
||||
|
||||
// Get gets the first value associated with the given name.
|
||||
// If there are no values associated with the key, Get returns "".
|
||||
func (ps Params) Get(name string) string {
|
||||
for _, p := range ps {
|
||||
if p.Name == name {
|
||||
return p.Value
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type doubleArray struct {
|
||||
bc []baseCheck
|
||||
node []*node
|
||||
}
|
||||
|
||||
func newDoubleArray() *doubleArray {
|
||||
return &doubleArray{
|
||||
bc: []baseCheck{0},
|
||||
node: []*node{nil}, // The start index is adjusted to 1 because 0 is used to mark a non-existent node.
|
||||
}
|
||||
}
|
||||
|
||||
// baseCheck contains BASE, CHECK and Extra flags.
|
||||
// From the top, 22bits of BASE, 2bits of Extra flags and 8bits of CHECK.
|
||||
//
|
||||
// BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
|
||||
// |----------------------|--|--------|
|
||||
// 32 10 8 0
|
||||
type baseCheck uint32
|
||||
|
||||
func (bc baseCheck) Base() int {
|
||||
return int(bc >> 10)
|
||||
}
|
||||
|
||||
func (bc *baseCheck) SetBase(base int) {
|
||||
*bc |= baseCheck(base) << 10
|
||||
}
|
||||
|
||||
func (bc baseCheck) Check() byte {
|
||||
return byte(bc)
|
||||
}
|
||||
|
||||
func (bc *baseCheck) SetCheck(check byte) {
|
||||
*bc |= baseCheck(check)
|
||||
}
|
||||
|
||||
func (bc baseCheck) IsEmpty() bool {
|
||||
return bc&0xfffffcff == 0
|
||||
}
|
||||
|
||||
func (bc baseCheck) IsSingleParam() bool {
|
||||
return bc&paramTypeSingle == paramTypeSingle
|
||||
}
|
||||
|
||||
func (bc baseCheck) IsWildcardParam() bool {
|
||||
return bc&paramTypeWildcard == paramTypeWildcard
|
||||
}
|
||||
|
||||
func (bc baseCheck) IsAnyParam() bool {
|
||||
return bc&paramTypeAny != 0
|
||||
}
|
||||
|
||||
func (bc *baseCheck) SetSingleParam() {
|
||||
*bc |= (1 << 8)
|
||||
}
|
||||
|
||||
func (bc *baseCheck) SetWildcardParam() {
|
||||
*bc |= (1 << 9)
|
||||
}
|
||||
|
||||
const (
|
||||
paramTypeSingle = 0x0100
|
||||
paramTypeWildcard = 0x0200
|
||||
paramTypeAny = 0x0300
|
||||
)
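A quick worked example of the bit packing described in the baseCheck comment above (illustrative only; baseCheck is unexported, so this would only run inside the package):

```go
var bc baseCheck
bc.SetBase(5)       // 5 << 10                -> 0x1400
bc.SetCheck('a')    // 0x61 in the low 8 bits -> 0x1461
bc.SetSingleParam() // 1 << 8                 -> 0x1561
// bc.Base() == 5, bc.Check() == 'a', bc.IsSingleParam() == true
```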
|
||||
|
||||
func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Param, bool) {
|
||||
indices := make([]uint64, 0, 1)
|
||||
for i := 0; i < len(path); i++ {
|
||||
if da.bc[idx].IsAnyParam() {
|
||||
indices = append(indices, (uint64(i)<<32)|(uint64(idx)&0xffffffff))
|
||||
}
|
||||
c := path[i]
|
||||
if idx = nextIndex(da.bc[idx].Base(), c); idx >= len(da.bc) || da.bc[idx].Check() != c {
|
||||
goto BACKTRACKING
|
||||
}
|
||||
}
|
||||
if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter {
|
||||
return da.node[da.bc[next].Base()], params, true
|
||||
}
|
||||
BACKTRACKING:
|
||||
for j := len(indices) - 1; j >= 0; j-- {
|
||||
i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff)
|
||||
if da.bc[idx].IsSingleParam() {
|
||||
idx := nextIndex(da.bc[idx].Base(), ParamCharacter)
|
||||
if idx >= len(da.bc) {
|
||||
break
|
||||
}
|
||||
next := NextSeparator(path, i)
|
||||
params := append(params, Param{Value: path[i:next]})
|
||||
if nd, params, found := da.lookup(path[next:], params, idx); found {
|
||||
return nd, params, true
|
||||
}
|
||||
}
|
||||
if da.bc[idx].IsWildcardParam() {
|
||||
idx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
|
||||
params := append(params, Param{Value: path[i:]})
|
||||
return da.node[da.bc[idx].Base()], params, true
|
||||
}
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// build builds double-array from records.
|
||||
func (da *doubleArray) build(srcs []*record, idx, depth int, usedBase map[int]struct{}) error {
|
||||
sort.Stable(recordSlice(srcs))
|
||||
base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if leaf != nil {
|
||||
nd, err := makeNode(leaf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
da.bc[idx].SetBase(len(da.node))
|
||||
da.node = append(da.node, nd)
|
||||
}
|
||||
for _, sib := range siblings {
|
||||
da.setCheck(nextIndex(base, sib.c), sib.c)
|
||||
}
|
||||
for _, sib := range siblings {
|
||||
records := srcs[sib.start:sib.end]
|
||||
switch sib.c {
|
||||
case ParamCharacter:
|
||||
for _, r := range records {
|
||||
next := NextSeparator(r.Key, depth+1)
|
||||
name := r.Key[depth+1 : next]
|
||||
r.paramNames = append(r.paramNames, name)
|
||||
r.Key = r.Key[next:]
|
||||
}
|
||||
da.bc[idx].SetSingleParam()
|
||||
if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil {
|
||||
return err
|
||||
}
|
||||
case WildcardCharacter:
|
||||
r := records[0]
|
||||
name := r.Key[depth+1 : len(r.Key)-1]
|
||||
r.paramNames = append(r.paramNames, name)
|
||||
r.Key = ""
|
||||
da.bc[idx].SetWildcardParam()
|
||||
if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if err := da.build(records, nextIndex(base, sib.c), depth+1, usedBase); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setBase sets BASE.
|
||||
func (da *doubleArray) setBase(i, base int) {
|
||||
da.bc[i].SetBase(base)
|
||||
}
|
||||
|
||||
// setCheck sets CHECK.
|
||||
func (da *doubleArray) setCheck(i int, check byte) {
|
||||
da.bc[i].SetCheck(check)
|
||||
}
|
||||
|
||||
// findEmptyIndex returns the index of an unused BASE/CHECK node.
|
||||
func (da *doubleArray) findEmptyIndex(start int) int {
|
||||
i := start
|
||||
for ; i < len(da.bc); i++ {
|
||||
if da.bc[i].IsEmpty() {
|
||||
break
|
||||
}
|
||||
}
|
||||
return i
|
||||
}
|
||||
|
||||
// findBase returns a suitable BASE value.
|
||||
func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) {
|
||||
for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) {
|
||||
base = nextIndex(idx, firstChar)
|
||||
if _, used := usedBase[base]; used {
|
||||
continue
|
||||
}
|
||||
i := 0
|
||||
for ; i < len(siblings); i++ {
|
||||
next := nextIndex(base, siblings[i].c)
|
||||
if len(da.bc) <= next {
|
||||
da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...)
|
||||
}
|
||||
if !da.bc[next].IsEmpty() {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i == len(siblings) {
|
||||
break
|
||||
}
|
||||
}
|
||||
usedBase[base] = struct{}{}
|
||||
return base
|
||||
}
|
||||
|
||||
func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) {
|
||||
siblings, leaf, err = makeSiblings(records, depth)
|
||||
if err != nil {
|
||||
return -1, nil, nil, err
|
||||
}
|
||||
if len(siblings) < 1 {
|
||||
return -1, nil, leaf, nil
|
||||
}
|
||||
base = da.findBase(siblings, idx, usedBase)
|
||||
if base > MaxSize {
|
||||
return -1, nil, nil, fmt.Errorf("denco: too many elements of internal slice")
|
||||
}
|
||||
da.setBase(idx, base)
|
||||
return base, siblings, leaf, err
|
||||
}
|
||||
|
||||
// node represents a node of Double-Array.
|
||||
type node struct {
|
||||
data interface{}
|
||||
|
||||
// Names of path parameters.
|
||||
paramNames []string
|
||||
}
|
||||
|
||||
// makeNode returns a new node from record.
|
||||
func makeNode(r *record) (*node, error) {
|
||||
dups := make(map[string]bool)
|
||||
for _, name := range r.paramNames {
|
||||
if dups[name] {
|
||||
return nil, fmt.Errorf("denco: path parameter `%v' is duplicated in the key `%v'", name, r.Key)
|
||||
}
|
||||
dups[name] = true
|
||||
}
|
||||
return &node{data: r.Value, paramNames: r.paramNames}, nil
|
||||
}
|
||||
|
||||
// sibling represents intermediate data used while building the Double-Array.
|
||||
type sibling struct {
|
||||
// An index of start of duplicated characters.
|
||||
start int
|
||||
|
||||
// An index of end of duplicated characters.
|
||||
end int
|
||||
|
||||
// A character of sibling.
|
||||
c byte
|
||||
}
|
||||
|
||||
// nextIndex returns the next index in the BASE/CHECK array.
|
||||
func nextIndex(base int, c byte) int {
|
||||
return base ^ int(c)
|
||||
}
|
||||
|
||||
// makeSiblings returns slice of sibling.
|
||||
func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, err error) {
|
||||
var (
|
||||
pc byte
|
||||
n int
|
||||
)
|
||||
for i, r := range records {
|
||||
if len(r.Key) <= depth {
|
||||
leaf = r
|
||||
continue
|
||||
}
|
||||
c := r.Key[depth]
|
||||
switch {
|
||||
case pc < c:
|
||||
sib = append(sib, sibling{start: i, c: c})
|
||||
case pc == c:
|
||||
continue
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("denco: BUG: routing table hasn't been sorted")
|
||||
}
|
||||
if n > 0 {
|
||||
sib[n-1].end = i
|
||||
}
|
||||
pc = c
|
||||
n++
|
||||
}
|
||||
if n == 0 {
|
||||
return nil, leaf, nil
|
||||
}
|
||||
sib[n-1].end = len(records)
|
||||
return sib, leaf, nil
|
||||
}
|
||||
|
||||
// Record represents a record data for router construction.
|
||||
type Record struct {
|
||||
// Key for router construction.
|
||||
Key string
|
||||
|
||||
// Result value for Key.
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// NewRecord returns a new Record.
|
||||
func NewRecord(key string, value interface{}) Record {
|
||||
return Record{
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
}
|
||||
|
||||
// record represents a record that is used to build the Double-Array.
|
||||
type record struct {
|
||||
Record
|
||||
paramNames []string
|
||||
}
|
||||
|
||||
// makeRecords returns the records that are used to build the Double-Arrays.
|
||||
func makeRecords(srcs []Record) (statics, params []*record) {
|
||||
termChar := string(TerminationCharacter)
|
||||
paramPrefix := string(SeparatorCharacter) + string(ParamCharacter)
|
||||
wildcardPrefix := string(SeparatorCharacter) + string(WildcardCharacter)
|
||||
restconfPrefix := string(PathParamCharacter) + string(ParamCharacter)
|
||||
for _, r := range srcs {
|
||||
if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) || strings.Contains(r.Key, restconfPrefix) {
|
||||
r.Key += termChar
|
||||
params = append(params, &record{Record: r})
|
||||
} else {
|
||||
statics = append(statics, &record{Record: r})
|
||||
}
|
||||
}
|
||||
return statics, params
|
||||
}
|
||||
|
||||
// recordSlice represents a slice of Record for sort and implements the sort.Interface.
|
||||
type recordSlice []*record
|
||||
|
||||
// Len implements the sort.Interface.Len.
|
||||
func (rs recordSlice) Len() int {
|
||||
return len(rs)
|
||||
}
|
||||
|
||||
// Less implements the sort.Interface.Less.
|
||||
func (rs recordSlice) Less(i, j int) bool {
|
||||
return rs[i].Key < rs[j].Key
|
||||
}
|
||||
|
||||
// Swap implements the sort.Interface.Swap.
|
||||
func (rs recordSlice) Swap(i, j int) {
|
||||
rs[i], rs[j] = rs[j], rs[i]
|
||||
}
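The parameter ordering documented on Lookup above can be seen with a tiny example (the import path is assumed to be the vendored location of this package):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/runtime/middleware/denco"
)

func main() {
	router := denco.New()
	if err := router.Build([]denco.Record{{Key: "/path/to/:id/:name", Value: "r"}}); err != nil {
		panic(err)
	}
	_, params, _ := router.Lookup("/path/to/1/alice")
	fmt.Println(params) // [{id 1} {name alice}] -- parameters in route order
}
```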
|
|
@ -0,0 +1,106 @@
|
|||
package denco
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// Mux represents a multiplexer for HTTP request.
|
||||
type Mux struct{}
|
||||
|
||||
// NewMux returns a new Mux.
|
||||
func NewMux() *Mux {
|
||||
return &Mux{}
|
||||
}
|
||||
|
||||
// GET is shorthand of Mux.Handler("GET", path, handler).
|
||||
func (m *Mux) GET(path string, handler HandlerFunc) Handler {
|
||||
return m.Handler("GET", path, handler)
|
||||
}
|
||||
|
||||
// POST is shorthand of Mux.Handler("POST", path, handler).
|
||||
func (m *Mux) POST(path string, handler HandlerFunc) Handler {
|
||||
return m.Handler("POST", path, handler)
|
||||
}
|
||||
|
||||
// PUT is shorthand of Mux.Handler("PUT", path, handler).
|
||||
func (m *Mux) PUT(path string, handler HandlerFunc) Handler {
|
||||
return m.Handler("PUT", path, handler)
|
||||
}
|
||||
|
||||
// HEAD is shorthand of Mux.Handler("HEAD", path, handler).
|
||||
func (m *Mux) HEAD(path string, handler HandlerFunc) Handler {
|
||||
return m.Handler("HEAD", path, handler)
|
||||
}
|
||||
|
||||
// Handler returns a handler for HTTP method.
|
||||
func (m *Mux) Handler(method, path string, handler HandlerFunc) Handler {
|
||||
return Handler{
|
||||
Method: method,
|
||||
Path: path,
|
||||
Func: handler,
|
||||
}
|
||||
}
|
||||
|
||||
// Build builds a http.Handler.
|
||||
func (m *Mux) Build(handlers []Handler) (http.Handler, error) {
|
||||
recordMap := make(map[string][]Record)
|
||||
for _, h := range handlers {
|
||||
recordMap[h.Method] = append(recordMap[h.Method], NewRecord(h.Path, h.Func))
|
||||
}
|
||||
mux := newServeMux()
|
||||
for m, records := range recordMap {
|
||||
router := New()
|
||||
if err := router.Build(records); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mux.routers[m] = router
|
||||
}
|
||||
return mux, nil
|
||||
}
|
||||
|
||||
// Handler represents a handler of HTTP request.
|
||||
type Handler struct {
|
||||
// Method is an HTTP method.
|
||||
Method string
|
||||
|
||||
// Path is a routing path for handler.
|
||||
Path string
|
||||
|
||||
// Func is a function of handler of HTTP request.
|
||||
Func HandlerFunc
|
||||
}
|
||||
|
||||
// HandlerFunc is the type of a request handler function.
|
||||
type HandlerFunc func(w http.ResponseWriter, r *http.Request, params Params)
|
||||
|
||||
type serveMux struct {
|
||||
routers map[string]*Router
|
||||
}
|
||||
|
||||
func newServeMux() *serveMux {
|
||||
return &serveMux{
|
||||
routers: make(map[string]*Router),
|
||||
}
|
||||
}
|
||||
|
||||
// ServeHTTP implements http.Handler interface.
|
||||
func (mux *serveMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
handler, params := mux.handler(r.Method, r.URL.Path)
|
||||
handler(w, r, params)
|
||||
}
|
||||
|
||||
func (mux *serveMux) handler(method, path string) (HandlerFunc, []Param) {
|
||||
if router, found := mux.routers[method]; found {
|
||||
if handler, params, found := router.Lookup(path); found {
|
||||
return handler.(HandlerFunc), params
|
||||
}
|
||||
}
|
||||
return NotFound, nil
|
||||
}
|
||||
|
||||
// NotFound replies to the request with an HTTP 404 not found error.
|
||||
// NotFound is called when the HTTP method is unknown or no handler is found.
|
||||
// If you want to use your own NotFound handler, overwrite this variable.
|
||||
var NotFound = func(w http.ResponseWriter, r *http.Request, _ Params) {
|
||||
http.NotFound(w, r)
|
||||
}
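Since NotFound is a package-level variable, it can be swapped for a custom responder. A minimal, hedged sketch (routes and the JSON body are placeholders):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/runtime/middleware/denco"
)

func main() {
	// Replace the default responder before building and serving a mux.
	denco.NotFound = func(w http.ResponseWriter, r *http.Request, _ denco.Params) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(http.StatusNotFound)
		fmt.Fprint(w, `{"error":"not found"}`)
	}

	mux := denco.NewMux()
	handler, err := mux.Build([]denco.Handler{
		mux.GET("/", func(w http.ResponseWriter, r *http.Request, _ denco.Params) {
			fmt.Fprintln(w, "ok")
		}),
	})
	if err != nil {
		panic(err)
	}
	_ = http.ListenAndServe(":8080", handler)
}
```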
|
|
@ -0,0 +1,12 @@
|
|||
package denco
|
||||
|
||||
// NextSeparator returns the index of the next separator in path.
|
||||
func NextSeparator(path string, start int) int {
|
||||
for start < len(path) {
|
||||
if c := path[start]; c == '/' || c == TerminationCharacter {
|
||||
break
|
||||
}
|
||||
start++
|
||||
}
|
||||
return start
|
||||
}
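For example (a small illustration of the function above, import path assumed as vendored here):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/runtime/middleware/denco"
)

func main() {
	// Starting inside "alice", the next separator is the '/' at index 11.
	fmt.Println(denco.NextSeparator("/user/alice/7", 6)) // 11
}
```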
|
@@ -0,0 +1,62 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*Package middleware provides the library with helper functions for serving swagger APIs.

Pseudo middleware handler

    import (
        "net/http"

        "github.com/go-openapi/errors"
    )

    func newCompleteMiddleware(ctx *Context) http.Handler {
        return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
            // use context to lookup routes
            if matched, ok := ctx.RouteInfo(r); ok {

                if matched.NeedsAuth() {
                    if _, err := ctx.Authorize(r, matched); err != nil {
                        ctx.Respond(rw, r, matched.Produces, matched, err)
                        return
                    }
                }

                bound, validation := ctx.BindAndValidate(r, matched)
                if validation != nil {
                    ctx.Respond(rw, r, matched.Produces, matched, validation)
                    return
                }

                result, err := matched.Handler.Handle(bound)
                if err != nil {
                    ctx.Respond(rw, r, matched.Produces, matched, err)
                    return
                }

                ctx.Respond(rw, r, matched.Produces, matched, result)
                return
            }

            // Not found, check if it exists in the other methods first
            if others := ctx.AllowedMethods(r); len(others) > 0 {
                ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
                return
            }
            ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path))
        })
    }
*/
package middleware
@@ -0,0 +1,9 @@
// +build go1.8

package middleware

import "net/url"

func pathUnescape(path string) (string, error) {
    return url.PathUnescape(path)
}
|
@@ -0,0 +1,329 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file or at
|
||||
// https://developers.google.com/open-source/licenses/bsd.
|
||||
|
||||
// this file was taken from the github.com/golang/gddo repository
|
||||
|
||||
// Package header provides functions for parsing HTTP headers.
|
||||
package header
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Octet types from RFC 2616.
|
||||
var octetTypes [256]octetType
|
||||
|
||||
type octetType byte
|
||||
|
||||
const (
|
||||
isToken octetType = 1 << iota
|
||||
isSpace
|
||||
)
|
||||
|
||||
func init() {
|
||||
// OCTET = <any 8-bit sequence of data>
|
||||
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
||||
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
||||
// CR = <US-ASCII CR, carriage return (13)>
|
||||
// LF = <US-ASCII LF, linefeed (10)>
|
||||
// SP = <US-ASCII SP, space (32)>
|
||||
// HT = <US-ASCII HT, horizontal-tab (9)>
|
||||
// <"> = <US-ASCII double-quote mark (34)>
|
||||
// CRLF = CR LF
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// TEXT = <any OCTET except CTLs, but including LWS>
|
||||
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
||||
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
||||
// token = 1*<any CHAR except CTLs or separators>
|
||||
// qdtext = <any TEXT except <">>
|
||||
|
||||
for c := 0; c < 256; c++ {
|
||||
var t octetType
|
||||
isCtl := c <= 31 || c == 127
|
||||
isChar := 0 <= c && c <= 127
|
||||
isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
|
||||
if strings.ContainsRune(" \t\r\n", rune(c)) {
|
||||
t |= isSpace
|
||||
}
|
||||
if isChar && !isCtl && !isSeparator {
|
||||
t |= isToken
|
||||
}
|
||||
octetTypes[c] = t
|
||||
}
|
||||
}
|
||||
|
||||
// Copy returns a shallow copy of the header.
|
||||
func Copy(header http.Header) http.Header {
|
||||
h := make(http.Header)
|
||||
for k, vs := range header {
|
||||
h[k] = vs
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC}
|
||||
|
||||
// ParseTime parses the header as time. The zero value is returned if the
|
||||
// header is not present or there is an error parsing the
|
||||
// header.
|
||||
func ParseTime(header http.Header, key string) time.Time {
|
||||
if s := header.Get(key); s != "" {
|
||||
for _, layout := range timeLayouts {
|
||||
if t, err := time.Parse(layout, s); err == nil {
|
||||
return t.UTC()
|
||||
}
|
||||
}
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
// ParseList parses a comma separated list of values. Commas are ignored in
|
||||
// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is
|
||||
// trimmed.
|
||||
func ParseList(header http.Header, key string) []string {
|
||||
var result []string
|
||||
for _, s := range header[http.CanonicalHeaderKey(key)] {
|
||||
begin := 0
|
||||
end := 0
|
||||
escape := false
|
||||
quote := false
|
||||
for i := 0; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
end = i + 1
|
||||
case quote:
|
||||
switch b {
|
||||
case '\\':
|
||||
escape = true
|
||||
case '"':
|
||||
quote = false
|
||||
}
|
||||
end = i + 1
|
||||
case b == '"':
|
||||
quote = true
|
||||
end = i + 1
|
||||
case octetTypes[b]&isSpace != 0:
|
||||
if begin == end {
|
||||
begin = i + 1
|
||||
end = begin
|
||||
}
|
||||
case b == ',':
|
||||
if begin < end {
|
||||
result = append(result, s[begin:end])
|
||||
}
|
||||
begin = i + 1
|
||||
end = begin
|
||||
default:
|
||||
end = i + 1
|
||||
}
|
||||
}
|
||||
if begin < end {
|
||||
result = append(result, s[begin:end])
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ParseValueAndParams parses a comma separated list of values with optional
|
||||
// semicolon separated name-value pairs. Content-Type and Content-Disposition
|
||||
// headers are in this format.
|
||||
func ParseValueAndParams(header http.Header, key string) (string, map[string]string) {
|
||||
return parseValueAndParams(header.Get(key))
|
||||
}
|
||||
|
||||
func parseValueAndParams(s string) (value string, params map[string]string) {
|
||||
params = make(map[string]string)
|
||||
value, s = expectTokenSlash(s)
|
||||
if value == "" {
|
||||
return
|
||||
}
|
||||
value = strings.ToLower(value)
|
||||
s = skipSpace(s)
|
||||
for strings.HasPrefix(s, ";") {
|
||||
var pkey string
|
||||
pkey, s = expectToken(skipSpace(s[1:]))
|
||||
if pkey == "" {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(s, "=") {
|
||||
return
|
||||
}
|
||||
var pvalue string
|
||||
pvalue, s = expectTokenOrQuoted(s[1:])
|
||||
if pvalue == "" {
|
||||
return
|
||||
}
|
||||
pkey = strings.ToLower(pkey)
|
||||
params[pkey] = pvalue
|
||||
s = skipSpace(s)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AcceptSpec ...
|
||||
type AcceptSpec struct {
|
||||
Value string
|
||||
Q float64
|
||||
}
|
||||
|
||||
// ParseAccept2 ...
|
||||
func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) {
|
||||
for _, en := range ParseList(header, key) {
|
||||
v, p := parseValueAndParams(en)
|
||||
var spec AcceptSpec
|
||||
spec.Value = v
|
||||
spec.Q = 1.0
|
||||
if p != nil {
|
||||
if q, ok := p["q"]; ok {
|
||||
spec.Q, _ = expectQuality(q)
|
||||
}
|
||||
}
|
||||
if spec.Q < 0.0 {
|
||||
continue
|
||||
}
|
||||
specs = append(specs, spec)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ParseAccept parses Accept* headers.
|
||||
func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
|
||||
loop:
|
||||
for _, s := range header[key] {
|
||||
for {
|
||||
var spec AcceptSpec
|
||||
spec.Value, s = expectTokenSlash(s)
|
||||
if spec.Value == "" {
|
||||
continue loop
|
||||
}
|
||||
spec.Q = 1.0
|
||||
s = skipSpace(s)
|
||||
if strings.HasPrefix(s, ";") {
|
||||
s = skipSpace(s[1:])
|
||||
for !strings.HasPrefix(s, "q=") && s != "" && !strings.HasPrefix(s, ",") {
|
||||
s = skipSpace(s[1:])
|
||||
}
|
||||
if strings.HasPrefix(s, "q=") {
|
||||
spec.Q, s = expectQuality(s[2:])
|
||||
if spec.Q < 0.0 {
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
}
|
||||
specs = append(specs, spec)
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ",") {
|
||||
continue loop
|
||||
}
|
||||
s = skipSpace(s[1:])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isSpace == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
func expectToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isToken == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
func expectTokenSlash(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
b := s[i]
|
||||
if (octetTypes[b]&isToken == 0) && b != '/' {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
func expectQuality(s string) (q float64, rest string) {
|
||||
switch {
|
||||
case len(s) == 0:
|
||||
return -1, ""
|
||||
case s[0] == '0':
|
||||
// q is already 0
|
||||
s = s[1:]
|
||||
case s[0] == '1':
|
||||
s = s[1:]
|
||||
q = 1
|
||||
case s[0] == '.':
|
||||
// q is already 0
|
||||
default:
|
||||
return -1, ""
|
||||
}
|
||||
if !strings.HasPrefix(s, ".") {
|
||||
return q, s
|
||||
}
|
||||
s = s[1:]
|
||||
i := 0
|
||||
n := 0
|
||||
d := 1
|
||||
for ; i < len(s); i++ {
|
||||
b := s[i]
|
||||
if b < '0' || b > '9' {
|
||||
break
|
||||
}
|
||||
n = n*10 + int(b) - '0'
|
||||
d *= 10
|
||||
}
|
||||
return q + float64(n)/float64(d), s[i:]
|
||||
}
|
||||
|
||||
func expectTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return expectToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
|
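The header package above ends here. As a rough usage sketch (assuming the vendored import path github.com/go-openapi/runtime/middleware/header, which is how the negotiation code below imports it), ParseList and ParseAccept can be driven straight from an http.Header:

package main

import (
    "fmt"
    "net/http"

    "github.com/go-openapi/runtime/middleware/header"
)

func main() {
    h := http.Header{}
    h.Set("Accept", "application/json;q=0.8, text/html, */*;q=0.1")

    // ParseList splits the comma separated header value, trimming whitespace.
    fmt.Println(header.ParseList(h, "Accept"))

    // ParseAccept additionally extracts the quality value of each entry.
    for _, spec := range header.ParseAccept(h, "Accept") {
        fmt.Printf("%s q=%.1f\n", spec.Value, spec.Q)
    }
}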
@@ -0,0 +1,98 @@
|
|||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file or at
|
||||
// https://developers.google.com/open-source/licenses/bsd.
|
||||
|
||||
// this file was taken from the github.com/golang/gddo repository
|
||||
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/runtime/middleware/header"
|
||||
)
|
||||
|
||||
// NegotiateContentEncoding returns the best offered content encoding for the
|
||||
// request's Accept-Encoding header. If two offers match with equal weight and
|
||||
// then the offer earlier in the list is preferred. If no offers are
|
||||
// acceptable, then "" is returned.
|
||||
func NegotiateContentEncoding(r *http.Request, offers []string) string {
|
||||
bestOffer := "identity"
|
||||
bestQ := -1.0
|
||||
specs := header.ParseAccept(r.Header, "Accept-Encoding")
|
||||
for _, offer := range offers {
|
||||
for _, spec := range specs {
|
||||
if spec.Q > bestQ &&
|
||||
(spec.Value == "*" || spec.Value == offer) {
|
||||
bestQ = spec.Q
|
||||
bestOffer = offer
|
||||
}
|
||||
}
|
||||
}
|
||||
if bestQ == 0 {
|
||||
bestOffer = ""
|
||||
}
|
||||
return bestOffer
|
||||
}
|
||||
|
||||
// NegotiateContentType returns the best offered content type for the request's
|
||||
// Accept header. If two offers match with equal weight, then the more specific
|
||||
// offer is preferred. For example, text/* trumps */*. If two offers match
|
||||
// with equal weight and specificity, then the offer earlier in the list is
|
||||
// preferred. If no offers match, then defaultOffer is returned.
|
||||
func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string {
|
||||
bestOffer := defaultOffer
|
||||
bestQ := -1.0
|
||||
bestWild := 3
|
||||
specs := header.ParseAccept(r.Header, "Accept")
|
||||
for _, rawOffer := range offers {
|
||||
offer := normalizeOffer(rawOffer)
|
||||
// No Accept header: just return the first offer.
|
||||
if len(specs) == 0 {
|
||||
return rawOffer
|
||||
}
|
||||
for _, spec := range specs {
|
||||
switch {
|
||||
case spec.Q == 0.0:
|
||||
// ignore
|
||||
case spec.Q < bestQ:
|
||||
// better match found
|
||||
case spec.Value == "*/*":
|
||||
if spec.Q > bestQ || bestWild > 2 {
|
||||
bestQ = spec.Q
|
||||
bestWild = 2
|
||||
bestOffer = rawOffer
|
||||
}
|
||||
case strings.HasSuffix(spec.Value, "/*"):
|
||||
if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) &&
|
||||
(spec.Q > bestQ || bestWild > 1) {
|
||||
bestQ = spec.Q
|
||||
bestWild = 1
|
||||
bestOffer = rawOffer
|
||||
}
|
||||
default:
|
||||
if spec.Value == offer &&
|
||||
(spec.Q > bestQ || bestWild > 0) {
|
||||
bestQ = spec.Q
|
||||
bestWild = 0
|
||||
bestOffer = rawOffer
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return bestOffer
|
||||
}
|
||||
|
||||
func normalizeOffers(orig []string) (norm []string) {
|
||||
for _, o := range orig {
|
||||
norm = append(norm, normalizeOffer(o))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func normalizeOffer(orig string) string {
|
||||
return strings.SplitN(orig, ";", 2)[0]
|
||||
}
|
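NegotiateContentType and NegotiateContentEncoding above pick the best match from the request's Accept and Accept-Encoding headers. A hedged sketch of a handler using the former; the offer list and port are illustrative:

package main

import (
    "net/http"

    "github.com/go-openapi/runtime/middleware"
)

func serveNegotiated(rw http.ResponseWriter, r *http.Request) {
    // Pick the representation the client prefers, falling back to JSON.
    ct := middleware.NegotiateContentType(r, []string{"application/json", "text/html"}, "application/json")
    rw.Header().Set("Content-Type", ct)
    rw.WriteHeader(http.StatusOK)
    _, _ = rw.Write([]byte("negotiated " + ct))
}

func main() {
    _ = http.ListenAndServe(":8080", http.HandlerFunc(serveNegotiated))
}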
vendor/github.com/go-openapi/runtime/middleware/not_implemented.go (67 lines, generated, vendored, new file)
@@ -0,0 +1,67 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package middleware

import (
    "net/http"

    "github.com/go-openapi/runtime"
)

type errorResp struct {
    code     int
    response interface{}
    headers  http.Header
}

func (e *errorResp) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
    for k, v := range e.headers {
        for _, val := range v {
            rw.Header().Add(k, val)
        }
    }
    if e.code > 0 {
        rw.WriteHeader(e.code)
    } else {
        rw.WriteHeader(http.StatusInternalServerError)
    }
    if err := producer.Produce(rw, e.response); err != nil {
        Logger.Printf("failed to write error response: %v", err)
    }
}

// NotImplemented the error response when the response is not implemented
func NotImplemented(message string) Responder {
    return Error(http.StatusNotImplemented, message)
}

// Error creates a generic responder for returning errors, the data will be serialized
// with the matching producer for the request
func Error(code int, data interface{}, headers ...http.Header) Responder {
    var hdr http.Header
    for _, h := range headers {
        for k, v := range h {
            if hdr == nil {
                hdr = make(http.Header)
            }
            hdr[k] = v
        }
    }
    return &errorResp{
        code:     code,
        response: data,
        headers:  hdr,
    }
}
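Error and NotImplemented return a Responder, so an operation handler can hand back structured errors without touching the ResponseWriter itself. A hedged sketch; the package name, function names, and payload are illustrative:

package widgetsapi

import (
    "net/http"

    "github.com/go-openapi/runtime/middleware"
)

// updateWidget answers 501 until the operation is actually implemented.
func updateWidget() middleware.Responder {
    return middleware.NotImplemented("operation updateWidget has not yet been implemented")
}

// forbiddenWidget returns a generic error with a custom status code and payload.
func forbiddenWidget() middleware.Responder {
    return middleware.Error(http.StatusForbidden, map[string]string{"reason": "read-only widget"})
}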
@@ -0,0 +1,30 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package middleware

import "net/http"

// NewOperationExecutor creates a context aware middleware that handles the operations after routing
func NewOperationExecutor(ctx *Context) http.Handler {
    return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
        // use context to lookup routes
        route, rCtx, _ := ctx.RouteInfo(r)
        if rCtx != nil {
            r = rCtx
        }

        route.Handler.ServeHTTP(rw, r)
    })
}
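NewOperationExecutor is the terminating middleware that runs whatever handler the router matched. A hedged sketch of how it is typically chained behind NewRouter (defined later in this diff); ctx is a *middleware.Context built elsewhere, its constructor is not part of this excerpt:

package apiserver

import (
    "net/http"

    "github.com/go-openapi/runtime/middleware"
)

// buildHandler chains the routing middleware in front of the operation executor.
func buildHandler(ctx *middleware.Context) http.Handler {
    executor := middleware.NewOperationExecutor(ctx) // runs the matched operation
    return middleware.NewRouter(ctx, executor)       // resolves the route, then hands off
}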
@@ -0,0 +1,481 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/go-openapi/validate"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
)
|
||||
|
||||
const defaultMaxMemory = 32 << 20
|
||||
|
||||
var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
|
||||
|
||||
func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder {
|
||||
binder := new(untypedParamBinder)
|
||||
binder.Name = param.Name
|
||||
binder.parameter = ¶m
|
||||
binder.formats = formats
|
||||
if param.In != "body" {
|
||||
binder.validator = validate.NewParamValidator(¶m, formats)
|
||||
} else {
|
||||
binder.validator = validate.NewSchemaValidator(param.Schema, spec, param.Name, formats)
|
||||
}
|
||||
|
||||
return binder
|
||||
}
|
||||
|
||||
type untypedParamBinder struct {
|
||||
parameter *spec.Parameter
|
||||
formats strfmt.Registry
|
||||
Name string
|
||||
validator validate.EntityValidator
|
||||
}
|
||||
|
||||
func (p *untypedParamBinder) Type() reflect.Type {
|
||||
return p.typeForSchema(p.parameter.Type, p.parameter.Format, p.parameter.Items)
|
||||
}
|
||||
|
||||
func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items) reflect.Type {
|
||||
switch tpe {
|
||||
case "boolean":
|
||||
return reflect.TypeOf(true)
|
||||
|
||||
case "string":
|
||||
if tt, ok := p.formats.GetType(format); ok {
|
||||
return tt
|
||||
}
|
||||
return reflect.TypeOf("")
|
||||
|
||||
case "integer":
|
||||
switch format {
|
||||
case "int8":
|
||||
return reflect.TypeOf(int8(0))
|
||||
case "int16":
|
||||
return reflect.TypeOf(int16(0))
|
||||
case "int32":
|
||||
return reflect.TypeOf(int32(0))
|
||||
case "int64":
|
||||
return reflect.TypeOf(int64(0))
|
||||
default:
|
||||
return reflect.TypeOf(int64(0))
|
||||
}
|
||||
|
||||
case "number":
|
||||
switch format {
|
||||
case "float":
|
||||
return reflect.TypeOf(float32(0))
|
||||
case "double":
|
||||
return reflect.TypeOf(float64(0))
|
||||
}
|
||||
|
||||
case "array":
|
||||
if items == nil {
|
||||
return nil
|
||||
}
|
||||
itemsType := p.typeForSchema(items.Type, items.Format, items.Items)
|
||||
if itemsType == nil {
|
||||
return nil
|
||||
}
|
||||
return reflect.MakeSlice(reflect.SliceOf(itemsType), 0, 0).Type()
|
||||
|
||||
case "file":
|
||||
return reflect.TypeOf(&runtime.File{}).Elem()
|
||||
|
||||
case "object":
|
||||
return reflect.TypeOf(map[string]interface{}{})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *untypedParamBinder) allowsMulti() bool {
|
||||
return p.parameter.In == "query" || p.parameter.In == "formData"
|
||||
}
|
||||
|
||||
func (p *untypedParamBinder) readValue(values runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) {
|
||||
name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type
|
||||
if tpe == "array" {
|
||||
if cf == "multi" {
|
||||
if !p.allowsMulti() {
|
||||
return nil, false, false, errors.InvalidCollectionFormat(name, in, cf)
|
||||
}
|
||||
vv, hasKey, _ := values.GetOK(name)
|
||||
return vv, false, hasKey, nil
|
||||
}
|
||||
|
||||
v, hk, hv := values.GetOK(name)
|
||||
if !hv {
|
||||
return nil, false, hk, nil
|
||||
}
|
||||
d, c, e := p.readFormattedSliceFieldValue(v[len(v)-1], target)
|
||||
return d, c, hk, e
|
||||
}
|
||||
|
||||
vv, hk, _ := values.GetOK(name)
|
||||
return vv, false, hk, nil
|
||||
}
|
||||
|
||||
func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, target reflect.Value) error {
|
||||
// fmt.Println("binding", p.name, "as", p.Type())
|
||||
switch p.parameter.In {
|
||||
case "query":
|
||||
data, custom, hasKey, err := p.readValue(runtime.Values(request.URL.Query()), target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if custom {
|
||||
return nil
|
||||
}
|
||||
|
||||
return p.bindValue(data, hasKey, target)
|
||||
|
||||
case "header":
|
||||
data, custom, hasKey, err := p.readValue(runtime.Values(request.Header), target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if custom {
|
||||
return nil
|
||||
}
|
||||
return p.bindValue(data, hasKey, target)
|
||||
|
||||
case "path":
|
||||
data, custom, hasKey, err := p.readValue(routeParams, target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if custom {
|
||||
return nil
|
||||
}
|
||||
return p.bindValue(data, hasKey, target)
|
||||
|
||||
case "formData":
|
||||
var err error
|
||||
var mt string
|
||||
|
||||
mt, _, e := runtime.ContentType(request.Header)
|
||||
if e != nil {
|
||||
// because of the interface conversion go thinks the error is not nil
|
||||
// so we first check for nil and then set the err var if it's not nil
|
||||
err = e
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return errors.InvalidContentType("", []string{"multipart/form-data", "application/x-www-form-urlencoded"})
|
||||
}
|
||||
|
||||
if mt != "multipart/form-data" && mt != "application/x-www-form-urlencoded" {
|
||||
return errors.InvalidContentType(mt, []string{"multipart/form-data", "application/x-www-form-urlencoded"})
|
||||
}
|
||||
|
||||
if mt == "multipart/form-data" {
|
||||
if err = request.ParseMultipartForm(defaultMaxMemory); err != nil {
|
||||
return errors.NewParseError(p.Name, p.parameter.In, "", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = request.ParseForm(); err != nil {
|
||||
return errors.NewParseError(p.Name, p.parameter.In, "", err)
|
||||
}
|
||||
|
||||
if p.parameter.Type == "file" {
|
||||
file, header, ffErr := request.FormFile(p.parameter.Name)
|
||||
if ffErr != nil {
|
||||
return errors.NewParseError(p.Name, p.parameter.In, "", ffErr)
|
||||
}
|
||||
target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header}))
|
||||
return nil
|
||||
}
|
||||
|
||||
if request.MultipartForm != nil {
|
||||
data, custom, hasKey, rvErr := p.readValue(runtime.Values(request.MultipartForm.Value), target)
|
||||
if rvErr != nil {
|
||||
return rvErr
|
||||
}
|
||||
if custom {
|
||||
return nil
|
||||
}
|
||||
return p.bindValue(data, hasKey, target)
|
||||
}
|
||||
data, custom, hasKey, err := p.readValue(runtime.Values(request.PostForm), target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if custom {
|
||||
return nil
|
||||
}
|
||||
return p.bindValue(data, hasKey, target)
|
||||
|
||||
case "body":
|
||||
newValue := reflect.New(target.Type())
|
||||
if !runtime.HasBody(request) {
|
||||
if p.parameter.Default != nil {
|
||||
target.Set(reflect.ValueOf(p.parameter.Default))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
if err := consumer.Consume(request.Body, newValue.Interface()); err != nil {
|
||||
if err == io.EOF && p.parameter.Default != nil {
|
||||
target.Set(reflect.ValueOf(p.parameter.Default))
|
||||
return nil
|
||||
}
|
||||
tpe := p.parameter.Type
|
||||
if p.parameter.Format != "" {
|
||||
tpe = p.parameter.Format
|
||||
}
|
||||
return errors.InvalidType(p.Name, p.parameter.In, tpe, nil)
|
||||
}
|
||||
target.Set(reflect.Indirect(newValue))
|
||||
return nil
|
||||
default:
|
||||
return errors.New(500, fmt.Sprintf("invalid parameter location %q", p.parameter.In))
|
||||
}
|
||||
}
|
||||
|
||||
func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error {
|
||||
if p.parameter.Type == "array" {
|
||||
return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey)
|
||||
}
|
||||
var d string
|
||||
if len(data) > 0 {
|
||||
d = data[len(data)-1]
|
||||
}
|
||||
return p.setFieldValue(target, p.parameter.Default, d, hasKey)
|
||||
}
|
||||
|
||||
func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error {
|
||||
tpe := p.parameter.Type
|
||||
if p.parameter.Format != "" {
|
||||
tpe = p.parameter.Format
|
||||
}
|
||||
|
||||
if (!hasKey || (!p.parameter.AllowEmptyValue && data == "")) && p.parameter.Required && p.parameter.Default == nil {
|
||||
return errors.Required(p.Name, p.parameter.In, data)
|
||||
}
|
||||
|
||||
ok, err := p.tryUnmarshaler(target, defaultValue, data)
|
||||
if err != nil {
|
||||
return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
|
||||
}
|
||||
if ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
defVal := reflect.Zero(target.Type())
|
||||
if defaultValue != nil {
|
||||
defVal = reflect.ValueOf(defaultValue)
|
||||
}
|
||||
|
||||
if tpe == "byte" {
|
||||
if data == "" {
|
||||
if target.CanSet() {
|
||||
target.SetBytes(defVal.Bytes())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
b, err := base64.StdEncoding.DecodeString(data)
|
||||
if err != nil {
|
||||
b, err = base64.URLEncoding.DecodeString(data)
|
||||
if err != nil {
|
||||
return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
|
||||
}
|
||||
}
|
||||
if target.CanSet() {
|
||||
target.SetBytes(b)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
switch target.Kind() {
|
||||
case reflect.Bool:
|
||||
if data == "" {
|
||||
if target.CanSet() {
|
||||
target.SetBool(defVal.Bool())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
b, err := swag.ConvertBool(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if target.CanSet() {
|
||||
target.SetBool(b)
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
if data == "" {
|
||||
if target.CanSet() {
|
||||
rd := defVal.Convert(reflect.TypeOf(int64(0)))
|
||||
target.SetInt(rd.Int())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
i, err := strconv.ParseInt(data, 10, 64)
|
||||
if err != nil {
|
||||
return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
|
||||
}
|
||||
if target.OverflowInt(i) {
|
||||
return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
|
||||
}
|
||||
if target.CanSet() {
|
||||
target.SetInt(i)
|
||||
}
|
||||
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
if data == "" {
|
||||
if target.CanSet() {
|
||||
rd := defVal.Convert(reflect.TypeOf(uint64(0)))
|
||||
target.SetUint(rd.Uint())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
u, err := strconv.ParseUint(data, 10, 64)
|
||||
if err != nil {
|
||||
return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
|
||||
}
|
||||
if target.OverflowUint(u) {
|
||||
return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
|
||||
}
|
||||
if target.CanSet() {
|
||||
target.SetUint(u)
|
||||
}
|
||||
|
||||
case reflect.Float32, reflect.Float64:
|
||||
if data == "" {
|
||||
if target.CanSet() {
|
||||
rd := defVal.Convert(reflect.TypeOf(float64(0)))
|
||||
target.SetFloat(rd.Float())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
f, err := strconv.ParseFloat(data, 64)
|
||||
if err != nil {
|
||||
return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
|
||||
}
|
||||
if target.OverflowFloat(f) {
|
||||
return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
|
||||
}
|
||||
if target.CanSet() {
|
||||
target.SetFloat(f)
|
||||
}
|
||||
|
||||
case reflect.String:
|
||||
value := data
|
||||
if value == "" {
|
||||
value = defVal.String()
|
||||
}
|
||||
// validate string
|
||||
if target.CanSet() {
|
||||
target.SetString(value)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
if data == "" && defVal.Kind() == reflect.Ptr {
|
||||
if target.CanSet() {
|
||||
target.Set(defVal)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
newVal := reflect.New(target.Type().Elem())
|
||||
if err := p.setFieldValue(reflect.Indirect(newVal), defVal, data, hasKey); err != nil {
|
||||
return err
|
||||
}
|
||||
if target.CanSet() {
|
||||
target.Set(newVal)
|
||||
}
|
||||
|
||||
default:
|
||||
return errors.InvalidType(p.Name, p.parameter.In, tpe, data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *untypedParamBinder) tryUnmarshaler(target reflect.Value, defaultValue interface{}, data string) (bool, error) {
|
||||
if !target.CanSet() {
|
||||
return false, nil
|
||||
}
|
||||
// When a type implements encoding.TextUnmarshaler we'll use that instead of reflecting some more
|
||||
if reflect.PtrTo(target.Type()).Implements(textUnmarshalType) {
|
||||
if defaultValue != nil && len(data) == 0 {
|
||||
target.Set(reflect.ValueOf(defaultValue))
|
||||
return true, nil
|
||||
}
|
||||
value := reflect.New(target.Type())
|
||||
if err := value.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(data)); err != nil {
|
||||
return true, err
|
||||
}
|
||||
target.Set(reflect.Indirect(value))
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (p *untypedParamBinder) readFormattedSliceFieldValue(data string, target reflect.Value) ([]string, bool, error) {
|
||||
ok, err := p.tryUnmarshaler(target, p.parameter.Default, data)
|
||||
if err != nil {
|
||||
return nil, true, err
|
||||
}
|
||||
if ok {
|
||||
return nil, true, nil
|
||||
}
|
||||
|
||||
return swag.SplitByFormat(data, p.parameter.CollectionFormat), false, nil
|
||||
}
|
||||
|
||||
func (p *untypedParamBinder) setSliceFieldValue(target reflect.Value, defaultValue interface{}, data []string, hasKey bool) error {
|
||||
sz := len(data)
|
||||
if (!hasKey || (!p.parameter.AllowEmptyValue && (sz == 0 || (sz == 1 && data[0] == "")))) && p.parameter.Required && defaultValue == nil {
|
||||
return errors.Required(p.Name, p.parameter.In, data)
|
||||
}
|
||||
|
||||
defVal := reflect.Zero(target.Type())
|
||||
if defaultValue != nil {
|
||||
defVal = reflect.ValueOf(defaultValue)
|
||||
}
|
||||
|
||||
if !target.CanSet() {
|
||||
return nil
|
||||
}
|
||||
if sz == 0 {
|
||||
target.Set(defVal)
|
||||
return nil
|
||||
}
|
||||
|
||||
value := reflect.MakeSlice(reflect.SliceOf(target.Type().Elem()), sz, sz)
|
||||
|
||||
for i := 0; i < sz; i++ {
|
||||
if err := p.setFieldValue(value.Index(i), nil, data[i], hasKey); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
target.Set(value)
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,9 @@
// +build !go1.8

package middleware

import "net/url"

func pathUnescape(path string) (string, error) {
    return url.QueryUnescape(path)
}
@@ -0,0 +1,90 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"path"
|
||||
)
|
||||
|
||||
// RapiDocOpts configures the RapiDoc middlewares
|
||||
type RapiDocOpts struct {
|
||||
// BasePath for the UI path, defaults to: /
|
||||
BasePath string
|
||||
// Path combines with BasePath for the full UI path, defaults to: docs
|
||||
Path string
|
||||
// SpecURL the url to find the spec for
|
||||
SpecURL string
|
||||
// RapiDocURL for the js that generates the rapidoc site, defaults to: https://cdn.jsdelivr.net/npm/rapidoc/bundles/rapidoc.standalone.js
|
||||
RapiDocURL string
|
||||
// Title for the documentation site, default to: API documentation
|
||||
Title string
|
||||
}
|
||||
|
||||
// EnsureDefaults in case some options are missing
|
||||
func (r *RapiDocOpts) EnsureDefaults() {
|
||||
if r.BasePath == "" {
|
||||
r.BasePath = "/"
|
||||
}
|
||||
if r.Path == "" {
|
||||
r.Path = "docs"
|
||||
}
|
||||
if r.SpecURL == "" {
|
||||
r.SpecURL = "/swagger.json"
|
||||
}
|
||||
if r.RapiDocURL == "" {
|
||||
r.RapiDocURL = rapidocLatest
|
||||
}
|
||||
if r.Title == "" {
|
||||
r.Title = "API documentation"
|
||||
}
|
||||
}
|
||||
|
||||
// RapiDoc creates a middleware to serve a documentation site for a swagger spec.
|
||||
// This allows for altering the spec before starting the http listener.
|
||||
//
|
||||
func RapiDoc(opts RapiDocOpts, next http.Handler) http.Handler {
|
||||
opts.EnsureDefaults()
|
||||
|
||||
pth := path.Join(opts.BasePath, opts.Path)
|
||||
tmpl := template.Must(template.New("rapidoc").Parse(rapidocTemplate))
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
_ = tmpl.Execute(buf, opts)
|
||||
b := buf.Bytes()
|
||||
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == pth {
|
||||
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
|
||||
_, _ = rw.Write(b)
|
||||
return
|
||||
}
|
||||
|
||||
if next == nil {
|
||||
rw.Header().Set("Content-Type", "text/plain")
|
||||
rw.WriteHeader(http.StatusNotFound)
|
||||
_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(rw, r)
|
||||
})
|
||||
}
|
||||
|
||||
const (
|
||||
rapidocLatest = "https://unpkg.com/rapidoc/dist/rapidoc-min.js"
|
||||
rapidocTemplate = `<!doctype html>
|
||||
<html>
|
||||
<head>
|
||||
<title>{{ .Title }}</title>
|
||||
<meta charset="utf-8"> <!-- Important: rapi-doc uses utf8 charecters -->
|
||||
<script type="module" src="{{ .RapiDocURL }}"></script>
|
||||
</head>
|
||||
<body>
|
||||
<rapi-doc spec-url="{{ .SpecURL }}"></rapi-doc>
|
||||
</body>
|
||||
</html>
|
||||
`
|
||||
)
|
|
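RapiDoc (and the very similar Redoc middleware that follows) wraps an existing handler and serves an HTML documentation page at BasePath plus Path, falling back to the wrapped handler for every other request. A hedged usage sketch; the stand-in API handler and port are illustrative:

package main

import (
    "net/http"

    "github.com/go-openapi/runtime/middleware"
)

func main() {
    // api is a stand-in for the real swagger API handler.
    api := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
        rw.WriteHeader(http.StatusOK)
    })

    // Serve interactive docs at /docs, reading the spec from /swagger.json.
    docs := middleware.RapiDoc(middleware.RapiDocOpts{SpecURL: "/swagger.json"}, api)
    _ = http.ListenAndServe(":8080", docs)
}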
@@ -0,0 +1,103 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"path"
|
||||
)
|
||||
|
||||
// RedocOpts configures the Redoc middlewares
|
||||
type RedocOpts struct {
|
||||
// BasePath for the UI path, defaults to: /
|
||||
BasePath string
|
||||
// Path combines with BasePath for the full UI path, defaults to: docs
|
||||
Path string
|
||||
// SpecURL the url to find the spec for
|
||||
SpecURL string
|
||||
// RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js
|
||||
RedocURL string
|
||||
// Title for the documentation site, default to: API documentation
|
||||
Title string
|
||||
}
|
||||
|
||||
// EnsureDefaults in case some options are missing
|
||||
func (r *RedocOpts) EnsureDefaults() {
|
||||
if r.BasePath == "" {
|
||||
r.BasePath = "/"
|
||||
}
|
||||
if r.Path == "" {
|
||||
r.Path = "docs"
|
||||
}
|
||||
if r.SpecURL == "" {
|
||||
r.SpecURL = "/swagger.json"
|
||||
}
|
||||
if r.RedocURL == "" {
|
||||
r.RedocURL = redocLatest
|
||||
}
|
||||
if r.Title == "" {
|
||||
r.Title = "API documentation"
|
||||
}
|
||||
}
|
||||
|
||||
// Redoc creates a middleware to serve a documentation site for a swagger spec.
|
||||
// This allows for altering the spec before starting the http listener.
|
||||
//
|
||||
func Redoc(opts RedocOpts, next http.Handler) http.Handler {
|
||||
opts.EnsureDefaults()
|
||||
|
||||
pth := path.Join(opts.BasePath, opts.Path)
|
||||
tmpl := template.Must(template.New("redoc").Parse(redocTemplate))
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
_ = tmpl.Execute(buf, opts)
|
||||
b := buf.Bytes()
|
||||
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == pth {
|
||||
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
|
||||
_, _ = rw.Write(b)
|
||||
return
|
||||
}
|
||||
|
||||
if next == nil {
|
||||
rw.Header().Set("Content-Type", "text/plain")
|
||||
rw.WriteHeader(http.StatusNotFound)
|
||||
_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(rw, r)
|
||||
})
|
||||
}
|
||||
|
||||
const (
|
||||
redocLatest = "https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js"
|
||||
redocTemplate = `<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>{{ .Title }}</title>
|
||||
<!-- needed for adaptive design -->
|
||||
<meta charset="utf-8"/>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<link href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700" rel="stylesheet">
|
||||
|
||||
<!--
|
||||
ReDoc doesn't change outer page styles
|
||||
-->
|
||||
<style>
|
||||
body {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<redoc spec-url='{{ .SpecURL }}'></redoc>
|
||||
<script src="{{ .RedocURL }}"> </script>
|
||||
</body>
|
||||
</html>
|
||||
`
|
||||
)
|
|
@@ -0,0 +1,104 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/strfmt"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
)
|
||||
|
||||
// UntypedRequestBinder binds and validates the data from a http request
|
||||
type UntypedRequestBinder struct {
|
||||
Spec *spec.Swagger
|
||||
Parameters map[string]spec.Parameter
|
||||
Formats strfmt.Registry
|
||||
paramBinders map[string]*untypedParamBinder
|
||||
}
|
||||
|
||||
// NewUntypedRequestBinder creates a new binder for reading a request.
|
||||
func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *UntypedRequestBinder {
|
||||
binders := make(map[string]*untypedParamBinder)
|
||||
for fieldName, param := range parameters {
|
||||
binders[fieldName] = newUntypedParamBinder(param, spec, formats)
|
||||
}
|
||||
return &UntypedRequestBinder{
|
||||
Parameters: parameters,
|
||||
paramBinders: binders,
|
||||
Spec: spec,
|
||||
Formats: formats,
|
||||
}
|
||||
}
|
||||
|
||||
// Bind perform the databinding and validation
|
||||
func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data interface{}) error {
|
||||
val := reflect.Indirect(reflect.ValueOf(data))
|
||||
isMap := val.Kind() == reflect.Map
|
||||
var result []error
|
||||
debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath())
|
||||
for fieldName, param := range o.Parameters {
|
||||
binder := o.paramBinders[fieldName]
|
||||
debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath())
|
||||
var target reflect.Value
|
||||
if !isMap {
|
||||
binder.Name = fieldName
|
||||
target = val.FieldByName(fieldName)
|
||||
}
|
||||
|
||||
if isMap {
|
||||
tpe := binder.Type()
|
||||
if tpe == nil {
|
||||
if param.Schema.Type.Contains("array") {
|
||||
tpe = reflect.TypeOf([]interface{}{})
|
||||
} else {
|
||||
tpe = reflect.TypeOf(map[string]interface{}{})
|
||||
}
|
||||
}
|
||||
target = reflect.Indirect(reflect.New(tpe))
|
||||
}
|
||||
|
||||
if !target.IsValid() {
|
||||
result = append(result, errors.New(500, "parameter name %q is an unknown field", binder.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
if err := binder.Bind(request, routeParams, consumer, target); err != nil {
|
||||
result = append(result, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if binder.validator != nil {
|
||||
rr := binder.validator.Validate(target.Interface())
|
||||
if rr != nil && rr.HasErrors() {
|
||||
result = append(result, rr.AsError())
|
||||
}
|
||||
}
|
||||
|
||||
if isMap {
|
||||
val.SetMapIndex(reflect.ValueOf(param.Name), target)
|
||||
}
|
||||
}
|
||||
|
||||
if len(result) > 0 {
|
||||
return errors.CompositeValidationError(result...)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,488 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
fpath "path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/runtime/security"
|
||||
"github.com/go-openapi/swag"
|
||||
|
||||
"github.com/go-openapi/analysis"
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/loads"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/strfmt"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
"github.com/go-openapi/runtime/middleware/denco"
|
||||
)
|
||||
|
||||
// RouteParam is a object to capture route params in a framework agnostic way.
|
||||
// implementations of the muxer should use these route params to communicate with the
|
||||
// swagger framework
|
||||
type RouteParam struct {
|
||||
Name string
|
||||
Value string
|
||||
}
|
||||
|
||||
// RouteParams the collection of route params
|
||||
type RouteParams []RouteParam
|
||||
|
||||
// Get gets the value for the route param for the specified key
|
||||
func (r RouteParams) Get(name string) string {
|
||||
vv, _, _ := r.GetOK(name)
|
||||
if len(vv) > 0 {
|
||||
return vv[len(vv)-1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetOK gets the value but also returns booleans to indicate if a key or value
|
||||
// is present. This aids in validation and satisfies an interface in use there
|
||||
//
|
||||
// The returned values are: data, has key, has value
|
||||
func (r RouteParams) GetOK(name string) ([]string, bool, bool) {
|
||||
for _, p := range r {
|
||||
if p.Name == name {
|
||||
return []string{p.Value}, true, p.Value != ""
|
||||
}
|
||||
}
|
||||
return nil, false, false
|
||||
}
|
||||
|
||||
// NewRouter creates a new context aware router middleware
|
||||
func NewRouter(ctx *Context, next http.Handler) http.Handler {
|
||||
if ctx.router == nil {
|
||||
ctx.router = DefaultRouter(ctx.spec, ctx.api)
|
||||
}
|
||||
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
if _, rCtx, ok := ctx.RouteInfo(r); ok {
|
||||
next.ServeHTTP(rw, rCtx)
|
||||
return
|
||||
}
|
||||
|
||||
// Not found, check if it exists in the other methods first
|
||||
if others := ctx.AllowedMethods(r); len(others) > 0 {
|
||||
ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others))
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath()))
|
||||
})
|
||||
}
|
||||
|
||||
// RoutableAPI represents an interface for things that can serve
|
||||
// as a provider of implementations for the swagger router
|
||||
type RoutableAPI interface {
|
||||
HandlerFor(string, string) (http.Handler, bool)
|
||||
ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error)
|
||||
ConsumersFor([]string) map[string]runtime.Consumer
|
||||
ProducersFor([]string) map[string]runtime.Producer
|
||||
AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator
|
||||
Authorizer() runtime.Authorizer
|
||||
Formats() strfmt.Registry
|
||||
DefaultProduces() string
|
||||
DefaultConsumes() string
|
||||
}
|
||||
|
||||
// Router represents a swagger aware router
|
||||
type Router interface {
|
||||
Lookup(method, path string) (*MatchedRoute, bool)
|
||||
OtherMethods(method, path string) []string
|
||||
}
|
||||
|
||||
type defaultRouteBuilder struct {
|
||||
spec *loads.Document
|
||||
analyzer *analysis.Spec
|
||||
api RoutableAPI
|
||||
records map[string][]denco.Record
|
||||
}
|
||||
|
||||
type defaultRouter struct {
|
||||
spec *loads.Document
|
||||
routers map[string]*denco.Router
|
||||
}
|
||||
|
||||
func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder {
|
||||
return &defaultRouteBuilder{
|
||||
spec: spec,
|
||||
analyzer: analysis.New(spec.Spec()),
|
||||
api: api,
|
||||
records: make(map[string][]denco.Record),
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultRouter creates a default implemenation of the router
|
||||
func DefaultRouter(spec *loads.Document, api RoutableAPI) Router {
|
||||
builder := newDefaultRouteBuilder(spec, api)
|
||||
if spec != nil {
|
||||
for method, paths := range builder.analyzer.Operations() {
|
||||
for path, operation := range paths {
|
||||
fp := fpath.Join(spec.BasePath(), path)
|
||||
debugLog("adding route %s %s %q", method, fp, operation.ID)
|
||||
builder.AddRoute(method, fp, operation)
|
||||
}
|
||||
}
|
||||
}
|
||||
return builder.Build()
|
||||
}
|
||||
|
||||
// RouteAuthenticator is an authenticator that can compose several authenticators together.
|
||||
// It also knows when it contains an authenticator that allows for anonymous pass through.
|
||||
// Contains a group of 1 or more authenticators that have a logical AND relationship
|
||||
type RouteAuthenticator struct {
|
||||
Authenticator map[string]runtime.Authenticator
|
||||
Schemes []string
|
||||
Scopes map[string][]string
|
||||
allScopes []string
|
||||
commonScopes []string
|
||||
allowAnonymous bool
|
||||
}
|
||||
|
||||
func (ra *RouteAuthenticator) AllowsAnonymous() bool {
|
||||
return ra.allowAnonymous
|
||||
}
|
||||
|
||||
// AllScopes returns a list of unique scopes that is the combination
|
||||
// of all the scopes in the requirements
|
||||
func (ra *RouteAuthenticator) AllScopes() []string {
|
||||
return ra.allScopes
|
||||
}
|
||||
|
||||
// CommonScopes returns a list of unique scopes that are common in all the
|
||||
// scopes in the requirements
|
||||
func (ra *RouteAuthenticator) CommonScopes() []string {
|
||||
return ra.commonScopes
|
||||
}
|
||||
|
||||
// Authenticate Authenticator interface implementation
|
||||
func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) {
|
||||
if ra.allowAnonymous {
|
||||
route.Authenticator = ra
|
||||
return true, nil, nil
|
||||
}
|
||||
// iterate in proper order
|
||||
var lastResult interface{}
|
||||
for _, scheme := range ra.Schemes {
|
||||
if authenticator, ok := ra.Authenticator[scheme]; ok {
|
||||
applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{
|
||||
Request: req,
|
||||
RequiredScopes: ra.Scopes[scheme],
|
||||
})
|
||||
if !applies {
|
||||
return false, nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
route.Authenticator = ra
|
||||
return true, nil, err
|
||||
}
|
||||
lastResult = princ
|
||||
}
|
||||
}
|
||||
route.Authenticator = ra
|
||||
return true, lastResult, nil
|
||||
}
|
||||
|
||||
func stringSliceUnion(slices ...[]string) []string {
|
||||
unique := make(map[string]struct{})
|
||||
var result []string
|
||||
for _, slice := range slices {
|
||||
for _, entry := range slice {
|
||||
if _, ok := unique[entry]; ok {
|
||||
continue
|
||||
}
|
||||
unique[entry] = struct{}{}
|
||||
result = append(result, entry)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func stringSliceIntersection(slices ...[]string) []string {
|
||||
unique := make(map[string]int)
|
||||
var intersection []string
|
||||
|
||||
total := len(slices)
|
||||
var emptyCnt int
|
||||
for _, slice := range slices {
|
||||
if len(slice) == 0 {
|
||||
emptyCnt++
|
||||
continue
|
||||
}
|
||||
|
||||
for _, entry := range slice {
|
||||
unique[entry]++
|
||||
if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices
|
||||
intersection = append(intersection, entry)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return intersection
|
||||
}
|
||||
|
||||
// RouteAuthenticators represents a group of authenticators that represent a logical OR
|
||||
type RouteAuthenticators []RouteAuthenticator
|
||||
|
||||
// AllowsAnonymous returns true when there is an authenticator that means optional auth
|
||||
func (ras RouteAuthenticators) AllowsAnonymous() bool {
|
||||
for _, ra := range ras {
|
||||
if ra.AllowsAnonymous() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Authenticate method implemention so this collection can be used as authenticator
|
||||
func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) {
|
||||
var lastError error
|
||||
var allowsAnon bool
|
||||
var anonAuth RouteAuthenticator
|
||||
|
||||
for _, ra := range ras {
|
||||
if ra.AllowsAnonymous() {
|
||||
anonAuth = ra
|
||||
allowsAnon = true
|
||||
continue
|
||||
}
|
||||
applies, usr, err := ra.Authenticate(req, route)
|
||||
if !applies || err != nil || usr == nil {
|
||||
if err != nil {
|
||||
lastError = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
return applies, usr, nil
|
||||
}
|
||||
|
||||
if allowsAnon && lastError == nil {
|
||||
route.Authenticator = &anonAuth
|
||||
return true, nil, lastError
|
||||
}
|
||||
return lastError != nil, nil, lastError
|
||||
}
|
||||
|
||||
type routeEntry struct {
|
||||
PathPattern string
|
||||
BasePath string
|
||||
Operation *spec.Operation
|
||||
Consumes []string
|
||||
Consumers map[string]runtime.Consumer
|
||||
Produces []string
|
||||
Producers map[string]runtime.Producer
|
||||
Parameters map[string]spec.Parameter
|
||||
Handler http.Handler
|
||||
Formats strfmt.Registry
|
||||
Binder *UntypedRequestBinder
|
||||
Authenticators RouteAuthenticators
|
||||
Authorizer runtime.Authorizer
|
||||
}
|
||||
|
||||
// MatchedRoute represents the route that was matched in this request
|
||||
type MatchedRoute struct {
|
||||
routeEntry
|
||||
Params RouteParams
|
||||
Consumer runtime.Consumer
|
||||
Producer runtime.Producer
|
||||
Authenticator *RouteAuthenticator
|
||||
}
|
||||
|
||||
// HasAuth returns true when the route has a security requirement defined
|
||||
func (m *MatchedRoute) HasAuth() bool {
|
||||
return len(m.Authenticators) > 0
|
||||
}
|
||||
|
||||
// NeedsAuth returns true when the request still
|
||||
// needs to perform authentication
|
||||
func (m *MatchedRoute) NeedsAuth() bool {
|
||||
return m.HasAuth() && m.Authenticator == nil
|
||||
}
|
||||
|
||||
func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) {
|
||||
mth := strings.ToUpper(method)
|
||||
debugLog("looking up route for %s %s", method, path)
|
||||
if Debug {
|
||||
if len(d.routers) == 0 {
|
||||
debugLog("there are no known routers")
|
||||
}
|
||||
for meth := range d.routers {
|
||||
debugLog("got a router for %s", meth)
|
||||
}
|
||||
}
|
||||
if router, ok := d.routers[mth]; ok {
|
||||
if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil {
|
||||
if entry, ok := m.(*routeEntry); ok {
|
||||
debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters))
|
||||
var params RouteParams
|
||||
for _, p := range rp {
|
||||
v, err := pathUnescape(p.Value)
|
||||
if err != nil {
|
||||
debugLog("failed to escape %q: %v", p.Value, err)
|
||||
v = p.Value
|
||||
}
|
||||
// a workaround to handle fragment/composing parameters until they are supported in denco router
|
||||
// check if this parameter is a fragment within a path segment
|
||||
if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + 2; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' {
|
||||
// extract fragment parameters
|
||||
ep := strings.Split(entry.PathPattern[xpos:], "/")[0]
|
||||
pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil)
|
||||
for i, pname := range pnames {
|
||||
params = append(params, RouteParam{Name: pname, Value: pvalues[i]})
|
||||
}
|
||||
} else {
|
||||
// use the parameter directly
|
||||
params = append(params, RouteParam{Name: p.Name, Value: v})
|
||||
}
|
||||
}
|
||||
return &MatchedRoute{routeEntry: *entry, Params: params}, true
|
||||
}
|
||||
} else {
|
||||
debugLog("couldn't find a route by path for %s %s", method, path)
|
||||
}
|
||||
} else {
|
||||
debugLog("couldn't find a route by method for %s %s", method, path)
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (d *defaultRouter) OtherMethods(method, path string) []string {
|
||||
mn := strings.ToUpper(method)
|
||||
var methods []string
|
||||
for k, v := range d.routers {
|
||||
if k != mn {
|
||||
if _, _, ok := v.Lookup(fpath.Clean(path)); ok {
|
||||
methods = append(methods, k)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
return methods
|
||||
}
|
||||
|
||||
// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco
|
||||
var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`)
|
||||
|
||||
func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) {
|
||||
pleft := strings.Index(pattern, "{")
|
||||
names = append(names, name)
|
||||
if pleft < 0 {
|
||||
if strings.HasSuffix(value, pattern) {
|
||||
values = append(values, value[:len(value)-len(pattern)])
|
||||
} else {
|
||||
values = append(values, "")
|
||||
}
|
||||
} else {
|
||||
toskip := pattern[:pleft]
|
||||
pright := strings.Index(pattern, "}")
|
||||
vright := strings.Index(value, toskip)
|
||||
if vright >= 0 {
|
||||
values = append(values, value[:vright])
|
||||
} else {
|
||||
values = append(values, "")
|
||||
value = ""
|
||||
}
|
||||
return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values)
|
||||
}
|
||||
return names, values
|
||||
}
|
||||
|
||||
func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) {
|
||||
mn := strings.ToUpper(method)
|
||||
|
||||
bp := fpath.Clean(d.spec.BasePath())
|
||||
if len(bp) > 0 && bp[len(bp)-1] == '/' {
|
||||
bp = bp[:len(bp)-1]
|
||||
}
|
||||
|
||||
debugLog("operation: %#v", *operation)
|
||||
if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok {
|
||||
consumes := d.analyzer.ConsumesFor(operation)
|
||||
produces := d.analyzer.ProducesFor(operation)
|
||||
parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp))
|
||||
|
||||
// add API defaults if not part of the spec
|
||||
if defConsumes := d.api.DefaultConsumes(); defConsumes != "" && !swag.ContainsStringsCI(consumes, defConsumes) {
|
||||
consumes = append(consumes, defConsumes)
|
||||
}
|
||||
|
||||
if defProduces := d.api.DefaultProduces(); defProduces != "" && !swag.ContainsStringsCI(produces, defProduces) {
|
||||
produces = append(produces, defProduces)
|
||||
}
|
||||
|
||||
record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{
|
||||
BasePath: bp,
|
||||
PathPattern: path,
|
||||
Operation: operation,
|
||||
Handler: handler,
|
||||
Consumes: consumes,
|
||||
Produces: produces,
|
||||
Consumers: d.api.ConsumersFor(normalizeOffers(consumes)),
|
||||
Producers: d.api.ProducersFor(normalizeOffers(produces)),
|
||||
Parameters: parameters,
|
||||
Formats: d.api.Formats(),
|
||||
Binder: NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()),
|
||||
Authenticators: d.buildAuthenticators(operation),
|
||||
Authorizer: d.api.Authorizer(),
|
||||
})
|
||||
d.records[mn] = append(d.records[mn], record)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators {
|
||||
requirements := d.analyzer.SecurityRequirementsFor(operation)
|
||||
var auths []RouteAuthenticator
|
||||
for _, reqs := range requirements {
|
||||
var schemes []string
|
||||
scopes := make(map[string][]string, len(reqs))
|
||||
var scopeSlices [][]string
|
||||
for _, req := range reqs {
|
||||
schemes = append(schemes, req.Name)
|
||||
scopes[req.Name] = req.Scopes
|
||||
scopeSlices = append(scopeSlices, req.Scopes)
|
||||
}
|
||||
|
||||
definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs)
|
||||
authenticators := d.api.AuthenticatorsFor(definitions)
|
||||
auths = append(auths, RouteAuthenticator{
|
||||
Authenticator: authenticators,
|
||||
Schemes: schemes,
|
||||
Scopes: scopes,
|
||||
allScopes: stringSliceUnion(scopeSlices...),
|
||||
commonScopes: stringSliceIntersection(scopeSlices...),
|
||||
allowAnonymous: len(reqs) == 1 && reqs[0].Name == "",
|
||||
})
|
||||
}
|
||||
return auths
|
||||
}
|
||||
|
||||
func (d *defaultRouteBuilder) Build() *defaultRouter {
|
||||
routers := make(map[string]*denco.Router)
|
||||
for method, records := range d.records {
|
||||
router := denco.New()
|
||||
_ = router.Build(records)
|
||||
routers[method] = router
|
||||
}
|
||||
return &defaultRouter{
|
||||
spec: d.spec,
|
||||
routers: routers,
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,39 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package middleware

import "net/http"

func newSecureAPI(ctx *Context, next http.Handler) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		route, rCtx, _ := ctx.RouteInfo(r)
		if rCtx != nil {
			r = rCtx
		}
		if route != nil && !route.NeedsAuth() {
			next.ServeHTTP(rw, r)
			return
		}

		_, rCtx, err := ctx.Authorize(r, route)
		if err != nil {
			ctx.Respond(rw, r, route.Produces, route, err)
			return
		}
		r = rCtx

		next.ServeHTTP(rw, r)
	})
}
@@ -0,0 +1,48 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package middleware

import (
	"net/http"
	"path"
)

// Spec creates a middleware to serve a swagger spec.
// This allows for altering the spec before starting the http listener.
// This can be useful if you want to serve the swagger spec from another path than /swagger.json
//
func Spec(basePath string, b []byte, next http.Handler) http.Handler {
	if basePath == "" {
		basePath = "/"
	}
	pth := path.Join(basePath, "swagger.json")

	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		if r.URL.Path == pth {
			rw.Header().Set("Content-Type", "application/json")
			rw.WriteHeader(http.StatusOK)
			//#nosec
			_, _ = rw.Write(b)
			return
		}

		if next == nil {
			rw.Header().Set("Content-Type", "application/json")
			rw.WriteHeader(http.StatusNotFound)
			return
		}
		next.ServeHTTP(rw, r)
	})
}
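For illustration, a minimal wiring sketch for the Spec middleware (not part of this commit; the swagger.json path and the placeholder API handler are assumptions):

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	// The spec bytes can come from anywhere; a file on disk is assumed here.
	specJSON, err := os.ReadFile("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	// Stand-in for the real API handler.
	api := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rw.WriteHeader(http.StatusNotImplemented)
	})

	// Serves the raw document at /api/swagger.json and delegates everything
	// else to the next handler.
	log.Fatal(http.ListenAndServe(":8080", middleware.Spec("/api", specJSON, api)))
}
```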
@@ -0,0 +1,162 @@
|
|||
package middleware
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"path"
|
||||
)
|
||||
|
||||
// SwaggerUIOpts configures the SwaggerUI middleware
|
||||
type SwaggerUIOpts struct {
|
||||
// BasePath for the UI path, defaults to: /
|
||||
BasePath string
|
||||
// Path combines with BasePath for the full UI path, defaults to: docs
|
||||
Path string
|
||||
// SpecURL the url to find the spec for
|
||||
SpecURL string
|
||||
|
||||
// The three components needed to embed swagger-ui
|
||||
SwaggerURL string
|
||||
SwaggerPresetURL string
|
||||
SwaggerStylesURL string
|
||||
|
||||
Favicon32 string
|
||||
Favicon16 string
|
||||
|
||||
// Title for the documentation site, default to: API documentation
|
||||
Title string
|
||||
}
|
||||
|
||||
// EnsureDefaults in case some options are missing
|
||||
func (r *SwaggerUIOpts) EnsureDefaults() {
|
||||
if r.BasePath == "" {
|
||||
r.BasePath = "/"
|
||||
}
|
||||
if r.Path == "" {
|
||||
r.Path = "docs"
|
||||
}
|
||||
if r.SpecURL == "" {
|
||||
r.SpecURL = "/swagger.json"
|
||||
}
|
||||
if r.SwaggerURL == "" {
|
||||
r.SwaggerURL = swaggerLatest
|
||||
}
|
||||
if r.SwaggerPresetURL == "" {
|
||||
r.SwaggerPresetURL = swaggerPresetLatest
|
||||
}
|
||||
if r.SwaggerStylesURL == "" {
|
||||
r.SwaggerStylesURL = swaggerStylesLatest
|
||||
}
|
||||
if r.Favicon16 == "" {
|
||||
r.Favicon16 = swaggerFavicon16Latest
|
||||
}
|
||||
if r.Favicon32 == "" {
|
||||
r.Favicon32 = swaggerFavicon32Latest
|
||||
}
|
||||
if r.Title == "" {
|
||||
r.Title = "API documentation"
|
||||
}
|
||||
}
|
||||
|
||||
// SwaggerUI creates a middleware to serve a documentation site for a swagger spec.
|
||||
// This allows for altering the spec before starting the http listener.
|
||||
func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler {
|
||||
opts.EnsureDefaults()
|
||||
|
||||
pth := path.Join(opts.BasePath, opts.Path)
|
||||
tmpl := template.Must(template.New("swaggerui").Parse(swaggeruiTemplate))
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
_ = tmpl.Execute(buf, &opts)
|
||||
b := buf.Bytes()
|
||||
|
||||
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
if path.Join(r.URL.Path) == pth {
|
||||
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
|
||||
_, _ = rw.Write(b)
|
||||
return
|
||||
}
|
||||
|
||||
if next == nil {
|
||||
rw.Header().Set("Content-Type", "text/plain")
|
||||
rw.WriteHeader(http.StatusNotFound)
|
||||
_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(rw, r)
|
||||
})
|
||||
}
|
||||
|
||||
const (
|
||||
swaggerLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js"
|
||||
swaggerPresetLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-standalone-preset.js"
|
||||
swaggerStylesLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui.css"
|
||||
swaggerFavicon32Latest = "https://unpkg.com/swagger-ui-dist/favicon-32x32.png"
|
||||
swaggerFavicon16Latest = "https://unpkg.com/swagger-ui-dist/favicon-16x16.png"
|
||||
swaggeruiTemplate = `
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<title>{{ .Title }}</title>
|
||||
|
||||
<link rel="stylesheet" type="text/css" href="{{ .SwaggerStylesURL }}" >
|
||||
<link rel="icon" type="image/png" href="{{ .Favicon32 }}" sizes="32x32" />
|
||||
<link rel="icon" type="image/png" href="{{ .Favicon16 }}" sizes="16x16" />
|
||||
<style>
|
||||
html
|
||||
{
|
||||
box-sizing: border-box;
|
||||
overflow: -moz-scrollbars-vertical;
|
||||
overflow-y: scroll;
|
||||
}
|
||||
|
||||
*,
|
||||
*:before,
|
||||
*:after
|
||||
{
|
||||
box-sizing: inherit;
|
||||
}
|
||||
|
||||
body
|
||||
{
|
||||
margin:0;
|
||||
background: #fafafa;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<div id="swagger-ui"></div>
|
||||
|
||||
<script src="{{ .SwaggerURL }}"> </script>
|
||||
<script src="{{ .SwaggerPresetURL }}"> </script>
|
||||
<script>
|
||||
window.onload = function() {
|
||||
// Begin Swagger UI call region
|
||||
const ui = SwaggerUIBundle({
|
||||
url: '{{ .SpecURL }}',
|
||||
dom_id: '#swagger-ui',
|
||||
deepLinking: true,
|
||||
presets: [
|
||||
SwaggerUIBundle.presets.apis,
|
||||
SwaggerUIStandalonePreset
|
||||
],
|
||||
plugins: [
|
||||
SwaggerUIBundle.plugins.DownloadUrl
|
||||
],
|
||||
layout: "StandaloneLayout"
|
||||
})
|
||||
// End Swagger UI call region
|
||||
|
||||
window.ui = ui
|
||||
}
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
`
|
||||
)
|
|
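For illustration, a minimal sketch chaining Spec and SwaggerUI (not part of this commit; the swagger.json path is an assumption, and passing a nil next handler simply yields 404s for unknown paths):

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	specJSON, err := os.ReadFile("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	// EnsureDefaults fills in the unpkg.com asset URLs and the default title.
	opts := middleware.SwaggerUIOpts{SpecURL: "/swagger.json", Path: "docs"}

	var api http.Handler // nil next handler: unmatched paths get a 404
	docs := middleware.SwaggerUI(opts, api)

	// /swagger.json serves the spec, /docs serves the UI pointing at it.
	log.Fatal(http.ListenAndServe(":8080", middleware.Spec("", specJSON, docs)))
}
```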
@@ -0,0 +1,286 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package untyped
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/analysis"
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/loads"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/strfmt"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
)
|
||||
|
||||
// NewAPI creates the default untyped API
|
||||
func NewAPI(spec *loads.Document) *API {
|
||||
var an *analysis.Spec
|
||||
if spec != nil && spec.Spec() != nil {
|
||||
an = analysis.New(spec.Spec())
|
||||
}
|
||||
api := &API{
|
||||
spec: spec,
|
||||
analyzer: an,
|
||||
consumers: make(map[string]runtime.Consumer, 10),
|
||||
producers: make(map[string]runtime.Producer, 10),
|
||||
authenticators: make(map[string]runtime.Authenticator),
|
||||
operations: make(map[string]map[string]runtime.OperationHandler),
|
||||
ServeError: errors.ServeError,
|
||||
Models: make(map[string]func() interface{}),
|
||||
formats: strfmt.NewFormats(),
|
||||
}
|
||||
return api.WithJSONDefaults()
|
||||
}
|
||||
|
||||
// API represents an untyped mux for a swagger spec
|
||||
type API struct {
|
||||
spec *loads.Document
|
||||
analyzer *analysis.Spec
|
||||
DefaultProduces string
|
||||
DefaultConsumes string
|
||||
consumers map[string]runtime.Consumer
|
||||
producers map[string]runtime.Producer
|
||||
authenticators map[string]runtime.Authenticator
|
||||
authorizer runtime.Authorizer
|
||||
operations map[string]map[string]runtime.OperationHandler
|
||||
ServeError func(http.ResponseWriter, *http.Request, error)
|
||||
Models map[string]func() interface{}
|
||||
formats strfmt.Registry
|
||||
}
|
||||
|
||||
// WithJSONDefaults loads the json defaults for this api
|
||||
func (d *API) WithJSONDefaults() *API {
|
||||
d.DefaultConsumes = runtime.JSONMime
|
||||
d.DefaultProduces = runtime.JSONMime
|
||||
d.consumers[runtime.JSONMime] = runtime.JSONConsumer()
|
||||
d.producers[runtime.JSONMime] = runtime.JSONProducer()
|
||||
return d
|
||||
}
|
||||
|
||||
// WithoutJSONDefaults clears the json defaults for this api
|
||||
func (d *API) WithoutJSONDefaults() *API {
|
||||
d.DefaultConsumes = ""
|
||||
d.DefaultProduces = ""
|
||||
delete(d.consumers, runtime.JSONMime)
|
||||
delete(d.producers, runtime.JSONMime)
|
||||
return d
|
||||
}
|
||||
|
||||
// Formats returns the registered string formats
|
||||
func (d *API) Formats() strfmt.Registry {
|
||||
if d.formats == nil {
|
||||
d.formats = strfmt.NewFormats()
|
||||
}
|
||||
return d.formats
|
||||
}
|
||||
|
||||
// RegisterFormat registers a custom format validator
|
||||
func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) {
|
||||
if d.formats == nil {
|
||||
d.formats = strfmt.NewFormats()
|
||||
}
|
||||
d.formats.Add(name, format, validator)
|
||||
}
|
||||
|
||||
// RegisterAuth registers an auth handler in this api
|
||||
func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) {
|
||||
if d.authenticators == nil {
|
||||
d.authenticators = make(map[string]runtime.Authenticator)
|
||||
}
|
||||
d.authenticators[scheme] = handler
|
||||
}
|
||||
|
||||
// RegisterAuthorizer registers an authorizer handler in this api
|
||||
func (d *API) RegisterAuthorizer(handler runtime.Authorizer) {
|
||||
d.authorizer = handler
|
||||
}
|
||||
|
||||
// RegisterConsumer registers a consumer for a media type.
|
||||
func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) {
|
||||
if d.consumers == nil {
|
||||
d.consumers = make(map[string]runtime.Consumer, 10)
|
||||
}
|
||||
d.consumers[strings.ToLower(mediaType)] = handler
|
||||
}
|
||||
|
||||
// RegisterProducer registers a producer for a media type
|
||||
func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) {
|
||||
if d.producers == nil {
|
||||
d.producers = make(map[string]runtime.Producer, 10)
|
||||
}
|
||||
d.producers[strings.ToLower(mediaType)] = handler
|
||||
}
|
||||
|
||||
// RegisterOperation registers an operation handler for an operation name
|
||||
func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) {
|
||||
if d.operations == nil {
|
||||
d.operations = make(map[string]map[string]runtime.OperationHandler, 30)
|
||||
}
|
||||
um := strings.ToUpper(method)
|
||||
if b, ok := d.operations[um]; !ok || b == nil {
|
||||
d.operations[um] = make(map[string]runtime.OperationHandler)
|
||||
}
|
||||
d.operations[um][path] = handler
|
||||
}
|
||||
|
||||
// OperationHandlerFor returns the operation handler for the specified id if it can be found
|
||||
func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) {
|
||||
if d.operations == nil {
|
||||
return nil, false
|
||||
}
|
||||
if pi, ok := d.operations[strings.ToUpper(method)]; ok {
|
||||
h, ok := pi[path]
|
||||
return h, ok
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// ConsumersFor gets the consumers for the specified media types
|
||||
func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer {
|
||||
result := make(map[string]runtime.Consumer)
|
||||
for _, mt := range mediaTypes {
|
||||
if consumer, ok := d.consumers[mt]; ok {
|
||||
result[mt] = consumer
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ProducersFor gets the producers for the specified media types
|
||||
func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer {
|
||||
result := make(map[string]runtime.Producer)
|
||||
for _, mt := range mediaTypes {
|
||||
if producer, ok := d.producers[mt]; ok {
|
||||
result[mt] = producer
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// AuthenticatorsFor gets the authenticators for the specified security schemes
|
||||
func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator {
|
||||
result := make(map[string]runtime.Authenticator)
|
||||
for k := range schemes {
|
||||
if a, ok := d.authenticators[k]; ok {
|
||||
result[k] = a
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// Authorizer returns the registered authorizer
|
||||
func (d *API) Authorizer() runtime.Authorizer {
|
||||
return d.authorizer
|
||||
}
|
||||
|
||||
// Validate validates this API for any missing items
|
||||
func (d *API) Validate() error {
|
||||
return d.validate()
|
||||
}
|
||||
|
||||
// validate validates the registrations in this API against the provided spec analyzer
|
||||
func (d *API) validate() error {
|
||||
var consumes []string
|
||||
for k := range d.consumers {
|
||||
consumes = append(consumes, k)
|
||||
}
|
||||
|
||||
var produces []string
|
||||
for k := range d.producers {
|
||||
produces = append(produces, k)
|
||||
}
|
||||
|
||||
var authenticators []string
|
||||
for k := range d.authenticators {
|
||||
authenticators = append(authenticators, k)
|
||||
}
|
||||
|
||||
var operations []string
|
||||
for m, v := range d.operations {
|
||||
for p := range v {
|
||||
operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p))
|
||||
}
|
||||
}
|
||||
|
||||
var definedAuths []string
|
||||
for k := range d.spec.Spec().SecurityDefinitions {
|
||||
definedAuths = append(definedAuths, k)
|
||||
}
|
||||
|
||||
if err := d.verify("consumes", consumes, d.analyzer.RequiredConsumes()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := d.verify("operation", operations, d.analyzer.OperationMethodPaths()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
requiredAuths := d.analyzer.RequiredSecuritySchemes()
|
||||
if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *API) verify(name string, registrations []string, expectations []string) error {
|
||||
sort.Strings(registrations)
|
||||
sort.Strings(expectations)
|
||||
|
||||
expected := map[string]struct{}{}
|
||||
seen := map[string]struct{}{}
|
||||
|
||||
for _, v := range expectations {
|
||||
expected[v] = struct{}{}
|
||||
}
|
||||
|
||||
var unspecified []string
|
||||
for _, v := range registrations {
|
||||
seen[v] = struct{}{}
|
||||
if _, ok := expected[v]; !ok {
|
||||
unspecified = append(unspecified, v)
|
||||
}
|
||||
}
|
||||
|
||||
for k := range seen {
|
||||
delete(expected, k)
|
||||
}
|
||||
|
||||
var unregistered []string
|
||||
for k := range expected {
|
||||
unregistered = append(unregistered, k)
|
||||
}
|
||||
sort.Strings(unspecified)
|
||||
sort.Strings(unregistered)
|
||||
|
||||
if len(unregistered) > 0 || len(unspecified) > 0 {
|
||||
return &errors.APIVerificationFailed{
|
||||
Section: name,
|
||||
MissingSpecification: unspecified,
|
||||
MissingRegistration: unregistered,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
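For illustration, a hedged sketch of driving the untyped API (not part of this commit; it assumes a minimal JSON-only swagger.json that defines GET /pets/{id}):

```go
package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/middleware/untyped"
)

func main() {
	doc, err := loads.Spec("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	api := untyped.NewAPI(doc) // JSON consumer/producer registered by default

	// Operations are keyed by method and path as they appear in the spec.
	api.RegisterOperation("get", "/pets/{id}", runtime.OperationHandlerFunc(
		func(params interface{}) (interface{}, error) {
			return map[string]interface{}{"id": 1, "name": "rex"}, nil
		}))

	// Validate reports consumers, producers, operations or auth schemes that
	// the spec requires but that were never registered (and vice versa).
	if err := api.Validate(); err != nil {
		log.Fatal(err)
	}
}
```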
@@ -0,0 +1,126 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"mime"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/swag"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
)
|
||||
|
||||
type validation struct {
|
||||
context *Context
|
||||
result []error
|
||||
request *http.Request
|
||||
route *MatchedRoute
|
||||
bound map[string]interface{}
|
||||
}
|
||||
|
||||
// validateContentType validates the content type of a request against the list of allowed content types
|
||||
func validateContentType(allowed []string, actual string) error {
|
||||
debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", "))
|
||||
if len(allowed) == 0 {
|
||||
return nil
|
||||
}
|
||||
mt, _, err := mime.ParseMediaType(actual)
|
||||
if err != nil {
|
||||
return errors.InvalidContentType(actual, allowed)
|
||||
}
|
||||
if swag.ContainsStringsCI(allowed, mt) {
|
||||
return nil
|
||||
}
|
||||
if swag.ContainsStringsCI(allowed, "*/*") {
|
||||
return nil
|
||||
}
|
||||
parts := strings.Split(actual, "/")
|
||||
if len(parts) == 2 && swag.ContainsStringsCI(allowed, parts[0]+"/*") {
|
||||
return nil
|
||||
}
|
||||
return errors.InvalidContentType(actual, allowed)
|
||||
}
|
||||
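For illustration, the matching rules above can be exercised like this (not part of this commit; validateContentType is unexported, so such a sketch would have to live inside the middleware package, and it assumes "fmt" is imported):

```go
// Not part of this commit: an illustrative helper exercising the rules above.
func validateContentTypeSketch() {
	// mime.ParseMediaType strips parameters and "text/*" matches any text
	// subtype, so this returns nil.
	fmt.Println(validateContentType([]string{"application/json", "text/*"}, "text/plain; charset=utf-8"))

	// Nothing in the allowed list covers application/xml, so this returns
	// an InvalidContentType error.
	fmt.Println(validateContentType([]string{"application/json"}, "application/xml"))
}
```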
|
||||
func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation {
|
||||
debugLog("validating request %s %s", request.Method, request.URL.EscapedPath())
|
||||
validate := &validation{
|
||||
context: ctx,
|
||||
request: request,
|
||||
route: route,
|
||||
bound: make(map[string]interface{}),
|
||||
}
|
||||
|
||||
validate.contentType()
|
||||
if len(validate.result) == 0 {
|
||||
validate.responseFormat()
|
||||
}
|
||||
if len(validate.result) == 0 {
|
||||
validate.parameters()
|
||||
}
|
||||
|
||||
return validate
|
||||
}
|
||||
|
||||
func (v *validation) parameters() {
|
||||
debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath())
|
||||
if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil {
|
||||
if result.Error() == "validation failure list" {
|
||||
for _, e := range result.(*errors.Validation).Value.([]interface{}) {
|
||||
v.result = append(v.result, e.(error))
|
||||
}
|
||||
return
|
||||
}
|
||||
v.result = append(v.result, result)
|
||||
}
|
||||
}
|
||||
|
||||
func (v *validation) contentType() {
|
||||
if len(v.result) == 0 && runtime.HasBody(v.request) {
|
||||
debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath())
|
||||
ct, _, req, err := v.context.ContentType(v.request)
|
||||
if err != nil {
|
||||
v.result = append(v.result, err)
|
||||
} else {
|
||||
v.request = req
|
||||
}
|
||||
|
||||
if len(v.result) == 0 {
|
||||
if err := validateContentType(v.route.Consumes, ct); err != nil {
|
||||
v.result = append(v.result, err)
|
||||
}
|
||||
}
|
||||
if ct != "" && v.route.Consumer == nil {
|
||||
cons, ok := v.route.Consumers[ct]
|
||||
if !ok {
|
||||
v.result = append(v.result, errors.New(500, "no consumer registered for %s", ct))
|
||||
} else {
|
||||
v.route.Consumer = cons
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (v *validation) responseFormat() {
|
||||
// if the route provides values for Produces and no format could be identified then return an error.
|
||||
// if the route does not specify values for Produces then treat request as valid since the API designer
|
||||
// chose not to specify the format for responses.
|
||||
if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" && len(v.route.Produces) > 0 {
|
||||
v.request = rCtx
|
||||
v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces))
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,276 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package security
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/errors"
|
||||
|
||||
"github.com/go-openapi/runtime"
|
||||
)
|
||||
|
||||
const (
|
||||
query = "query"
|
||||
header = "header"
|
||||
)
|
||||
|
||||
// HttpAuthenticator is a function that authenticates an HTTP request
|
||||
func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator {
|
||||
return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) {
|
||||
if request, ok := params.(*http.Request); ok {
|
||||
return handler(request)
|
||||
}
|
||||
if scoped, ok := params.(*ScopedAuthRequest); ok {
|
||||
return handler(scoped.Request)
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
}
|
||||
|
||||
// ScopedAuthenticator is a function that authenticates an HTTP request against a list of valid scopes
|
||||
func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, interface{}, error)) runtime.Authenticator {
|
||||
return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) {
|
||||
if request, ok := params.(*ScopedAuthRequest); ok {
|
||||
return handler(request)
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
}
|
||||
|
||||
// UserPassAuthentication authentication function
|
||||
type UserPassAuthentication func(string, string) (interface{}, error)
|
||||
|
||||
// UserPassAuthenticationCtx authentication function with context.Context
|
||||
type UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, interface{}, error)
|
||||
|
||||
// TokenAuthentication authentication function
|
||||
type TokenAuthentication func(string) (interface{}, error)
|
||||
|
||||
// TokenAuthenticationCtx authentication function with context.Context
|
||||
type TokenAuthenticationCtx func(context.Context, string) (context.Context, interface{}, error)
|
||||
|
||||
// ScopedTokenAuthentication authentication function
|
||||
type ScopedTokenAuthentication func(string, []string) (interface{}, error)
|
||||
|
||||
// ScopedTokenAuthenticationCtx authentication function with context.Context
|
||||
type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, interface{}, error)
|
||||
|
||||
var DefaultRealmName = "API"
|
||||
|
||||
type secCtxKey uint8
|
||||
|
||||
const (
|
||||
failedBasicAuth secCtxKey = iota
|
||||
oauth2SchemeName
|
||||
)
|
||||
|
||||
func FailedBasicAuth(r *http.Request) string {
|
||||
return FailedBasicAuthCtx(r.Context())
|
||||
}
|
||||
|
||||
func FailedBasicAuthCtx(ctx context.Context) string {
|
||||
v, ok := ctx.Value(failedBasicAuth).(string)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func OAuth2SchemeName(r *http.Request) string {
|
||||
return OAuth2SchemeNameCtx(r.Context())
|
||||
}
|
||||
|
||||
func OAuth2SchemeNameCtx(ctx context.Context) string {
|
||||
v, ok := ctx.Value(oauth2SchemeName).(string)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// BasicAuth creates a basic auth authenticator with the provided authentication function
|
||||
func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator {
|
||||
return BasicAuthRealm(DefaultRealmName, authenticate)
|
||||
}
|
||||
|
||||
// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name
|
||||
func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator {
|
||||
if realm == "" {
|
||||
realm = DefaultRealmName
|
||||
}
|
||||
|
||||
return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) {
|
||||
if usr, pass, ok := r.BasicAuth(); ok {
|
||||
p, err := authenticate(usr, pass)
|
||||
if err != nil {
|
||||
*r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm))
|
||||
}
|
||||
return true, p, err
|
||||
}
|
||||
*r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm))
|
||||
return false, nil, nil
|
||||
})
|
||||
}
|
||||
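For illustration, a hedged sketch of using BasicAuth directly (not part of this commit; the credential check is a stand-in):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/errors"
	"github.com/go-openapi/runtime/security"
)

func main() {
	// The credential check is a stand-in for a real user store.
	basic := security.BasicAuth(func(user, pass string) (interface{}, error) {
		if user == "admin" && pass == "secret" {
			return user, nil // the returned value becomes the principal
		}
		return nil, errors.New(401, "invalid credentials")
	})

	req, _ := http.NewRequest(http.MethodGet, "http://localhost/pets", nil)
	req.SetBasicAuth("admin", "secret")

	// applies is false when no Authorization header was sent at all;
	// err is non-nil when credentials were supplied but rejected.
	applies, principal, err := basic.Authenticate(req)
	fmt.Println(applies, principal, err)
}
```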
|
||||
// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context
|
||||
func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator {
|
||||
return BasicAuthRealmCtx(DefaultRealmName, authenticate)
|
||||
}
|
||||
|
||||
// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context
|
||||
func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator {
|
||||
if realm == "" {
|
||||
realm = DefaultRealmName
|
||||
}
|
||||
|
||||
return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) {
|
||||
if usr, pass, ok := r.BasicAuth(); ok {
|
||||
ctx, p, err := authenticate(r.Context(), usr, pass)
|
||||
if err != nil {
|
||||
ctx = context.WithValue(ctx, failedBasicAuth, realm)
|
||||
}
|
||||
*r = *r.WithContext(ctx)
|
||||
return true, p, err
|
||||
}
|
||||
*r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm))
|
||||
return false, nil, nil
|
||||
})
|
||||
}
|
||||
|
||||
// APIKeyAuth creates an authenticator that uses a token for authorization.
|
||||
// This token can be obtained from either a header or a query string
|
||||
func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator {
|
||||
inl := strings.ToLower(in)
|
||||
if inl != query && inl != header {
|
||||
// panic because this is most likely a typo
|
||||
panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\"."))
|
||||
}
|
||||
|
||||
var getToken func(*http.Request) string
|
||||
switch inl {
|
||||
case header:
|
||||
getToken = func(r *http.Request) string { return r.Header.Get(name) }
|
||||
case query:
|
||||
getToken = func(r *http.Request) string { return r.URL.Query().Get(name) }
|
||||
}
|
||||
|
||||
return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) {
|
||||
token := getToken(r)
|
||||
if token == "" {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
p, err := authenticate(token)
|
||||
return true, p, err
|
||||
})
|
||||
}
|
||||
|
||||
// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context.
|
||||
// This token can be obtained from either a header or a query string
|
||||
func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator {
|
||||
inl := strings.ToLower(in)
|
||||
if inl != query && inl != header {
|
||||
// panic because this is most likely a typo
|
||||
panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\"."))
|
||||
}
|
||||
|
||||
var getToken func(*http.Request) string
|
||||
switch inl {
|
||||
case header:
|
||||
getToken = func(r *http.Request) string { return r.Header.Get(name) }
|
||||
case query:
|
||||
getToken = func(r *http.Request) string { return r.URL.Query().Get(name) }
|
||||
}
|
||||
|
||||
return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) {
|
||||
token := getToken(r)
|
||||
if token == "" {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
ctx, p, err := authenticate(r.Context(), token)
|
||||
*r = *r.WithContext(ctx)
|
||||
return true, p, err
|
||||
})
|
||||
}
|
||||
|
||||
// ScopedAuthRequest contains both a http request and the required scopes for a particular operation
|
||||
type ScopedAuthRequest struct {
|
||||
Request *http.Request
|
||||
RequiredScopes []string
|
||||
}
|
||||
|
||||
// BearerAuth for use with oauth2 flows
|
||||
func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator {
|
||||
const prefix = "Bearer "
|
||||
return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) {
|
||||
var token string
|
||||
hdr := r.Request.Header.Get(runtime.HeaderAuthorization)
|
||||
if strings.HasPrefix(hdr, prefix) {
|
||||
token = strings.TrimPrefix(hdr, prefix)
|
||||
}
|
||||
if token == "" {
|
||||
qs := r.Request.URL.Query()
|
||||
token = qs.Get("access_token")
|
||||
}
|
||||
//#nosec
|
||||
ct, _, _ := runtime.ContentType(r.Request.Header)
|
||||
if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") {
|
||||
token = r.Request.FormValue("access_token")
|
||||
}
|
||||
|
||||
if token == "" {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name)
|
||||
*r.Request = *r.Request.WithContext(rctx)
|
||||
p, err := authenticate(token, r.RequiredScopes)
|
||||
return true, p, err
|
||||
})
|
||||
}
|
||||
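Similarly, a hedged sketch of using BearerAuth (not part of this commit; the token check is a stand-in and the required scope is made up):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/errors"
	"github.com/go-openapi/runtime/security"
)

func main() {
	// Token introspection is a stand-in; a real implementation would verify
	// the token and the granted scopes.
	bearer := security.BearerAuth("oauth2", func(token string, scopes []string) (interface{}, error) {
		if token == "opensesame" {
			return "some-user", nil
		}
		return nil, errors.New(401, "token rejected")
	})

	req, _ := http.NewRequest(http.MethodGet, "http://localhost/pets", nil)
	req.Header.Set("Authorization", "Bearer opensesame")

	// BearerAuth is a ScopedAuthenticator, so it is fed a ScopedAuthRequest
	// carrying the scopes required by the matched operation.
	ok, principal, err := bearer.Authenticate(&security.ScopedAuthRequest{
		Request:        req,
		RequiredScopes: []string{"read:pets"},
	})
	fmt.Println(ok, principal, err)
}
```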
|
||||
// BearerAuthCtx for use with oauth2 flows with support for context.Context.
|
||||
func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator {
|
||||
const prefix = "Bearer "
|
||||
return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) {
|
||||
var token string
|
||||
hdr := r.Request.Header.Get(runtime.HeaderAuthorization)
|
||||
if strings.HasPrefix(hdr, prefix) {
|
||||
token = strings.TrimPrefix(hdr, prefix)
|
||||
}
|
||||
if token == "" {
|
||||
qs := r.Request.URL.Query()
|
||||
token = qs.Get("access_token")
|
||||
}
|
||||
//#nosec
|
||||
ct, _, _ := runtime.ContentType(r.Request.Header)
|
||||
if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") {
|
||||
token = r.Request.FormValue("access_token")
|
||||
}
|
||||
|
||||
if token == "" {
|
||||
return false, nil, nil
|
||||
}
|
||||
|
||||
rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name)
|
||||
ctx, p, err := authenticate(rctx, token, r.RequiredScopes)
|
||||
*r.Request = *r.Request.WithContext(ctx)
|
||||
return true, p, err
|
||||
})
|
||||
}
|
|
@@ -0,0 +1,27 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package security

import (
	"net/http"

	"github.com/go-openapi/runtime"
)

// Authorized provides a default implementation of the Authorizer interface where all
// requests are authorized (successful)
func Authorized() runtime.Authorizer {
	return runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil })
}
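The same AuthorizerFunc pattern can back a non-trivial authorizer; a hedged sketch (not part of this commit; the admin-only rule is a stand-in):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/go-openapi/errors"
	"github.com/go-openapi/runtime"
)

func main() {
	// Deny everything except the "admin" principal.
	auth := runtime.AuthorizerFunc(func(_ *http.Request, principal interface{}) error {
		if principal == "admin" {
			return nil
		}
		return errors.New(403, "forbidden")
	})

	r, _ := http.NewRequest(http.MethodGet, "http://localhost/", nil)
	fmt.Println(auth.Authorize(r, "admin"), auth.Authorize(r, "guest"))
}
```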
@@ -0,0 +1,40 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package yamlpc

import (
	"io"

	"github.com/go-openapi/runtime"

	"gopkg.in/yaml.v2"
)

// YAMLConsumer creates a consumer for yaml data
func YAMLConsumer() runtime.Consumer {
	return runtime.ConsumerFunc(func(r io.Reader, v interface{}) error {
		dec := yaml.NewDecoder(r)
		return dec.Decode(v)
	})
}

// YAMLProducer creates a producer for yaml data
func YAMLProducer() runtime.Producer {
	return runtime.ProducerFunc(func(w io.Writer, v interface{}) error {
		enc := yaml.NewEncoder(w)
		defer enc.Close()
		return enc.Encode(v)
	})
}
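For illustration, a hedged sketch of using the YAML consumer and producer directly (not part of this commit; the sample document is made up):

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/go-openapi/runtime/yamlpc"
)

func main() {
	// Consume YAML from any io.Reader into a generic value...
	var v map[string]interface{}
	if err := yamlpc.YAMLConsumer().Consume(strings.NewReader("name: podman\nversion: 4\n"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v["name"], v["version"])

	// ...and produce it back onto any io.Writer.
	if err := yamlpc.YAMLProducer().Produce(os.Stdout, v); err != nil {
		panic(err)
	}
}
```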
@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
@@ -0,0 +1,26 @@
language: go
go:
  - 1.13.1
  - tip
matrix:
  allow_failures:
    - go: tip

notifications:
  email:
    recipients: dean.karn@gmail.com
    on_success: change
    on_failure: always

before_install:
  - go install github.com/mattn/goveralls

# Only clone the most recent commit.
git:
  depth: 1

script:
  - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...

after_success: |
  goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2016 Go Playground

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,172 @@
|
|||
## locales
|
||||
<img align="right" src="https://raw.githubusercontent.com/go-playground/locales/master/logo.png">
|
||||
[](https://travis-ci.org/go-playground/locales)
|
||||
[](https://goreportcard.com/report/github.com/go-playground/locales)
|
||||
[](https://godoc.org/github.com/go-playground/locales)
|
||||

|
||||
[](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
|
||||
|
||||
Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within
|
||||
an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator).
|
||||
|
||||
Features
|
||||
--------
|
||||
- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v36.0.1
|
||||
- [x] Contains Cardinal, Ordinal and Range Plural Rules
|
||||
- [x] Contains Month, Weekday and Timezone translations built in
|
||||
- [x] Contains Date & Time formatting functions
|
||||
- [x] Contains Number, Currency, Accounting and Percent formatting functions
|
||||
- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
|
||||
|
||||
Full Tests
|
||||
--------------------
|
||||
I could sure use your help adding tests for every locale, it is a huge undertaking and I just don't have the free time to do it all at the moment;
|
||||
any help would be **greatly appreciated!!!!** Please see [issue](https://github.com/go-playground/locales/issues/1) for details.
|
||||
|
||||
Installation
|
||||
-----------
|
||||
|
||||
Use go get
|
||||
|
||||
```shell
|
||||
go get github.com/go-playground/locales
|
||||
```
|
||||
|
||||
NOTES
|
||||
--------
|
||||
You'll notice most return types are []byte; this is because most of the time the results will be concatenated with a larger body
|
||||
of text and can avoid some allocations if you are already appending to a byte slice; otherwise just cast the result as a string.
|
||||
|
||||
Usage
|
||||
-------
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-playground/locales/currency"
|
||||
"github.com/go-playground/locales/en_CA"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
||||
loc, _ := time.LoadLocation("America/Toronto")
|
||||
datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc)
|
||||
|
||||
l := en_CA.New()
|
||||
|
||||
// Dates
|
||||
fmt.Println(l.FmtDateFull(datetime))
|
||||
fmt.Println(l.FmtDateLong(datetime))
|
||||
fmt.Println(l.FmtDateMedium(datetime))
|
||||
fmt.Println(l.FmtDateShort(datetime))
|
||||
|
||||
// Times
|
||||
fmt.Println(l.FmtTimeFull(datetime))
|
||||
fmt.Println(l.FmtTimeLong(datetime))
|
||||
fmt.Println(l.FmtTimeMedium(datetime))
|
||||
fmt.Println(l.FmtTimeShort(datetime))
|
||||
|
||||
// Months Wide
|
||||
fmt.Println(l.MonthWide(time.January))
|
||||
fmt.Println(l.MonthWide(time.February))
|
||||
fmt.Println(l.MonthWide(time.March))
|
||||
// ...
|
||||
|
||||
// Months Abbreviated
|
||||
fmt.Println(l.MonthAbbreviated(time.January))
|
||||
fmt.Println(l.MonthAbbreviated(time.February))
|
||||
fmt.Println(l.MonthAbbreviated(time.March))
|
||||
// ...
|
||||
|
||||
// Months Narrow
|
||||
fmt.Println(l.MonthNarrow(time.January))
|
||||
fmt.Println(l.MonthNarrow(time.February))
|
||||
fmt.Println(l.MonthNarrow(time.March))
|
||||
// ...
|
||||
|
||||
// Weekdays Wide
|
||||
fmt.Println(l.WeekdayWide(time.Sunday))
|
||||
fmt.Println(l.WeekdayWide(time.Monday))
|
||||
fmt.Println(l.WeekdayWide(time.Tuesday))
|
||||
// ...
|
||||
|
||||
// Weekdays Abbreviated
|
||||
fmt.Println(l.WeekdayAbbreviated(time.Sunday))
|
||||
fmt.Println(l.WeekdayAbbreviated(time.Monday))
|
||||
fmt.Println(l.WeekdayAbbreviated(time.Tuesday))
|
||||
// ...
|
||||
|
||||
// Weekdays Short
|
||||
fmt.Println(l.WeekdayShort(time.Sunday))
|
||||
fmt.Println(l.WeekdayShort(time.Monday))
|
||||
fmt.Println(l.WeekdayShort(time.Tuesday))
|
||||
// ...
|
||||
|
||||
// Weekdays Narrow
|
||||
fmt.Println(l.WeekdayNarrow(time.Sunday))
|
||||
fmt.Println(l.WeekdayNarrow(time.Monday))
|
||||
fmt.Println(l.WeekdayNarrow(time.Tuesday))
|
||||
// ...
|
||||
|
||||
var f64 float64
|
||||
|
||||
f64 = -10356.4523
|
||||
|
||||
// Number
|
||||
fmt.Println(l.FmtNumber(f64, 2))
|
||||
|
||||
// Currency
|
||||
fmt.Println(l.FmtCurrency(f64, 2, currency.CAD))
|
||||
fmt.Println(l.FmtCurrency(f64, 2, currency.USD))
|
||||
|
||||
// Accounting
|
||||
fmt.Println(l.FmtAccounting(f64, 2, currency.CAD))
|
||||
fmt.Println(l.FmtAccounting(f64, 2, currency.USD))
|
||||
|
||||
f64 = 78.12
|
||||
|
||||
// Percent
|
||||
fmt.Println(l.FmtPercent(f64, 0))
|
||||
|
||||
// Plural Rules for locale, so you know what rules you must cover
|
||||
fmt.Println(l.PluralsCardinal())
|
||||
fmt.Println(l.PluralsOrdinal())
|
||||
|
||||
// Cardinal Plural Rules
|
||||
fmt.Println(l.CardinalPluralRule(1, 0))
|
||||
fmt.Println(l.CardinalPluralRule(1.0, 0))
|
||||
fmt.Println(l.CardinalPluralRule(1.0, 1))
|
||||
fmt.Println(l.CardinalPluralRule(3, 0))
|
||||
|
||||
// Ordinal Plural Rules
|
||||
fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st
|
||||
fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd
|
||||
fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd
|
||||
fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th
|
||||
|
||||
// Range Plural Rules
|
||||
fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1
|
||||
fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2
|
||||
fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8
|
||||
}
|
||||
```
|
||||
|
||||
NOTES:
|
||||
-------
|
||||
These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/). If you encounter any issues,
|
||||
I strongly encourage contributing to the CLDR project to get the locale information corrected, so that the next time
|
||||
these locales are regenerated the fix will be included.
|
||||
|
||||
I do however realize that time constraints are often important and so there are two options:
|
||||
|
||||
1. Create your own locale, copy, paste and modify, and ensure it complies with the `Translator` interface.
|
||||
2. Add an exception in the locale generation code directly, and once regenerated the fix will be in place.
|
||||
|
||||
Please do not make fixes inside the locale files; they WILL get overwritten when the locales are regenerated.
|
||||
|
||||
License
|
||||
------
|
||||
Distributed under MIT License, please see license file in code for more details.
|
|
@@ -0,0 +1,311 @@
|
|||
package currency
|
||||
|
||||
// Type is the currency type associated with the locales currency enum
|
||||
type Type int
|
||||
|
||||
// locale currencies
|
||||
const (
|
||||
ADP Type = iota
|
||||
AED
|
||||
AFA
|
||||
AFN
|
||||
ALK
|
||||
ALL
|
||||
AMD
|
||||
ANG
|
||||
AOA
|
||||
AOK
|
||||
AON
|
||||
AOR
|
||||
ARA
|
||||
ARL
|
||||
ARM
|
||||
ARP
|
||||
ARS
|
||||
ATS
|
||||
AUD
|
||||
AWG
|
||||
AZM
|
||||
AZN
|
||||
BAD
|
||||
BAM
|
||||
BAN
|
||||
BBD
|
||||
BDT
|
||||
BEC
|
||||
BEF
|
||||
BEL
|
||||
BGL
|
||||
BGM
|
||||
BGN
|
||||
BGO
|
||||
BHD
|
||||
BIF
|
||||
BMD
|
||||
BND
|
||||
BOB
|
||||
BOL
|
||||
BOP
|
||||
BOV
|
||||
BRB
|
||||
BRC
|
||||
BRE
|
||||
BRL
|
||||
BRN
|
||||
BRR
|
||||
BRZ
|
||||
BSD
|
||||
BTN
|
||||
BUK
|
||||
BWP
|
||||
BYB
|
||||
BYN
|
||||
BYR
|
||||
BZD
|
||||
CAD
|
||||
CDF
|
||||
CHE
|
||||
CHF
|
||||
CHW
|
||||
CLE
|
||||
CLF
|
||||
CLP
|
||||
CNH
|
||||
CNX
|
||||
CNY
|
||||
COP
|
||||
COU
|
||||
CRC
|
||||
CSD
|
||||
CSK
|
||||
CUC
|
||||
CUP
|
||||
CVE
|
||||
CYP
|
||||
CZK
|
||||
DDM
|
||||
DEM
|
||||
DJF
|
||||
DKK
|
||||
DOP
|
||||
DZD
|
||||
ECS
|
||||
ECV
|
||||
EEK
|
||||
EGP
|
||||
ERN
|
||||
ESA
|
||||
ESB
|
||||
ESP
|
||||
ETB
|
||||
EUR
|
||||
FIM
|
||||
FJD
|
||||
FKP
|
||||
FRF
|
||||
GBP
|
||||
GEK
|
||||
GEL
|
||||
GHC
|
||||
GHS
|
||||
GIP
|
||||
GMD
|
||||
GNF
|
||||
GNS
|
||||
GQE
|
||||
GRD
|
||||
GTQ
|
||||
GWE
|
||||
GWP
|
||||
GYD
|
||||
HKD
|
||||
HNL
|
||||
HRD
|
||||
HRK
|
||||
HTG
|
||||
HUF
|
||||
IDR
|
||||
IEP
|
||||
ILP
|
||||
ILR
|
||||
ILS
|
||||
INR
|
||||
IQD
|
||||
IRR
|
||||
ISJ
|
||||
ISK
|
||||
ITL
|
||||
JMD
|
||||
JOD
|
||||
JPY
|
||||
KES
|
||||
KGS
|
||||
KHR
|
||||
KMF
|
||||
KPW
|
||||
KRH
|
||||
KRO
|
||||
KRW
|
||||
KWD
|
||||
KYD
|
||||
KZT
|
||||
LAK
|
||||
LBP
|
||||
LKR
|
||||
LRD
|
||||
LSL
|
||||
LTL
|
||||
LTT
|
||||
LUC
|
||||
LUF
|
||||
LUL
|
||||
LVL
|
||||
LVR
|
||||
LYD
|
||||
MAD
|
||||
MAF
|
||||
MCF
|
||||
MDC
|
||||
MDL
|
||||
MGA
|
||||
MGF
|
||||
MKD
|
||||
MKN
|
||||
MLF
|
||||
MMK
|
||||
MNT
|
||||
MOP
|
||||
MRO
|
||||
MRU
|
||||
MTL
|
||||
MTP
|
||||
MUR
|
||||
MVP
|
||||
MVR
|
||||
MWK
|
||||
MXN
|
||||
MXP
|
||||
MXV
|
||||
MYR
|
||||
MZE
|
||||
MZM
|
||||
MZN
|
||||
NAD
|
||||
NGN
|
||||
NIC
|
||||
NIO
|
||||
NLG
|
||||
NOK
|
||||
NPR
|
||||
NZD
|
||||
OMR
|
||||
PAB
|
||||
PEI
|
||||
PEN
|
||||
PES
|
||||
PGK
|
||||
PHP
|
||||
PKR
|
||||
PLN
|
||||
PLZ
|
||||
PTE
|
||||
PYG
|
||||
QAR
|
||||
RHD
|
||||
ROL
|
||||
RON
|
||||
RSD
|
||||
RUB
|
||||
RUR
|
||||
RWF
|
||||
SAR
|
||||
SBD
|
||||
SCR
|
||||
SDD
|
||||
SDG
|
||||
SDP
|
||||
SEK
|
||||
SGD
|
||||
SHP
|
||||
SIT
|
||||
SKK
|
||||
SLL
|
||||
SOS
|
||||
SRD
|
||||
SRG
|
||||
SSP
|
||||
STD
|
||||
STN
|
||||
SUR
|
||||
SVC
|
||||
SYP
|
||||
SZL
|
||||
THB
|
||||
TJR
|
||||
TJS
|
||||
TMM
|
||||
TMT
|
||||
TND
|
||||
TOP
|
||||
TPE
|
||||
TRL
|
||||
TRY
|
||||
TTD
|
||||
TWD
|
||||
TZS
|
||||
UAH
|
||||
UAK
|
||||
UGS
|
||||
UGX
|
||||
USD
|
||||
USN
|
||||
USS
|
||||
UYI
|
||||
UYP
|
||||
UYU
|
||||
UYW
|
||||
UZS
|
||||
VEB
|
||||
VEF
|
||||
VES
|
||||
VND
|
||||
VNN
|
||||
VUV
|
||||
WST
|
||||
XAF
|
||||
XAG
|
||||
XAU
|
||||
XBA
|
||||
XBB
|
||||
XBC
|
||||
XBD
|
||||
XCD
|
||||
XDR
|
||||
XEU
|
||||
XFO
|
||||
XFU
|
||||
XOF
|
||||
XPD
|
||||
XPF
|
||||
XPT
|
||||
XRE
|
||||
XSU
|
||||
XTS
|
||||
XUA
|
||||
XXX
|
||||
YDD
|
||||
YER
|
||||
YUD
|
||||
YUM
|
||||
YUN
|
||||
YUR
|
||||
ZAL
|
||||
ZAR
|
||||
ZMK
|
||||
ZMW
|
||||
ZRN
|
||||
ZRZ
|
||||
ZWD
|
||||
ZWL
|
||||
ZWR
|
||||
)
|
Binary file not shown.
@@ -0,0 +1,293 @@
|
|||
package locales
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/go-playground/locales/currency"
|
||||
)
|
||||
|
||||
// // ErrBadNumberValue is returned when the number passed for
|
||||
// // plural rule determination cannot be parsed
|
||||
// type ErrBadNumberValue struct {
|
||||
// NumberValue string
|
||||
// InnerError error
|
||||
// }
|
||||
|
||||
// // Error returns ErrBadNumberValue error string
|
||||
// func (e *ErrBadNumberValue) Error() string {
|
||||
// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError)
|
||||
// }
|
||||
|
||||
// var _ error = new(ErrBadNumberValue)
|
||||
|
||||
// PluralRule denotes the type of plural rules
|
||||
type PluralRule int
|
||||
|
||||
// PluralRule's
|
||||
const (
|
||||
PluralRuleUnknown PluralRule = iota
|
||||
PluralRuleZero // zero
|
||||
PluralRuleOne // one - singular
|
||||
PluralRuleTwo // two - dual
|
||||
PluralRuleFew // few - paucal
|
||||
PluralRuleMany // many - also used for fractions if they have a separate class
|
||||
PluralRuleOther // other - required—general plural form—also used if the language only has a single form
|
||||
)
|
||||
|
||||
const (
|
||||
pluralsString = "UnknownZeroOneTwoFewManyOther"
|
||||
)
|
||||
|
||||
// Translator encapsulates an instance of a locale
|
||||
// NOTE: some values are returned as a []byte just in case the caller
|
||||
// wishes to add more and can help avoid allocations; otherwise just cast as string
|
||||
type Translator interface {
|
||||
|
||||
// The following Functions are for overriding, debugging or developing
|
||||
// with a Translator Locale
|
||||
|
||||
// Locale returns the string value of the translator
|
||||
Locale() string
|
||||
|
||||
// returns an array of cardinal plural rules associated
|
||||
// with this translator
|
||||
PluralsCardinal() []PluralRule
|
||||
|
||||
// returns an array of ordinal plural rules associated
|
||||
// with this translator
|
||||
PluralsOrdinal() []PluralRule
|
||||
|
||||
// returns an array of range plural rules associated
|
||||
// with this translator
|
||||
PluralsRange() []PluralRule
|
||||
|
||||
// returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale
|
||||
CardinalPluralRule(num float64, v uint64) PluralRule
|
||||
|
||||
// returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale
|
||||
OrdinalPluralRule(num float64, v uint64) PluralRule
|
||||
|
||||
// returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale
|
||||
RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule
|
||||
|
||||
// returns the locales abbreviated month given the 'month' provided
|
||||
MonthAbbreviated(month time.Month) string
|
||||
|
||||
// returns the locales abbreviated months
|
||||
MonthsAbbreviated() []string
|
||||
|
||||
// returns the locales narrow month given the 'month' provided
|
||||
MonthNarrow(month time.Month) string
|
||||
|
||||
// returns the locales narrow months
|
||||
MonthsNarrow() []string
|
||||
|
||||
// returns the locales wide month given the 'month' provided
|
||||
MonthWide(month time.Month) string
|
||||
|
||||
// returns the locales wide months
|
||||
MonthsWide() []string
|
||||
|
||||
// returns the locales abbreviated weekday given the 'weekday' provided
|
||||
WeekdayAbbreviated(weekday time.Weekday) string
|
||||
|
||||
// returns the locales abbreviated weekdays
|
||||
WeekdaysAbbreviated() []string
|
||||
|
||||
// returns the locales narrow weekday given the 'weekday' provided
|
||||
WeekdayNarrow(weekday time.Weekday) string
|
||||
|
||||
// WeekdaysNarrow returns the locales narrow weekdays
|
||||
WeekdaysNarrow() []string
|
||||
|
||||
// returns the locales short weekday given the 'weekday' provided
|
||||
WeekdayShort(weekday time.Weekday) string
|
||||
|
||||
// returns the locales short weekdays
|
||||
WeekdaysShort() []string
|
||||
|
||||
// returns the locales wide weekday given the 'weekday' provided
|
||||
WeekdayWide(weekday time.Weekday) string
|
||||
|
||||
// returns the locales wide weekdays
|
||||
WeekdaysWide() []string
|
||||
|
||||
// The following Functions are common Formatting functions for the Translator's Locale
|
||||
|
||||
// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
|
||||
FmtNumber(num float64, v uint64) string
|
||||
|
||||
// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
|
||||
// NOTE: 'num' passed into FmtPercent is assumed to be in percent already
|
||||
FmtPercent(num float64, v uint64) string
|
||||
|
||||
// returns the currency representation of 'num' with digits/precision of 'v' for locale
|
||||
FmtCurrency(num float64, v uint64, currency currency.Type) string
|
||||
|
||||
// returns the currency representation of 'num' with digits/precision of 'v' for locale
|
||||
// in accounting notation.
|
||||
FmtAccounting(num float64, v uint64, currency currency.Type) string
|
||||
|
||||
// returns the short date representation of 't' for locale
|
||||
FmtDateShort(t time.Time) string
|
||||
|
||||
// returns the medium date representation of 't' for locale
|
||||
FmtDateMedium(t time.Time) string
|
||||
|
||||
// returns the long date representation of 't' for locale
|
||||
FmtDateLong(t time.Time) string
|
||||
|
||||
// returns the full date representation of 't' for locale
|
||||
FmtDateFull(t time.Time) string
|
||||
|
||||
// returns the short time representation of 't' for locale
|
||||
FmtTimeShort(t time.Time) string
|
||||
|
||||
// returns the medium time representation of 't' for locale
|
||||
FmtTimeMedium(t time.Time) string
|
||||
|
||||
// returns the long time representation of 't' for locale
|
||||
FmtTimeLong(t time.Time) string
|
||||
|
||||
// returns the full time representation of 't' for locale
|
||||
FmtTimeFull(t time.Time) string
|
||||
}
|
||||
|
||||
// String returns the string value of PluralRule
|
||||
func (p PluralRule) String() string {
|
||||
|
||||
switch p {
|
||||
case PluralRuleZero:
|
||||
return pluralsString[7:11]
|
||||
case PluralRuleOne:
|
||||
return pluralsString[11:14]
|
||||
case PluralRuleTwo:
|
||||
return pluralsString[14:17]
|
||||
case PluralRuleFew:
|
||||
return pluralsString[17:20]
|
||||
case PluralRuleMany:
|
||||
return pluralsString[20:24]
|
||||
case PluralRuleOther:
|
||||
return pluralsString[24:]
|
||||
default:
|
||||
return pluralsString[:7]
|
||||
}
|
||||
}
|
||||
|
||||
//
// Precision Notes:
//
// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh
//
//	v := float64(3.141)
//	i := float64(int64(v))
//
//	fmt.Println(v - i)
//
// or
//
//	s := strconv.FormatFloat(v-i, 'f', -1, 64)
//	fmt.Println(s)
//
// these will not print what you'd expect (0.141) but rather 0.14100000000000001,
// and so this library requires a precision to be specified, or
// inaccurate plural rules could be applied.
//
//
// n - absolute value of the source number (integer and decimals).
// i - integer digits of n.
// v - number of visible fraction digits in n, with trailing zeros.
// w - number of visible fraction digits in n, without trailing zeros.
// f - visible fractional digits in n, with trailing zeros.
// t - visible fractional digits in n, without trailing zeros.
//
//
// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case, as this can lead to very unexpected behaviour; see the precision notes above.
//
//	n := math.Abs(num)
//	i := int64(n)
//	v := v
//
//	w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on the string until no more zeros....
//	f := strconv.FormatFloat(n, 'f', int(v), 64)              // then turn everything after the decimal into an int64
//	t := strconv.FormatFloat(n, 'f', int(v), 64)              // then parse backwards on the string until no more zeros....
//
//
// General Inclusion Rules
// - v will always be available inherently
// - all require n
// - w requires i
//

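// Worked example of the operands above (illustrative values only, following
// the CLDR definitions; W, F and T below compute w, f and t):
//
//	num = 1.230, v = 3
//	n = 1.23, i = 1, v = 3
//	w = 2 ("23"), f = 230, t = 23
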
// W returns the number of visible fraction digits in N, without trailing zeros.
func W(n float64, v uint64) (w int64) {

	s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)

	// will either be '0' or '0.xxxx', so if len is 1 then w will be zero
	// otherwise need to parse
	if len(s) != 1 {

		s = s[2:]
		end := 0

		// scan backwards past any trailing zeros
		for i := len(s) - 1; i >= 0; i-- {
			if s[i] != '0' {
				end = i + 1
				break
			}
		}

		w = int64(len(s[:end]))
	}

	return
}

// F returns the visible fractional digits in N, with trailing zeros.
func F(n float64, v uint64) (f int64) {

	s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)

	// will either be '0' or '0.xxxx', so if len is 1 then f will be zero
	// otherwise need to parse
	if len(s) != 1 {

		// ignoring error, because it can't fail as we generated
		// the string internally from a real number
		f, _ = strconv.ParseInt(s[2:], 10, 64)
	}

	return
}

// T returns the visible fractional digits in N, without trailing zeros.
func T(n float64, v uint64) (t int64) {

	s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)

	// will either be '0' or '0.xxxx', so if len is 1 then t will be zero
	// otherwise need to parse
	if len(s) != 1 {

		s = s[2:]
		end := 0

		// scan backwards past any trailing zeros
		for i := len(s) - 1; i >= 0; i-- {
			if s[i] != '0' {
				end = i + 1
				break
			}
		}

		// ignoring the error: s[:end] is either empty (all zeros, so t stays 0)
		// or digits we generated internally from a real number
		t, _ = strconv.ParseInt(s[:end], 10, 64)
	}

	return
}

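A minimal usage sketch of W, F and T above, assuming this file is vendored from github.com/go-playground/locales (the import path and the sample value are illustrative assumptions):

package main

import (
	"fmt"

	"github.com/go-playground/locales"
)

func main() {
	// 1.230 carries three visible fraction digits ("230").
	fmt.Println(locales.W(1.230, 3)) // 2   ("23" once the trailing zero is dropped)
	fmt.Println(locales.F(1.230, 3)) // 230
	fmt.Println(locales.T(1.230, 3)) // 23
}
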
@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
*.coverprofile

@ -0,0 +1,27 @@
language: go
go:
  - 1.13.4
  - tip
matrix:
  allow_failures:
    - go: tip

notifications:
  email:
    recipients: dean.karn@gmail.com
    on_success: change
    on_failure: always

before_install:
  - go install github.com/mattn/goveralls

# Only clone the most recent commit.
git:
  depth: 1

script:
  - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...

after_success: |
  [ $TRAVIS_GO_VERSION = 1.13.4 ] &&
  goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN