Merge pull request #21069 from umohnani8/new-farmbuild-2

farm build: push built images to registry
openshift-merge-bot[bot] 2024-01-15 18:41:29 +00:00 committed by GitHub
commit 3a46fe858f
97 changed files with 2769 additions and 1253 deletions


@ -49,7 +49,7 @@ type BuildFlagsWrapper struct {
// FarmBuildHiddenFlags are the flags hidden from the farm build command because they are either not
// supported or don't make sense in the farm build use case
var FarmBuildHiddenFlags = []string{"arch", "all-platforms", "compress", "cw", "disable-content-trust",
"logsplit", "manifest", "os", "output", "platform", "sign-by", "signature-policy", "stdin", "tls-verify",
"logsplit", "manifest", "os", "output", "platform", "sign-by", "signature-policy", "stdin",
"variant"}
func DefineBuildFlags(cmd *cobra.Command, buildOpts *BuildFlagsWrapper, isFarmBuild bool) {
@ -252,6 +252,7 @@ func ParseBuildOpts(cmd *cobra.Command, args []string, buildOpts *BuildFlagsWrap
}
apiBuildOpts.BuildOptions = *buildahDefineOpts
apiBuildOpts.ContainerFiles = containerFiles
apiBuildOpts.Authfile = buildOpts.Authfile
return &apiBuildOpts, err
}


@ -4,12 +4,13 @@ import (
"errors"
"fmt"
"os"
"strings"
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v4/cmd/podman/common"
"github.com/containers/podman/v4/cmd/podman/registry"
"github.com/containers/podman/v4/cmd/podman/utils"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/farm"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@ -25,12 +26,13 @@ type buildOptions struct {
var (
farmBuildDescription = `Build images on farm nodes, then bundle them into a manifest list`
buildCommand = &cobra.Command{
Use: "build [options] [CONTEXT]",
Short: "Build a container image for multiple architectures",
Long: farmBuildDescription,
RunE: build,
Example: "podman farm build [flags] buildContextDirectory",
Args: cobra.ExactArgs(1),
Use: "build [options] [CONTEXT]",
Short: "Build a container image for multiple architectures",
Long: farmBuildDescription,
RunE: build,
Example: "podman farm build [flags] buildContextDirectory",
ValidArgsFunction: common.AutocompleteDefaultOneArg,
Args: cobra.MaximumNArgs(1),
}
buildOpts = buildOptions{
buildOptions: common.BuildFlagsWrapper{},
@ -45,20 +47,10 @@ func init() {
flags := buildCommand.Flags()
flags.SetNormalizeFunc(utils.AliasFlags)
localFlagName := "local"
// Default for local is true and hide this flag for the remote use case
if !registry.IsRemote() {
flags.BoolVarP(&buildOpts.local, localFlagName, "l", true, "Build image on local machine as well as on farm nodes")
}
cleanupFlag := "cleanup"
flags.BoolVar(&buildOpts.buildOptions.Cleanup, cleanupFlag, false, "Remove built images from farm nodes on success")
platformsFlag := "platforms"
buildCommand.PersistentFlags().StringSliceVar(&buildOpts.platforms, platformsFlag, nil, "Build only on farm nodes that match the given platforms")
common.DefineBuildFlags(buildCommand, &buildOpts.buildOptions, true)
podmanConfig := registry.PodmanConfig()
farmFlagName := "farm"
// If remote, don't read the client's containers.conf file
defaultFarm := ""
@ -66,6 +58,17 @@ func init() {
defaultFarm = podmanConfig.ContainersConfDefaultsRO.Farms.Default
}
flags.StringVar(&buildOpts.farm, farmFlagName, defaultFarm, "Farm to use for builds")
_ = buildCommand.RegisterFlagCompletionFunc(farmFlagName, common.AutoCompleteFarms)
localFlagName := "local"
// Default for local is true
flags.BoolVarP(&buildOpts.local, localFlagName, "l", true, "Build image on local machine as well as on farm nodes")
platformsFlag := "platforms"
buildCommand.PersistentFlags().StringSliceVar(&buildOpts.platforms, platformsFlag, nil, "Build only on farm nodes that match the given platforms")
_ = buildCommand.RegisterFlagCompletionFunc(platformsFlag, completion.AutocompletePlatform)
common.DefineBuildFlags(buildCommand, &buildOpts.buildOptions, true)
}
func build(cmd *cobra.Command, args []string) error {
@ -79,7 +82,18 @@ func build(cmd *cobra.Command, args []string) error {
if !cmd.Flags().Changed("tag") {
return errors.New("cannot create manifest list without a name, value for --tag is required")
}
opts, err := common.ParseBuildOpts(cmd, args, &buildOpts.buildOptions)
// Ensure that the user gives a full name so we can push the built images from
// the node to the given registry and repository
// Should be of the format registry/repository/imageName
tag, err := cmd.Flags().GetStringArray("tag")
if err != nil {
return err
}
if !strings.Contains(tag[0], "/") {
return fmt.Errorf("%q is not a full image reference name", tag[0])
}
bopts := buildOpts.buildOptions
opts, err := common.ParseBuildOpts(cmd, args, &bopts)
if err != nil {
return err
}
@ -102,6 +116,11 @@ func build(cmd *cobra.Command, args []string) error {
return err
}
opts.IIDFile = iidFile
tlsVerify, err := cmd.Flags().GetBool("tls-verify")
if err != nil {
return err
}
opts.SkipTLSVerify = !tlsVerify
cfg, err := config.ReadCustomConfig()
if err != nil {
@ -117,13 +136,9 @@ func build(cmd *cobra.Command, args []string) error {
defaultFarm = f
}
var localEngine entities.ImageEngine
if buildOpts.local {
localEngine = registry.ImageEngine()
}
localEngine := registry.ImageEngine()
ctx := registry.Context()
farm, err := farm.NewFarm(ctx, defaultFarm, localEngine)
farm, err := farm.NewFarm(ctx, defaultFarm, localEngine, buildOpts.local)
if err != nil {
return fmt.Errorf("initializing: %w", err)
}
@ -137,7 +152,7 @@ func build(cmd *cobra.Command, args []string) error {
manifestName := opts.Output
// Set Output to "" so that the images built on the farm nodes have no name
opts.Output = ""
if err = farm.Build(ctx, schedule, *opts, manifestName); err != nil {
if err = farm.Build(ctx, schedule, *opts, manifestName, localEngine); err != nil {
return fmt.Errorf("build: %w", err)
}
logrus.Infof("build: ok")
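For reference, below is a minimal standalone sketch (not the Podman source; the command wiring, flag defaults, and function name `checkFarmBuildFlags` are illustrative) of the two pre-flight checks the reworked `build()` above performs: the first `--tag` value must be a full `registry/repository/imageName` reference, and `SkipTLSVerify` is derived as the inverse of `--tls-verify`.

```go
// A minimal standalone sketch of the new pre-flight checks; the command
// wiring below is illustrative, not the Podman source.
package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/spf13/cobra"
)

// checkFarmBuildFlags mirrors the validation added to build(): the first
// --tag value must name a registry/repository/imageName reference, and the
// push options skip TLS verification exactly when --tls-verify=false.
func checkFarmBuildFlags(cmd *cobra.Command) (skipTLSVerify bool, err error) {
	if !cmd.Flags().Changed("tag") {
		return false, errors.New("cannot create manifest list without a name, value for --tag is required")
	}
	tags, err := cmd.Flags().GetStringArray("tag")
	if err != nil {
		return false, err
	}
	if len(tags) == 0 {
		return false, errors.New("value for --tag is required")
	}
	// The built images are pushed straight to the registry named here,
	// so a bare image name is rejected.
	if !strings.Contains(tags[0], "/") {
		return false, fmt.Errorf("%q is not a full image reference name", tags[0])
	}
	tlsVerify, err := cmd.Flags().GetBool("tls-verify")
	if err != nil {
		return false, err
	}
	return !tlsVerify, nil
}

func main() {
	cmd := &cobra.Command{
		Use: "build",
		RunE: func(cmd *cobra.Command, _ []string) error {
			skip, err := checkFarmBuildFlags(cmd)
			if err != nil {
				return err
			}
			fmt.Println("skip TLS verification:", skip)
			return nil
		},
	}
	cmd.Flags().StringArray("tag", nil, "name for the built images and manifest list")
	cmd.Flags().Bool("tls-verify", true, "require HTTPS and verify registry certificates")
	cmd.SetArgs([]string{"--tag", "quay.io/example/demo:latest", "--tls-verify=false"})
	_ = cmd.Execute()
}
```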


@ -16,7 +16,7 @@ var (
The "podman system connection add --farm" command can be used to add a new connection to a new or existing farm.`
createCommand = &cobra.Command{
Use: "create [options] NAME [CONNECTIONS...]",
Use: "create NAME [CONNECTIONS...]",
Args: cobra.MinimumNArgs(1),
Short: "Create a new farm",
Long: farmCreateDescription,


@ -20,5 +20,4 @@ func init() {
registry.Commands = append(registry.Commands, registry.CliCommand{
Command: farmCmd,
})
farmCmd.Hidden = true
}


@ -1,5 +1,5 @@
####> This option file is used in:
####> podman auto update, build, container runlabel, create, kube play, login, manifest add, manifest create, manifest inspect, manifest push, pull, push, run, search
####> podman auto update, build, container runlabel, create, farm build, kube play, login, manifest add, manifest create, manifest inspect, manifest push, pull, push, run, search
####> If file is edited, make sure the changes
####> are applicable to all of those.
#### **--tls-verify**


@ -8,7 +8,10 @@ podman\-farm\-build - Build images on farm nodes, then bundle them into a manife
## DESCRIPTION
**podman farm build** Builds an image on all nodes in a farm and bundles them up into a manifest list.
It executes the `podman build` command on the nodes in the farm with the given Containerfile.
It executes the `podman build` command on the nodes in the farm with the given Containerfile. Once the
images are built on all the farm nodes, the images will be pushed to the registry given via the **--tag**
flag. Once all the images have been pushed, a manifest list will be created locally and pushed to the registry
as well.
The manifest list will contain an image per native architecture type that is present in the farm.
@ -17,6 +20,9 @@ via emulation using `podman build --arch --platform`.
If no farm is specified, the build will be sent out to all the nodes that `podman system connection` knows of.
Note: Since the images built are directly pushed to a registry, the user must pass in a full image name using the
**--tag** option in the format _registry_**/**_repository_**/**_imageName_[**:**_tag_].
## OPTIONS
@@option add-host
@ -193,6 +199,8 @@ Build only on farm nodes that match the given platforms.
@@option timestamp
@@option tls-verify
@@option ulimit.image
@@option unsetenv.image


@ -4,7 +4,7 @@
podman\-farm\-create - Create a new farm
## SYNOPSIS
**podman farm create** [*options*] *name* [*connections*]
**podman farm create** *name* [*connections*]
## DESCRIPTION
Create a new farm with connections that Podman knows about which were added via the
@ -13,8 +13,6 @@ Create a new farm with connections that Podman knows about which were added via
An empty farm can be created without adding any connections to it. Add or remove
connections from a farm via the *podman farm update* command.
## OPTIONS
## EXAMPLE
```

go.mod

@ -14,7 +14,7 @@ require (
github.com/containers/common v0.57.1-0.20231206135104-b647eb3a5eea
github.com/containers/conmon v2.0.20+incompatible
github.com/containers/gvisor-tap-vsock v0.7.1
github.com/containers/image/v5 v5.29.1-0.20231201205726-671ab94a09ea
github.com/containers/image/v5 v5.29.1-0.20231214202217-8eabe0f6b3eb
github.com/containers/libhvee v0.6.0
github.com/containers/ocicrypt v1.1.9
github.com/containers/psgo v1.8.0
@ -124,7 +124,7 @@ require (
github.com/go-openapi/loads v0.21.2 // indirect
github.com/go-openapi/runtime v0.26.0 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
github.com/go-openapi/strfmt v0.21.7 // indirect
github.com/go-openapi/strfmt v0.21.9 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-openapi/validate v0.22.1 // indirect
github.com/go-playground/locales v0.14.1 // indirect
@ -183,7 +183,7 @@ require (
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sigstore/fulcio v1.4.3 // indirect
github.com/sigstore/rekor v1.2.2 // indirect
github.com/sigstore/sigstore v1.7.5 // indirect
github.com/sigstore/sigstore v1.7.6 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
github.com/sylabs/sif/v2 v2.15.0 // indirect
@ -197,7 +197,7 @@ require (
github.com/vbatts/tar-split v0.11.5 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.mongodb.org/mongo-driver v1.11.3 // indirect
go.mongodb.org/mongo-driver v1.13.0 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel v1.19.0 // indirect

go.sum

@ -262,8 +262,8 @@ github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6J
github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
github.com/containers/gvisor-tap-vsock v0.7.1 h1:+Rc+sOPplrkQb/BUXeN0ug8TxjgyrIqo/9P/eNS2A4c=
github.com/containers/gvisor-tap-vsock v0.7.1/go.mod h1:WSSsjcuYZkvP8i0J+Ht3LF8yvysn3krD5zxQ74wz7y0=
github.com/containers/image/v5 v5.29.1-0.20231201205726-671ab94a09ea h1:tsXGDybhfKVnQ3vgsuPYhhNu5VnxNlDdLFwx5X1ruSo=
github.com/containers/image/v5 v5.29.1-0.20231201205726-671ab94a09ea/go.mod h1:viinaAODpZKsuvRIecjkmgV890VxszevaGiH+m8Qcug=
github.com/containers/image/v5 v5.29.1-0.20231214202217-8eabe0f6b3eb h1:lCFjLpNX0BXdset9e5T5lp/MnPh4FmulIP3gkrntFts=
github.com/containers/image/v5 v5.29.1-0.20231214202217-8eabe0f6b3eb/go.mod h1:OVejSoGky0FNBvFris29zLQs7bcVWtGM/cZwQ6e38Ik=
github.com/containers/libhvee v0.6.0 h1:tUzwSz8R0GjR6IctgDnkTMjdtCk5Mxhpai4Vyv6UeF4=
github.com/containers/libhvee v0.6.0/go.mod h1:f/q1wCdQqOLiK3IZqqBfOD7exMZYBU5pDYsrMa/pSFg=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
@ -461,8 +461,8 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6
github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
github.com/go-openapi/strfmt v0.21.9 h1:LnEGOO9qyEC1v22Bzr323M98G13paIUGPU7yeJtG9Xs=
github.com/go-openapi/strfmt v0.21.9/go.mod h1:0k3v301mglEaZRJdDDGSlN6Npq4VMVU69DE0LUyf7uA=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
@ -480,7 +480,7 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.15.5 h1:LEBecTWb/1j5TNY1YYG2RcOUN3R7NLylN+x8TTueE24=
github.com/go-playground/validator/v10 v10.15.5/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-rod/rod v0.114.4 h1:FpkNFukjCuZLwnoLs+S9aCL95o/EMec6M+41UmvQay8=
github.com/go-rod/rod v0.114.5 h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
@ -975,8 +975,8 @@ github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ
github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48=
github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s=
github.com/sigstore/sigstore v1.7.6 h1:zB0woXx+3Bp7dk7AjklHF1VhXBdCs84VXkZbp0IHLv8=
github.com/sigstore/sigstore v1.7.6/go.mod h1:FJE+NpEZIs4QKqZl4B2RtaVLVDcDtocAwTiNlexeBkY=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@ -1044,7 +1044,6 @@ github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
@ -1093,8 +1092,10 @@ github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
@ -1128,8 +1129,8 @@ go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3C
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.mongodb.org/mongo-driver v1.13.0 h1:67DgFFjYOCMWdtTEmKFpV3ffWlFnh+CYZ8ZS/tXWUfY=
go.mongodb.org/mongo-driver v1.13.0/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=


@ -45,6 +45,4 @@ type ImageEngine interface { //nolint:interfacebloat
FarmNodeName(ctx context.Context) string
FarmNodeDriver(ctx context.Context) string
FarmNodeInspect(ctx context.Context) (*FarmInspectReport, error)
PullToFile(ctx context.Context, options PullToFileOptions) (string, error)
PullToLocal(ctx context.Context, options PullToLocalOptions) (string, error)
}


@ -492,19 +492,3 @@ type FarmInspectReport struct {
Arch string
Variant string
}
// PullToFileOptions are the options for pulling the images from farm
// nodes into a dir
type PullToFileOptions struct {
ImageID string
SaveFormat string
SaveFile string
}
// PullToLocalOptions are the options for pulling the images from farm
// nodes into containers-storage
type PullToLocalOptions struct {
ImageID string
SaveFormat string
Destination ImageEngine
}


@ -131,6 +131,10 @@ type BuildReport struct {
type FarmBuildOptions struct {
// Cleanup removes built images from farm nodes on success
Cleanup bool
// Authfile is the path to the file holding registry credentials
Authfile string
// SkipTLSVerify skips tls verification when set to true
SkipTLSVerify bool
}
type IDOrNameResponse struct {


@ -5,12 +5,10 @@ package abi
import (
"context"
"fmt"
"os"
"strings"
"github.com/containers/buildah/pkg/parse"
lplatform "github.com/containers/common/libimage/platform"
istorage "github.com/containers/image/v5/storage"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/containers/podman/v4/pkg/emulation"
)
@ -56,64 +54,3 @@ func (ir *ImageEngine) FarmNodeInspect(ctx context.Context) (*entities.FarmInspe
Arch: ir.arch,
Variant: ir.variant}, ir.platformsErr
}
// PullToFile pulls the image from the remote engine and saves it to a file,
// returning a string-format reference which can be parsed by containers/image.
func (ir *ImageEngine) PullToFile(ctx context.Context, options entities.PullToFileOptions) (reference string, err error) {
saveOptions := entities.ImageSaveOptions{
Format: options.SaveFormat,
Output: options.SaveFile,
}
if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
}
return options.SaveFormat + ":" + options.SaveFile, nil
}
// PullToFile pulls the image from the remote engine and saves it to the local
// engine passed in via options, returning a string-format reference which can
// be parsed by containers/image.
func (ir *ImageEngine) PullToLocal(ctx context.Context, options entities.PullToLocalOptions) (reference string, err error) {
destination := options.Destination
if destination == nil {
return "", fmt.Errorf("destination not given, cannot pull image %q", options.ImageID)
}
// Check if the image is already present at destination
var br *entities.BoolReport
br, err = destination.Exists(ctx, options.ImageID)
if err != nil {
return "", err
}
if br.Value {
return istorage.Transport.Name() + ":" + options.ImageID, nil
}
tempFile, err := os.CreateTemp("", "")
if err != nil {
return "", err
}
defer os.Remove(tempFile.Name())
defer tempFile.Close()
saveOptions := entities.ImageSaveOptions{
Format: options.SaveFormat,
Output: tempFile.Name(),
}
// Save image built on builder in a temp file
if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
}
// Load the image saved in tempFile into the local engine
loadOptions := entities.ImageLoadOptions{
Input: tempFile.Name(),
}
_, err = destination.Load(ctx, loadOptions)
if err != nil {
return "", err
}
return istorage.Transport.Name() + ":" + options.ImageID, nil
}


@ -39,6 +39,8 @@ import (
"github.com/sirupsen/logrus"
)
const UnknownDigestSuffix = docker.UnknownDigestSuffix
func (ir *ImageEngine) Exists(_ context.Context, nameOrID string) (*entities.BoolReport, error) {
exists, err := ir.Libpod.LibimageRuntime().Exists(nameOrID)
if err != nil {


@ -2,11 +2,8 @@ package tunnel
import (
"context"
"errors"
"fmt"
"os"
istorage "github.com/containers/image/v5/storage"
"github.com/containers/podman/v4/pkg/bindings/system"
"github.com/containers/podman/v4/pkg/domain/entities"
)
@ -47,47 +44,3 @@ func (ir *ImageEngine) FarmNodeInspect(ctx context.Context) (*entities.FarmInspe
Arch: ir.arch,
Variant: ir.variant}, ir.platformsErr
}
// PullToFile pulls the image from the remote engine and saves it to a file,
// returning a string-format reference which can be parsed by containers/image.
func (ir *ImageEngine) PullToFile(ctx context.Context, options entities.PullToFileOptions) (reference string, err error) {
saveOptions := entities.ImageSaveOptions{
Format: options.SaveFormat,
Output: options.SaveFile,
}
if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
return "", fmt.Errorf("saving image %q: %w", options.ImageID, err)
}
return options.SaveFormat + ":" + options.SaveFile, nil
}
// PullToLocal pulls the image from the remote engine and saves it to the local
// engine passed in via options, returning a string-format reference which can
// be parsed by containers/image.
func (ir *ImageEngine) PullToLocal(ctx context.Context, options entities.PullToLocalOptions) (reference string, err error) {
tempFile, err := os.CreateTemp("", "")
if err != nil {
return "", err
}
defer os.Remove(tempFile.Name())
defer tempFile.Close()
saveOptions := entities.ImageSaveOptions{
Format: options.SaveFormat,
Output: tempFile.Name(),
}
if err := ir.Save(ctx, options.ImageID, nil, saveOptions); err != nil {
return "", fmt.Errorf("saving image %q to temporary file: %w", options.ImageID, err)
}
loadOptions := entities.ImageLoadOptions{
Input: tempFile.Name(),
}
if options.Destination == nil {
return "", errors.New("internal error: options.Destination not set")
} else {
if _, err = options.Destination.Load(ctx, loadOptions); err != nil {
return "", fmt.Errorf("loading image %q: %w", options.ImageID, err)
}
}
name := fmt.Sprintf("%s:%s", istorage.Transport.Name(), options.ImageID)
return name, err
}


@ -32,7 +32,7 @@ type Schedule struct {
platformBuilders map[string]string // target->connection
}
func newFarmWithBuilders(_ context.Context, name string, destinations *map[string]config.Destination, localEngine entities.ImageEngine) (*Farm, error) {
func newFarmWithBuilders(_ context.Context, name string, destinations *map[string]config.Destination, localEngine entities.ImageEngine, buildLocal bool) (*Farm, error) {
farm := &Farm{
builders: make(map[string]entities.ImageEngine),
localEngine: localEngine,
@ -66,7 +66,7 @@ func newFarmWithBuilders(_ context.Context, name string, destinations *map[strin
})
}
// If local=true then use the local machine for builds as well
if localEngine != nil {
if buildLocal {
builderGroup.Go(func() error {
fmt.Println("Setting up local builder")
defer fmt.Println("Local builder ready")
@ -88,14 +88,14 @@ func newFarmWithBuilders(_ context.Context, name string, destinations *map[strin
return nil, errors.New("no builders configured")
}
func NewFarm(ctx context.Context, name string, localEngine entities.ImageEngine) (*Farm, error) {
func NewFarm(ctx context.Context, name string, localEngine entities.ImageEngine, buildLocal bool) (*Farm, error) {
// Get the destinations of the connections specified in the farm
destinations, err := getFarmDestinations(name)
if err != nil {
return nil, err
}
return newFarmWithBuilders(ctx, name, &destinations, localEngine)
return newFarmWithBuilders(ctx, name, &destinations, localEngine, buildLocal)
}
// Done performs any necessary end-of-process cleanup for the farm's members.
@ -315,7 +315,7 @@ func (f *Farm) Schedule(ctx context.Context, platforms []string) (Schedule, erro
// Build runs a build using the specified targetplatform:service map. If all
// builds succeed, it copies the resulting images from the remote hosts to the
// local service and builds a manifest list with the specified reference name.
func (f *Farm) Build(ctx context.Context, schedule Schedule, options entities.BuildOptions, reference string) error {
func (f *Farm) Build(ctx context.Context, schedule Schedule, options entities.BuildOptions, reference string, localEngine entities.ImageEngine) error {
switch options.OutputFormat {
default:
return fmt.Errorf("unknown output format %q requested", options.OutputFormat)
@ -359,24 +359,13 @@ func (f *Farm) Build(ctx context.Context, schedule Schedule, options entities.Bu
})
}
// Decide where the final result will be stored.
var (
manifestListBuilder listBuilder
err error
)
listBuilderOptions := listBuilderOptions{
cleanup: options.Cleanup,
iidFile: options.IIDFile,
}
if strings.HasPrefix(reference, "dir:") || f.localEngine == nil {
location := strings.TrimPrefix(reference, "dir:")
manifestListBuilder, err = newFileManifestListBuilder(location, listBuilderOptions)
if err != nil {
return fmt.Errorf("preparing to build list: %w", err)
}
} else {
manifestListBuilder = newLocalManifestListBuilder(reference, f.localEngine, listBuilderOptions)
cleanup: options.Cleanup,
iidFile: options.IIDFile,
authfile: options.Authfile,
skipTLSVerify: options.SkipTLSVerify,
}
manifestListBuilder := newManifestListBuilder(reference, f.localEngine, listBuilderOptions)
// Start builds in parallel and wait for them all to finish.
var (


@ -3,31 +3,21 @@ package farm
import (
"context"
"fmt"
"io/fs"
"os"
"path/filepath"
"sync"
lmanifests "github.com/containers/common/libimage/manifests"
"github.com/containers/common/pkg/supplemented"
cp "github.com/containers/image/v5/copy"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports/alltransports"
"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/types"
"github.com/containers/podman/v4/pkg/domain/entities"
"github.com/hashicorp/go-multierror"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
type listBuilder interface {
build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error)
}
type listBuilderOptions struct {
cleanup bool
iidFile string
cleanup bool
iidFile string
authfile string
skipTLSVerify bool
}
type listLocal struct {
@ -38,7 +28,7 @@ type listLocal struct {
// newLocalManifestListBuilder returns a manifest list builder which saves a
// manifest list and images to local storage.
func newLocalManifestListBuilder(listName string, localEngine entities.ImageEngine, options listBuilderOptions) listBuilder {
func newManifestListBuilder(listName string, localEngine entities.ImageEngine, options listBuilderOptions) *listLocal {
return &listLocal{
listName: listName,
options: options,
@ -49,47 +39,42 @@ func newLocalManifestListBuilder(listName string, localEngine entities.ImageEngi
// Build retrieves images from the build reports and assembles them into a
// manifest list in local container storage.
func (l *listLocal) build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error) {
manifest := l.listName
exists, err := l.localEngine.ManifestExists(ctx, l.listName)
if err != nil {
return "", err
}
// Create list if it doesn't exist
if !exists.Value {
manifest, err = l.localEngine.ManifestCreate(ctx, l.listName, []string{}, entities.ManifestCreateOptions{})
_, err = l.localEngine.ManifestCreate(ctx, l.listName, []string{}, entities.ManifestCreateOptions{SkipTLSVerify: types.NewOptionalBool(l.options.skipTLSVerify)})
if err != nil {
return "", fmt.Errorf("creating manifest list %q: %w", l.listName, err)
}
}
// Pull the images into local storage
// Push the images to the registry given by the user
var (
pullGroup multierror.Group
pushGroup multierror.Group
refsMutex sync.Mutex
)
refs := []string{}
for image, engine := range images {
image, engine := image, engine
pullOptions := entities.PullToLocalOptions{
ImageID: image.ID,
SaveFormat: image.SaveFormat,
Destination: l.localEngine,
}
pullGroup.Go(func() error {
logrus.Infof("copying image %s", image.ID)
defer logrus.Infof("copied image %s", image.ID)
ref, err := engine.PullToLocal(ctx, pullOptions)
pushGroup.Go(func() error {
logrus.Infof("pushing image %s", image.ID)
defer logrus.Infof("pushed image %s", image.ID)
// Push the image to the registry
report, err := engine.Push(ctx, image.ID, l.listName+docker.UnknownDigestSuffix, entities.ImagePushOptions{Authfile: l.options.authfile, Quiet: false, SkipTLSVerify: types.NewOptionalBool(l.options.skipTLSVerify)})
if err != nil {
return fmt.Errorf("pulling image %q to local storage: %w", image, err)
return fmt.Errorf("pushing image %q to registry: %w", image, err)
}
refsMutex.Lock()
defer refsMutex.Unlock()
refs = append(refs, ref)
refs = append(refs, "docker://"+l.listName+"@"+report.ManifestDigest)
return nil
})
}
pullErrors := pullGroup.Wait()
err = pullErrors.ErrorOrNil()
pushErrors := pushGroup.Wait()
err = pushErrors.ErrorOrNil()
if err != nil {
return "", fmt.Errorf("building: %w", err)
}
@ -119,17 +104,21 @@ func (l *listLocal) build(ctx context.Context, images map[entities.BuildReport]e
// Clear the list in the event it already existed
if exists.Value {
_, err = l.localEngine.ManifestListClear(ctx, manifest)
_, err = l.localEngine.ManifestListClear(ctx, l.listName)
if err != nil {
return "", fmt.Errorf("error clearing list %q", manifest)
return "", fmt.Errorf("error clearing list %q", l.listName)
}
}
// Add the images to the list
listID, err := l.localEngine.ManifestAdd(ctx, manifest, refs, entities.ManifestAddOptions{})
listID, err := l.localEngine.ManifestAdd(ctx, l.listName, refs, entities.ManifestAddOptions{Authfile: l.options.authfile, SkipTLSVerify: types.NewOptionalBool(l.options.skipTLSVerify)})
if err != nil {
return "", fmt.Errorf("adding images %q to list: %w", refs, err)
}
_, err = l.localEngine.ManifestPush(ctx, l.listName, l.listName, entities.ImagePushOptions{Authfile: l.options.authfile, SkipTLSVerify: types.NewOptionalBool(l.options.skipTLSVerify)})
if err != nil {
return "", err
}
// Write the manifest list's ID file if we're expected to
if l.options.iidFile != "" {
@ -140,158 +129,3 @@ func (l *listLocal) build(ctx context.Context, images map[entities.BuildReport]e
return l.listName, nil
}
type listFiles struct {
directory string
options listBuilderOptions
}
// newFileManifestListBuilder returns a manifest list builder which saves a manifest
// list and images to a specified directory in the non-standard dir: format.
func newFileManifestListBuilder(directory string, options listBuilderOptions) (listBuilder, error) {
if options.iidFile != "" {
return nil, fmt.Errorf("saving to dir: format doesn't use image IDs, --iidfile not supported")
}
return &listFiles{directory: directory, options: options}, nil
}
// Build retrieves images from the build reports and assembles them into a
// manifest list in the configured directory.
func (m *listFiles) build(ctx context.Context, images map[entities.BuildReport]entities.ImageEngine) (string, error) {
listFormat := v1.MediaTypeImageIndex
imageFormat := v1.MediaTypeImageManifest
tempDir, err := os.MkdirTemp("", "")
if err != nil {
return "", err
}
defer os.RemoveAll(tempDir)
name := fmt.Sprintf("dir:%s", tempDir)
tempRef, err := alltransports.ParseImageName(name)
if err != nil {
return "", fmt.Errorf("parsing temporary image ref %q: %w", name, err)
}
if err := os.MkdirAll(m.directory, 0o755); err != nil {
return "", err
}
output, err := alltransports.ParseImageName("dir:" + m.directory)
if err != nil {
return "", fmt.Errorf("parsing output directory ref %q: %w", "dir:"+m.directory, err)
}
// Pull the images into the temporary directory
var (
pullGroup multierror.Group
pullErrors *multierror.Error
refsMutex sync.Mutex
)
refs := make(map[entities.BuildReport]types.ImageReference)
for image, engine := range images {
image, engine := image, engine
tempFile, err := os.CreateTemp(tempDir, "archive-*.tar")
if err != nil {
defer func() {
pullErrors = pullGroup.Wait()
}()
perr := pullErrors.ErrorOrNil()
if perr != nil {
return "", perr
}
return "", err
}
defer tempFile.Close()
pullGroup.Go(func() error {
logrus.Infof("copying image %s", image.ID)
defer logrus.Infof("copied image %s", image.ID)
pullOptions := entities.PullToFileOptions{
ImageID: image.ID,
SaveFormat: image.SaveFormat,
SaveFile: tempFile.Name(),
}
if image.SaveFormat == manifest.DockerV2Schema2MediaType {
listFormat = manifest.DockerV2ListMediaType
imageFormat = manifest.DockerV2Schema2MediaType
}
reference, err := engine.PullToFile(ctx, pullOptions)
if err != nil {
return fmt.Errorf("pulling image %q to temporary directory: %w", image, err)
}
ref, err := alltransports.ParseImageName(reference)
if err != nil {
return fmt.Errorf("pulling image %q to temporary directory: %w", image, err)
}
refsMutex.Lock()
defer refsMutex.Unlock()
refs[image] = ref
return nil
})
}
pullErrors = pullGroup.Wait()
err = pullErrors.ErrorOrNil()
if err != nil {
return "", fmt.Errorf("building: %w", err)
}
if m.options.cleanup {
var rmGroup multierror.Group
for image, engine := range images {
image, engine := image, engine
rmGroup.Go(func() error {
_, err := engine.Remove(ctx, []string{image.ID}, entities.ImageRemoveOptions{})
if len(err) > 0 {
return err[0]
}
return nil
})
}
rmErrors := rmGroup.Wait()
if rmErrors != nil {
if err = rmErrors.ErrorOrNil(); err != nil {
return "", fmt.Errorf("removing intermediate images: %w", err)
}
}
}
supplemental := []types.ImageReference{}
var sys types.SystemContext
// Create a manifest list
list := lmanifests.Create()
// Add the images to the list
for image, ref := range refs {
if _, err = list.Add(ctx, &sys, ref, true); err != nil {
return "", fmt.Errorf("adding image %q to list: %w", image.ID, err)
}
supplemental = append(supplemental, ref)
}
// Save the list to the temporary directory to be the main manifest
listBytes, err := list.Serialize(listFormat)
if err != nil {
return "", fmt.Errorf("serializing manifest list: %w", err)
}
if err = os.WriteFile(filepath.Join(tempDir, "manifest.json"), listBytes, fs.FileMode(0o600)); err != nil {
return "", fmt.Errorf("writing temporary manifest list: %w", err)
}
// Now copy everything to the final dir: location
defaultPolicy, err := signature.DefaultPolicy(&sys)
if err != nil {
return "", err
}
policyContext, err := signature.NewPolicyContext(defaultPolicy)
if err != nil {
return "", err
}
input := supplemented.Reference(tempRef, supplemental, cp.CopyAllImages, nil)
copyOptions := cp.Options{
ForceManifestMIMEType: imageFormat,
ImageListSelection: cp.CopyAllImages,
}
_, err = cp.Image(ctx, policyContext, output, input, &copyOptions)
if err != nil {
return "", fmt.Errorf("copying images to dir:%q: %w", m.directory, err)
}
return "dir:" + m.directory, nil
}
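Condensed into one sequential function, the new list-builder flow above looks roughly like this. It is a sketch only: the real code pushes in parallel, creates or clears the manifest list as needed, and keys the image map by `entities.BuildReport`; the `pushAndAssemble` name, the package name, and the plain image-ID map are assumptions for illustration. The engine calls and option structs are the ones shown in the diff.

```go
// A condensed, sequential sketch of the push-based list builder above.
package farmsketch

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/types"
	"github.com/containers/podman/v4/pkg/domain/entities"
)

// pushAndAssemble pushes every built image to the registry encoded in
// listName, then records the returned digests in a local manifest list and
// pushes that list as well.
func pushAndAssemble(ctx context.Context, listName, authfile string, skipTLS bool,
	local entities.ImageEngine, images map[string]entities.ImageEngine) (string, error) {
	pushOpts := entities.ImagePushOptions{
		Authfile:      authfile,
		SkipTLSVerify: types.NewOptionalBool(skipTLS),
	}
	refs := []string{}
	for imageID, engine := range images {
		// Pushing to listName+"@@unknown-digest@@" stores the image in the
		// repository without a tag; the digest comes back in the report.
		report, err := engine.Push(ctx, imageID, listName+docker.UnknownDigestSuffix, pushOpts)
		if err != nil {
			return "", fmt.Errorf("pushing image %q to registry: %w", imageID, err)
		}
		refs = append(refs, "docker://"+listName+"@"+report.ManifestDigest)
	}
	addOpts := entities.ManifestAddOptions{Authfile: authfile, SkipTLSVerify: types.NewOptionalBool(skipTLS)}
	if _, err := local.ManifestAdd(ctx, listName, refs, addOpts); err != nil {
		return "", fmt.Errorf("adding images %q to list: %w", refs, err)
	}
	if _, err := local.ManifestPush(ctx, listName, listName, pushOpts); err != nil {
		return "", err
	}
	return listName, nil
}
```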


@ -5,7 +5,6 @@
load helpers.bash
@test "farm - check farm has been created" {
run_podman farm ls
assert "$output" =~ $FARMNAME
@ -17,18 +16,20 @@ load helpers.bash
empty_farm="empty-farm"
# create an empty farm
run_podman farm create $empty_farm
run_podman farm build --farm $empty_farm -t $iname $PODMAN_TMPDIR
run_podman farm build --farm $empty_farm --authfile $AUTHFILE --tls-verify=false -t $REGISTRY/$iname $FARM_TMPDIR
assert "$output" =~ "Local builder ready"
# get the system architecture
run_podman info --format '{{.Host.Arch}}'
ARCH=$output
# inspect manifest list built and saved in local containers-storage
# FIXME: use --format?
run_podman manifest inspect $iname
assert "$output" =~ $ARCH
run_podman images -a
echo "# skopeo inspect ..."
run skopeo inspect "$@" --tls-verify=false --authfile $AUTHFILE docker://$REGISTRY/$iname
echo "$output"
is "$status" "0" "skopeo inspect - exit status"
# FIXME-someday: why do we need the prune?
run_podman manifest rm $iname
@ -37,18 +38,19 @@ load helpers.bash
@test "farm - build on farm node only with --cleanup" {
iname="test-image-2"
run_podman farm build --cleanup --local=false -t $iname $PODMAN_TMPDIR
run_podman farm build --cleanup --local=false --authfile $AUTHFILE --tls-verify=false -t $REGISTRY/$iname $FARM_TMPDIR
assert "$output" =~ "Farm \"$FARMNAME\" ready"
# get the system architecture
run_podman info --format '{{.Host.Arch}}'
ARCH=$output
# inspect manifest list built and saved in dir
# FIXME FIXME FIXME! #20505: do not write anything under cwd
ls -l $iname
# inspect manifest list built and saved in local containers-storage
run_podman manifest inspect $iname
assert "$output" =~ $ARCH
# FIXME FIXME FIXME FIXME! NEVER WRITE INTO PWD!
manifestarch=$(jq -r '.manifests[].platform.architecture' <$iname/manifest.json)
assert "$manifestarch" = "$ARCH" "arch from $iname/manifest.json"
echo "# skopeo inspect ..."
run skopeo inspect "$@" --tls-verify=false --authfile $AUTHFILE docker://$REGISTRY/$iname
echo "$output"
is "$status" "0" "skopeo inspect - exit status"
# see if we can ssh into node to check the image was cleaned up
run ssh $ROOTLESS_USER@localhost podman images --filter dangling=true --noheading
@ -58,21 +60,27 @@ load helpers.bash
run_podman images --filter dangling=true --noheading
assert "$output" = "" "podman images on local host"
run_podman manifest rm $iname
run_podman image prune -f
}
@test "farm - build on farm node and local" {
iname="test-image-3"
run_podman farm build -t $iname $PODMAN_TMPDIR
run_podman farm build --authfile $AUTHFILE --tls-verify=false -t $REGISTRY/$iname $FARM_TMPDIR
assert "$output" =~ "Farm \"$FARMNAME\" ready"
# get the system architecture
run_podman info --format '{{.Host.Arch}}'
ARCH=$output
# inspect manifest list built and saved in dir
# inspect manifest list built and saved
run_podman manifest inspect $iname
assert "$output" =~ $ARCH
echo "# skopeo inspect ..."
run skopeo inspect "$@" --tls-verify=false --authfile $AUTHFILE docker://$REGISTRY/$iname
echo "$output"
is "$status" "0" "skopeo inspect - exit status"
run_podman manifest rm $iname
run_podman image prune -f
}
@ -81,15 +89,21 @@ load helpers.bash
@test "farm - build on farm node only (podman-remote)" {
iname="test-image-4"
run_podman --remote farm build -t $iname $PODMAN_TMPDIR
run_podman --remote farm build --authfile $AUTHFILE --tls-verify=false -t $REGISTRY/$iname $FARM_TMPDIR
assert "$output" =~ "Farm \"$FARMNAME\" ready"
# get the system architecture
run_podman --remote info --format '{{.Host.Arch}}'
ARCH=$output
# inspect manifest list built and saved in dir
manifestarch=$(jq -r '.manifests[].platform.architecture' <$iname/manifest.json)
assert "$manifestarch" = "$ARCH" "arch from $iname/manifest.json"
# inspect manifest list built and saved
run_podman manifest inspect $iname
assert "$output" =~ $ARCH
echo "# skopeo inspect ..."
run skopeo inspect "$@" --tls-verify=false --authfile $AUTHFILE docker://$REGISTRY/$iname
echo "$output"
is "$status" "0" "skopeo inspect - exit status"
run_podman manifest rm $iname
run_podman image prune -f
}


@ -2,11 +2,13 @@
load ../system/helpers.bash
export FARM_TMPDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-/tmp} podman_bats.XXXXXX)
function setup(){
basic_setup
# Always create the same containerfile
cat >$PODMAN_TMPDIR/Containerfile <<EOF
cat >$FARM_TMPDIR/Containerfile <<EOF
FROM $IMAGE
RUN arch | tee /arch.txt
RUN date | tee /built.txt


@ -3,6 +3,9 @@
bats_require_minimum_version 1.8.0
load helpers
load ../system/helpers
load ../system/helpers.registry
load ../system/helpers.network
function setup_suite(){
if [[ -z "$ROOTLESS_USER" ]]; then
@ -32,9 +35,29 @@ function setup_suite(){
# only set up the podman farm before the first test
run_podman system connection add --identity $sshkey test-node $ROOTLESS_USER@localhost
run_podman farm create $FARMNAME test-node
export PODMAN_LOGIN_WORKDIR=$(mktemp -d --tmpdir=${BATS_TMPDIR:-${TMPDIR:-/tmp}} podman-bats-registry.XXXXXX)
export PODMAN_LOGIN_USER="user$(random_string 4)"
export PODMAN_LOGIN_PASS="pw$(random_string 15)"
# FIXME: racy! It could be many minutes between now and when we start it.
# To mitigate, we use a range not used anywhere else in system tests.
export PODMAN_LOGIN_REGISTRY_PORT=$(random_free_port 42000-42999)
# create a local registry to push images to
export REGISTRY=localhost:${PODMAN_LOGIN_REGISTRY_PORT}
export AUTHFILE=$FARM_TMPDIR/authfile.json
start_registry
run_podman login --authfile=$AUTHFILE \
--tls-verify=false \
--username ${PODMAN_LOGIN_USER} \
--password ${PODMAN_LOGIN_PASS} \
$REGISTRY
}
function teardown_suite(){
# clear out the farms after the last farm test
run_podman farm rm --all
stop_registry
}


@ -20,6 +20,7 @@ import (
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
chunkedToc "github.com/containers/storage/pkg/chunked/toc"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
@ -694,6 +695,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
requiredCompression = ic.compressionFormat
originalCompression = srcInfo.CompressionAlgorithm
}
// Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest.
tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
if err != nil {
return types.BlobInfo{}, "", err
}
reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
Cache: ic.c.blobInfoCache,
CanSubstitute: canSubstitute,
@ -702,6 +710,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
SrcRef: srcRef,
RequiredCompression: requiredCompression,
OriginalCompression: originalCompression,
TOCDigest: tocDigest,
})
if err != nil {
return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)


@ -123,6 +123,9 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
if !ok {
return "", errors.New("ref must be a dockerReference")
}
if dr.isUnknownDigest {
return "", fmt.Errorf("docker: reference %q is for unknown digest case; cannot get digest", dr.StringWithinTransport())
}
tagOrDigest, err := dr.tagOrDigest()
if err != nil {


@ -452,7 +452,15 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error {
var refTail string
if instanceDigest != nil {
// If d.ref.isUnknownDigest=true, then we push without a tag, so get the
// digest that will be used
if d.ref.isUnknownDigest {
digest, err := manifest.Digest(m)
if err != nil {
return err
}
refTail = digest.String()
} else if instanceDigest != nil {
// If the instanceDigest is provided, then use it as the refTail, because the reference,
// whether it includes a tag or a digest, refers to the list as a whole, and not this
// particular instance.
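A small illustration of how that refTail is derived in the unknown-digest case: the serialized manifest is digested and that digest is used in place of a tag. The sample manifest bytes below are made up; `manifest.Digest` is the same helper used above.

```go
// Illustration only: derive the digest that replaces the tag when pushing an
// unknown-digest reference. The manifest bytes here are a stand-in.
package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// In PutManifest these are the serialized manifest bytes about to be uploaded.
	m := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.oci.image.manifest.v1+json"}`)
	dgst, err := manifest.Digest(m)
	if err != nil {
		panic(err)
	}
	// The upload goes to .../manifests/<digest> rather than .../manifests/<tag>.
	fmt.Println("refTail:", dgst.String())
}
```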


@ -38,8 +38,8 @@ type dockerImageSource struct {
impl.DoesNotAffectLayerInfosForCopy
stubs.ImplementsGetBlobAt
logicalRef dockerReference // The reference the user requested.
physicalRef dockerReference // The actual reference we are accessing (possibly a mirror)
logicalRef dockerReference // The reference the user requested. This must satisfy !isUnknownDigest
physicalRef dockerReference // The actual reference we are accessing (possibly a mirror). This must satisfy !isUnknownDigest
c *dockerClient
// State
cachedManifest []byte // nil if not loaded yet
@ -48,7 +48,12 @@ type dockerImageSource struct {
// newImageSource creates a new ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
// The caller must ensure !ref.isUnknownDigest.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
if ref.isUnknownDigest {
return nil, fmt.Errorf("reading images from docker: reference %q without a tag or digest is not supported", ref.StringWithinTransport())
}
registryConfig, err := loadRegistryConfiguration(sys)
if err != nil {
return nil, err
@ -121,7 +126,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
// The caller must call .Close() on the returned ImageSource.
func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource,
registryConfig *registryConfiguration) (*dockerImageSource, error) {
physicalRef, err := newReference(pullSource.Reference)
physicalRef, err := newReference(pullSource.Reference, false)
if err != nil {
return nil, err
}
@ -591,6 +596,10 @@ func (s *dockerImageSource) getSignaturesFromSigstoreAttachments(ctx context.Con
// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
if ref.isUnknownDigest {
return fmt.Errorf("Docker reference without a tag or digest cannot be deleted")
}
registryConfig, err := loadRegistryConfiguration(sys)
if err != nil {
return err


@ -12,6 +12,11 @@ import (
"github.com/containers/image/v5/types"
)
// UnknownDigestSuffix can be appended to a reference when the caller
// wants to push an image without a tag or digest.
// NewReferenceUnknownDigest() is called when this const is detected.
const UnknownDigestSuffix = "@@unknown-digest@@"
func init() {
transports.Register(Transport)
}
@ -43,7 +48,8 @@ func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
// dockerReference is an ImageReference for Docker images.
type dockerReference struct {
ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
ref reference.Named // By construction we know that !reference.IsNameOnly(ref) unless isUnknownDigest=true
isUnknownDigest bool
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
@ -51,23 +57,46 @@ func ParseReference(refString string) (types.ImageReference, error) {
if !strings.HasPrefix(refString, "//") {
return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
}
// Check if ref has UnknownDigestSuffix suffixed to it
unknownDigest := false
if strings.HasSuffix(refString, UnknownDigestSuffix) {
unknownDigest = true
refString = strings.TrimSuffix(refString, UnknownDigestSuffix)
}
ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
if err != nil {
return nil, err
}
if unknownDigest {
if !reference.IsNameOnly(ref) {
return nil, fmt.Errorf("docker: image reference %q has unknown digest set but it contains either a tag or digest", ref.String()+UnknownDigestSuffix)
}
return NewReferenceUnknownDigest(ref)
}
ref = reference.TagNameOnly(ref)
return NewReference(ref)
}
// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) {
return newReference(ref)
return newReference(ref, false)
}
// NewReferenceUnknownDigest returns a Docker reference for a named reference, which can be used to write images without setting
// a tag on the registry. The reference must satisfy reference.IsNameOnly()
func NewReferenceUnknownDigest(ref reference.Named) (types.ImageReference, error) {
return newReference(ref, true)
}
// newReference returns a dockerReference for a named reference.
func newReference(ref reference.Named) (dockerReference, error) {
if reference.IsNameOnly(ref) {
return dockerReference{}, fmt.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
func newReference(ref reference.Named, unknownDigest bool) (dockerReference, error) {
if reference.IsNameOnly(ref) && !unknownDigest {
return dockerReference{}, fmt.Errorf("Docker reference %s is not for an unknown digest case; tag or digest is needed", reference.FamiliarString(ref))
}
if !reference.IsNameOnly(ref) && unknownDigest {
return dockerReference{}, fmt.Errorf("Docker reference %s is for an unknown digest case but reference has a tag or digest", reference.FamiliarString(ref))
}
// A github.com/distribution/reference value can have a tag and a digest at the same time!
// The docker/distribution API does not really support that (we can't ask for an image with a specific
@ -81,7 +110,8 @@ func newReference(ref reference.Named) (dockerReference, error) {
}
return dockerReference{
ref: ref,
ref: ref,
isUnknownDigest: unknownDigest,
}, nil
}
@ -95,7 +125,11 @@ func (ref dockerReference) Transport() types.ImageTransport {
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dockerReference) StringWithinTransport() string {
return "//" + reference.FamiliarString(ref.ref)
famString := "//" + reference.FamiliarString(ref.ref)
if ref.isUnknownDigest {
return famString + UnknownDigestSuffix
}
return famString
}
// DockerReference returns a Docker reference associated with this reference
@ -113,6 +147,9 @@ func (ref dockerReference) DockerReference() reference.Named {
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref dockerReference) PolicyConfigurationIdentity() string {
if ref.isUnknownDigest {
return ref.ref.Name()
}
res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
@ -126,7 +163,13 @@ func (ref dockerReference) PolicyConfigurationIdentity() string {
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref dockerReference) PolicyConfigurationNamespaces() []string {
return policyconfiguration.DockerReferenceNamespaces(ref.ref)
namespaces := policyconfiguration.DockerReferenceNamespaces(ref.ref)
if ref.isUnknownDigest {
if len(namespaces) != 0 && namespaces[0] == ref.ref.Name() {
namespaces = namespaces[1:]
}
}
return namespaces
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
@ -163,6 +206,10 @@ func (ref dockerReference) tagOrDigest() (string, error) {
if ref, ok := ref.ref.(reference.NamedTagged); ok {
return ref.Tag(), nil
}
if ref.isUnknownDigest {
return "", fmt.Errorf("Docker reference %q is for an unknown digest case, has neither a digest nor a tag", reference.FamiliarString(ref.ref))
}
// This should not happen, NewReference above refuses reference.IsNameOnly values.
return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
}
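Putting the new reference form together, here is a short example of the two entry points added above, `ParseReference` with the suffix and `NewReferenceUnknownDigest`. It assumes a containers/image build that includes these changes; the registry and repository names are placeholders.

```go
// Example use of the new unknown-digest reference form.
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// Parsing: the "@@unknown-digest@@" suffix is only valid on a name-only
	// reference (no tag, no digest).
	ref, err := docker.ParseReference("//registry.example.com/demo/app" + docker.UnknownDigestSuffix)
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.StringWithinTransport()) // //registry.example.com/demo/app@@unknown-digest@@

	// Constructing directly from a parsed name-only reference.
	named, err := reference.ParseNormalizedNamed("registry.example.com/demo/app")
	if err != nil {
		panic(err)
	}
	unknown, err := docker.NewReferenceUnknownDigest(named)
	if err != nil {
		panic(err)
	}
	fmt.Println(unknown.StringWithinTransport())
}
```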


@ -36,7 +36,7 @@ type BlobInfoCache2 interface {
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//


@ -28,7 +28,7 @@ type wrapped struct {
//
// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
// with public methods, or perhaps a private interface), so that we can add methods
// without breaking any external implementors of a public interface.
// without breaking any external implementers of a public interface.
func FromPublic(dest types.ImageDestination) private.ImageDestination {
if dest2, ok := dest.(private.ImageDestination); ok {
return dest2


@ -27,7 +27,7 @@ type wrapped struct {
//
// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
// with public methods, or perhaps a private interface), so that we can add methods
// without breaking any external implementors of a public interface.
// without breaking any external implementers of a public interface.
func FromPublic(src types.ImageSource) private.ImageSource {
if src2, ok := src.(private.ImageSource); ok {
return src2


@ -117,6 +117,7 @@ type TryReusingBlobOptions struct {
EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
SrcRef reference.Named // A reference to the source image that contains the input blob.
TOCDigest *digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest.
}
// ReusedBlob is information about a blob reused in a destination.


@ -9,6 +9,7 @@ import (
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
chunkedToc "github.com/containers/storage/pkg/chunked/toc"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -235,7 +236,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
}
// ImageID computes an ID which can uniquely identify this image by its contents.
func (m *OCI1) ImageID([]digest.Digest) (string, error) {
func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
// The way m.Config.Digest “uniquely identifies” an image is
// by containing RootFS.DiffIDs, which identify the layers of the image.
// For non-image artifacts, we can't expect the config to change
@ -259,9 +260,44 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}
// If there is any layer that is using partial content, we calculate the image ID
// in a different way since the diffID cannot be validated as for regular pulled images.
for _, layer := range m.Layers {
toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
if err != nil {
return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err)
}
if toc != nil {
return m.calculateImageIDForPartialImage(diffIDs)
}
}
return m.Config.Digest.Hex(), nil
}
func (m *OCI1) calculateImageIDForPartialImage(diffIDs []digest.Digest) (string, error) {
newID := digest.Canonical.Digester()
for i, layer := range m.Layers {
diffID := diffIDs[i]
_, err := newID.Hash().Write([]byte(diffID.Hex()))
if err != nil {
return "", fmt.Errorf("error writing diffID %q: %w", diffID, err)
}
toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
if err != nil {
return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err)
}
if toc != nil {
_, err = newID.Hash().Write([]byte(toc.Hex()))
if err != nil {
return "", fmt.Errorf("error writing TOC %q: %w", toc, err)
}
}
}
return newID.Digest().Hex(), nil
}
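For context, deriving an image ID from a raw manifest goes through this method; a small illustrative sketch, assuming the existing OCI1FromManifest parser in this package (imageIDFromRawManifest is a made-up helper name):

package example // illustrative sketch only

import (
	"github.com/containers/image/v5/manifest"
	"github.com/opencontainers/go-digest"
)

// imageIDFromRawManifest parses a raw OCI manifest and derives the image ID; when any
// layer carries a TOC annotation, ImageID transparently switches to the diffID+TOC
// based calculation shown above.
func imageIDFromRawManifest(rawManifest []byte, diffIDs []digest.Digest) (string, error) {
	m, err := manifest.OCI1FromManifest(rawManifest)
	if err != nil {
		return "", err
	}
	return m.ImageID(diffIDs) // diffIDs: one digest per layer, in order
}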
// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted

View File

@ -91,11 +91,11 @@ func min(a, b int) int {
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
// number of entries to limit for known and unknown location separately, only to make testing simpler.
// TODO: following function is not destructive any more in the nature instead priortized result is actually copies of the original
// TODO: following function is not destructive any more in the nature instead prioritized result is actually copies of the original
// candidate set, so in the future we might want to rename this public API and remove the destructive prefix.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
// split unknown candidates and known candidates
// and limit them seperately.
// and limit them separately.
var knownLocationCandidates []CandidateWithTime
var unknownLocationCandidates []CandidateWithTime
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should

View File

@ -184,7 +184,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {

View File

@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
// dbTransaction calls fn within a read-write transaction in db.
func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive dicussion.
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
var zeroRes T // A zero value of T
@ -496,7 +496,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//

View File

@ -23,6 +23,6 @@ func (f *fulcioTrustRoot) validate() error {
func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
return nil, errors.New("fulcio diabled at compile-time")
return nil, errors.New("fulcio disabled at compile-time")
}

View File

@ -77,13 +77,13 @@ type storageImageDestination struct {
indexToStorageID map[int]*string
// All accesses to below data are protected by `lock` which is made
// *explicit* in the code.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
uncompressedOrTocDigest map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs or TOC IDs.
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
}
// addedLayerInfo records data about a layer to use in this image.
@ -117,18 +117,18 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
HasThreadSafePutBlob: true,
}),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
uncompressedOrTocDigest: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
}
dest.Compat = impl.AddCompat(dest)
return dest, nil
@ -227,7 +227,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
// Record information about the blob.
s.lock.Lock()
s.blobDiffIDs[blobDigest] = diffID.Digest()
s.uncompressedOrTocDigest[blobDigest] = diffID.Digest()
s.fileSizes[blobDigest] = counter.Count
s.filenames[blobDigest] = filename
s.lock.Unlock()
@ -289,7 +289,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
blobDigest := srcInfo.Digest
s.lock.Lock()
s.blobDiffIDs[blobDigest] = blobDigest
s.uncompressedOrTocDigest[blobDigest] = blobDigest
s.fileSizes[blobDigest] = 0
s.filenames[blobDigest] = ""
s.diffOutputs[blobDigest] = out
@ -321,7 +321,7 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,
})
}
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata.
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.uncompressedOrTocDigest and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
// lock the entire method as it executes fairly quickly
@ -335,7 +335,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
} else if err == nil {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[digest] = aLayer.UncompressedDigest()
s.uncompressedOrTocDigest[digest] = aLayer.UncompressedDigest()
s.blobAdditionalLayer[digest] = aLayer
return true, private.ReusedBlob{
Digest: digest,
@ -366,7 +366,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
}
if len(layers) > 0 {
// Save this for completeness.
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: layers[0].UncompressedSize,
@ -380,7 +380,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: layers[0].CompressedSize,
@ -398,7 +398,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
}
if len(layers) > 0 {
if size != -1 {
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: size,
@ -407,7 +407,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
if !options.CanSubstitute {
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
}
s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
s.uncompressedOrTocDigest[uncompressedDigest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: uncompressedDigest,
Size: layers[0].UncompressedSize,
@ -416,6 +416,25 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
}
}
tocDigest := digest
if options.TOCDigest != nil {
tocDigest = *options.TOCDigest
}
// Check if we have a chunked layer in storage with the same TOC digest.
layers, err = s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, tocDigest, err)
}
if len(layers) > 0 {
// Save this for completeness.
s.uncompressedOrTocDigest[digest] = layers[0].TOCDigest
return true, private.ReusedBlob{
Digest: layers[0].TOCDigest,
Size: layers[0].UncompressedSize,
}, nil
}
// Nope, we don't have it.
return false, private.ReusedBlob{}, nil
}
@ -438,16 +457,20 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
continue
}
blobSum := m.FSLayers[i].BlobSum
diffID, ok := s.blobDiffIDs[blobSum]
diffID, ok := s.uncompressedOrTocDigest[blobSum]
if !ok {
logrus.Infof("error looking up diffID for layer %q", blobSum.String())
return ""
}
diffIDs = append([]digest.Digest{diffID}, diffIDs...)
}
case *manifest.Schema2, *manifest.OCI1:
// We know the ID calculation for these formats doesn't actually use the diffIDs,
// so we don't need to populate the diffID list.
case *manifest.Schema2:
// We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate
// the diffID list.
case *manifest.OCI1:
for _, l := range m.Layers {
diffIDs = append(diffIDs, l.Digest)
}
default:
return ""
}
@ -518,7 +541,7 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
}
s.lock.Unlock()
// Note: commitLayer locks on-demand.
if err := s.commitLayer(index, info, -1); err != nil {
if stopQueue, err := s.commitLayer(index, info, -1); stopQueue || err != nil {
return err
}
s.lock.Lock()
@ -532,18 +555,32 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
return nil
}
// getDiffIDOrTOCDigest returns the diffID for the specified digest or the digest for the TOC, if known.
func (s *storageImageDestination) getDiffIDOrTOCDigest(uncompressedDigest digest.Digest) (digest.Digest, bool) {
s.lock.Lock()
defer s.lock.Unlock()
if d, found := s.diffOutputs[uncompressedDigest]; found {
return d.TOCDigest, found
}
d, found := s.uncompressedOrTocDigest[uncompressedDigest]
return d, found
}
// commitLayer commits the specified layer with the given index to the storage.
// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
// size can usually be -1; it can be provided if the layer is not known to be already present in uncompressedOrTocDigest.
//
// If the layer cannot be committed yet, the function returns (true, nil).
//
// Note that the previous layer is expected to already be committed.
//
// Caution: this function must be called without holding `s.lock`. Callers
// must guarantee that, at any given time, at most one goroutine may execute
// `commitLayer()`.
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error {
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
// Already committed? Return early.
if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
return nil
return false, nil
}
// Start with an empty string or the previous layer ID. Note that
@ -557,68 +594,96 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// Carry over the previous ID for empty non-base layers.
if info.emptyLayer {
s.indexToStorageID[index] = &lastLayer
return nil
return false, nil
}
// Check if there's already a layer with the ID that we'd give to the result of applying
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
s.lock.Lock()
diffID, haveDiffID := s.blobDiffIDs[info.digest]
s.lock.Unlock()
if !haveDiffID {
// The diffIDOrTOCDigest refers either to the DiffID or the digest of the TOC.
diffIDOrTOCDigest, haveDiffIDOrTOCDigest := s.getDiffIDOrTOCDigest(info.digest)
if !haveDiffIDOrTOCDigest {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
// that relies on using a blob digest that has never been seen by the store had better call
// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
// so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", info.digest)
logrus.Debugf("looking for diffID or TOC digest for blob %+v", info.digest)
// Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
Cache: none.NoCache,
CanSubstitute: false,
})
if err != nil {
return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
}
if !has {
return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
return false, fmt.Errorf("error determining uncompressed digest or TOC digest for blob %q", info.digest.String())
}
diffID, haveDiffID = s.blobDiffIDs[info.digest]
if !haveDiffID {
return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String())
diffIDOrTOCDigest, haveDiffIDOrTOCDigest = s.getDiffIDOrTOCDigest(info.digest)
if !haveDiffIDOrTOCDigest {
return false, fmt.Errorf("we have blob %q, but don't know its uncompressed or TOC digest", info.digest.String())
}
}
id := diffID.Hex()
id := diffIDOrTOCDigest.Hex()
if lastLayer != "" {
id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffIDOrTOCDigest.Hex())).Hex()
}
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
lastLayer = layer.ID
s.indexToStorageID[index] = &lastLayer
return nil
return false, nil
}
s.lock.Lock()
diffOutput, ok := s.diffOutputs[info.digest]
s.lock.Unlock()
if ok {
layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
if err != nil {
return err
if s.manifest == nil {
logrus.Debugf("Skipping commit for TOC=%q, manifest not yet available", id)
return true, nil
}
// FIXME: what to do with the uncompressed digest?
diffOutput.UncompressedDigest = info.digest
man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
if err != nil {
return false, fmt.Errorf("parsing manifest: %w", err)
}
if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
cb, err := s.getConfigBlob(man.ConfigInfo())
if err != nil {
return false, err
}
// retrieve the expected uncompressed digest from the config blob.
configOCI := &imgspecv1.Image{}
if err := json.Unmarshal(cb, configOCI); err != nil {
return false, err
}
if index >= len(configOCI.RootFS.DiffIDs) {
return false, fmt.Errorf("index %d out of range for configOCI.RootFS.DiffIDs", index)
}
layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
if err != nil {
return false, err
}
// let the storage layer know what was the original uncompressed layer.
flags := make(map[string]interface{})
flags[expectedLayerDiffIDFlag] = configOCI.RootFS.DiffIDs[index]
logrus.Debugf("Setting uncompressed digest to %q for layer %q", configOCI.RootFS.DiffIDs[index], id)
options := &graphdriver.ApplyDiffWithDifferOpts{
Flags: flags,
}
if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, options); err != nil {
_ = s.imageRef.transport.store.Delete(layer.ID)
return err
return false, err
}
s.indexToStorageID[index] = &layer.ID
return nil
return false, nil
}
s.lock.Lock()
@ -627,11 +692,11 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
if ok {
layer, err := al.PutAs(id, lastLayer, nil)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return fmt.Errorf("failed to put layer from digest and labels: %w", err)
return false, fmt.Errorf("failed to put layer from digest and labels: %w", err)
}
lastLayer = layer.ID
s.indexToStorageID[index] = &lastLayer
return nil
return false, nil
}
// Check if we previously cached a file with that blob's contents. If we didn't,
@ -642,7 +707,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
if !ok {
// Try to find the layer with contents matching that blobsum.
layer := ""
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffIDOrTOCDigest)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
} else {
@ -652,7 +717,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
}
}
if layer == "" {
return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
return false, fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
}
// Read the layer's contents.
noCompression := archive.Uncompressed
@ -661,17 +726,17 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
}
diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
if err2 != nil {
return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
return false, fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
}
// Copy the layer diff to a file. Diff() takes a lock that it holds
// until the ReadCloser that it returns is closed, and PutLayer() wants
// the same lock, so the diff can't just be directly streamed from one
// to the other.
filename = s.computeNextBlobCacheFile()
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0o600)
if err != nil {
diff.Close()
return fmt.Errorf("creating temporary file %q: %w", filename, err)
return false, fmt.Errorf("creating temporary file %q: %w", filename, err)
}
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using
@ -680,7 +745,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
diff.Close()
file.Close()
if err != nil {
return fmt.Errorf("storing blob to file %q: %w", filename, err)
return false, fmt.Errorf("storing blob to file %q: %w", filename, err)
}
// Make sure that we can find this file later, should we need the layer's
// contents again.
@ -691,21 +756,21 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// Read the cached blob and use it as a diff.
file, err := os.Open(filename)
if err != nil {
return fmt.Errorf("opening file %q: %w", filename, err)
return false, fmt.Errorf("opening file %q: %w", filename, err)
}
defer file.Close()
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
OriginalDigest: info.digest,
UncompressedDigest: diffID,
UncompressedDigest: diffIDOrTOCDigest,
}, file)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
return false, fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
}
s.indexToStorageID[index] = &layer.ID
return nil
return false, nil
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
@ -752,11 +817,13 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
// Extract, commit, or find the layers.
for i, blob := range layerBlobs {
if err := s.commitLayer(i, addedLayerInfo{
if stopQueue, err := s.commitLayer(i, addedLayerInfo{
digest: blob.Digest,
emptyLayer: blob.EmptyLayer,
}, blob.Size); err != nil {
return err
} else if stopQueue {
return fmt.Errorf("Internal error: storageImageDestination.Commit(): commitLayer() not ready to commit for layer %q", blob.Digest)
}
}
var lastLayer string

View File

@ -29,21 +29,33 @@ import (
"github.com/sirupsen/logrus"
)
// getBlobMutexProtected is a struct to hold the state of the getBlobMutex mutex.
type getBlobMutexProtected struct {
// digestToLayerID is a lookup map from the layer digest (either the uncompressed digest or the TOC digest) to the
// layer ID in the store.
digestToLayerID map[digest.Digest]string
// layerPosition stores where we are in reading a blob's layers
layerPosition map[digest.Digest]int
}
type storageImageSource struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.NoGetBlobAtInitialize
imageRef storageReference
image *storage.Image
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
imageRef storageReference
image *storage.Image
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions (it guards layerPosition and digestToLayerID)
getBlobMutexProtected getBlobMutexProtected
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
}
const expectedLayerDiffIDFlag = "expected-layer-diffid"
// newImageSource sets up an image for reading.
func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
// First, locate the image.
@ -62,9 +74,12 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*stora
imageRef: imageRef,
systemContext: sys,
image: img,
layerPosition: make(map[digest.Digest]int),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
getBlobMutexProtected: getBlobMutexProtected{
digestToLayerID: make(map[digest.Digest]string),
layerPosition: make(map[digest.Digest]int),
},
}
image.Compat = impl.AddCompat(image)
if img.Metadata != "" {
@ -91,6 +106,7 @@ func (s *storageImageSource) Close() error {
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
// We need a valid digest value.
digest := info.Digest
err = digest.Validate()
if err != nil {
return nil, 0, err
@ -100,10 +116,24 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
}
// Check if the blob corresponds to a diff that was used to initialize any layers. Our
// callers should try to retrieve layers using their uncompressed digests, so no need to
// check if they're using one of the compressed digests, which we can't reproduce anyway.
layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
var layers []storage.Layer
// If the digest was overridden by LayerInfosForCopy, then we need to use the TOC digest
// to retrieve it from the storage.
s.getBlobMutex.Lock()
layerID, found := s.getBlobMutexProtected.digestToLayerID[digest]
s.getBlobMutex.Unlock()
if found {
if layer, err := s.imageRef.transport.store.Layer(layerID); err == nil {
layers = []storage.Layer{*layer}
}
} else {
// Check if the blob corresponds to a diff that was used to initialize any layers. Our
// callers should try to retrieve layers using their uncompressed digests, so no need to
// check if they're using one of the compressed digests, which we can't reproduce anyway.
layers, _ = s.imageRef.transport.store.LayersByUncompressedDigest(digest)
}
// If it's not a layer, then it must be a data item.
if len(layers) == 0 {
@ -174,8 +204,8 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st
// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
// just go ahead and use the first one every time.
s.getBlobMutex.Lock()
i := s.layerPosition[digest]
s.layerPosition[digest] = i + 1
i := s.getBlobMutexProtected.layerPosition[digest]
s.getBlobMutexProtected.layerPosition[digest] = i + 1
s.getBlobMutex.Unlock()
if len(layers) > 0 {
layer = layers[i%len(layers)]
@ -267,14 +297,35 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
if err != nil {
return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
}
if layer.UncompressedDigest == "" {
return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID)
if layer.UncompressedDigest == "" && layer.TOCDigest == "" {
return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID)
}
if layer.UncompressedSize < 0 {
return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
}
blobDigest := layer.UncompressedDigest
if layer.TOCDigest != "" {
if layer.Flags == nil || layer.Flags[expectedLayerDiffIDFlag] == nil {
return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not set", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
}
if expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string); ok {
// if the layer is stored by its TOC, report the expected diffID as the layer Digest
// but store the TOC digest so we can later retrieve it from the storage.
blobDigest, err = digest.Parse(expectedDigest)
if err != nil {
return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
}
} else {
return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not a string", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
}
}
s.getBlobMutex.Lock()
s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID
s.getBlobMutex.Unlock()
blobInfo := types.BlobInfo{
Digest: layer.UncompressedDigest,
Digest: blobDigest,
Size: layer.UncompressedSize,
MediaType: uncompressedLayerType,
}
@ -384,7 +435,7 @@ func (s *storageImageSource) getSize() (int64, error) {
if err != nil {
return -1, err
}
if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || layer.UncompressedSize < 0 {
return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
}
sum += layer.UncompressedSize

View File

@ -0,0 +1,34 @@
package toc
import (
"github.com/containers/storage/pkg/chunked/internal"
digest "github.com/opencontainers/go-digest"
)
// tocJSONDigestAnnotation is the annotation key for the digest of the estargz
// TOC JSON.
// It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation
// Duplicate it here to avoid a dependency on the package.
const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
// This function retrieves a digest that represents the content of a
// table of contents (TOC) from the image's annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
if contentDigest, ok := annotations[tocJSONDigestAnnotation]; ok {
d, err := digest.Parse(contentDigest)
if err != nil {
return nil, err
}
return &d, nil
}
if contentDigest, ok := annotations[internal.ManifestChecksumKey]; ok {
d, err := digest.Parse(contentDigest)
if err != nil {
return nil, err
}
return &d, nil
}
return nil, nil
}
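A minimal usage sketch of GetTOCDigest, mirroring how the manifest and storage code above consult it (isPartialLayer is an illustrative helper, not part of this package):

package example // illustrative sketch only

import (
	"fmt"

	chunkedToc "github.com/containers/storage/pkg/chunked/toc"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// isPartialLayer reports whether a layer descriptor carries a TOC digest annotation,
// i.e. whether it can be identified by its table of contents rather than its diffID.
func isPartialLayer(layer imgspecv1.Descriptor) (bool, error) {
	toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
	if err != nil {
		return false, fmt.Errorf("looking up TOC annotation for layer %q: %w", layer.Digest, err)
	}
	return toc != nil, nil
}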

View File

@ -4,56 +4,58 @@ linters-settings:
golint:
min-confidence: 0
gocyclo:
min-complexity: 31
min-complexity: 45
maligned:
suggest-new: true
dupl:
threshold: 100
threshold: 200
goconst:
min-len: 2
min-occurrences: 4
min-occurrences: 3
linters:
enable:
- revive
- goimports
- gosec
enable-all: true
disable:
- maligned
- unparam
- unconvert
- predeclared
- prealloc
- misspell
# disable:
# - maligned
# - lll
# - gochecknoinits
# - gochecknoglobals
# - godox
# - gocognit
# - whitespace
# - wsl
# - funlen
# - wrapcheck
# - testpackage
# - nlreturn
# - gofumpt
# - goerr113
# - gci
# - gomnd
# - godot
# - exhaustivestruct
# - paralleltest
# - varnamelen
# - ireturn
# - exhaustruct
# #- thelper
issues:
exclude-rules:
- path: bson.go
text: "should be .*ObjectID"
linters:
- golint
- stylecheck
- lll
- gochecknoinits
- gochecknoglobals
- funlen
- godox
- gocognit
- whitespace
- wsl
- wrapcheck
- testpackage
- nlreturn
- gomnd
- exhaustivestruct
- goerr113
- errorlint
- nestif
- godot
- gofumpt
- paralleltest
- tparallel
- thelper
- ifshort
- exhaustruct
- varnamelen
- gci
- depguard
- errchkjson
- inamedparam
- nonamedreturns
- musttag
- ireturn
- forcetypeassert
- cyclop
# deprecated linters
- deadcode
- interfacer
- scopelint
- varcheck
- structcheck
- golint
- nosnakecase

View File

@ -1,8 +1,7 @@
# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
# Strfmt [![Build Status](https://github.com/go-openapi/strfmt/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/strfmt/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt)
[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE)
[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt)
[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt)
This package exposes a registry of data types to support string formats in the go-openapi toolkit.

View File

@ -39,10 +39,10 @@ func IsBSONObjectID(str string) bool {
// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID)
//
// swagger:strfmt bsonobjectid
type ObjectId bsonprim.ObjectID //nolint:revive
type ObjectId bsonprim.ObjectID //nolint:revive,stylecheck
// NewObjectId creates a ObjectId from a Hex String
func NewObjectId(hex string) ObjectId { //nolint:revive
func NewObjectId(hex string) ObjectId { //nolint:revive,stylecheck
oid, err := bsonprim.ObjectIDFromHex(hex)
if err != nil {
panic(err)
@ -135,7 +135,7 @@ func (id *ObjectId) UnmarshalBSON(data []byte) error {
// BSON document if the error is nil.
func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
oid := bsonprim.ObjectID(id)
return bsontype.ObjectID, oid[:], nil
return bson.TypeObjectID, oid[:], nil
}
// UnmarshalBSONValue is an interface implemented by types that can unmarshal a

View File

@ -94,7 +94,7 @@ func NewSeededFormats(seeds []knownFormat, normalizer NameNormalizer) Registry {
}
// MapStructureHookFunc is a decode hook function for mapstructure
func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //nolint:gocyclo,cyclop
func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
return func(from reflect.Type, to reflect.Type, obj interface{}) (interface{}, error) {
if from.Kind() != reflect.String {
return obj, nil

View File

@ -76,6 +76,8 @@ const (
ISO8601TimeWithReducedPrecisionLocaltime = "2006-01-02T15:04"
// ISO8601TimeUniversalSortableDateTimePattern represents a ISO8601 universal sortable date time pattern.
ISO8601TimeUniversalSortableDateTimePattern = "2006-01-02 15:04:05"
// short form of ISO8601TimeUniversalSortableDateTimePattern
ISO8601TimeUniversalSortableDateTimePatternShortForm = "2006-01-02"
// DateTimePattern pattern to match for the date-time format from http://tools.ietf.org/html/rfc3339#section-5.6
DateTimePattern = `^([0-9]{2}):([0-9]{2}):([0-9]{2})(.[0-9]+)?(z|([+-][0-9]{2}:[0-9]{2}))$`
)
@ -84,7 +86,7 @@ var (
rxDateTime = regexp.MustCompile(DateTimePattern)
// DateTimeFormats is the collection of formats used by ParseDateTime()
DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern}
DateTimeFormats = []string{RFC3339Micro, RFC3339MicroNoColon, RFC3339Millis, RFC3339MillisNoColon, time.RFC3339, time.RFC3339Nano, ISO8601LocalTime, ISO8601TimeWithReducedPrecision, ISO8601TimeWithReducedPrecisionLocaltime, ISO8601TimeUniversalSortableDateTimePattern, ISO8601TimeUniversalSortableDateTimePatternShortForm}
// MarshalFormat sets the time resolution format used for marshaling time (set to milliseconds)
MarshalFormat = RFC3339Millis
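Since DateTimeFormats feeds ParseDateTime, date-only values in the new short form should now parse as well; a small sketch assuming the public strfmt.ParseDateTime API:

package example // illustrative sketch only

import (
	"fmt"

	"github.com/go-openapi/strfmt"
)

func parseShortForm() error {
	// "2006-01-02" layout: the time components default to zero values.
	dt, err := strfmt.ParseDateTime("2024-01-15")
	if err != nil {
		return err
	}
	fmt.Println(dt.String())
	return nil
}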
@ -245,7 +247,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, uint64(i64))
return bsontype.DateTime, buf, nil
return bson.TypeDateTime, buf, nil
}
// UnmarshalBSONValue is an interface implemented by types that can unmarshal a
@ -253,7 +255,7 @@ func (t DateTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
// wishes to retain the data after returning.
func (t *DateTime) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error {
if tpe == bsontype.Null {
if tpe == bson.TypeNull {
*t = DateTime{}
return nil
}

View File

@ -14,17 +14,23 @@ import (
)
// ArrayCodec is the Codec used for bsoncore.Array values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// ArrayCodec registered.
type ArrayCodec struct{}
var defaultArrayCodec = NewArrayCodec()
// NewArrayCodec returns an ArrayCodec.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// ArrayCodec registered.
func NewArrayCodec() *ArrayCodec {
return &ArrayCodec{}
}
// EncodeValue is the ValueEncoder for bsoncore.Array values.
func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
func (ac *ArrayCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tCoreArray {
return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val}
}
@ -34,7 +40,7 @@ func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val r
}
// DecodeValue is the ValueDecoder for bsoncore.Array values.
func (ac *ArrayCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
func (ac *ArrayCodec) DecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tCoreArray {
return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val}
}

View File

@ -23,6 +23,8 @@ var (
// Marshaler is an interface implemented by types that can marshal themselves
// into a BSON document represented as bytes. The bytes returned must be a valid
// BSON document if the error is nil.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Marshaler] instead.
type Marshaler interface {
MarshalBSON() ([]byte, error)
}
@ -31,6 +33,8 @@ type Marshaler interface {
// themselves into a BSON value as bytes. The type must be the valid type for
// the bytes returned. The bytes and byte type together must be valid if the
// error is nil.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueMarshaler] instead.
type ValueMarshaler interface {
MarshalBSONValue() (bsontype.Type, []byte, error)
}
@ -39,6 +43,8 @@ type ValueMarshaler interface {
// document representation of themselves. The BSON bytes can be assumed to be
// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
// after returning.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Unmarshaler] instead.
type Unmarshaler interface {
UnmarshalBSON([]byte) error
}
@ -47,6 +53,8 @@ type Unmarshaler interface {
// BSON value representation of themselves. The BSON bytes and type can be
// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
// wishes to retain the data after returning.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.ValueUnmarshaler] instead.
type ValueUnmarshaler interface {
UnmarshalBSONValue(bsontype.Type, []byte) error
}
@ -111,13 +119,93 @@ func (vde ValueDecoderError) Error() string {
// value.
type EncodeContext struct {
*Registry
// MinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64,
// uint, uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits)
// that can represent the integer value.
//
// Deprecated: Use bson.Encoder.IntMinSize instead.
MinSize bool
errorOnInlineDuplicates bool
stringifyMapKeysWithFmt bool
nilMapAsEmpty bool
nilSliceAsEmpty bool
nilByteSliceAsEmpty bool
omitZeroStruct bool
useJSONStructTags bool
}
// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in
// the marshaled BSON when the "inline" struct tag option is set.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
func (ec *EncodeContext) ErrorOnInlineDuplicates() {
ec.errorOnInlineDuplicates = true
}
// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name
// strings using fmt.Sprintf() instead of the default string conversion logic.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead.
func (ec *EncodeContext) StringifyMapKeysWithFmt() {
ec.stringifyMapKeysWithFmt = true
}
// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON
// null.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead.
func (ec *EncodeContext) NilMapAsEmpty() {
ec.nilMapAsEmpty = true
}
// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON
// null.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead.
func (ec *EncodeContext) NilSliceAsEmpty() {
ec.nilSliceAsEmpty = true
}
// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values
// instead of BSON null.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead.
func (ec *EncodeContext) NilByteSliceAsEmpty() {
ec.nilByteSliceAsEmpty = true
}
// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{})
// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set.
//
// Note that the Encoder only examines exported struct fields when determining if a struct is the
// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
func (ec *EncodeContext) OmitZeroStruct() {
ec.omitZeroStruct = true
}
// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson"
// struct tag is not specified.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] instead.
func (ec *EncodeContext) UseJSONStructTags() {
ec.useJSONStructTags = true
}
// DecodeContext is the contextual information required for a Codec to decode a
// value.
type DecodeContext struct {
*Registry
// Truncate, if true, instructs decoders to truncate the fractional part of BSON "double"
// values when attempting to unmarshal them into a Go integer (int, int8, int16, int32, int64,
// uint, uint8, uint16, uint32, or uint64) struct field. The truncation logic does not apply to
// BSON "decimal128" values.
//
// Deprecated: Use bson.Decoder.AllowTruncatingDoubles instead.
Truncate bool
// Ancestor is the type of a containing document. This is mainly used to determine what type
@ -125,7 +213,7 @@ type DecodeContext struct {
// Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
// will be decoded into a bson.M.
//
// Deprecated: Use DefaultDocumentM or DefaultDocumentD instead.
// Deprecated: Use bson.Decoder.DefaultDocumentM or bson.Decoder.DefaultDocumentD instead.
Ancestor reflect.Type
// defaultDocumentType specifies the Go type to decode top-level and nested BSON documents into. In particular, the
@ -133,22 +221,74 @@ type DecodeContext struct {
// set to a type that a BSON document cannot be unmarshaled into (e.g. "string"), unmarshalling will result in an
// error. DocumentType overrides the Ancestor field.
defaultDocumentType reflect.Type
binaryAsSlice bool
useJSONStructTags bool
useLocalTimeZone bool
zeroMaps bool
zeroStructs bool
}
// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as
// "interface{}" or "map[string]interface{}".
// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or
// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
func (dc *DecodeContext) BinaryAsSlice() {
dc.binaryAsSlice = true
}
// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson"
// struct tag is not specified.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
func (dc *DecodeContext) UseJSONStructTags() {
dc.useJSONStructTags = true
}
// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead
// of the UTC timezone.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
func (dc *DecodeContext) UseLocalTimeZone() {
dc.useLocalTimeZone = true
}
// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value
// passed to Decode before unmarshaling BSON documents into them.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
func (dc *DecodeContext) ZeroMaps() {
dc.zeroMaps = true
}
// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination
// value passed to Decode before unmarshaling BSON documents into them.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
func (dc *DecodeContext) ZeroStructs() {
dc.zeroStructs = true
}
// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This
// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentM] instead.
func (dc *DecodeContext) DefaultDocumentM() {
dc.defaultDocumentType = reflect.TypeOf(primitive.M{})
}
// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as
// "interface{}" or "map[string]interface{}".
// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This
// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.DefaultDocumentD] instead.
func (dc *DecodeContext) DefaultDocumentD() {
dc.defaultDocumentType = reflect.TypeOf(primitive.D{})
}
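These setters duplicate options that the deprecation notes point at bson.Decoder; a rough migration sketch under that assumption (constructor and option signatures may differ across driver versions):

package example // illustrative sketch only

import (
	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

// decodeAsM decodes a raw BSON document into bson.M, configuring the decoder with the
// non-deprecated per-Decoder options instead of DecodeContext flags.
func decodeAsM(raw []byte) (bson.M, error) {
	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
	if err != nil {
		return nil, err
	}
	dec.DefaultDocumentM() // decode nested documents into bson.M
	var out bson.M
	if err := dec.Decode(&out); err != nil {
		return nil, err
	}
	return out, nil
}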
// ValueCodec is the interface that groups the methods to encode and decode
// ValueCodec is an interface for encoding and decoding a reflect.Value.
// values.
//
// Deprecated: Use [ValueEncoder] and [ValueDecoder] instead.
type ValueCodec interface {
ValueEncoder
ValueDecoder
@ -233,6 +373,10 @@ func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext
// CodecZeroer is the interface implemented by Codecs that can also determine if
// a value of the type that would be encoded is zero.
//
// Deprecated: Defining custom rules for the zero/empty value will not be supported in Go Driver
// 2.0. Users who want to omit empty complex values should use a pointer field and set the value to
// nil instead.
type CodecZeroer interface {
IsTypeZero(interface{}) bool
}

View File

@ -16,18 +16,30 @@ import (
)
// ByteSliceCodec is the Codec used for []byte values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// ByteSliceCodec registered.
type ByteSliceCodec struct {
// EncodeNilAsEmpty causes EncodeValue to marshal nil Go byte slices as empty BSON binary values
// instead of BSON null.
//
// Deprecated: Use bson.Encoder.NilByteSliceAsEmpty instead.
EncodeNilAsEmpty bool
}
var (
defaultByteSliceCodec = NewByteSliceCodec()
_ ValueCodec = defaultByteSliceCodec
// Assert that defaultByteSliceCodec satisfies the typeDecoder interface, which allows it to be
// used by collection type decoders (e.g. map, slice, etc) to set individual values in a
// collection.
_ typeDecoder = defaultByteSliceCodec
)
// NewByteSliceCodec returns a StringCodec with options opts.
// NewByteSliceCodec returns a ByteSliceCodec with options opts.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// ByteSliceCodec registered.
func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec {
byteSliceOpt := bsonoptions.MergeByteSliceCodecOptions(opts...)
codec := ByteSliceCodec{}
@ -42,13 +54,13 @@ func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter,
if !val.IsValid() || val.Type() != tByteSlice {
return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
}
if val.IsNil() && !bsc.EncodeNilAsEmpty {
if val.IsNil() && !bsc.EncodeNilAsEmpty && !ec.nilByteSliceAsEmpty {
return vw.WriteNull()
}
return vw.WriteBinary(val.Interface().([]byte))
}
func (bsc *ByteSliceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (bsc *ByteSliceCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tByteSlice {
return emptyValue, ValueDecoderError{
Name: "ByteSliceDecodeValue",

View File

@ -0,0 +1,166 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package bsoncodec
import (
"reflect"
"sync"
"sync/atomic"
)
// Runtime check that the kind encoder and decoder caches can store any valid
// reflect.Kind constant.
func init() {
if s := reflect.Kind(len(kindEncoderCache{}.entries)).String(); s != "kind27" {
panic("The capacity of kindEncoderCache is too small.\n" +
"This is due to a new type being added to reflect.Kind.")
}
}
// statically assert array size
var _ = (kindEncoderCache{}).entries[reflect.UnsafePointer]
var _ = (kindDecoderCache{}).entries[reflect.UnsafePointer]
type typeEncoderCache struct {
cache sync.Map // map[reflect.Type]ValueEncoder
}
func (c *typeEncoderCache) Store(rt reflect.Type, enc ValueEncoder) {
c.cache.Store(rt, enc)
}
func (c *typeEncoderCache) Load(rt reflect.Type) (ValueEncoder, bool) {
if v, _ := c.cache.Load(rt); v != nil {
return v.(ValueEncoder), true
}
return nil, false
}
func (c *typeEncoderCache) LoadOrStore(rt reflect.Type, enc ValueEncoder) ValueEncoder {
if v, loaded := c.cache.LoadOrStore(rt, enc); loaded {
enc = v.(ValueEncoder)
}
return enc
}
func (c *typeEncoderCache) Clone() *typeEncoderCache {
cc := new(typeEncoderCache)
c.cache.Range(func(k, v interface{}) bool {
if k != nil && v != nil {
cc.cache.Store(k, v)
}
return true
})
return cc
}
type typeDecoderCache struct {
cache sync.Map // map[reflect.Type]ValueDecoder
}
func (c *typeDecoderCache) Store(rt reflect.Type, dec ValueDecoder) {
c.cache.Store(rt, dec)
}
func (c *typeDecoderCache) Load(rt reflect.Type) (ValueDecoder, bool) {
if v, _ := c.cache.Load(rt); v != nil {
return v.(ValueDecoder), true
}
return nil, false
}
func (c *typeDecoderCache) LoadOrStore(rt reflect.Type, dec ValueDecoder) ValueDecoder {
if v, loaded := c.cache.LoadOrStore(rt, dec); loaded {
dec = v.(ValueDecoder)
}
return dec
}
func (c *typeDecoderCache) Clone() *typeDecoderCache {
cc := new(typeDecoderCache)
c.cache.Range(func(k, v interface{}) bool {
if k != nil && v != nil {
cc.cache.Store(k, v)
}
return true
})
return cc
}
// atomic.Value requires that all calls to Store() have the same concrete type
// so we wrap the ValueEncoder with a kindEncoderCacheEntry to ensure the type
// is always the same (since different concrete types may implement the
// ValueEncoder interface).
type kindEncoderCacheEntry struct {
enc ValueEncoder
}
type kindEncoderCache struct {
entries [reflect.UnsafePointer + 1]atomic.Value // *kindEncoderCacheEntry
}
func (c *kindEncoderCache) Store(rt reflect.Kind, enc ValueEncoder) {
if enc != nil && rt < reflect.Kind(len(c.entries)) {
c.entries[rt].Store(&kindEncoderCacheEntry{enc: enc})
}
}
func (c *kindEncoderCache) Load(rt reflect.Kind) (ValueEncoder, bool) {
if rt < reflect.Kind(len(c.entries)) {
if ent, ok := c.entries[rt].Load().(*kindEncoderCacheEntry); ok {
return ent.enc, ent.enc != nil
}
}
return nil, false
}
func (c *kindEncoderCache) Clone() *kindEncoderCache {
cc := new(kindEncoderCache)
for i, v := range c.entries {
if val := v.Load(); val != nil {
cc.entries[i].Store(val)
}
}
return cc
}
// atomic.Value requires that all calls to Store() have the same concrete type
// so we wrap the ValueDecoder with a kindDecoderCacheEntry to ensure the type
// is always the same (since different concrete types may implement the
// ValueDecoder interface).
type kindDecoderCacheEntry struct {
dec ValueDecoder
}
type kindDecoderCache struct {
entries [reflect.UnsafePointer + 1]atomic.Value // *kindDecoderCacheEntry
}
func (c *kindDecoderCache) Store(rt reflect.Kind, dec ValueDecoder) {
if rt < reflect.Kind(len(c.entries)) {
c.entries[rt].Store(&kindDecoderCacheEntry{dec: dec})
}
}
func (c *kindDecoderCache) Load(rt reflect.Kind) (ValueDecoder, bool) {
if rt < reflect.Kind(len(c.entries)) {
if ent, ok := c.entries[rt].Load().(*kindDecoderCacheEntry); ok {
return ent.dec, ent.dec != nil
}
}
return nil, false
}
func (c *kindDecoderCache) Clone() *kindDecoderCache {
cc := new(kindDecoderCache)
for i, v := range c.entries {
if val := v.Load(); val != nil {
cc.entries[i].Store(val)
}
}
return cc
}
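The wrapper-entry types above exist because atomic.Value requires a single concrete type across all Store calls; a tiny standalone sketch of the underlying constraint:

package example // illustrative sketch only

import "sync/atomic"

// Storing values of different concrete types in the same atomic.Value panics, which is
// why the kind caches wrap every encoder/decoder in a single entry struct type.
func storeExample() {
	var v atomic.Value
	v.Store("a string")
	// v.Store(42) // would panic: store of inconsistently typed value into Value
}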

View File

@ -24,7 +24,7 @@ import (
var (
defaultValueDecoders DefaultValueDecoders
errCannotTruncate = errors.New("float64 can only be truncated to an integer type when truncation is enabled")
errCannotTruncate = errors.New("float64 can only be truncated to a lower precision type when truncation is enabled")
)
type decodeBinaryError struct {
@ -48,6 +48,9 @@ func newDefaultStructCodec() *StructCodec {
// DefaultValueDecoders is a namespace type for the default ValueDecoders used
// when creating a registry.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
type DefaultValueDecoders struct{}
// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with
@ -56,6 +59,9 @@ type DefaultValueDecoders struct{}
// There is no support for decoding map[string]interface{} because there is no decoder for
// interface{}, so users must either register this decoder themselves or use the
// EmptyInterfaceDecoder available in the bson package.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) {
if rb == nil {
panic(errors.New("argument to RegisterDefaultDecoders must not be nil"))
@ -132,6 +138,9 @@ func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) {
}
// DDecodeValue is the ValueDecoderFunc for primitive.D instances.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.IsValid() || !val.CanSet() || val.Type() != tD {
return ValueDecoderError{Name: "DDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
@ -188,7 +197,7 @@ func (dvd DefaultValueDecoders) DDecodeValue(dc DecodeContext, vr bsonrw.ValueRe
return nil
}
func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (dvd DefaultValueDecoders) booleanDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t.Kind() != reflect.Bool {
return emptyValue, ValueDecoderError{
Name: "BooleanDecodeValue",
@ -235,6 +244,9 @@ func (dvd DefaultValueDecoders) booleanDecodeType(dctx DecodeContext, vr bsonrw.
}
// BooleanDecodeValue is the ValueDecoderFunc for bool types.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool {
return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
@ -333,6 +345,9 @@ func (DefaultValueDecoders) intDecodeType(dc DecodeContext, vr bsonrw.ValueReade
}
// IntDecodeValue is the ValueDecoderFunc for int types.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() {
return ValueDecoderError{
@ -434,7 +449,7 @@ func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.Valu
return nil
}
func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (dvd DefaultValueDecoders) floatDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
var f float64
var err error
switch vrType := vr.Type(); vrType {
@ -477,7 +492,7 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu
switch t.Kind() {
case reflect.Float32:
if !ec.Truncate && float64(float32(f)) != f {
if !dc.Truncate && float64(float32(f)) != f {
return emptyValue, errCannotTruncate
}
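
Editor's note: the round-trip comparison above is how floatDecodeType detects that a float64 cannot be represented exactly as a float32. A small standalone illustration (example values chosen by the editor):

package main

import "fmt"

func main() {
	exact := 0.5   // exactly representable as a float32
	inexact := 0.1 // not exactly representable as a float32

	// Mirrors the check in floatDecodeType: the conversion is lossless
	// exactly when the value survives a round trip through float32.
	fmt.Println(float64(float32(exact)) == exact)     // true
	fmt.Println(float64(float32(inexact)) == inexact) // false: needs Truncate
}
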
@ -494,6 +509,9 @@ func (dvd DefaultValueDecoders) floatDecodeType(ec DecodeContext, vr bsonrw.Valu
}
// FloatDecodeValue is the ValueDecoderFunc for float types.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() {
return ValueDecoderError{
@ -515,7 +533,7 @@ func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.Val
// StringDecodeValue is the ValueDecoderFunc for string types.
//
// Deprecated: StringDecodeValue is not registered by default. Use StringCodec.DecodeValue instead.
func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
func (dvd DefaultValueDecoders) StringDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
var str string
var err error
switch vr.Type() {
@ -536,7 +554,7 @@ func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.
return nil
}
func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) javaScriptDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tJavaScript {
return emptyValue, ValueDecoderError{
Name: "JavaScriptDecodeValue",
@ -565,6 +583,9 @@ func (DefaultValueDecoders) javaScriptDecodeType(dctx DecodeContext, vr bsonrw.V
}
// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tJavaScript {
return ValueDecoderError{Name: "JavaScriptDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
@ -579,7 +600,7 @@ func (dvd DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bso
return nil
}
func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) symbolDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tSymbol {
return emptyValue, ValueDecoderError{
Name: "SymbolDecodeValue",
@ -620,6 +641,9 @@ func (DefaultValueDecoders) symbolDecodeType(dctx DecodeContext, vr bsonrw.Value
}
// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tSymbol {
return ValueDecoderError{Name: "SymbolDecodeValue", Types: []reflect.Type{tSymbol}, Received: val}
@ -634,7 +658,7 @@ func (dvd DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.
return nil
}
func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) binaryDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tBinary {
return emptyValue, ValueDecoderError{
Name: "BinaryDecodeValue",
@ -664,6 +688,9 @@ func (DefaultValueDecoders) binaryDecodeType(dc DecodeContext, vr bsonrw.ValueRe
}
// BinaryDecodeValue is the ValueDecoderFunc for Binary.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tBinary {
return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val}
@ -678,7 +705,7 @@ func (dvd DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.Va
return nil
}
func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) undefinedDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tUndefined {
return emptyValue, ValueDecoderError{
Name: "UndefinedDecodeValue",
@ -704,6 +731,9 @@ func (DefaultValueDecoders) undefinedDecodeType(dc DecodeContext, vr bsonrw.Valu
}
// UndefinedDecodeValue is the ValueDecoderFunc for Undefined.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tUndefined {
return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val}
@ -719,7 +749,7 @@ func (dvd DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw
}
// Accept both 12-byte string and pretty-printed 24-byte hex string formats.
func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (dvd DefaultValueDecoders) objectIDDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tOID {
return emptyValue, ValueDecoderError{
Name: "ObjectIDDecodeValue",
@ -765,6 +795,9 @@ func (dvd DefaultValueDecoders) objectIDDecodeType(dc DecodeContext, vr bsonrw.V
}
// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tOID {
return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val}
@ -779,7 +812,7 @@ func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.
return nil
}
func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) dateTimeDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tDateTime {
return emptyValue, ValueDecoderError{
Name: "DateTimeDecodeValue",
@ -808,6 +841,9 @@ func (DefaultValueDecoders) dateTimeDecodeType(dc DecodeContext, vr bsonrw.Value
}
// DateTimeDecodeValue is the ValueDecoderFunc for DateTime.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tDateTime {
return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val}
@ -822,7 +858,7 @@ func (dvd DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.
return nil
}
func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) nullDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tNull {
return emptyValue, ValueDecoderError{
Name: "NullDecodeValue",
@ -848,6 +884,9 @@ func (DefaultValueDecoders) nullDecodeType(dc DecodeContext, vr bsonrw.ValueRead
}
// NullDecodeValue is the ValueDecoderFunc for Null.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tNull {
return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val}
@ -862,7 +901,7 @@ func (dvd DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.Valu
return nil
}
func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) regexDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tRegex {
return emptyValue, ValueDecoderError{
Name: "RegexDecodeValue",
@ -891,6 +930,9 @@ func (DefaultValueDecoders) regexDecodeType(dc DecodeContext, vr bsonrw.ValueRea
}
// RegexDecodeValue is the ValueDecoderFunc for Regex.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tRegex {
return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val}
@ -905,7 +947,7 @@ func (dvd DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.Val
return nil
}
func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) dBPointerDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tDBPointer {
return emptyValue, ValueDecoderError{
Name: "DBPointerDecodeValue",
@ -935,6 +977,9 @@ func (DefaultValueDecoders) dBPointerDecodeType(dc DecodeContext, vr bsonrw.Valu
}
// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tDBPointer {
return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
@ -949,7 +994,7 @@ func (dvd DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw
return nil
}
func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) timestampDecodeType(_ DecodeContext, vr bsonrw.ValueReader, reflectType reflect.Type) (reflect.Value, error) {
if reflectType != tTimestamp {
return emptyValue, ValueDecoderError{
Name: "TimestampDecodeValue",
@ -978,6 +1023,9 @@ func (DefaultValueDecoders) timestampDecodeType(dc DecodeContext, vr bsonrw.Valu
}
// TimestampDecodeValue is the ValueDecoderFunc for Timestamp.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tTimestamp {
return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
@ -992,7 +1040,7 @@ func (dvd DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw
return nil
}
func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) minKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tMinKey {
return emptyValue, ValueDecoderError{
Name: "MinKeyDecodeValue",
@ -1020,6 +1068,9 @@ func (DefaultValueDecoders) minKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe
}
// MinKeyDecodeValue is the ValueDecoderFunc for MinKey.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tMinKey {
return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val}
@ -1034,7 +1085,7 @@ func (dvd DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.Va
return nil
}
func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (DefaultValueDecoders) maxKeyDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tMaxKey {
return emptyValue, ValueDecoderError{
Name: "MaxKeyDecodeValue",
@ -1062,6 +1113,9 @@ func (DefaultValueDecoders) maxKeyDecodeType(dc DecodeContext, vr bsonrw.ValueRe
}
// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tMaxKey {
return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
@ -1076,7 +1130,7 @@ func (dvd DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.Va
return nil
}
func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (dvd DefaultValueDecoders) decimal128DecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tDecimal {
return emptyValue, ValueDecoderError{
Name: "Decimal128DecodeValue",
@ -1105,6 +1159,9 @@ func (dvd DefaultValueDecoders) decimal128DecodeType(dctx DecodeContext, vr bson
}
// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tDecimal {
return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val}
@ -1119,7 +1176,7 @@ func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bso
return nil
}
func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (dvd DefaultValueDecoders) jsonNumberDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tJSONNumber {
return emptyValue, ValueDecoderError{
Name: "JSONNumberDecodeValue",
@ -1164,6 +1221,9 @@ func (dvd DefaultValueDecoders) jsonNumberDecodeType(dc DecodeContext, vr bsonrw
}
// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tJSONNumber {
return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
@ -1178,7 +1238,7 @@ func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonr
return nil
}
func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (dvd DefaultValueDecoders) urlDecodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t != tURL {
return emptyValue, ValueDecoderError{
Name: "URLDecodeValue",
@ -1213,6 +1273,9 @@ func (dvd DefaultValueDecoders) urlDecodeType(dc DecodeContext, vr bsonrw.ValueR
}
// URLDecodeValue is the ValueDecoderFunc for url.URL.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tURL {
return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val}
@ -1230,7 +1293,7 @@ func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.Value
// TimeDecodeValue is the ValueDecoderFunc for time.Time.
//
// Deprecated: TimeDecodeValue is not registered by default. Use TimeCodec.DecodeValue instead.
func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
func (dvd DefaultValueDecoders) TimeDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if vr.Type() != bsontype.DateTime {
return fmt.Errorf("cannot decode %v into a time.Time", vr.Type())
}
@ -1251,7 +1314,7 @@ func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.Valu
// ByteSliceDecodeValue is the ValueDecoderFunc for []byte.
//
// Deprecated: ByteSliceDecodeValue is not registered by default. Use ByteSliceCodec.DecodeValue instead.
func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
func (dvd DefaultValueDecoders) ByteSliceDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null {
return fmt.Errorf("cannot decode %v into a []byte", vr.Type())
}
@ -1336,6 +1399,9 @@ func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.Value
}
// ArrayDecodeValue is the ValueDecoderFunc for array types.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.IsValid() || val.Kind() != reflect.Array {
return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
@ -1447,7 +1513,10 @@ func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.Val
}
// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations.
func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) {
return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
}
@ -1471,16 +1540,19 @@ func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr
return err
}
fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue")
errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0]
if !errVal.IsNil() {
return errVal.Interface().(error)
m, ok := val.Interface().(ValueUnmarshaler)
if !ok {
// NB: this error should be unreachable due to the above checks
return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
}
return nil
return m.UnmarshalBSONValue(t, src)
}
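
Editor's note: the hunk above replaces a reflection-based method call (MethodByName plus Call) with a plain interface assertion on val.Interface(). A self-contained sketch of that pattern, using a hypothetical valueUnmarshaler interface and record type rather than the driver's real ones:

package main

import (
	"fmt"
	"reflect"
)

// valueUnmarshaler stands in for the driver's ValueUnmarshaler interface.
type valueUnmarshaler interface {
	UnmarshalBSONValue(t byte, data []byte) error
}

type record struct{ raw []byte }

func (r *record) UnmarshalBSONValue(_ byte, data []byte) error {
	r.raw = append([]byte(nil), data...)
	return nil
}

// callUnmarshal asserts the interface on the reflected value instead of
// invoking the method through reflection.
func callUnmarshal(val reflect.Value, t byte, src []byte) error {
	m, ok := val.Interface().(valueUnmarshaler)
	if !ok {
		return fmt.Errorf("%s does not implement valueUnmarshaler", val.Type())
	}
	return m.UnmarshalBSONValue(t, src)
}

func main() {
	r := &record{}
	fmt.Println(callUnmarshal(reflect.ValueOf(r), 0x02, []byte{0x00}), r.raw)
}
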
// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations.
func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) {
return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
}
@ -1516,12 +1588,12 @@ func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bson
val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
}
fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON")
errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0]
if !errVal.IsNil() {
return errVal.Interface().(error)
m, ok := val.Interface().(Unmarshaler)
if !ok {
// NB: this error should be unreachable due to the above checks
return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
}
return nil
return m.UnmarshalBSON(src)
}
// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}.
@ -1565,7 +1637,10 @@ func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr b
}
// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document.
func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (DefaultValueDecoders) CoreDocumentDecodeValue(_ DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tCoreDocument {
return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
}
@ -1671,6 +1746,9 @@ func (dvd DefaultValueDecoders) codeWithScopeDecodeType(dc DecodeContext, vr bso
}
// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value decoders registered.
func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tCodeWithScope {
return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}

View File

@ -58,10 +58,16 @@ func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) er
// DefaultValueEncoders is a namespace type for the default ValueEncoders used
// when creating a registry.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
type DefaultValueEncoders struct{}
// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with
// the provided RegistryBuilder.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
if rb == nil {
panic(errors.New("argument to RegisterDefaultEncoders must not be nil"))
@ -113,7 +119,10 @@ func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
}
// BooleanEncodeValue is the ValueEncoderFunc for bool types.
func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) BooleanEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Kind() != reflect.Bool {
return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
}
@ -125,6 +134,9 @@ func fitsIn32Bits(i int64) bool {
}
// IntEncodeValue is the ValueEncoderFunc for int types.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
switch val.Kind() {
case reflect.Int8, reflect.Int16, reflect.Int32:
@ -176,7 +188,10 @@ func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.Valu
}
// FloatEncodeValue is the ValueEncoderFunc for float types.
func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) FloatEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
switch val.Kind() {
case reflect.Float32, reflect.Float64:
return vw.WriteDouble(val.Float())
@ -188,7 +203,7 @@ func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.Val
// StringEncodeValue is the ValueEncoderFunc for string types.
//
// Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead.
func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
func (dve DefaultValueEncoders) StringEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if val.Kind() != reflect.String {
return ValueEncoderError{
Name: "StringEncodeValue",
@ -201,7 +216,10 @@ func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.
}
// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID.
func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) ObjectIDEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tOID {
return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val}
}
@ -209,7 +227,10 @@ func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.
}
// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128.
func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) Decimal128EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tDecimal {
return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val}
}
@ -217,6 +238,9 @@ func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonr
}
// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tJSONNumber {
return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
@ -237,7 +261,10 @@ func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonr
}
// URLEncodeValue is the ValueEncoderFunc for url.URL.
func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) URLEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tURL {
return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
}
@ -248,7 +275,7 @@ func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.Value
// TimeEncodeValue is the ValueEncoderFunc for time.Time.
//
// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
func (dve DefaultValueEncoders) TimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tTime {
return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
}
@ -260,7 +287,7 @@ func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.Valu
// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
//
// Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
func (dve DefaultValueEncoders) ByteSliceEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tByteSlice {
return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
}
@ -343,6 +370,9 @@ func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.Docum
}
// ArrayEncodeValue is the ValueEncoderFunc for array types.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Kind() != reflect.Array {
return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
@ -515,7 +545,10 @@ func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw b
}
// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations.
func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
// Either val or a pointer to val must implement ValueMarshaler
switch {
case !val.IsValid():
@ -531,17 +564,22 @@ func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw b
return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
}
fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue")
returns := fn.Call(nil)
if !returns[2].IsNil() {
return returns[2].Interface().(error)
m, ok := val.Interface().(ValueMarshaler)
if !ok {
return vw.WriteNull()
}
t, data, err := m.MarshalBSONValue()
if err != nil {
return err
}
t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte)
return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data)
}
// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations.
func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) MarshalerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
// Either val or a pointer to val must implement Marshaler
switch {
case !val.IsValid():
@ -557,16 +595,21 @@ func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw
return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
}
fn := val.Convert(tMarshaler).MethodByName("MarshalBSON")
returns := fn.Call(nil)
if !returns[1].IsNil() {
return returns[1].Interface().(error)
m, ok := val.Interface().(Marshaler)
if !ok {
return vw.WriteNull()
}
data, err := m.MarshalBSON()
if err != nil {
return err
}
data := returns[0].Interface().([]byte)
return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data)
}
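
Editor's note: for context, a type opts into the Marshaler hook above by implementing MarshalBSON and returning a complete BSON document. A minimal sketch with a hypothetical Celsius type, assuming the default registry (which registers this hook):

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// Celsius is a hypothetical type used only for illustration.
type Celsius float64

// MarshalBSON returns a whole BSON document, which is what this hook expects.
func (c Celsius) MarshalBSON() ([]byte, error) {
	return bson.Marshal(bson.D{{Key: "celsius", Value: float64(c)}})
}

func main() {
	out, err := bson.Marshal(bson.D{{Key: "temp", Value: Celsius(21.5)}})
	fmt.Println(len(out) > 0, err) // "temp" is encoded as an embedded document
}
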
// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
// Either val or a pointer to val must implement Proxy
switch {
@ -583,27 +626,38 @@ func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.Val
return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
}
fn := val.Convert(tProxy).MethodByName("ProxyBSON")
returns := fn.Call(nil)
if !returns[1].IsNil() {
return returns[1].Interface().(error)
}
data := returns[0]
var encoder ValueEncoder
var err error
if data.Elem().IsValid() {
encoder, err = ec.LookupEncoder(data.Elem().Type())
} else {
encoder, err = ec.LookupEncoder(nil)
m, ok := val.Interface().(Proxy)
if !ok {
return vw.WriteNull()
}
v, err := m.ProxyBSON()
if err != nil {
return err
}
return encoder.EncodeValue(ec, vw, data.Elem())
if v == nil {
encoder, err := ec.LookupEncoder(nil)
if err != nil {
return err
}
return encoder.EncodeValue(ec, vw, reflect.ValueOf(nil))
}
vv := reflect.ValueOf(v)
switch vv.Kind() {
case reflect.Ptr, reflect.Interface:
vv = vv.Elem()
}
encoder, err := ec.LookupEncoder(vv.Type())
if err != nil {
return err
}
return encoder.EncodeValue(ec, vw, vv)
}
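
Editor's note: similarly, the Proxy hook above lets a type hand the encoder a stand-in value. A hedged sketch with a hypothetical secret type, again assuming the default registry's Proxy hook is in effect:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

// secret is a hypothetical type; ProxyBSON makes the encoder marshal a
// stand-in value instead of the struct itself.
type secret struct{ value string }

func (s secret) ProxyBSON() (interface{}, error) {
	return "[redacted]", nil
}

func main() {
	out, err := bson.Marshal(bson.D{{Key: "token", Value: secret{value: "hunter2"}}})
	fmt.Println(len(out) > 0, err) // "token" is encoded as the string "[redacted]"
}
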
// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type.
func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) JavaScriptEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tJavaScript {
return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
}
@ -612,7 +666,10 @@ func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.
}
// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type.
func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) SymbolEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tSymbol {
return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val}
}
@ -621,7 +678,10 @@ func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.Valu
}
// BinaryEncodeValue is the ValueEncoderFunc for Binary.
func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) BinaryEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tBinary {
return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val}
}
@ -631,7 +691,10 @@ func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueW
}
// UndefinedEncodeValue is the ValueEncoderFunc for Undefined.
func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) UndefinedEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tUndefined {
return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val}
}
@ -640,7 +703,10 @@ func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.Val
}
// DateTimeEncodeValue is the ValueEncoderFunc for DateTime.
func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) DateTimeEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tDateTime {
return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val}
}
@ -649,7 +715,10 @@ func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.Valu
}
// NullEncodeValue is the ValueEncoderFunc for Null.
func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) NullEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tNull {
return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val}
}
@ -658,7 +727,10 @@ func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWri
}
// RegexEncodeValue is the ValueEncoderFunc for Regex.
func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) RegexEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tRegex {
return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val}
}
@ -669,7 +741,10 @@ func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWr
}
// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer.
func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) DBPointerEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tDBPointer {
return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
}
@ -680,7 +755,10 @@ func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.Val
}
// TimestampEncodeValue is the ValueEncoderFunc for Timestamp.
func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) TimestampEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tTimestamp {
return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
}
@ -691,7 +769,10 @@ func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.Val
}
// MinKeyEncodeValue is the ValueEncoderFunc for MinKey.
func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) MinKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tMinKey {
return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val}
}
@ -700,7 +781,10 @@ func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW
}
// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey.
func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) MaxKeyEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tMaxKey {
return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
}
@ -709,7 +793,10 @@ func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueW
}
// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document.
func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (DefaultValueEncoders) CoreDocumentEncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tCoreDocument {
return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
}
@ -720,6 +807,9 @@ func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.
}
// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with all default
// value encoders registered.
func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tCodeWithScope {
return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}

View File

@ -31,35 +31,39 @@
// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext
// instance is provided and serves similar functionality to the EncodeContext.
//
// # Registry and RegistryBuilder
// # Registry
//
// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type
// documentation for examples of registering various custom encoders and decoders. A Registry can be constructed using a
// RegistryBuilder, which handles three main types of codecs:
// A Registry is a store for ValueEncoders, ValueDecoders, and a type map. See the Registry type
// documentation for examples of registering various custom encoders and decoders. A Registry can
// have three main types of codecs:
//
// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and RegisterTypeDecoder methods.
// The registered codec will be invoked when encoding/decoding a value whose type matches the registered type exactly.
// If the registered type is an interface, the codec will be invoked when encoding or decoding values whose type is the
// interface, but not for values with concrete types that implement the interface.
// 1. Type encoders/decoders - These can be registered using the RegisterTypeEncoder and
// RegisterTypeDecoder methods. The registered codec will be invoked when encoding/decoding a value
// whose type matches the registered type exactly.
// If the registered type is an interface, the codec will be invoked when encoding or decoding
// values whose type is the interface, but not for values with concrete types that implement the
// interface.
//
// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and RegisterHookDecoder methods.
// These methods only accept interface types and the registered codecs will be invoked when encoding or decoding values
// whose types implement the interface. An example of a hook defined by the driver is bson.Marshaler. The driver will
// call the MarshalBSON method for any value whose type implements bson.Marshaler, regardless of the value's concrete
// type.
// 2. Hook encoders/decoders - These can be registered using the RegisterHookEncoder and
// RegisterHookDecoder methods. These methods only accept interface types and the registered codecs
// will be invoked when encoding or decoding values whose types implement the interface. An example
// of a hook defined by the driver is bson.Marshaler. The driver will call the MarshalBSON method
// for any value whose type implements bson.Marshaler, regardless of the value's concrete type.
//
// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type associations are used when
// decoding into a bson.D/bson.M or a struct field of type interface{}. For example, by default, BSON int32 and int64
// values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would
// change the behavior so these values decode as Go int instances instead:
// 3. Type map entries - This can be used to associate a BSON type with a Go type. These type
// associations are used when decoding into a bson.D/bson.M or a struct field of type interface{}.
// For example, by default, BSON int32 and int64 values decode as Go int32 and int64 instances,
// respectively, when decoding into a bson.D. The following code would change the behavior so these
// values decode as Go int instances instead:
//
// intType := reflect.TypeOf(int(0))
// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
// registry.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType)
//
// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder
// methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the
// registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first.
// These methods should be used to change the behavior for all values for a specific kind.
// 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and
// RegisterDefaultDecoder methods. The registered codec will be invoked when encoding or decoding
// values whose reflect.Kind matches the registered reflect.Kind as long as the value's type doesn't
// match a registered type or hook encoder/decoder first. These methods should be used to change the
// behavior for all values for a specific kind.
//
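
Editor's note: to make item 3 above concrete, here is a small, hedged sketch that registers int for BSON int32/int64 on a registry built with bson.NewRegistry. The method names follow the surrounding documentation and deprecation notices; treat the exact signatures as assumptions about the vendored driver version.

package main

import (
	"fmt"
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
)

func main() {
	reg := bson.NewRegistry()
	intType := reflect.TypeOf(int(0))
	reg.RegisterTypeMapEntry(bsontype.Int32, intType)
	reg.RegisterTypeMapEntry(bsontype.Int64, intType)

	raw, _ := bson.Marshal(bson.D{{Key: "n", Value: int32(7)}})

	var doc bson.D
	if err := bson.UnmarshalWithRegistry(reg, raw, &doc); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", doc[0].Value) // int rather than the default int32
}
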
// # Registry Lookup Procedure
//
@ -67,17 +71,18 @@
//
// 1. A type encoder registered for the exact type of the value.
//
// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to the value. If the
// value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and bsoncodec.ValueMarshaler), the first
// one registered will be selected. Note that registries constructed using bson.NewRegistryBuilder have driver-defined
// hooks registered for the bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those
// will take precedence over any new hooks.
// 2. A hook encoder registered for an interface that is implemented by the value or by a pointer to
// the value. If the value matches multiple hooks (e.g. the type implements bsoncodec.Marshaler and
// bsoncodec.ValueMarshaler), the first one registered will be selected. Note that registries
// constructed using bson.NewRegistry have driver-defined hooks registered for the
// bsoncodec.Marshaler, bsoncodec.ValueMarshaler, and bsoncodec.Proxy interfaces, so those will take
// precedence over any new hooks.
//
// 3. A kind encoder registered for the value's kind.
//
// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The same precedence
// rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is
// found.
// If all of these lookups fail to find an encoder, an error of type ErrNoEncoder is returned. The
// same precedence rules apply for decoders, with the exception that an error of type ErrNoDecoder
// will be returned if no decoder is found.
//
// # DefaultValueEncoders and DefaultValueDecoders
//

View File

@ -16,18 +16,30 @@ import (
)
// EmptyInterfaceCodec is the Codec used for interface{} values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// EmptyInterfaceCodec registered.
type EmptyInterfaceCodec struct {
// DecodeBinaryAsSlice causes DecodeValue to unmarshal BSON binary field values that are the
// "Generic" or "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
//
// Deprecated: Use bson.Decoder.BinaryAsSlice instead.
DecodeBinaryAsSlice bool
}
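
Editor's note: a hedged usage sketch of the decoder-level replacement named in the deprecation note above. BinaryAsSlice and NewBSONDocumentReader are assumed to match the driver version vendored here.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	raw, _ := bson.Marshal(bson.D{{Key: "data", Value: primitive.Binary{Data: []byte("hi")}}})

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(raw))
	if err != nil {
		panic(err)
	}
	dec.BinaryAsSlice() // replacement for EmptyInterfaceCodec.DecodeBinaryAsSlice

	var out bson.M
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", out["data"]) // []uint8 instead of primitive.Binary
}
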
var (
defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
_ ValueCodec = defaultEmptyInterfaceCodec
// Assert that defaultEmptyInterfaceCodec satisfies the typeDecoder interface, which allows it
// to be used by collection type decoders (e.g. map, slice, etc) to set individual values in a
// collection.
_ typeDecoder = defaultEmptyInterfaceCodec
)
// NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// EmptyInterfaceCodec registered.
func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
interfaceOpt := bsonoptions.MergeEmptyInterfaceCodecOptions(opts...)
@ -121,7 +133,7 @@ func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReade
return emptyValue, err
}
if eic.DecodeBinaryAsSlice && rtype == tBinary {
if (eic.DecodeBinaryAsSlice || dc.binaryAsSlice) && rtype == tBinary {
binElem := elem.Interface().(primitive.Binary)
if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld {
elem = reflect.ValueOf(binElem.Data)

View File

@ -20,14 +20,29 @@ import (
var defaultMapCodec = NewMapCodec()
// MapCodec is the Codec used for map values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// MapCodec registered.
type MapCodec struct {
DecodeZerosMap bool
EncodeNilAsEmpty bool
// DecodeZerosMap causes DecodeValue to delete any existing values from Go maps in the destination
// value passed to Decode before unmarshaling BSON documents into them.
//
// Deprecated: Use bson.Decoder.ZeroMaps instead.
DecodeZerosMap bool
// EncodeNilAsEmpty causes EncodeValue to marshal nil Go maps as empty BSON documents instead of
// BSON null.
//
// Deprecated: Use bson.Encoder.NilMapAsEmpty instead.
EncodeNilAsEmpty bool
// EncodeKeysWithStringer causes the Encoder to convert Go map keys to BSON document field name
// strings using fmt.Sprintf() instead of the default string conversion logic.
//
// Deprecated: Use bson.Encoder.StringifyMapKeysWithFmt instead.
EncodeKeysWithStringer bool
}
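
Editor's note: a hedged sketch of the encoder-level replacements named in the deprecation notes above; NilMapAsEmpty and StringifyMapKeysWithFmt are assumed to exist on bson.Encoder as those notes state.

package main

import (
	"bytes"
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsonrw"
)

func main() {
	buf := new(bytes.Buffer)
	vw, err := bsonrw.NewBSONValueWriter(buf)
	if err != nil {
		panic(err)
	}
	enc, err := bson.NewEncoder(vw)
	if err != nil {
		panic(err)
	}
	enc.NilMapAsEmpty()           // replaces MapCodec.EncodeNilAsEmpty
	enc.StringifyMapKeysWithFmt() // replaces MapCodec.EncodeKeysWithStringer

	type payload struct {
		Tags map[string]string `bson:"tags"`
	}
	if err := enc.Encode(payload{}); err != nil { // nil map encodes as {} rather than null
		panic(err)
	}
	fmt.Println(buf.Len() > 0)
}
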
var _ ValueCodec = &MapCodec{}
// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key.
// This applies to types used as map keys and is similar to encoding.TextMarshaler.
type KeyMarshaler interface {
@ -45,6 +60,9 @@ type KeyUnmarshaler interface {
}
// NewMapCodec returns a MapCodec with options opts.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// MapCodec registered.
func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec {
mapOpt := bsonoptions.MergeMapCodecOptions(opts...)
@ -67,7 +85,7 @@ func (mc *MapCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val ref
return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
}
if val.IsNil() && !mc.EncodeNilAsEmpty {
if val.IsNil() && !mc.EncodeNilAsEmpty && !ec.nilMapAsEmpty {
// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
// to a TopLevel document. We can't currently tell if this is what actually happened, but if
// there's a deeper underlying problem, the error will also be returned from WriteDocument,
@ -100,7 +118,7 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v
keys := val.MapKeys()
for _, key := range keys {
keyStr, err := mc.encodeKey(key)
keyStr, err := mc.encodeKey(key, ec.stringifyMapKeysWithFmt)
if err != nil {
return err
}
@ -163,7 +181,7 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref
val.Set(reflect.MakeMap(val.Type()))
}
if val.Len() > 0 && mc.DecodeZerosMap {
if val.Len() > 0 && (mc.DecodeZerosMap || dc.zeroMaps) {
clearMap(val)
}
@ -211,8 +229,8 @@ func clearMap(m reflect.Value) {
}
}
func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) {
if mc.EncodeKeysWithStringer {
func (mc *MapCodec) encodeKey(val reflect.Value, encodeKeysWithStringer bool) (string, error) {
if mc.EncodeKeysWithStringer || encodeKeysWithStringer {
return fmt.Sprint(val), nil
}

View File

@ -8,7 +8,6 @@ package bsoncodec
import (
"reflect"
"sync"
"go.mongodb.org/mongo-driver/bson/bsonrw"
"go.mongodb.org/mongo-driver/bson/bsontype"
@ -18,18 +17,20 @@ var _ ValueEncoder = &PointerCodec{}
var _ ValueDecoder = &PointerCodec{}
// PointerCodec is the Codec used for pointers.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// PointerCodec registered.
type PointerCodec struct {
ecache map[reflect.Type]ValueEncoder
dcache map[reflect.Type]ValueDecoder
l sync.RWMutex
ecache typeEncoderCache
dcache typeDecoderCache
}
// NewPointerCodec returns a PointerCodec that has been initialized.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// PointerCodec registered.
func NewPointerCodec() *PointerCodec {
return &PointerCodec{
ecache: make(map[reflect.Type]ValueEncoder),
dcache: make(map[reflect.Type]ValueDecoder),
}
return &PointerCodec{}
}
// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
@ -46,24 +47,19 @@ func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val
return vw.WriteNull()
}
pc.l.RLock()
enc, ok := pc.ecache[val.Type()]
pc.l.RUnlock()
if ok {
if enc == nil {
return ErrNoEncoder{Type: val.Type()}
typ := val.Type()
if v, ok := pc.ecache.Load(typ); ok {
if v == nil {
return ErrNoEncoder{Type: typ}
}
return enc.EncodeValue(ec, vw, val.Elem())
return v.EncodeValue(ec, vw, val.Elem())
}
enc, err := ec.LookupEncoder(val.Type().Elem())
pc.l.Lock()
pc.ecache[val.Type()] = enc
pc.l.Unlock()
// TODO(charlie): handle concurrent requests for the same type
enc, err := ec.LookupEncoder(typ.Elem())
enc = pc.ecache.LoadOrStore(typ, enc)
if err != nil {
return err
}
return enc.EncodeValue(ec, vw, val.Elem())
}
@ -74,36 +70,31 @@ func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val
return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
}
typ := val.Type()
if vr.Type() == bsontype.Null {
val.Set(reflect.Zero(val.Type()))
val.Set(reflect.Zero(typ))
return vr.ReadNull()
}
if vr.Type() == bsontype.Undefined {
val.Set(reflect.Zero(val.Type()))
val.Set(reflect.Zero(typ))
return vr.ReadUndefined()
}
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
val.Set(reflect.New(typ.Elem()))
}
pc.l.RLock()
dec, ok := pc.dcache[val.Type()]
pc.l.RUnlock()
if ok {
if dec == nil {
return ErrNoDecoder{Type: val.Type()}
if v, ok := pc.dcache.Load(typ); ok {
if v == nil {
return ErrNoDecoder{Type: typ}
}
return dec.DecodeValue(dc, vr, val.Elem())
return v.DecodeValue(dc, vr, val.Elem())
}
dec, err := dc.LookupDecoder(val.Type().Elem())
pc.l.Lock()
pc.dcache[val.Type()] = dec
pc.l.Unlock()
// TODO(charlie): handle concurrent requests for the same type
dec, err := dc.LookupDecoder(typ.Elem())
dec = pc.dcache.LoadOrStore(typ, dec)
if err != nil {
return err
}
return dec.DecodeValue(dc, vr, val.Elem())
}
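The PointerCodec rewrite above replaces RWMutex-guarded maps with lock-free caches keyed by reflect.Type. A minimal sketch of the Load/LoadOrStore pattern it now relies on, using sync.Map directly (the driver uses its own typed typeEncoderCache and typeDecoderCache wrappers):

    package main

    import (
        "fmt"
        "reflect"
        "sync"
    )

    var cache sync.Map // map[reflect.Type]string

    // lookup returns the cached entry for t, computing and publishing it on a
    // miss. Concurrent callers may race on the computation, but LoadOrStore
    // guarantees they all end up observing the same stored value.
    func lookup(t reflect.Type) string {
        if v, ok := cache.Load(t); ok {
            return v.(string)
        }
        v, _ := cache.LoadOrStore(t, "codec for "+t.String())
        return v.(string)
    }

    func main() {
        fmt.Println(lookup(reflect.TypeOf(int64(0))))
        fmt.Println(lookup(reflect.TypeOf(int64(0)))) // served from the cache
    }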

View File

@ -16,12 +16,18 @@ import (
)
// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
//
// Deprecated: ErrNilType will not be supported in Go Driver 2.0.
var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
//
// Deprecated: ErrNotPointer will not be supported in Go Driver 2.0.
var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
// ErrNoEncoder is returned when there wasn't an encoder available for a type.
//
// Deprecated: ErrNoEncoder will not be supported in Go Driver 2.0.
type ErrNoEncoder struct {
Type reflect.Type
}
@ -34,6 +40,8 @@ func (ene ErrNoEncoder) Error() string {
}
// ErrNoDecoder is returned when there wasn't a decoder available for a type.
//
// Deprecated: ErrNoDecoder will not be supported in Go Driver 2.0.
type ErrNoDecoder struct {
Type reflect.Type
}
@ -43,6 +51,8 @@ func (end ErrNoDecoder) Error() string {
}
// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
//
// Deprecated: ErrNoTypeMapEntry will not be supported in Go Driver 2.0.
type ErrNoTypeMapEntry struct {
Type bsontype.Type
}
@ -52,63 +62,30 @@ func (entme ErrNoTypeMapEntry) Error() string {
}
// ErrNotInterface is returned when the provided type is not an interface.
//
// Deprecated: ErrNotInterface will not be supported in Go Driver 2.0.
var ErrNotInterface = errors.New("The provided type is not an interface")
// A RegistryBuilder is used to build a Registry. This type is not goroutine
// safe.
//
// Deprecated: Use Registry instead.
type RegistryBuilder struct {
typeEncoders map[reflect.Type]ValueEncoder
interfaceEncoders []interfaceValueEncoder
kindEncoders map[reflect.Kind]ValueEncoder
typeDecoders map[reflect.Type]ValueDecoder
interfaceDecoders []interfaceValueDecoder
kindDecoders map[reflect.Kind]ValueDecoder
typeMap map[bsontype.Type]reflect.Type
}
// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
// type passed around and Encoders and Decoders are constructed from it.
type Registry struct {
typeEncoders map[reflect.Type]ValueEncoder
typeDecoders map[reflect.Type]ValueDecoder
interfaceEncoders []interfaceValueEncoder
interfaceDecoders []interfaceValueDecoder
kindEncoders map[reflect.Kind]ValueEncoder
kindDecoders map[reflect.Kind]ValueDecoder
typeMap map[bsontype.Type]reflect.Type
mu sync.RWMutex
registry *Registry
}
// NewRegistryBuilder creates a new empty RegistryBuilder.
//
// Deprecated: Use NewRegistry instead.
func NewRegistryBuilder() *RegistryBuilder {
return &RegistryBuilder{
typeEncoders: make(map[reflect.Type]ValueEncoder),
typeDecoders: make(map[reflect.Type]ValueDecoder),
interfaceEncoders: make([]interfaceValueEncoder, 0),
interfaceDecoders: make([]interfaceValueDecoder, 0),
kindEncoders: make(map[reflect.Kind]ValueEncoder),
kindDecoders: make(map[reflect.Kind]ValueDecoder),
typeMap: make(map[bsontype.Type]reflect.Type),
registry: NewRegistry(),
}
}
func buildDefaultRegistry() *Registry {
rb := NewRegistryBuilder()
defaultValueEncoders.RegisterDefaultEncoders(rb)
defaultValueDecoders.RegisterDefaultDecoders(rb)
return rb.Build()
}
// RegisterCodec will register the provided ValueCodec for the provided type.
//
// Deprecated: Use Registry.RegisterTypeEncoder and Registry.RegisterTypeDecoder instead.
func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
rb.RegisterTypeEncoder(t, codec)
rb.RegisterTypeDecoder(t, codec)
@ -120,31 +97,22 @@ func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *Regi
// The type will be used directly, so an encoder can be registered for a type and a different encoder can be registered
// for a pointer to that type.
//
// If the given type is an interface, the encoder will be called when marshalling a type that is that interface. It
// will not be called when marshalling a non-interface type that implements the interface.
// If the given type is an interface, the encoder will be called when marshaling a type that is that interface. It
// will not be called when marshaling a non-interface type that implements the interface.
//
// Deprecated: Use Registry.RegisterTypeEncoder instead.
func (rb *RegistryBuilder) RegisterTypeEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
rb.typeEncoders[t] = enc
rb.registry.RegisterTypeEncoder(t, enc)
return rb
}
// RegisterHookEncoder will register an encoder for the provided interface type t. This encoder will be called when
// marshalling a type if the type implements t or a pointer to the type implements t. If the provided type is not
// marshaling a type if the type implements t or a pointer to the type implements t. If the provided type is not
// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
//
// Deprecated: Use Registry.RegisterInterfaceEncoder instead.
func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
if t.Kind() != reflect.Interface {
panicStr := fmt.Sprintf("RegisterHookEncoder expects a type with kind reflect.Interface, "+
"got type %s with kind %s", t, t.Kind())
panic(panicStr)
}
for idx, encoder := range rb.interfaceEncoders {
if encoder.i == t {
rb.interfaceEncoders[idx].ve = enc
return rb
}
}
rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc})
rb.registry.RegisterInterfaceEncoder(t, enc)
return rb
}
@ -153,97 +121,78 @@ func (rb *RegistryBuilder) RegisterHookEncoder(t reflect.Type, enc ValueEncoder)
// The type will be used directly, so a decoder can be registered for a type and a different decoder can be registered
// for a pointer to that type.
//
// If the given type is an interface, the decoder will be called when unmarshalling into a type that is that interface.
// It will not be called when unmarshalling into a non-interface type that implements the interface.
// If the given type is an interface, the decoder will be called when unmarshaling into a type that is that interface.
// It will not be called when unmarshaling into a non-interface type that implements the interface.
//
// Deprecated: Use Registry.RegisterTypeDecoder instead.
func (rb *RegistryBuilder) RegisterTypeDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
rb.typeDecoders[t] = dec
rb.registry.RegisterTypeDecoder(t, dec)
return rb
}
// RegisterHookDecoder will register a decoder for the provided interface type t. This decoder will be called when
// unmarshalling into a type if the type implements t or a pointer to the type implements t. If the provided type is not
// unmarshaling into a type if the type implements t or a pointer to the type implements t. If the provided type is not
// an interface (i.e. t.Kind() != reflect.Interface), this method will panic.
//
// Deprecated: Use Registry.RegisterInterfaceDecoder instead.
func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
if t.Kind() != reflect.Interface {
panicStr := fmt.Sprintf("RegisterHookDecoder expects a type with kind reflect.Interface, "+
"got type %s with kind %s", t, t.Kind())
panic(panicStr)
}
for idx, decoder := range rb.interfaceDecoders {
if decoder.i == t {
rb.interfaceDecoders[idx].vd = dec
return rb
}
}
rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec})
rb.registry.RegisterInterfaceDecoder(t, dec)
return rb
}
// RegisterEncoder registers the provided type and encoder pair.
//
// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead.
// Deprecated: Use Registry.RegisterTypeEncoder or Registry.RegisterInterfaceEncoder instead.
func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
if t == tEmpty {
rb.typeEncoders[t] = enc
rb.registry.RegisterTypeEncoder(t, enc)
return rb
}
switch t.Kind() {
case reflect.Interface:
for idx, ir := range rb.interfaceEncoders {
if ir.i == t {
rb.interfaceEncoders[idx].ve = enc
return rb
}
}
rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc})
rb.registry.RegisterInterfaceEncoder(t, enc)
default:
rb.typeEncoders[t] = enc
rb.registry.RegisterTypeEncoder(t, enc)
}
return rb
}
// RegisterDecoder registers the provided type and decoder pair.
//
// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead.
// Deprecated: Use Registry.RegisterTypeDecoder or Registry.RegisterInterfaceDecoder instead.
func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
if t == nil {
rb.typeDecoders[nil] = dec
rb.registry.RegisterTypeDecoder(t, dec)
return rb
}
if t == tEmpty {
rb.typeDecoders[t] = dec
rb.registry.RegisterTypeDecoder(t, dec)
return rb
}
switch t.Kind() {
case reflect.Interface:
for idx, ir := range rb.interfaceDecoders {
if ir.i == t {
rb.interfaceDecoders[idx].vd = dec
return rb
}
}
rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec})
rb.registry.RegisterInterfaceDecoder(t, dec)
default:
rb.typeDecoders[t] = dec
rb.registry.RegisterTypeDecoder(t, dec)
}
return rb
}
// RegisterDefaultEncoder will registr the provided ValueEncoder to the provided
// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
// kind.
//
// Deprecated: Use Registry.RegisterKindEncoder instead.
func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
rb.kindEncoders[kind] = enc
rb.registry.RegisterKindEncoder(kind, enc)
return rb
}
// RegisterDefaultDecoder will register the provided ValueDecoder to the
// provided kind.
//
// Deprecated: Use Registry.RegisterKindDecoder instead.
func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
rb.kindDecoders[kind] = dec
rb.registry.RegisterKindDecoder(kind, dec)
return rb
}
@ -256,120 +205,235 @@ func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDe
// to decode to bson.Raw, use the following code:
//
// rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
//
// Deprecated: Use Registry.RegisterTypeMapEntry instead.
func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
rb.typeMap[bt] = rt
rb.registry.RegisterTypeMapEntry(bt, rt)
return rb
}
// Build creates a Registry from the current state of this RegistryBuilder.
//
// Deprecated: Use NewRegistry instead.
func (rb *RegistryBuilder) Build() *Registry {
registry := new(Registry)
registry.typeEncoders = make(map[reflect.Type]ValueEncoder)
for t, enc := range rb.typeEncoders {
registry.typeEncoders[t] = enc
r := &Registry{
interfaceEncoders: append([]interfaceValueEncoder(nil), rb.registry.interfaceEncoders...),
interfaceDecoders: append([]interfaceValueDecoder(nil), rb.registry.interfaceDecoders...),
typeEncoders: rb.registry.typeEncoders.Clone(),
typeDecoders: rb.registry.typeDecoders.Clone(),
kindEncoders: rb.registry.kindEncoders.Clone(),
kindDecoders: rb.registry.kindDecoders.Clone(),
}
registry.typeDecoders = make(map[reflect.Type]ValueDecoder)
for t, dec := range rb.typeDecoders {
registry.typeDecoders[t] = dec
}
registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders))
copy(registry.interfaceEncoders, rb.interfaceEncoders)
registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders))
copy(registry.interfaceDecoders, rb.interfaceDecoders)
registry.kindEncoders = make(map[reflect.Kind]ValueEncoder)
for kind, enc := range rb.kindEncoders {
registry.kindEncoders[kind] = enc
}
registry.kindDecoders = make(map[reflect.Kind]ValueDecoder)
for kind, dec := range rb.kindDecoders {
registry.kindDecoders[kind] = dec
}
registry.typeMap = make(map[bsontype.Type]reflect.Type)
for bt, rt := range rb.typeMap {
registry.typeMap[bt] = rt
}
return registry
rb.registry.typeMap.Range(func(k, v interface{}) bool {
if k != nil && v != nil {
r.typeMap.Store(k, v)
}
return true
})
return r
}
// LookupEncoder inspects the registry for an encoder for the given type. The lookup precedence works as follows:
// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
// type passed around and Encoders and Decoders are constructed from it.
type Registry struct {
interfaceEncoders []interfaceValueEncoder
interfaceDecoders []interfaceValueDecoder
typeEncoders *typeEncoderCache
typeDecoders *typeDecoderCache
kindEncoders *kindEncoderCache
kindDecoders *kindDecoderCache
typeMap sync.Map // map[bsontype.Type]reflect.Type
}
// NewRegistry creates a new empty Registry.
func NewRegistry() *Registry {
return &Registry{
typeEncoders: new(typeEncoderCache),
typeDecoders: new(typeDecoderCache),
kindEncoders: new(kindEncoderCache),
kindDecoders: new(kindDecoderCache),
}
}
// RegisterTypeEncoder registers the provided ValueEncoder for the provided type.
//
// 1. An encoder registered for the exact type. If the given type represents an interface, an encoder registered using
// RegisterTypeEncoder for the interface will be selected.
// The type will be used as provided, so an encoder can be registered for a type and a different
// encoder can be registered for a pointer to that type.
//
// 2. An encoder registered using RegisterHookEncoder for an interface implemented by the type or by a pointer to the
// type.
// If the given type is an interface, the encoder will be called when marshaling a type that is
// that interface. It will not be called when marshaling a non-interface type that implements the
// interface. To get the latter behavior, call RegisterHookEncoder instead.
//
// 3. An encoder registered for the reflect.Kind of the value.
// RegisterTypeEncoder should not be called concurrently with any other Registry method.
func (r *Registry) RegisterTypeEncoder(valueType reflect.Type, enc ValueEncoder) {
r.typeEncoders.Store(valueType, enc)
}
// RegisterTypeDecoder registers the provided ValueDecoder for the provided type.
//
// If no encoder is found, an error of type ErrNoEncoder is returned.
func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) {
encodererr := ErrNoEncoder{Type: t}
r.mu.RLock()
enc, found := r.lookupTypeEncoder(t)
r.mu.RUnlock()
// The type will be used as provided, so a decoder can be registered for a type and a different
// decoder can be registered for a pointer to that type.
//
// If the given type is an interface, the decoder will be called when unmarshaling into a type that
// is that interface. It will not be called when unmarshaling into a non-interface type that
// implements the interface. To get the latter behavior, call RegisterHookDecoder instead.
//
// RegisterTypeDecoder should not be called concurrently with any other Registry method.
func (r *Registry) RegisterTypeDecoder(valueType reflect.Type, dec ValueDecoder) {
r.typeDecoders.Store(valueType, dec)
}
// RegisterKindEncoder registers the provided ValueEncoder for the provided kind.
//
// Use RegisterKindEncoder to register an encoder for any type with the same underlying kind. For
// example, consider the type MyInt defined as
//
// type MyInt int32
//
// To define an encoder for MyInt and int32, use RegisterKindEncoder like
//
// reg.RegisterKindEncoder(reflect.Int32, myEncoder)
//
// RegisterKindEncoder should not be called concurrently with any other Registry method.
func (r *Registry) RegisterKindEncoder(kind reflect.Kind, enc ValueEncoder) {
r.kindEncoders.Store(kind, enc)
}
// RegisterKindDecoder registers the provided ValueDecoder for the provided kind.
//
// Use RegisterKindDecoder to register a decoder for any type with the same underlying kind. For
// example, consider the type MyInt defined as
//
// type MyInt int32
//
// To define a decoder for MyInt and int32, use RegisterKindDecoder like
//
// reg.RegisterKindDecoder(reflect.Int32, myDecoder)
//
// RegisterKindDecoder should not be called concurrently with any other Registry method.
func (r *Registry) RegisterKindDecoder(kind reflect.Kind, dec ValueDecoder) {
r.kindDecoders.Store(kind, dec)
}
// RegisterInterfaceEncoder registers an encoder for the provided interface type iface. This encoder will
// be called when marshaling a type if the type implements iface or a pointer to the type
// implements iface. If the provided type is not an interface
// (i.e. iface.Kind() != reflect.Interface), this method will panic.
//
// RegisterInterfaceEncoder should not be called concurrently with any other Registry method.
func (r *Registry) RegisterInterfaceEncoder(iface reflect.Type, enc ValueEncoder) {
if iface.Kind() != reflect.Interface {
panicStr := fmt.Errorf("RegisterInterfaceEncoder expects a type with kind reflect.Interface, "+
"got type %s with kind %s", iface, iface.Kind())
panic(panicStr)
}
for idx, encoder := range r.interfaceEncoders {
if encoder.i == iface {
r.interfaceEncoders[idx].ve = enc
return
}
}
r.interfaceEncoders = append(r.interfaceEncoders, interfaceValueEncoder{i: iface, ve: enc})
}
// RegisterInterfaceDecoder registers a decoder for the provided interface type iface. This decoder will
// be called when unmarshaling into a type if the type implements iface or a pointer to the type
// implements iface. If the provided type is not an interface (i.e. iface.Kind() != reflect.Interface),
// this method will panic.
//
// RegisterInterfaceDecoder should not be called concurrently with any other Registry method.
func (r *Registry) RegisterInterfaceDecoder(iface reflect.Type, dec ValueDecoder) {
if iface.Kind() != reflect.Interface {
panicStr := fmt.Errorf("RegisterInterfaceDecoder expects a type with kind reflect.Interface, "+
"got type %s with kind %s", iface, iface.Kind())
panic(panicStr)
}
for idx, decoder := range r.interfaceDecoders {
if decoder.i == iface {
r.interfaceDecoders[idx].vd = dec
return
}
}
r.interfaceDecoders = append(r.interfaceDecoders, interfaceValueDecoder{i: iface, vd: dec})
}
// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
// mapping is decoding situations where an empty interface is used and a default type needs to be
// created and decoded into.
//
// By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON
// documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents
// to decode to bson.Raw, use the following code:
//
// reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{}))
func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) {
r.typeMap.Store(bt, rt)
}
// LookupEncoder returns the first matching encoder in the Registry. It uses the following lookup
// order:
//
// 1. An encoder registered for the exact type. If the given type is an interface, an encoder
// registered using RegisterTypeEncoder for that interface will be selected.
//
// 2. An encoder registered using RegisterInterfaceEncoder for an interface implemented by the type
// or by a pointer to the type.
//
// 3. An encoder registered using RegisterKindEncoder for the kind of value.
//
// If no encoder is found, an error of type ErrNoEncoder is returned. LookupEncoder is safe for
// concurrent use by multiple goroutines after all codecs and encoders are registered.
func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) {
enc, found := r.lookupTypeEncoder(valueType)
if found {
if enc == nil {
return nil, ErrNoEncoder{Type: t}
return nil, ErrNoEncoder{Type: valueType}
}
return enc, nil
}
enc, found = r.lookupInterfaceEncoder(t, true)
enc, found = r.lookupInterfaceEncoder(valueType, true)
if found {
r.mu.Lock()
r.typeEncoders[t] = enc
r.mu.Unlock()
return enc, nil
return r.typeEncoders.LoadOrStore(valueType, enc), nil
}
if valueType == nil {
r.storeTypeEncoder(valueType, nil)
return nil, ErrNoEncoder{Type: valueType}
}
if t == nil {
r.mu.Lock()
r.typeEncoders[t] = nil
r.mu.Unlock()
return nil, encodererr
if v, ok := r.kindEncoders.Load(valueType.Kind()); ok {
return r.storeTypeEncoder(valueType, v), nil
}
enc, found = r.kindEncoders[t.Kind()]
if !found {
r.mu.Lock()
r.typeEncoders[t] = nil
r.mu.Unlock()
return nil, encodererr
}
r.mu.Lock()
r.typeEncoders[t] = enc
r.mu.Unlock()
return enc, nil
r.storeTypeEncoder(valueType, nil)
return nil, ErrNoEncoder{Type: valueType}
}
func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) {
enc, found := r.typeEncoders[t]
return enc, found
func (r *Registry) storeTypeEncoder(rt reflect.Type, enc ValueEncoder) ValueEncoder {
return r.typeEncoders.LoadOrStore(rt, enc)
}
func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (ValueEncoder, bool) {
if t == nil {
func (r *Registry) lookupTypeEncoder(rt reflect.Type) (ValueEncoder, bool) {
return r.typeEncoders.Load(rt)
}
func (r *Registry) lookupInterfaceEncoder(valueType reflect.Type, allowAddr bool) (ValueEncoder, bool) {
if valueType == nil {
return nil, false
}
for _, ienc := range r.interfaceEncoders {
if t.Implements(ienc.i) {
if valueType.Implements(ienc.i) {
return ienc.ve, true
}
if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(ienc.i) {
// if *t implements an interface, this will catch if t implements an interface further ahead
// in interfaceEncoders
defaultEnc, found := r.lookupInterfaceEncoder(t, false)
if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(ienc.i) {
// if *t implements an interface, this will catch if t implements an interface further
// ahead in interfaceEncoders
defaultEnc, found := r.lookupInterfaceEncoder(valueType, false)
if !found {
defaultEnc = r.kindEncoders[t.Kind()]
defaultEnc, _ = r.kindEncoders.Load(valueType.Kind())
}
return newCondAddrEncoder(ienc.ve, defaultEnc), true
}
@ -377,70 +441,62 @@ func (r *Registry) lookupInterfaceEncoder(t reflect.Type, allowAddr bool) (Value
return nil, false
}
// LookupDecoder inspects the registry for an decoder for the given type. The lookup precedence works as follows:
// LookupDecoder returns the first matching decoder in the Registry. It uses the following lookup
// order:
//
// 1. A decoder registered for the exact type. If the given type represents an interface, a decoder registered using
// RegisterTypeDecoder for the interface will be selected.
// 1. A decoder registered for the exact type. If the given type is an interface, a decoder
// registered using RegisterTypeDecoder for that interface will be selected.
//
// 2. A decoder registered using RegisterHookDecoder for an interface implemented by the type or by a pointer to the
// type.
// 2. A decoder registered using RegisterInterfaceDecoder for an interface implemented by the type or by
// a pointer to the type.
//
// 3. A decoder registered for the reflect.Kind of the value.
// 3. A decoder registered using RegisterKindDecoder for the kind of value.
//
// If no decoder is found, an error of type ErrNoDecoder is returned.
func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) {
if t == nil {
// If no decoder is found, an error of type ErrNoDecoder is returned. LookupDecoder is safe for
// concurrent use by multiple goroutines after all codecs and decoders are registered.
func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) {
if valueType == nil {
return nil, ErrNilType
}
decodererr := ErrNoDecoder{Type: t}
r.mu.RLock()
dec, found := r.lookupTypeDecoder(t)
r.mu.RUnlock()
dec, found := r.lookupTypeDecoder(valueType)
if found {
if dec == nil {
return nil, ErrNoDecoder{Type: t}
return nil, ErrNoDecoder{Type: valueType}
}
return dec, nil
}
dec, found = r.lookupInterfaceDecoder(t, true)
dec, found = r.lookupInterfaceDecoder(valueType, true)
if found {
r.mu.Lock()
r.typeDecoders[t] = dec
r.mu.Unlock()
return dec, nil
return r.storeTypeDecoder(valueType, dec), nil
}
dec, found = r.kindDecoders[t.Kind()]
if !found {
r.mu.Lock()
r.typeDecoders[t] = nil
r.mu.Unlock()
return nil, decodererr
if v, ok := r.kindDecoders.Load(valueType.Kind()); ok {
return r.storeTypeDecoder(valueType, v), nil
}
r.mu.Lock()
r.typeDecoders[t] = dec
r.mu.Unlock()
return dec, nil
r.storeTypeDecoder(valueType, nil)
return nil, ErrNoDecoder{Type: valueType}
}
func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) {
dec, found := r.typeDecoders[t]
return dec, found
func (r *Registry) lookupTypeDecoder(valueType reflect.Type) (ValueDecoder, bool) {
return r.typeDecoders.Load(valueType)
}
func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (ValueDecoder, bool) {
func (r *Registry) storeTypeDecoder(typ reflect.Type, dec ValueDecoder) ValueDecoder {
return r.typeDecoders.LoadOrStore(typ, dec)
}
func (r *Registry) lookupInterfaceDecoder(valueType reflect.Type, allowAddr bool) (ValueDecoder, bool) {
for _, idec := range r.interfaceDecoders {
if t.Implements(idec.i) {
if valueType.Implements(idec.i) {
return idec.vd, true
}
if allowAddr && t.Kind() != reflect.Ptr && reflect.PtrTo(t).Implements(idec.i) {
// if *t implements an interface, this will catch if t implements an interface further ahead
// in interfaceDecoders
defaultDec, found := r.lookupInterfaceDecoder(t, false)
if allowAddr && valueType.Kind() != reflect.Ptr && reflect.PtrTo(valueType).Implements(idec.i) {
// if *t implements an interface, this will catch if t implements an interface further
// ahead in interfaceDecoders
defaultDec, found := r.lookupInterfaceDecoder(valueType, false)
if !found {
defaultDec = r.kindDecoders[t.Kind()]
defaultDec, _ = r.kindDecoders.Load(valueType.Kind())
}
return newCondAddrDecoder(idec.vd, defaultDec), true
}
@ -450,12 +506,14 @@ func (r *Registry) lookupInterfaceDecoder(t reflect.Type, allowAddr bool) (Value
// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON
// type. If no type is found, ErrNoTypeMapEntry is returned.
//
// LookupTypeMapEntry should not be called concurrently with any other Registry method.
func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) {
t, ok := r.typeMap[bt]
if !ok || t == nil {
v, ok := r.typeMap.Load(bt)
if v == nil || !ok {
return nil, ErrNoTypeMapEntry{Type: bt}
}
return t, nil
return v.(reflect.Type), nil
}
type interfaceValueEncoder struct {

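Taken together, these registry.go changes reduce RegistryBuilder to a deprecated shim over Registry, which is now constructed and populated directly. A hedged usage sketch built only from the constructors and methods visible in this diff; the choice of StringCodec and of map[string]interface{} as the embedded-document type is illustrative:

    package main

    import (
        "fmt"
        "reflect"

        "go.mongodb.org/mongo-driver/bson/bsoncodec"
        "go.mongodb.org/mongo-driver/bson/bsontype"
    )

    func main() {
        reg := bsoncodec.NewRegistry()

        // Kind-level registration covers every type whose underlying kind is string.
        strCodec := bsoncodec.NewStringCodec()
        reg.RegisterKindEncoder(reflect.String, strCodec)
        reg.RegisterKindDecoder(reflect.String, strCodec)

        // Map the embedded-document BSON type to a default Go type for interface{} targets.
        reg.RegisterTypeMapEntry(bsontype.EmbeddedDocument,
            reflect.TypeOf(map[string]interface{}{}))

        // LookupEncoder falls back from type to interface to kind registrations.
        enc, err := reg.LookupEncoder(reflect.TypeOf(""))
        fmt.Println(enc != nil, err) // true <nil>
    }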
View File

@ -19,13 +19,21 @@ import (
var defaultSliceCodec = NewSliceCodec()
// SliceCodec is the Codec used for slice values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// SliceCodec registered.
type SliceCodec struct {
// EncodeNilAsEmpty causes EncodeValue to marshal nil Go slices as empty BSON arrays instead of
// BSON null.
//
// Deprecated: Use bson.Encoder.NilSliceAsEmpty instead.
EncodeNilAsEmpty bool
}
var _ ValueCodec = &MapCodec{}
// NewSliceCodec returns a SliceCodec with options opts.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// SliceCodec registered.
func NewSliceCodec(opts ...*bsonoptions.SliceCodecOptions) *SliceCodec {
sliceOpt := bsonoptions.MergeSliceCodecOptions(opts...)
@ -42,21 +50,19 @@ func (sc SliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re
return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
}
if val.IsNil() && !sc.EncodeNilAsEmpty {
if val.IsNil() && !sc.EncodeNilAsEmpty && !ec.nilSliceAsEmpty {
return vw.WriteNull()
}
// If we have a []byte we want to treat it as a binary instead of as an array.
if val.Type().Elem() == tByte {
var byteSlice []byte
for idx := 0; idx < val.Len(); idx++ {
byteSlice = append(byteSlice, val.Index(idx).Interface().(byte))
}
byteSlice := make([]byte, val.Len())
reflect.Copy(reflect.ValueOf(byteSlice), val)
return vw.WriteBinary(byteSlice)
}
// If we have a []primitive.E we want to treat it as a document instead of as an array.
if val.Type().ConvertibleTo(tD) {
if val.Type() == tD || val.Type().ConvertibleTo(tD) {
d := val.Convert(tD).Interface().(primitive.D)
dw, err := vw.WriteDocument()
@ -145,11 +151,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r
if val.IsNil() {
val.Set(reflect.MakeSlice(val.Type(), 0, len(data)))
}
val.SetLen(0)
for _, elem := range data {
val.Set(reflect.Append(val, reflect.ValueOf(elem)))
}
val.Set(reflect.AppendSlice(val, reflect.ValueOf(data)))
return nil
case bsontype.String:
if sliceType := val.Type().Elem(); sliceType != tByte {
@ -164,11 +167,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r
if val.IsNil() {
val.Set(reflect.MakeSlice(val.Type(), 0, len(byteStr)))
}
val.SetLen(0)
for _, elem := range byteStr {
val.Set(reflect.Append(val, reflect.ValueOf(elem)))
}
val.Set(reflect.AppendSlice(val, reflect.ValueOf(byteStr)))
return nil
default:
return fmt.Errorf("cannot decode %v into a slice", vrType)

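The slice codec hunks above swap per-element reflect.Append loops for reflect.Copy and reflect.AppendSlice. A small standard-library illustration of those two calls outside the codec:

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        // Copy a byte-kind slice into a plain []byte without looping.
        type myBytes []byte
        src := reflect.ValueOf(myBytes("hello"))
        dst := make([]byte, src.Len())
        reflect.Copy(reflect.ValueOf(dst), src)
        fmt.Println(string(dst)) // hello

        // Append a whole decoded slice in one call instead of one element at a time.
        val := reflect.ValueOf([]int{1, 2})
        val = reflect.AppendSlice(val, reflect.ValueOf([]int{3, 4}))
        fmt.Println(val.Interface()) // [1 2 3 4]
    }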
View File

@ -15,26 +15,38 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype"
)
// StringCodec is the Codec used for struct values.
// StringCodec is the Codec used for string values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// StringCodec registered.
type StringCodec struct {
// DecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation.
// If false, a string made from the raw object ID bytes will be used. Defaults to true.
//
// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0.
DecodeObjectIDAsHex bool
}
var (
defaultStringCodec = NewStringCodec()
_ ValueCodec = defaultStringCodec
// Assert that defaultStringCodec satisfies the typeDecoder interface, which allows it to be
// used by collection type decoders (e.g. map, slice, etc) to set individual values in a
// collection.
_ typeDecoder = defaultStringCodec
)
// NewStringCodec returns a StringCodec with options opts.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// StringCodec registered.
func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec {
stringOpt := bsonoptions.MergeStringCodecOptions(opts...)
return &StringCodec{*stringOpt.DecodeObjectIDAsHex}
}
// EncodeValue is the ValueEncoder for string types.
func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
func (sc *StringCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if val.Kind() != reflect.String {
return ValueEncoderError{
Name: "StringEncodeValue",
@ -46,7 +58,7 @@ func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, va
return vw.WriteString(val.String())
}
func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (sc *StringCodec) decodeType(_ DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
if t.Kind() != reflect.String {
return emptyValue, ValueDecoderError{
Name: "StringDecodeValue",
@ -71,6 +83,7 @@ func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t ref
if sc.DecodeObjectIDAsHex {
str = oid.Hex()
} else {
// TODO(GODRIVER-2796): Return an error here instead of decoding to a garbled string.
byteArray := [12]byte(oid)
str = string(byteArray[:])
}
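For the ObjectID branch above: the default hex path yields a readable 24-character string, while the deprecated raw path reinterprets the 12 ID bytes as a string. A quick illustration, assuming the primitive package these codecs already depend on:

    package main

    import (
        "fmt"

        "go.mongodb.org/mongo-driver/bson/primitive"
    )

    func main() {
        oid := primitive.NewObjectID()

        // Hex form: 24 readable characters (DecodeObjectIDAsHex=true, the default).
        fmt.Println(oid.Hex())

        // Raw form: the 12 bytes reinterpreted as a string, usually garbled output.
        raw := [12]byte(oid)
        fmt.Println(len(string(raw[:]))) // 12
    }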

View File

@ -59,14 +59,43 @@ type Zeroer interface {
}
// StructCodec is the Codec used for struct values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// StructCodec registered.
type StructCodec struct {
cache map[reflect.Type]*structDescription
l sync.RWMutex
parser StructTagParser
DecodeZeroStruct bool
DecodeDeepZeroInline bool
EncodeOmitDefaultStruct bool
AllowUnexportedFields bool
cache sync.Map // map[reflect.Type]*structDescription
parser StructTagParser
// DecodeZeroStruct causes DecodeValue to delete any existing values from Go structs in the
// destination value passed to Decode before unmarshaling BSON documents into them.
//
// Deprecated: Use bson.Decoder.ZeroStructs instead.
DecodeZeroStruct bool
// DecodeDeepZeroInline causes DecodeValue to delete any existing values from Go structs in the
// destination value passed to Decode before unmarshaling BSON documents into them.
//
// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
DecodeDeepZeroInline bool
// EncodeOmitDefaultStruct causes the Encoder to consider the zero value for a struct (e.g.
// MyStruct{}) as empty and omit it from the marshaled BSON when the "omitempty" struct tag
// option is set.
//
// Deprecated: Use bson.Encoder.OmitZeroStruct instead.
EncodeOmitDefaultStruct bool
// AllowUnexportedFields allows encoding and decoding values from un-exported struct fields.
//
// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
// supported in Go Driver 2.0.
AllowUnexportedFields bool
// OverwriteDuplicatedInlinedFields, if false, causes EncodeValue to return an error if there is
// a duplicate field in the marshaled BSON when the "inline" struct tag option is set. The
// default value is true.
//
// Deprecated: Use bson.Encoder.ErrorOnInlineDuplicates instead.
OverwriteDuplicatedInlinedFields bool
}
@ -74,6 +103,9 @@ var _ ValueEncoder = &StructCodec{}
var _ ValueDecoder = &StructCodec{}
// NewStructCodec returns a StructCodec that uses p for struct tag parsing.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// StructCodec registered.
func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions) (*StructCodec, error) {
if p == nil {
return nil, errors.New("a StructTagParser must be provided to NewStructCodec")
@ -82,7 +114,6 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions)
structOpt := bsonoptions.MergeStructCodecOptions(opts...)
codec := &StructCodec{
cache: make(map[reflect.Type]*structDescription),
parser: p,
}
@ -106,12 +137,12 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions)
}
// EncodeValue handles encoding generic struct types.
func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
func (sc *StructCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Kind() != reflect.Struct {
return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
}
sd, err := sc.describeStruct(r.Registry, val.Type())
sd, err := sc.describeStruct(ec.Registry, val.Type(), ec.useJSONStructTags, ec.errorOnInlineDuplicates)
if err != nil {
return err
}
@ -131,7 +162,7 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r
}
}
desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(r, desc.encoder, rv)
desc.encoder, rv, err = defaultValueEncoders.lookupElementEncoder(ec, desc.encoder, rv)
if err != nil && err != errInvalidValue {
return err
@ -158,17 +189,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r
encoder := desc.encoder
var isZero bool
rvInterface := rv.Interface()
var zero bool
if cz, ok := encoder.(CodecZeroer); ok {
isZero = cz.IsTypeZero(rvInterface)
zero = cz.IsTypeZero(rv.Interface())
} else if rv.Kind() == reflect.Interface {
// sc.isZero will not treat an interface rv as an interface, so we need to check for the zero interface separately.
isZero = rv.IsNil()
// isZero will not treat an interface rv as an interface, so we need to check for the
// zero interface separately.
zero = rv.IsNil()
} else {
isZero = sc.isZero(rvInterface)
zero = isZero(rv, sc.EncodeOmitDefaultStruct || ec.omitZeroStruct)
}
if desc.omitEmpty && isZero {
if desc.omitEmpty && zero {
continue
}
@ -177,7 +208,17 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r
return err
}
ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize}
ectx := EncodeContext{
Registry: ec.Registry,
MinSize: desc.minSize || ec.MinSize,
errorOnInlineDuplicates: ec.errorOnInlineDuplicates,
stringifyMapKeysWithFmt: ec.stringifyMapKeysWithFmt,
nilMapAsEmpty: ec.nilMapAsEmpty,
nilSliceAsEmpty: ec.nilSliceAsEmpty,
nilByteSliceAsEmpty: ec.nilByteSliceAsEmpty,
omitZeroStruct: ec.omitZeroStruct,
useJSONStructTags: ec.useJSONStructTags,
}
err = encoder.EncodeValue(ectx, vw2, rv)
if err != nil {
return err
@ -191,7 +232,7 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r
return exists
}
return defaultMapCodec.mapEncodeValue(r, dw, rv, collisionFn)
return defaultMapCodec.mapEncodeValue(ec, dw, rv, collisionFn)
}
return dw.WriteDocumentEnd()
@ -213,7 +254,7 @@ func newDecodeError(key string, original error) error {
// DecodeValue implements the Codec interface.
// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr.
// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared.
func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
func (sc *StructCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Kind() != reflect.Struct {
return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
}
@ -238,12 +279,12 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type())
}
sd, err := sc.describeStruct(r.Registry, val.Type())
sd, err := sc.describeStruct(dc.Registry, val.Type(), dc.useJSONStructTags, false)
if err != nil {
return err
}
if sc.DecodeZeroStruct {
if sc.DecodeZeroStruct || dc.zeroStructs {
val.Set(reflect.Zero(val.Type()))
}
if sc.DecodeDeepZeroInline && sd.inline {
@ -254,7 +295,7 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
var inlineMap reflect.Value
if sd.inlineMap >= 0 {
inlineMap = val.Field(sd.inlineMap)
decoder, err = r.LookupDecoder(inlineMap.Type().Elem())
decoder, err = dc.LookupDecoder(inlineMap.Type().Elem())
if err != nil {
return err
}
@ -298,8 +339,8 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
}
elem := reflect.New(inlineMap.Type().Elem()).Elem()
r.Ancestor = inlineMap.Type()
err = decoder.DecodeValue(r, vr, elem)
dc.Ancestor = inlineMap.Type()
err = decoder.DecodeValue(dc, vr, elem)
if err != nil {
return err
}
@ -326,7 +367,17 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
}
field = field.Addr()
dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate || r.Truncate}
dctx := DecodeContext{
Registry: dc.Registry,
Truncate: fd.truncate || dc.Truncate,
defaultDocumentType: dc.defaultDocumentType,
binaryAsSlice: dc.binaryAsSlice,
useJSONStructTags: dc.useJSONStructTags,
useLocalTimeZone: dc.useLocalTimeZone,
zeroMaps: dc.zeroMaps,
zeroStructs: dc.zeroStructs,
}
if fd.decoder == nil {
return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()})
}
@ -340,51 +391,32 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
return nil
}
func (sc *StructCodec) isZero(i interface{}) bool {
v := reflect.ValueOf(i)
// check the value validity
if !v.IsValid() {
func isZero(v reflect.Value, omitZeroStruct bool) bool {
kind := v.Kind()
if (kind != reflect.Ptr || !v.IsNil()) && v.Type().Implements(tZeroer) {
return v.Interface().(Zeroer).IsZero()
}
if kind == reflect.Struct {
if !omitZeroStruct {
return false
}
vt := v.Type()
if vt == tTime {
return v.Interface().(time.Time).IsZero()
}
numField := vt.NumField()
for i := 0; i < numField; i++ {
ff := vt.Field(i)
if ff.PkgPath != "" && !ff.Anonymous {
continue // Private field
}
if !isZero(v.Field(i), omitZeroStruct) {
return false
}
}
return true
}
if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
return z.IsZero()
}
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Struct:
if sc.EncodeOmitDefaultStruct {
vt := v.Type()
if vt == tTime {
return v.Interface().(time.Time).IsZero()
}
for i := 0; i < v.NumField(); i++ {
if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
continue // Private field
}
fld := v.Field(i)
if !sc.isZero(fld.Interface()) {
return false
}
}
return true
}
}
return false
return !v.IsValid() || v.IsZero()
}
type structDescription struct {
@ -435,16 +467,35 @@ func (bi byIndex) Less(i, j int) bool {
return len(bi[i].inline) < len(bi[j].inline)
}
func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) {
func (sc *StructCodec) describeStruct(
r *Registry,
t reflect.Type,
useJSONStructTags bool,
errorOnDuplicates bool,
) (*structDescription, error) {
// We need to analyze the struct, including getting the tags, collecting
// information about inlining, and create a map of the field name to the field.
sc.l.RLock()
ds, exists := sc.cache[t]
sc.l.RUnlock()
if exists {
return ds, nil
if v, ok := sc.cache.Load(t); ok {
return v.(*structDescription), nil
}
// TODO(charlie): Only describe the struct once when called
// concurrently with the same type.
ds, err := sc.describeStructSlow(r, t, useJSONStructTags, errorOnDuplicates)
if err != nil {
return nil, err
}
if v, loaded := sc.cache.LoadOrStore(t, ds); loaded {
ds = v.(*structDescription)
}
return ds, nil
}
func (sc *StructCodec) describeStructSlow(
r *Registry,
t reflect.Type,
useJSONStructTags bool,
errorOnDuplicates bool,
) (*structDescription, error) {
numFields := t.NumField()
sd := &structDescription{
fm: make(map[string]fieldDescription, numFields),
@ -477,7 +528,14 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
decoder: decoder,
}
stags, err := sc.parser.ParseStructTags(sf)
var stags StructTags
// If the caller requested that we use JSON struct tags, use the JSONFallbackStructTagParser
// instead of the parser defined on the codec.
if useJSONStructTags {
stags, err = JSONFallbackStructTagParser.ParseStructTags(sf)
} else {
stags, err = sc.parser.ParseStructTags(sf)
}
if err != nil {
return nil, err
}
@ -507,7 +565,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
}
fallthrough
case reflect.Struct:
inlinesf, err := sc.describeStruct(r, sfType)
inlinesf, err := sc.describeStruct(r, sfType, useJSONStructTags, errorOnDuplicates)
if err != nil {
return nil, err
}
@ -559,7 +617,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if !ok || !sc.OverwriteDuplicatedInlinedFields {
if !ok || !sc.OverwriteDuplicatedInlinedFields || errorOnDuplicates {
return nil, fmt.Errorf("struct %s has duplicated key %s", t.String(), name)
}
sd.fl = append(sd.fl, dominant)
@ -568,10 +626,6 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
sort.Sort(byIndex(sd.fl))
sc.l.Lock()
sc.cache[t] = sd
sc.l.Unlock()
return sd, nil
}
@ -629,21 +683,21 @@ func getInlineField(val reflect.Value, index []int) (reflect.Value, error) {
// deepZero returns a recursively zeroed object
func deepZero(st reflect.Type) (result reflect.Value) {
result = reflect.Indirect(reflect.New(st))
if result.Kind() == reflect.Struct {
for i := 0; i < result.NumField(); i++ {
if f := result.Field(i); f.Kind() == reflect.Ptr {
if f.CanInterface() {
if ft := reflect.TypeOf(f.Interface()); ft.Elem().Kind() == reflect.Struct {
result.Field(i).Set(recursivePointerTo(deepZero(ft.Elem())))
}
if st.Kind() == reflect.Struct {
numField := st.NumField()
for i := 0; i < numField; i++ {
if result == emptyValue {
result = reflect.Indirect(reflect.New(st))
}
f := result.Field(i)
if f.CanInterface() {
if f.Type().Kind() == reflect.Struct {
result.Field(i).Set(recursivePointerTo(deepZero(f.Type().Elem())))
}
}
}
}
return
return result
}
// recursivePointerTo calls reflect.New(v.Type) but recursively for its fields inside

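A condensed sketch of the new package-level zero check: without omitZeroStruct a struct field is never treated as empty, with it the check walks exported fields; everything else defers to reflect.Value.IsZero (the real isZero also special-cases Zeroer implementations, which covers time.Time):

    package main

    import (
        "fmt"
        "reflect"
    )

    func isZeroSketch(v reflect.Value, omitZeroStruct bool) bool {
        if v.Kind() == reflect.Struct {
            if !omitZeroStruct {
                return false
            }
            for i := 0; i < v.NumField(); i++ {
                if v.Type().Field(i).PkgPath != "" {
                    continue // unexported field
                }
                if !isZeroSketch(v.Field(i), omitZeroStruct) {
                    return false
                }
            }
            return true
        }
        return !v.IsValid() || v.IsZero()
    }

    func main() {
        type inner struct{ N int }
        fmt.Println(isZeroSketch(reflect.ValueOf(inner{}), false)) // false
        fmt.Println(isZeroSketch(reflect.ValueOf(inner{}), true))  // true
    }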
View File

@ -12,12 +12,16 @@ import (
)
// StructTagParser returns the struct tags for a given struct field.
//
// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
type StructTagParser interface {
ParseStructTags(reflect.StructField) (StructTags, error)
}
// StructTagParserFunc is an adapter that allows a generic function to be used
// as a StructTagParser.
//
// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
type StructTagParserFunc func(reflect.StructField) (StructTags, error)
// ParseStructTags implements the StructTagParser interface.
@ -50,7 +54,7 @@ func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructT
// Skip This struct field should be skipped. This is usually denoted by parsing a "-"
// for the name.
//
// TODO(skriptble): Add tags for undefined as nil and for null as nil.
// Deprecated: Defining custom BSON struct tag parsers will not be supported in Go Driver 2.0.
type StructTags struct {
Name string
OmitEmpty bool
@ -85,6 +89,8 @@ type StructTags struct {
// A struct tag either consisting entirely of '-' or with a bson key with a
// value consisting entirely of '-' will return a StructTags with Skip true and
// the remaining fields will be their default values.
//
// Deprecated: DefaultStructTagParser will be removed in Go Driver 2.0.
var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
key := strings.ToLower(sf.Name)
tag, ok := sf.Tag.Lookup("bson")
@ -125,6 +131,9 @@ func parseTags(key string, tag string) (StructTags, error) {
// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser
// but will also fallback to parsing the json tag instead on a field where the
// bson tag isn't available.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.UseJSONStructTags] and
// [go.mongodb.org/mongo-driver/bson.Decoder.UseJSONStructTags] instead.
var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
key := strings.ToLower(sf.Name)
tag, ok := sf.Tag.Lookup("bson")

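A minimal sketch of the bson-then-json tag lookup that JSONFallbackStructTagParser performs; only the lookup order is shown, and handling of "-", inline, and the other options is omitted:

    package main

    import (
        "fmt"
        "reflect"
        "strings"
    )

    func fieldKey(sf reflect.StructField) string {
        tag, ok := sf.Tag.Lookup("bson")
        if !ok {
            tag, ok = sf.Tag.Lookup("json") // fallback when no bson tag is present
        }
        if !ok || tag == "" {
            return strings.ToLower(sf.Name)
        }
        return strings.Split(tag, ",")[0]
    }

    func main() {
        type doc struct {
            A string `bson:"aField"`
            B string `json:"bField,omitempty"`
            C string
        }
        t := reflect.TypeOf(doc{})
        for i := 0; i < t.NumField(); i++ {
            fmt.Println(fieldKey(t.Field(i))) // aField, bField, c
        }
    }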
View File

@ -22,18 +22,28 @@ const (
)
// TimeCodec is the Codec used for time.Time values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// TimeCodec registered.
type TimeCodec struct {
// UseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
//
// Deprecated: Use bson.Decoder.UseLocalTimeZone instead.
UseLocalTimeZone bool
}
var (
defaultTimeCodec = NewTimeCodec()
_ ValueCodec = defaultTimeCodec
// Assert that defaultTimeCodec satisfies the typeDecoder interface, which allows it to be used
// by collection type decoders (e.g. map, slice, etc) to set individual values in a collection.
_ typeDecoder = defaultTimeCodec
)
// NewTimeCodec returns a TimeCodec with options opts.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// TimeCodec registered.
func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec {
timeOpt := bsonoptions.MergeTimeCodecOptions(opts...)
@ -95,7 +105,7 @@ func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t refle
return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType)
}
if !tc.UseLocalTimeZone {
if !tc.UseLocalTimeZone && !dc.useLocalTimeZone {
timeVal = timeVal.UTC()
}
return reflect.ValueOf(timeVal), nil
@ -117,7 +127,7 @@ func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val re
}
// EncodeValue is the ValueEncoderFunc for time.Time.
func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
func (tc *TimeCodec) EncodeValue(_ EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tTime {
return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
}
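The time codec keeps normalizing decoded values to UTC unless the codec's UseLocalTimeZone or the new decoder-level flag is set. A small standard-library illustration of the difference (BSON datetimes are milliseconds since the Unix epoch):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        ms := int64(1_700_000_000_000)
        t := time.UnixMilli(ms)

        fmt.Println(t.UTC())   // normalized form the codec returns by default
        fmt.Println(t.Local()) // roughly what the local-time-zone path keeps
    }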

View File

@ -34,6 +34,7 @@ var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem()
var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem()
var tZeroer = reflect.TypeOf((*Zeroer)(nil)).Elem()
var tBinary = reflect.TypeOf(primitive.Binary{})
var tUndefined = reflect.TypeOf(primitive.Undefined{})
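The new tZeroer variable uses the usual trick for obtaining the reflect.Type of an interface: take a nil pointer to the interface and call Elem. A standalone illustration with a locally defined Zeroer:

    package main

    import (
        "fmt"
        "reflect"
        "time"
    )

    // Zeroer mirrors the bsoncodec interface of the same name for illustration.
    type Zeroer interface{ IsZero() bool }

    func main() {
        tZeroer := reflect.TypeOf((*Zeroer)(nil)).Elem()
        fmt.Println(tZeroer.Kind()) // interface

        // Concrete types can then be checked against it with Implements.
        fmt.Println(reflect.TypeOf(time.Time{}).Implements(tZeroer)) // true
    }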

View File

@ -17,18 +17,29 @@ import (
)
// UIntCodec is the Codec used for uint values.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// UIntCodec registered.
type UIntCodec struct {
// EncodeToMinSize causes EncodeValue to marshal Go uint values (excluding uint64) as the
// minimum BSON int size (either 32-bit or 64-bit) that can represent the integer value.
//
// Deprecated: Use bson.Encoder.IntMinSize instead.
EncodeToMinSize bool
}
var (
defaultUIntCodec = NewUIntCodec()
_ ValueCodec = defaultUIntCodec
// Assert that defaultUIntCodec satisfies the typeDecoder interface, which allows it to be used
// by collection type decoders (e.g. map, slice, etc) to set individual values in a collection.
_ typeDecoder = defaultUIntCodec
)
// NewUIntCodec returns a UIntCodec with options opts.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.NewRegistry] to get a registry with the
// UIntCodec registered.
func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec {
uintOpt := bsonoptions.MergeUIntCodecOptions(opts...)

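EncodeToMinSize (and the encoder-level IntMinSize option it is deprecated in favor of) writes a uint as a BSON int32 whenever the value fits, falling back to int64 otherwise; uint64 is excluded. A rough sketch of that width decision, standard library only (bsonIntWidth is an illustrative helper, not driver API):

    package main

    import (
        "fmt"
        "math"
    )

    // bsonIntWidth reports which BSON integer width a uint value would use under
    // min-size encoding (the uint64 and overflow branches are omitted here).
    func bsonIntWidth(u uint64) string {
        if u <= math.MaxInt32 {
            return "int32"
        }
        return "int64"
    }

    func main() {
        fmt.Println(bsonIntWidth(7))              // int32
        fmt.Println(bsonIntWidth(math.MaxUint32)) // int64
    }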
View File

@ -7,22 +7,33 @@
package bsonoptions
// ByteSliceCodecOptions represents all possible options for byte slice encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type ByteSliceCodecOptions struct {
EncodeNilAsEmpty *bool // Specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false.
}
// ByteSliceCodec creates a new *ByteSliceCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func ByteSliceCodec() *ByteSliceCodecOptions {
return &ByteSliceCodecOptions{}
}
// SetEncodeNilAsEmpty specifies if a nil byte slice should encode as an empty binary instead of null. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilByteSliceAsEmpty] instead.
func (bs *ByteSliceCodecOptions) SetEncodeNilAsEmpty(b bool) *ByteSliceCodecOptions {
bs.EncodeNilAsEmpty = &b
return bs
}
// MergeByteSliceCodecOptions combines the given *ByteSliceCodecOptions into a single *ByteSliceCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeByteSliceCodecOptions(opts ...*ByteSliceCodecOptions) *ByteSliceCodecOptions {
bs := ByteSliceCodec()
for _, opt := range opts {

View File

@ -7,22 +7,33 @@
package bsonoptions
// EmptyInterfaceCodecOptions represents all possible options for interface{} encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type EmptyInterfaceCodecOptions struct {
DecodeBinaryAsSlice *bool // Specifies if Old and Generic type binaries should default to a byte slice instead of primitive.Binary. Defaults to false.
}
// EmptyInterfaceCodec creates a new *EmptyInterfaceCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func EmptyInterfaceCodec() *EmptyInterfaceCodecOptions {
return &EmptyInterfaceCodecOptions{}
}
// SetDecodeBinaryAsSlice specifies if Old and Generic type binaries should default to a byte slice instead of primitive.Binary. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.BinaryAsSlice] instead.
func (e *EmptyInterfaceCodecOptions) SetDecodeBinaryAsSlice(b bool) *EmptyInterfaceCodecOptions {
e.DecodeBinaryAsSlice = &b
return e
}
// MergeEmptyInterfaceCodecOptions combines the given *EmptyInterfaceCodecOptions into a single *EmptyInterfaceCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeEmptyInterfaceCodecOptions(opts ...*EmptyInterfaceCodecOptions) *EmptyInterfaceCodecOptions {
e := EmptyInterfaceCodec()
for _, opt := range opts {

View File

@ -7,6 +7,9 @@
package bsonoptions
// MapCodecOptions represents all possible options for map encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type MapCodecOptions struct {
DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false.
EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false.
@ -19,17 +22,24 @@ type MapCodecOptions struct {
}
// MapCodec creates a new *MapCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func MapCodec() *MapCodecOptions {
return &MapCodecOptions{}
}
// SetDecodeZerosMap specifies if the map should be zeroed before decoding into it. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroMaps] instead.
func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions {
t.DecodeZerosMap = &b
return t
}
// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilMapAsEmpty] instead.
func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions {
t.EncodeNilAsEmpty = &b
return t
@ -40,12 +50,17 @@ func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions {
// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with
// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer
// will override TextMarshaler/TextUnmarshaler. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.StringifyMapKeysWithFmt] instead.
func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions {
t.EncodeKeysWithStringer = &b
return t
}
// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions {
s := MapCodec()
for _, opt := range opts {

View File

@ -7,22 +7,33 @@
package bsonoptions
// SliceCodecOptions represents all possible options for slice encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type SliceCodecOptions struct {
EncodeNilAsEmpty *bool // Specifies if a nil slice should encode as an empty array instead of null. Defaults to false.
}
// SliceCodec creates a new *SliceCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func SliceCodec() *SliceCodecOptions {
return &SliceCodecOptions{}
}
// SetEncodeNilAsEmpty specifies if a nil slice should encode as an empty array instead of null. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.NilSliceAsEmpty] instead.
func (s *SliceCodecOptions) SetEncodeNilAsEmpty(b bool) *SliceCodecOptions {
s.EncodeNilAsEmpty = &b
return s
}
// MergeSliceCodecOptions combines the given *SliceCodecOptions into a single *SliceCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeSliceCodecOptions(opts ...*SliceCodecOptions) *SliceCodecOptions {
s := SliceCodec()
for _, opt := range opts {

View File

@ -9,23 +9,34 @@ package bsonoptions
var defaultDecodeOIDAsHex = true
// StringCodecOptions represents all possible options for string encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type StringCodecOptions struct {
DecodeObjectIDAsHex *bool // Specifies if we should decode ObjectID as the hex value. Defaults to true.
}
// StringCodec creates a new *StringCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func StringCodec() *StringCodecOptions {
return &StringCodecOptions{}
}
// SetDecodeObjectIDAsHex specifies if object IDs should be decoded as their hex representation. If false, a string made
// from the raw object ID bytes will be used. Defaults to true.
//
// Deprecated: Decoding object IDs as raw bytes will not be supported in Go Driver 2.0.
func (t *StringCodecOptions) SetDecodeObjectIDAsHex(b bool) *StringCodecOptions {
t.DecodeObjectIDAsHex = &b
return t
}
// MergeStringCodecOptions combines the given *StringCodecOptions into a single *StringCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeStringCodecOptions(opts ...*StringCodecOptions) *StringCodecOptions {
s := &StringCodecOptions{&defaultDecodeOIDAsHex}
for _, opt := range opts {

View File

@ -9,6 +9,9 @@ package bsonoptions
var defaultOverwriteDuplicatedInlinedFields = true
// StructCodecOptions represents all possible options for struct encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type StructCodecOptions struct {
DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false.
DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
@ -18,17 +21,24 @@ type StructCodecOptions struct {
}
// StructCodec creates a new *StructCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func StructCodec() *StructCodecOptions {
return &StructCodecOptions{}
}
// SetDecodeZeroStruct specifies if structs should be zeroed before decoding into them. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.ZeroStructs] instead.
func (t *StructCodecOptions) SetDecodeZeroStruct(b bool) *StructCodecOptions {
t.DecodeZeroStruct = &b
return t
}
// SetDecodeDeepZeroInline specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
//
// Deprecated: DecodeDeepZeroInline will not be supported in Go Driver 2.0.
func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions {
t.DecodeDeepZeroInline = &b
return t
@ -36,6 +46,8 @@ func (t *StructCodecOptions) SetDecodeDeepZeroInline(b bool) *StructCodecOptions
// SetEncodeOmitDefaultStruct specifies if default structs should be considered empty by omitempty. A default struct has all
// its values set to their default value. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.OmitZeroStruct] instead.
func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOptions {
t.EncodeOmitDefaultStruct = &b
return t
@ -45,18 +57,26 @@ func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOpti
// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when
// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if
// there are duplicate keys after the struct is inlined. Defaults to true.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.ErrorOnInlineDuplicates] instead.
func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions {
t.OverwriteDuplicatedInlinedFields = &b
return t
}
// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
//
// Deprecated: AllowUnexportedFields does not work on recent versions of Go and will not be
// supported in Go Driver 2.0.
func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions {
t.AllowUnexportedFields = &b
return t
}
// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions {
s := &StructCodecOptions{
OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields,

View File

@ -7,22 +7,33 @@
package bsonoptions
// TimeCodecOptions represents all possible options for time.Time encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type TimeCodecOptions struct {
UseLocalTimeZone *bool // Specifies if we should decode into the local time zone. Defaults to false.
}
// TimeCodec creates a new *TimeCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func TimeCodec() *TimeCodecOptions {
return &TimeCodecOptions{}
}
// SetUseLocalTimeZone specifies if we should decode into the local time zone. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Decoder.UseLocalTimeZone] instead.
func (t *TimeCodecOptions) SetUseLocalTimeZone(b bool) *TimeCodecOptions {
t.UseLocalTimeZone = &b
return t
}
// MergeTimeCodecOptions combines the given *TimeCodecOptions into a single *TimeCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeTimeCodecOptions(opts ...*TimeCodecOptions) *TimeCodecOptions {
t := TimeCodec()
for _, opt := range opts {

View File

@ -7,22 +7,33 @@
package bsonoptions
// UIntCodecOptions represents all possible options for uint encoding and decoding.
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
type UIntCodecOptions struct {
EncodeToMinSize *bool // Specifies if all uints except uint64 should be encoded to the minimum size bsontype. Defaults to false.
}
// UIntCodec creates a new *UIntCodecOptions
//
// Deprecated: Use the bson.Encoder and bson.Decoder configuration methods to set the desired BSON marshal
// and unmarshal behavior instead.
func UIntCodec() *UIntCodecOptions {
return &UIntCodecOptions{}
}
// SetEncodeToMinSize specifies if all uints except uint64 should be encoded to the minimum size bsontype. Defaults to false.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.Encoder.IntMinSize] instead.
func (u *UIntCodecOptions) SetEncodeToMinSize(b bool) *UIntCodecOptions {
u.EncodeToMinSize = &b
return u
}
// MergeUIntCodecOptions combines the given *UIntCodecOptions into a single *UIntCodecOptions in a last one wins fashion.
//
// Deprecated: Merging options structs will not be supported in Go Driver 2.0. Users should create a
// single options struct instead.
func MergeUIntCodecOptions(opts ...*UIntCodecOptions) *UIntCodecOptions {
u := UIntCodec()
for _, opt := range opts {

View File

@ -17,20 +17,32 @@ import (
// Copier is a type that allows copying between ValueReaders, ValueWriters, and
// []byte values.
//
// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
type Copier struct{}
// NewCopier creates a new Copier.
//
// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
func NewCopier() Copier {
return Copier{}
}
// CopyDocument handles copying a document from src to dst.
//
// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
func CopyDocument(dst ValueWriter, src ValueReader) error {
return Copier{}.CopyDocument(dst, src)
}
// CopyDocument handles copying one document from the src to the dst.
//
// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error {
dr, err := src.ReadDocument()
if err != nil {
@ -47,6 +59,9 @@ func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error {
// CopyArrayFromBytes copies the values from a BSON array represented as a
// []byte to a ValueWriter.
//
// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error {
aw, err := dst.WriteArray()
if err != nil {
@ -63,6 +78,9 @@ func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error {
// CopyDocumentFromBytes copies the values from a BSON document represented as a
// []byte to a ValueWriter.
//
// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error {
dw, err := dst.WriteDocument()
if err != nil {
@ -81,6 +99,9 @@ type writeElementFn func(key string) (ValueWriter, error)
// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an
// ArrayWriter.
//
// Deprecated: Copying BSON arrays using the ArrayWriter interface will not be supported in Go
// Driver 2.0.
func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error {
wef := func(_ string) (ValueWriter, error) {
return dst.WriteArrayElement()
@ -91,6 +112,9 @@ func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error {
// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a
// DocumentWriter.
//
// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error {
wef := func(key string) (ValueWriter, error) {
return dst.WriteDocumentElement(key)
@ -100,7 +124,7 @@ func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error
}
func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error {
// TODO(skriptble): Create errors types here. Anything thats a tag should be a property.
// TODO(skriptble): Create errors types here. Anything that is a tag should be a property.
length, rem, ok := bsoncore.ReadLength(src)
if !ok {
return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src))
@ -150,12 +174,18 @@ func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error {
// CopyDocumentToBytes copies an entire document from the ValueReader and
// returns it as bytes.
//
// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) {
return c.AppendDocumentBytes(nil, src)
}
// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will
// append the result to dst.
//
// Deprecated: Copying BSON documents using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) {
if br, ok := src.(BytesReader); ok {
_, dst, err := br.ReadValueBytes(dst)
@ -163,7 +193,7 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error)
}
vw := vwPool.Get().(*valueWriter)
defer vwPool.Put(vw)
defer putValueWriter(vw)
vw.reset(dst)
@ -173,6 +203,9 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error)
}
// AppendArrayBytes copies an array from the ValueReader to dst.
//
// Deprecated: Copying BSON arrays using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) {
if br, ok := src.(BytesReader); ok {
_, dst, err := br.ReadValueBytes(dst)
@ -180,7 +213,7 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) {
}
vw := vwPool.Get().(*valueWriter)
defer vwPool.Put(vw)
defer putValueWriter(vw)
vw.reset(dst)
@ -190,6 +223,8 @@ func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) {
}
// CopyValueFromBytes will write the value represented by t and src to dst.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.UnmarshalValue] instead.
func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error {
if wvb, ok := dst.(BytesWriter); ok {
return wvb.WriteValueBytes(t, src)
@ -206,19 +241,24 @@ func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte)
// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a
// []byte.
//
// Deprecated: Use [go.mongodb.org/mongo-driver/bson.MarshalValue] instead.
func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) {
return c.AppendValueBytes(nil, src)
}
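// Illustrative sketch, not part of this change: the bson.MarshalValue and
// bson.UnmarshalValue replacements named in the deprecation notices above, as
// they would be used from application code (shown as a comment because the bson
// package cannot be imported from this package):
//
//	t, raw, err := bson.MarshalValue("hello")
//	if err != nil {
//		panic(err)
//	}
//	var s string
//	err = bson.UnmarshalValue(t, raw, &s)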
// AppendValueBytes functions the same as CopyValueToBytes, but will append the
// result to dst.
//
// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go
// Driver 2.0.
func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) {
if br, ok := src.(BytesReader); ok {
return br.ReadValueBytes(dst)
}
vw := vwPool.Get().(*valueWriter)
defer vwPool.Put(vw)
defer putValueWriter(vw)
start := len(dst)
@ -234,6 +274,9 @@ func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []
}
// CopyValue will copy a single value from src to dst.
//
// Deprecated: Copying BSON values using the ValueWriter and ValueReader interfaces will not be
// supported in Go Driver 2.0.
func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error {
var err error
switch src.Type() {

View File

@ -16,11 +16,15 @@ import (
)
// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON.
//
// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
type ExtJSONValueReaderPool struct {
pool sync.Pool
}
// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool.
//
// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool {
return &ExtJSONValueReaderPool{
pool: sync.Pool{
@ -32,6 +36,8 @@ func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool {
}
// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON.
//
// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) {
vr := bvrp.pool.Get().(*extJSONValueReader)
return vr.reset(r, canonical)
@ -39,6 +45,8 @@ func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReade
// Put inserts a ValueReader into the pool. If the ValueReader is not an ExtJSON ValueReader, nothing
// is inserted into the pool and ok will be false.
//
// Deprecated: ExtJSONValueReaderPool will not be supported in Go Driver 2.0.
func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) {
bvr, ok := vr.(*extJSONValueReader)
if !ok {

View File

@ -23,11 +23,15 @@ import (
)
// ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters.
//
// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0.
type ExtJSONValueWriterPool struct {
pool sync.Pool
}
// NewExtJSONValueWriterPool creates a new pool for ValueWriter instances that write to ExtJSON.
//
// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0.
func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool {
return &ExtJSONValueWriterPool{
pool: sync.Pool{
@ -39,6 +43,8 @@ func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool {
}
// Get retrieves a ExtJSON ValueWriter from the pool and resets it to use w as the destination.
//
// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0.
func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) ValueWriter {
vw := bvwp.pool.Get().(*extJSONValueWriter)
if writer, ok := w.(*SliceWriter); ok {
@ -53,6 +59,8 @@ func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool)
// Put inserts a ValueWriter into the pool. If the ValueWriter is not an ExtJSON ValueWriter, nothing
// happens and ok will be false.
//
// Deprecated: ExtJSONValueWriterPool will not be supported in Go Driver 2.0.
func (bvwp *ExtJSONValueWriterPool) Put(vw ValueWriter) (ok bool) {
bvw, ok := vw.(*extJSONValueWriter)
if !ok {
@ -80,6 +88,7 @@ type extJSONValueWriter struct {
frame int64
canonical bool
escapeHTML bool
newlines bool
}
// NewExtJSONValueWriter creates a ValueWriter that writes Extended JSON to w.
@ -88,10 +97,13 @@ func NewExtJSONValueWriter(w io.Writer, canonical, escapeHTML bool) (ValueWriter
return nil, errNilWriter
}
return newExtJSONWriter(w, canonical, escapeHTML), nil
// Enable newlines for all Extended JSON value writers created by NewExtJSONValueWriter. We
// expect these value writers to be used with an Encoder, which should add newlines after
// encoded Extended JSON documents.
return newExtJSONWriter(w, canonical, escapeHTML, true), nil
}
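// Illustrative sketch, not part of this change: with newlines enabled above,
// encoding several documents through a bson.Encoder that wraps this value writer
// yields one Extended JSON document per line (application-level usage, shown as
// a comment because the bson package cannot be imported from this package):
//
//	buf := new(bytes.Buffer)
//	vw, _ := bsonrw.NewExtJSONValueWriter(buf, true, false)
//	enc, _ := bson.NewEncoder(vw)
//	_ = enc.Encode(bson.D{{Key: "a", Value: 1}})
//	_ = enc.Encode(bson.D{{Key: "b", Value: 2}})
//	// buf now holds two canonical Extended JSON documents separated by '\n'.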
func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWriter {
func newExtJSONWriter(w io.Writer, canonical, escapeHTML, newlines bool) *extJSONValueWriter {
stack := make([]ejvwState, 1, 5)
stack[0] = ejvwState{mode: mTopLevel}
@ -101,6 +113,7 @@ func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWrit
stack: stack,
canonical: canonical,
escapeHTML: escapeHTML,
newlines: newlines,
}
}
@ -564,6 +577,12 @@ func (ejvw *extJSONValueWriter) WriteDocumentEnd() error {
case mDocument:
ejvw.buf = append(ejvw.buf, ',')
case mTopLevel:
// If the value writer has newlines enabled, end top-level documents with a newline so that
// multiple documents encoded to the same writer are separated by newlines. That matches the
// Go json.Encoder behavior and also works with bsonrw.NewExtJSONValueReader.
if ejvw.newlines {
ejvw.buf = append(ejvw.buf, '\n')
}
if ejvw.w != nil {
if _, err := ejvw.w.Write(ejvw.buf); err != nil {
return err

View File

@ -58,6 +58,8 @@ type ValueReader interface {
// types that implement ValueReader may also implement this interface.
//
// The bytes of the value will be appended to dst.
//
// Deprecated: BytesReader will not be supported in Go Driver 2.0.
type BytesReader interface {
ReadValueBytes(dst []byte) (bsontype.Type, []byte, error)
}

View File

@ -28,11 +28,15 @@ var vrPool = sync.Pool{
}
// BSONValueReaderPool is a pool for ValueReaders that read BSON.
//
// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0.
type BSONValueReaderPool struct {
pool sync.Pool
}
// NewBSONValueReaderPool instantiates a new BSONValueReaderPool.
//
// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0.
func NewBSONValueReaderPool() *BSONValueReaderPool {
return &BSONValueReaderPool{
pool: sync.Pool{
@ -44,6 +48,8 @@ func NewBSONValueReaderPool() *BSONValueReaderPool {
}
// Get retrieves a ValueReader from the pool and uses src as the underlying BSON.
//
// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0.
func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader {
vr := bvrp.pool.Get().(*valueReader)
vr.reset(src)
@ -52,6 +58,8 @@ func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader {
// Put inserts a ValueReader into the pool. If the ValueReader is not a BSON ValueReader, nothing
// is inserted into the pool and ok will be false.
//
// Deprecated: BSONValueReaderPool will not be supported in Go Driver 2.0.
func (bvrp *BSONValueReaderPool) Put(vr ValueReader) (ok bool) {
bvr, ok := vr.(*valueReader)
if !ok {
@ -731,8 +739,7 @@ func (vr *valueReader) ReadValue() (ValueReader, error) {
return nil, ErrEOA
}
_, err = vr.readCString()
if err != nil {
if err := vr.skipCString(); err != nil {
return nil, err
}
@ -786,6 +793,15 @@ func (vr *valueReader) readByte() (byte, error) {
return vr.d[vr.offset-1], nil
}
func (vr *valueReader) skipCString() error {
idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
if idx < 0 {
return io.EOF
}
vr.offset += int64(idx) + 1
return nil
}
func (vr *valueReader) readCString() (string, error) {
idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
if idx < 0 {

View File

@ -28,12 +28,23 @@ var vwPool = sync.Pool{
},
}
func putValueWriter(vw *valueWriter) {
if vw != nil {
vw.w = nil // don't leak the writer
vwPool.Put(vw)
}
}
// BSONValueWriterPool is a pool for BSON ValueWriters.
//
// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0.
type BSONValueWriterPool struct {
pool sync.Pool
}
// NewBSONValueWriterPool creates a new pool for ValueWriter instances that write to BSON.
//
// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0.
func NewBSONValueWriterPool() *BSONValueWriterPool {
return &BSONValueWriterPool{
pool: sync.Pool{
@ -45,6 +56,8 @@ func NewBSONValueWriterPool() *BSONValueWriterPool {
}
// Get retrieves a BSON ValueWriter from the pool and resets it to use w as the destination.
//
// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0.
func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter {
vw := bvwp.pool.Get().(*valueWriter)
@ -56,6 +69,8 @@ func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter {
}
// GetAtModeElement retrieves a ValueWriterFlusher from the pool and resets it to use w as the destination.
//
// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0.
func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlusher {
vw := bvwp.Get(w).(*valueWriter)
vw.push(mElement)
@ -64,6 +79,8 @@ func (bvwp *BSONValueWriterPool) GetAtModeElement(w io.Writer) ValueWriterFlushe
// Put inserts a ValueWriter into the pool. If the ValueWriter is not a BSON ValueWriter, nothing
// happens and ok will be false.
//
// Deprecated: BSONValueWriterPool will not be supported in Go Driver 2.0.
func (bvwp *BSONValueWriterPool) Put(vw ValueWriter) (ok bool) {
bvw, ok := vw.(*valueWriter)
if !ok {
@ -139,32 +156,21 @@ type valueWriter struct {
}
func (vw *valueWriter) advanceFrame() {
if vw.frame+1 >= int64(len(vw.stack)) { // We need to grow the stack
length := len(vw.stack)
if length+1 >= cap(vw.stack) {
// double it
buf := make([]vwState, 2*cap(vw.stack)+1)
copy(buf, vw.stack)
vw.stack = buf
}
vw.stack = vw.stack[:length+1]
}
vw.frame++
if vw.frame >= int64(len(vw.stack)) {
vw.stack = append(vw.stack, vwState{})
}
}
func (vw *valueWriter) push(m mode) {
vw.advanceFrame()
// Clean the stack
vw.stack[vw.frame].mode = m
vw.stack[vw.frame].key = ""
vw.stack[vw.frame].arrkey = 0
vw.stack[vw.frame].start = 0
vw.stack[vw.frame] = vwState{mode: m}
vw.stack[vw.frame].mode = m
switch m {
case mDocument, mArray, mCodeWithScope:
vw.reserveLength()
vw.reserveLength() // WARN: this is not needed
}
}
@ -203,6 +209,7 @@ func newValueWriter(w io.Writer) *valueWriter {
return vw
}
// TODO: only used in tests
func newValueWriterFromSlice(buf []byte) *valueWriter {
vw := new(valueWriter)
stack := make([]vwState, 1, 5)
@ -239,17 +246,16 @@ func (vw *valueWriter) invalidTransitionError(destination mode, name string, mod
}
func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error {
switch vw.stack[vw.frame].mode {
frame := &vw.stack[vw.frame]
switch frame.mode {
case mElement:
key := vw.stack[vw.frame].key
key := frame.key
if !isValidCString(key) {
return errors.New("BSON element key cannot contain null bytes")
}
vw.buf = bsoncore.AppendHeader(vw.buf, t, key)
vw.appendHeader(t, key)
case mValue:
// TODO: Do this with a cache of the first 1000 or so array keys.
vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey))
vw.appendIntHeader(t, frame.arrkey)
default:
modes := []mode{mElement, mValue}
if addmodes != nil {
@ -591,9 +597,11 @@ func (vw *valueWriter) writeLength() error {
if length > maxSize {
return errMaxDocumentSizeExceeded{size: int64(len(vw.buf))}
}
length = length - int(vw.stack[vw.frame].start)
start := vw.stack[vw.frame].start
frame := &vw.stack[vw.frame]
length = length - int(frame.start)
start := frame.start
_ = vw.buf[start+3] // BCE
vw.buf[start+0] = byte(length)
vw.buf[start+1] = byte(length >> 8)
vw.buf[start+2] = byte(length >> 16)
@ -602,5 +610,31 @@ func (vw *valueWriter) writeLength() error {
}
func isValidCString(cs string) bool {
return !strings.ContainsRune(cs, '\x00')
// Disallow the zero byte in a cstring because the zero byte is used as the
// terminating character.
//
// It's safe to check bytes instead of runes because all multibyte UTF-8
// code points start with (binary) 11xxxxxx or 10xxxxxx, so 00000000 (i.e.
// 0) will never be part of a multibyte UTF-8 code point. This logic is the
// same as the "r < utf8.RuneSelf" case in strings.IndexRune but can be
// inlined.
//
// https://cs.opensource.google/go/go/+/refs/tags/go1.21.1:src/strings/strings.go;l=127
return strings.IndexByte(cs, 0) == -1
}
// appendHeader is the same as bsoncore.AppendHeader but does not check if the
// key is a valid C string since the caller has already checked for that.
//
// The caller of this function must check if key is a valid C string.
func (vw *valueWriter) appendHeader(t bsontype.Type, key string) {
vw.buf = bsoncore.AppendType(vw.buf, t)
vw.buf = append(vw.buf, key...)
vw.buf = append(vw.buf, 0x00)
}
func (vw *valueWriter) appendIntHeader(t bsontype.Type, key int) {
vw.buf = bsoncore.AppendType(vw.buf, t)
vw.buf = strconv.AppendInt(vw.buf, int64(key), 10)
vw.buf = append(vw.buf, 0x00)
}

View File

@ -56,6 +56,8 @@ type ValueWriter interface {
}
// ValueWriterFlusher is a superset of ValueWriter that exposes functionality to flush to the underlying buffer.
//
// Deprecated: ValueWriterFlusher will not be supported in Go Driver 2.0.
type ValueWriterFlusher interface {
ValueWriter
Flush() error
@ -64,13 +66,20 @@ type ValueWriterFlusher interface {
// BytesWriter is the interface used to write BSON bytes to a ValueWriter.
// This interface is meant to be a superset of ValueWriter, so that types that
// implement ValueWriter may also implement this interface.
//
// Deprecated: BytesWriter will not be supported in Go Driver 2.0.
type BytesWriter interface {
WriteValueBytes(t bsontype.Type, b []byte) error
}
// SliceWriter allows a pointer to a slice of bytes to be used as an io.Writer.
//
// Deprecated: SliceWriter will not be supported in Go Driver 2.0.
type SliceWriter []byte
// Write writes the bytes to the underlying slice.
//
// Deprecated: SliceWriter will not be supported in Go Driver 2.0.
func (sw *SliceWriter) Write(p []byte) (int, error) {
written := len(p)
*sw = append(*sw, p...)

View File

@ -8,7 +8,9 @@
// a stringifier for the Type to enable easier debugging when working with BSON.
package bsontype // import "go.mongodb.org/mongo-driver/bson/bsontype"
// These constants uniquely refer to each BSON type.
// BSON element types as described in https://bsonspec.org/spec.html.
//
// Deprecated: Use bson.Type* constants instead.
const (
Double Type = 0x01
String Type = 0x02
@ -31,7 +33,12 @@ const (
Decimal128 Type = 0x13
MinKey Type = 0xFF
MaxKey Type = 0x7F
)
// BSON binary element subtypes as described in https://bsonspec.org/spec.html.
//
// Deprecated: Use the bson.TypeBinary* constants instead.
const (
BinaryGeneric byte = 0x00
BinaryFunction byte = 0x01
BinaryBinaryOld byte = 0x02
@ -40,6 +47,7 @@ const (
BinaryMD5 byte = 0x05
BinaryEncrypted byte = 0x06
BinaryColumn byte = 0x07
BinarySensitive byte = 0x08
BinaryUserDefined byte = 0x80
)
@ -95,3 +103,14 @@ func (bt Type) String() string {
return "invalid"
}
}
// IsValid will return true if the Type is valid.
func (bt Type) IsValid() bool {
switch bt {
case Double, String, EmbeddedDocument, Array, Binary, Undefined, ObjectID, Boolean, DateTime, Null, Regex,
DBPointer, JavaScript, Symbol, CodeWithScope, Int32, Timestamp, Int64, Decimal128, MinKey, MaxKey:
return true
default:
return false
}
}
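// Illustrative sketch, not part of this change: a hypothetical helper exercising
// the IsValid method added above.
func exampleTypeIsValid() {
    _ = Double.IsValid()     // true: 0x01 is a defined BSON element type
    _ = Type(0x42).IsValid() // false: 0x42 is not a defined BSON element type
}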

View File

@ -38,6 +38,12 @@ type Decoder struct {
// (*Decoder).SetContext.
defaultDocumentM bool
defaultDocumentD bool
binaryAsSlice bool
useJSONStructTags bool
useLocalTimeZone bool
zeroMaps bool
zeroStructs bool
}
// NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr.
@ -53,6 +59,9 @@ func NewDecoder(vr bsonrw.ValueReader) (*Decoder, error) {
}
// NewDecoderWithContext returns a new decoder that uses DecodeContext dc to read from vr.
//
// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal
// behavior instead.
func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (*Decoder, error) {
if dc.Registry == nil {
dc.Registry = DefaultRegistry
@ -70,8 +79,7 @@ func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (*
// Decode reads the next BSON document from the stream and decodes it into the
// value pointed to by val.
//
// The documentation for Unmarshal contains details about of BSON into a Go
// value.
// See [Unmarshal] for details about BSON unmarshaling behavior.
func (d *Decoder) Decode(val interface{}) error {
if unmarshaler, ok := val.(Unmarshaler); ok {
// TODO(skriptble): Reuse a []byte here and use the AppendDocumentBytes method.
@ -100,42 +108,101 @@ func (d *Decoder) Decode(val interface{}) error {
if err != nil {
return err
}
if d.defaultDocumentM {
d.dc.DefaultDocumentM()
}
if d.defaultDocumentD {
d.dc.DefaultDocumentD()
}
if d.binaryAsSlice {
d.dc.BinaryAsSlice()
}
if d.useJSONStructTags {
d.dc.UseJSONStructTags()
}
if d.useLocalTimeZone {
d.dc.UseLocalTimeZone()
}
if d.zeroMaps {
d.dc.ZeroMaps()
}
if d.zeroStructs {
d.dc.ZeroStructs()
}
return decoder.DecodeValue(d.dc, d.vr, rval)
}
// Reset will reset the state of the decoder, using the same *DecodeContext used in
// the original construction but using vr for reading.
func (d *Decoder) Reset(vr bsonrw.ValueReader) error {
// TODO:(GODRIVER-2719): Remove error return value.
d.vr = vr
return nil
}
// SetRegistry replaces the current registry of the decoder with r.
func (d *Decoder) SetRegistry(r *bsoncodec.Registry) error {
// TODO:(GODRIVER-2719): Remove error return value.
d.dc.Registry = r
return nil
}
// SetContext replaces the current DecodeContext of the decoder with dc.
//
// Deprecated: Use the Decoder configuration methods to set the desired unmarshal behavior instead.
func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error {
// TODO:(GODRIVER-2719): Remove error return value.
d.dc = dc
return nil
}
// DefaultDocumentM will decode empty documents using the primitive.M type. This behavior is restricted to data typed as
// "interface{}" or "map[string]interface{}".
// DefaultDocumentM causes the Decoder to always unmarshal documents into the primitive.M type. This
// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
func (d *Decoder) DefaultDocumentM() {
d.defaultDocumentM = true
}
// DefaultDocumentD will decode empty documents using the primitive.D type. This behavior is restricted to data typed as
// "interface{}" or "map[string]interface{}".
// DefaultDocumentD causes the Decoder to always unmarshal documents into the primitive.D type. This
// behavior is restricted to data typed as "interface{}" or "map[string]interface{}".
func (d *Decoder) DefaultDocumentD() {
d.defaultDocumentD = true
}
// AllowTruncatingDoubles causes the Decoder to truncate the fractional part of BSON "double" values
// when attempting to unmarshal them into a Go integer (int, int8, int16, int32, or int64) struct
// field. The truncation logic does not apply to BSON "decimal128" values.
func (d *Decoder) AllowTruncatingDoubles() {
d.dc.Truncate = true
}
// BinaryAsSlice causes the Decoder to unmarshal BSON binary field values that are the "Generic" or
// "Old" BSON binary subtype as a Go byte slice instead of a primitive.Binary.
func (d *Decoder) BinaryAsSlice() {
d.binaryAsSlice = true
}
// UseJSONStructTags causes the Decoder to fall back to using the "json" struct tag if a "bson"
// struct tag is not specified.
func (d *Decoder) UseJSONStructTags() {
d.useJSONStructTags = true
}
// UseLocalTimeZone causes the Decoder to unmarshal time.Time values in the local timezone instead
// of the UTC timezone.
func (d *Decoder) UseLocalTimeZone() {
d.useLocalTimeZone = true
}
// ZeroMaps causes the Decoder to delete any existing values from Go maps in the destination value
// passed to Decode before unmarshaling BSON documents into them.
func (d *Decoder) ZeroMaps() {
d.zeroMaps = true
}
// ZeroStructs causes the Decoder to delete any existing values from Go structs in the destination
// value passed to Decode before unmarshaling BSON documents into them.
func (d *Decoder) ZeroStructs() {
d.zeroStructs = true
}
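// Illustrative sketch, not part of this change: a hypothetical helper wiring up
// the Decoder configuration methods added above. Assumes doc is a valid BSON
// document in raw bytes and that the bsonrw package is imported.
func exampleDecoderConfiguration(doc []byte) (map[string]interface{}, error) {
    dec, err := NewDecoder(bsonrw.NewBSONDocumentReader(doc))
    if err != nil {
        return nil, err
    }
    // Decode Generic/Old binary values as []byte and clear destination maps
    // before decoding into them.
    dec.BinaryAsSlice()
    dec.ZeroMaps()
    var out map[string]interface{}
    err = dec.Decode(&out)
    return out, err
}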

View File

@ -7,7 +7,8 @@
// Package bson is a library for reading, writing, and manipulating BSON. BSON is a binary serialization format used to
// store documents and make remote procedure calls in MongoDB. The BSON specification is located at https://bsonspec.org.
// The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description
// of the codec system and examples of registering custom codecs, see the bsoncodec package.
// of the codec system and examples of registering custom codecs, see the bsoncodec package. For additional information and
// usage examples, check out the [Work with BSON] page in the Go Driver docs site.
//
// # Raw BSON
//
@ -138,4 +139,6 @@
// # Marshalling and Unmarshalling
//
// Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions.
//
// [Work with BSON]: https://www.mongodb.com/docs/drivers/go/current/fundamentals/bson/
package bson
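// Illustrative sketch, not part of this change: a hypothetical helper showing
// basic use of the Marshal and Unmarshal functions referenced in the package
// documentation above.
func exampleMarshalRoundTrip() error {
    // Marshal an ordered document, then unmarshal it back into a map.
    raw, err := Marshal(D{{Key: "name", Value: "podman"}, {Key: "count", Value: 3}})
    if err != nil {
        return err
    }
    var decoded M
    return Unmarshal(raw, &decoded)
}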

View File

@ -29,10 +29,20 @@ var encPool = sync.Pool{
type Encoder struct {
ec bsoncodec.EncodeContext
vw bsonrw.ValueWriter
errorOnInlineDuplicates bool
intMinSize bool
stringifyMapKeysWithFmt bool
nilMapAsEmpty bool
nilSliceAsEmpty bool
nilByteSliceAsEmpty bool
omitZeroStruct bool
useJSONStructTags bool
}
// NewEncoder returns a new encoder that uses the DefaultRegistry to write to vw.
func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) {
// TODO:(GODRIVER-2719): Remove error return value.
if vw == nil {
return nil, errors.New("cannot create a new Encoder with a nil ValueWriter")
}
@ -44,6 +54,9 @@ func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) {
}
// NewEncoderWithContext returns a new encoder that uses EncodeContext ec to write to vw.
//
// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal
// behavior instead.
func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (*Encoder, error) {
if ec.Registry == nil {
ec = bsoncodec.EncodeContext{Registry: DefaultRegistry}
@ -60,8 +73,7 @@ func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (*
// Encode writes the BSON encoding of val to the stream.
//
// The documentation for Marshal contains details about the conversion of Go
// values to BSON.
// See [Marshal] for details about BSON marshaling behavior.
func (e *Encoder) Encode(val interface{}) error {
if marshaler, ok := val.(Marshaler); ok {
// TODO(skriptble): Should we have a MarshalAppender interface so that we can have []byte reuse?
@ -76,24 +88,112 @@ func (e *Encoder) Encode(val interface{}) error {
if err != nil {
return err
}
// Copy the configurations applied to the Encoder over to the EncodeContext, which actually
// communicates those configurations to the default ValueEncoders.
if e.errorOnInlineDuplicates {
e.ec.ErrorOnInlineDuplicates()
}
if e.intMinSize {
e.ec.MinSize = true
}
if e.stringifyMapKeysWithFmt {
e.ec.StringifyMapKeysWithFmt()
}
if e.nilMapAsEmpty {
e.ec.NilMapAsEmpty()
}
if e.nilSliceAsEmpty {
e.ec.NilSliceAsEmpty()
}
if e.nilByteSliceAsEmpty {
e.ec.NilByteSliceAsEmpty()
}
if e.omitZeroStruct {
e.ec.OmitZeroStruct()
}
if e.useJSONStructTags {
e.ec.UseJSONStructTags()
}
return encoder.EncodeValue(e.ec, e.vw, reflect.ValueOf(val))
}
// Reset will reset the state of the encoder, using the same *EncodeContext used in
// Reset will reset the state of the Encoder, using the same *EncodeContext used in
// the original construction but using vw.
func (e *Encoder) Reset(vw bsonrw.ValueWriter) error {
// TODO:(GODRIVER-2719): Remove error return value.
e.vw = vw
return nil
}
// SetRegistry replaces the current registry of the encoder with r.
// SetRegistry replaces the current registry of the Encoder with r.
func (e *Encoder) SetRegistry(r *bsoncodec.Registry) error {
// TODO:(GODRIVER-2719): Remove error return value.
e.ec.Registry = r
return nil
}
// SetContext replaces the current EncodeContext of the encoder with er.
// SetContext replaces the current EncodeContext of the encoder with ec.
//
// Deprecated: Use the Encoder configuration methods to set the desired marshal behavior instead.
func (e *Encoder) SetContext(ec bsoncodec.EncodeContext) error {
// TODO:(GODRIVER-2719): Remove error return value.
e.ec = ec
return nil
}
// ErrorOnInlineDuplicates causes the Encoder to return an error if there is a duplicate field in
// the marshaled BSON when the "inline" struct tag option is set.
func (e *Encoder) ErrorOnInlineDuplicates() {
e.errorOnInlineDuplicates = true
}
// IntMinSize causes the Encoder to marshal Go integer values (int, int8, int16, int32, int64, uint,
// uint8, uint16, uint32, or uint64) as the minimum BSON int size (either 32 or 64 bits) that can
// represent the integer value.
func (e *Encoder) IntMinSize() {
e.intMinSize = true
}
// StringifyMapKeysWithFmt causes the Encoder to convert Go map keys to BSON document field name
// strings using fmt.Sprint instead of the default string conversion logic.
func (e *Encoder) StringifyMapKeysWithFmt() {
e.stringifyMapKeysWithFmt = true
}
// NilMapAsEmpty causes the Encoder to marshal nil Go maps as empty BSON documents instead of BSON
// null.
func (e *Encoder) NilMapAsEmpty() {
e.nilMapAsEmpty = true
}
// NilSliceAsEmpty causes the Encoder to marshal nil Go slices as empty BSON arrays instead of BSON
// null.
func (e *Encoder) NilSliceAsEmpty() {
e.nilSliceAsEmpty = true
}
// NilByteSliceAsEmpty causes the Encoder to marshal nil Go byte slices as empty BSON binary values
// instead of BSON null.
func (e *Encoder) NilByteSliceAsEmpty() {
e.nilByteSliceAsEmpty = true
}
// TODO(GODRIVER-2820): Update the description to remove the note about only examining exported
// struct fields once the logic is updated to also inspect private struct fields.
// OmitZeroStruct causes the Encoder to consider the zero value for a struct (e.g. MyStruct{})
// as empty and omit it from the marshaled BSON when the "omitempty" struct tag option is set.
//
// Note that the Encoder only examines exported struct fields when determining if a struct is the
// zero value. It considers pointers to a zero struct value (e.g. &MyStruct{}) not empty.
func (e *Encoder) OmitZeroStruct() {
e.omitZeroStruct = true
}
// UseJSONStructTags causes the Encoder to fall back to using the "json" struct tag if a "bson"
// struct tag is not specified.
func (e *Encoder) UseJSONStructTags() {
e.useJSONStructTags = true
}
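// Illustrative sketch, not part of this change: a hypothetical helper wiring up
// the Encoder configuration methods added above. Assumes the bytes and bsonrw
// packages are imported.
func exampleEncoderConfiguration(val interface{}) ([]byte, error) {
    buf := new(bytes.Buffer)
    vw, err := bsonrw.NewBSONValueWriter(buf)
    if err != nil {
        return nil, err
    }
    enc, err := NewEncoder(vw)
    if err != nil {
        return nil, err
    }
    // Use the smallest BSON int type that fits and encode nil maps as empty documents.
    enc.IntMinSize()
    enc.NilMapAsEmpty()
    if err := enc.Encode(val); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}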

View File

@ -9,6 +9,7 @@ package bson
import (
"bytes"
"encoding/json"
"sync"
"go.mongodb.org/mongo-driver/bson/bsoncodec"
"go.mongodb.org/mongo-driver/bson/bsonrw"
@ -20,17 +21,23 @@ const defaultDstCap = 256
var bvwPool = bsonrw.NewBSONValueWriterPool()
var extjPool = bsonrw.NewExtJSONValueWriterPool()
// Marshaler is an interface implemented by types that can marshal themselves
// into a BSON document represented as bytes. The bytes returned must be a valid
// BSON document if the error is nil.
// Marshaler is the interface implemented by types that can marshal themselves
// into a valid BSON document.
//
// Implementations of Marshaler must return a full BSON document. To create
// custom BSON marshaling behavior for individual values in a BSON document,
// implement the ValueMarshaler interface instead.
type Marshaler interface {
MarshalBSON() ([]byte, error)
}
// ValueMarshaler is an interface implemented by types that can marshal
// themselves into a BSON value as bytes. The type must be the valid type for
// the bytes returned. The bytes and byte type together must be valid if the
// error is nil.
// ValueMarshaler is the interface implemented by types that can marshal
// themselves into a valid BSON value. The format of the returned bytes must
// match the returned type.
//
// Implementations of ValueMarshaler must return an individual BSON value. To
// create custom BSON marshaling behavior for an entire BSON document, implement
// the Marshaler interface instead.
type ValueMarshaler interface {
MarshalBSONValue() (bsontype.Type, []byte, error)
}
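// Illustrative sketch, not part of this change: a hypothetical type satisfying
// Marshaler by returning a complete BSON document, as the comment above requires.
type exampleTemperature struct {
    Celsius float64
}

// MarshalBSON returns the receiver encoded as a full BSON document.
func (t exampleTemperature) MarshalBSON() ([]byte, error) {
    return Marshal(D{{Key: "celsius", Value: t.Celsius}})
}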
@ -48,12 +55,42 @@ func Marshal(val interface{}) ([]byte, error) {
// MarshalAppend will encode val as a BSON document and append the bytes to dst. If dst is not large enough to hold the
// bytes, it will be grown. If val is not a type that can be transformed into a document, MarshalValueAppend should be
// used instead.
//
// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into
// [bsonrw.NewBSONValueWriter]:
//
// buf := bytes.NewBuffer(dst)
// vw, err := bsonrw.NewBSONValueWriter(buf)
// if err != nil {
// panic(err)
// }
// enc, err := bson.NewEncoder(vw)
// if err != nil {
// panic(err)
// }
//
// See [Encoder] for more examples.
func MarshalAppend(dst []byte, val interface{}) ([]byte, error) {
return MarshalAppendWithRegistry(DefaultRegistry, dst, val)
}
// MarshalWithRegistry returns the BSON encoding of val as a BSON document. If val is not a type that can be transformed
// into a document, MarshalValueWithRegistry should be used instead.
//
// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead:
//
// buf := new(bytes.Buffer)
// vw, err := bsonrw.NewBSONValueWriter(buf)
// if err != nil {
// panic(err)
// }
// enc, err := bson.NewEncoder(vw)
// if err != nil {
// panic(err)
// }
// enc.SetRegistry(reg)
//
// See [Encoder] for more examples.
func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) {
dst := make([]byte, 0)
return MarshalAppendWithRegistry(r, dst, val)
@ -61,6 +98,22 @@ func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error)
// MarshalWithContext returns the BSON encoding of val as a BSON document using EncodeContext ec. If val is not a type
// that can be transformed into a document, MarshalValueWithContext should be used instead.
//
// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal
// behavior instead:
//
// buf := bytes.NewBuffer(dst)
// vw, err := bsonrw.NewBSONValueWriter(buf)
// if err != nil {
// panic(err)
// }
// enc, err := bson.NewEncoder(vw)
// if err != nil {
// panic(err)
// }
// enc.IntMinSize()
//
// See [Encoder] for more examples.
func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, error) {
dst := make([]byte, 0)
return MarshalAppendWithContext(ec, dst, val)
@ -69,16 +122,74 @@ func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, er
// MarshalAppendWithRegistry will encode val as a BSON document using Registry r and append the bytes to dst. If dst is
// not large enough to hold the bytes, it will be grown. If val is not a type that can be transformed into a document,
// MarshalValueAppendWithRegistry should be used instead.
//
// Deprecated: Use [NewEncoder], and pass the dst byte slice (wrapped by a bytes.Buffer) into
// [bsonrw.NewBSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry] instead:
//
// buf := bytes.NewBuffer(dst)
// vw, err := bsonrw.NewBSONValueWriter(buf)
// if err != nil {
// panic(err)
// }
// enc, err := bson.NewEncoder(vw)
// if err != nil {
// panic(err)
// }
// enc.SetRegistry(reg)
//
// See [Encoder] for more examples.
func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) ([]byte, error) {
return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val)
}
// Pool of buffers for marshalling BSON.
var bufPool = sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
// MarshalAppendWithContext will encode val as a BSON document using Registry r and EncodeContext ec and append the
// bytes to dst. If dst is not large enough to hold the bytes, it will be grown. If val is not a type that can be
// transformed into a document, MarshalValueAppendWithContext should be used instead.
//
// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into
// [bsonrw.NewBSONValueWriter], and use the Encoder configuration methods to set the desired marshal
// behavior instead:
//
// buf := bytes.NewBuffer(dst)
// vw, err := bsonrw.NewBSONValueWriter(buf)
// if err != nil {
// panic(err)
// }
// enc, err := bson.NewEncoder(vw)
// if err != nil {
// panic(err)
// }
// enc.IntMinSize()
//
// See [Encoder] for more examples.
func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) {
sw := new(bsonrw.SliceWriter)
*sw = dst
sw := bufPool.Get().(*bytes.Buffer)
defer func() {
// Proper usage of a sync.Pool requires each entry to have approximately
// the same memory cost. To obtain this property when the stored type
// contains a variably-sized buffer, we add a hard limit on the maximum
// buffer to place back in the pool. We limit the size to 16MiB because
// that's the maximum wire message size supported by any current MongoDB
// server.
//
// Comment based on
// https://cs.opensource.google/go/go/+/refs/tags/go1.19:src/fmt/print.go;l=147
//
// Recycle byte slices that are smaller than 16MiB and at least half
// occupied.
if sw.Cap() < 16*1024*1024 && sw.Cap()/2 < sw.Len() {
bufPool.Put(sw)
}
}()
sw.Reset()
vw := bvwPool.Get(sw)
defer bvwPool.Put(vw)
@ -99,7 +210,7 @@ func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interf
return nil, err
}
return *sw, nil
return append(dst, sw.Bytes()...), nil
}
// MarshalValue returns the BSON encoding of val.
@ -112,17 +223,26 @@ func MarshalValue(val interface{}) (bsontype.Type, []byte, error) {
// MarshalValueAppend will append the BSON encoding of val to dst. If dst is not large enough to hold the BSON encoding
// of val, dst will be grown.
//
// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go
// Driver 2.0.
func MarshalValueAppend(dst []byte, val interface{}) (bsontype.Type, []byte, error) {
return MarshalValueAppendWithRegistry(DefaultRegistry, dst, val)
}
// MarshalValueWithRegistry returns the BSON encoding of val using Registry r.
//
// Deprecated: Using a custom registry to marshal individual BSON values will not be supported in Go
// Driver 2.0.
func MarshalValueWithRegistry(r *bsoncodec.Registry, val interface{}) (bsontype.Type, []byte, error) {
dst := make([]byte, 0)
return MarshalValueAppendWithRegistry(r, dst, val)
}
// MarshalValueWithContext returns the BSON encoding of val using EncodeContext ec.
//
// Deprecated: Using a custom EncodeContext to marshal individual BSON elements will not be
// supported in Go Driver 2.0.
func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsontype.Type, []byte, error) {
dst := make([]byte, 0)
return MarshalValueAppendWithContext(ec, dst, val)
@ -130,12 +250,18 @@ func MarshalValueWithContext(ec bsoncodec.EncodeContext, val interface{}) (bsont
// MarshalValueAppendWithRegistry will append the BSON encoding of val to dst using Registry r. If dst is not large
// enough to hold the BSON encoding of val, dst will be grown.
//
// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go
// Driver 2.0.
func MarshalValueAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) (bsontype.Type, []byte, error) {
return MarshalValueAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val)
}
// MarshalValueAppendWithContext will append the BSON encoding of val to dst using EncodeContext ec. If dst is not large
// enough to hold the BSON encoding of val, dst will be grown.
//
// Deprecated: Appending individual BSON elements to an existing slice will not be supported in Go
// Driver 2.0.
func MarshalValueAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) (bsontype.Type, []byte, error) {
// get a ValueWriter configured to write to dst
sw := new(bsonrw.SliceWriter)
@ -173,17 +299,63 @@ func MarshalExtJSON(val interface{}, canonical, escapeHTML bool) ([]byte, error)
// MarshalExtJSONAppend will append the extended JSON encoding of val to dst.
// If dst is not large enough to hold the extended JSON encoding of val, dst
// will be grown.
//
// Deprecated: Use [NewEncoder] and pass the dst byte slice (wrapped by a bytes.Buffer) into
// [bsonrw.NewExtJSONValueWriter] instead:
//
// buf := bytes.NewBuffer(dst)
// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false)
// if err != nil {
// panic(err)
// }
// enc, err := bson.NewEncoder(vw)
// if err != nil {
// panic(err)
// }
//
// See [Encoder] for more examples.
func MarshalExtJSONAppend(dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
return MarshalExtJSONAppendWithRegistry(DefaultRegistry, dst, val, canonical, escapeHTML)
}
// MarshalExtJSONWithRegistry returns the extended JSON encoding of val using Registry r.
//
// Deprecated: Use [NewEncoder] and specify the Registry by calling [Encoder.SetRegistry] instead:
//
// buf := new(bytes.Buffer)
// vw, err := bsonrw.NewBSONValueWriter(buf)
// if err != nil {
// panic(err)
// }
// enc, err := bson.NewEncoder(vw)
// if err != nil {
// panic(err)
// }
// enc.SetRegistry(reg)
//
// See [Encoder] for more examples.
func MarshalExtJSONWithRegistry(r *bsoncodec.Registry, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
dst := make([]byte, 0, defaultDstCap)
return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML)
}
// MarshalExtJSONWithContext returns the extended JSON encoding of val using Registry r.
//
// Deprecated: Use [NewEncoder] and use the Encoder configuration methods to set the desired marshal
// behavior instead:
//
// buf := new(bytes.Buffer)
// vw, err := bsonrw.NewBSONValueWriter(buf)
// if err != nil {
// panic(err)
// }
// enc, err := bson.NewEncoder(vw)
// if err != nil {
// panic(err)
// }
// enc.IntMinSize()
//
// See [Encoder] for more examples.
func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
dst := make([]byte, 0, defaultDstCap)
return MarshalExtJSONAppendWithContext(ec, dst, val, canonical, escapeHTML)
@ -192,6 +364,22 @@ func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, cano
// MarshalExtJSONAppendWithRegistry will append the extended JSON encoding of
// val to dst using Registry r. If dst is not large enough to hold the BSON
// encoding of val, dst will be grown.
//
// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into
// [bsonrw.NewExtJSONValueWriter], and specify the Registry by calling [Encoder.SetRegistry]
// instead:
//
// buf := bytes.NewBuffer(dst)
// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false)
// if err != nil {
// panic(err)
// }
// enc, err := bson.NewEncoder(vw)
// if err != nil {
// panic(err)
// }
//
// See [Encoder] for more examples.
func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML)
}
@ -199,6 +387,23 @@ func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val int
// MarshalExtJSONAppendWithContext will append the extended JSON encoding of
// val to dst using Registry r. If dst is not large enough to hold the BSON
// encoding of val, dst will be grown.
//
// Deprecated: Use [NewEncoder], pass the dst byte slice (wrapped by a bytes.Buffer) into
// [bsonrw.NewExtJSONValueWriter], and use the Encoder configuration methods to set the desired marshal
// behavior instead:
//
// buf := bytes.NewBuffer(dst)
// vw, err := bsonrw.NewExtJSONValueWriter(buf, true, false)
// if err != nil {
// panic(err)
// }
// enc, err := bson.NewEncoder(vw)
// if err != nil {
// panic(err)
// }
// enc.IntMinSize()
//
// See [Encoder] for more examples.
func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
sw := new(bsonrw.SliceWriter)
*sw = dst

View File

@ -328,6 +328,7 @@ func ParseDecimal128(s string) (Decimal128, error) {
return dErr(s)
}
// Parse the significand (i.e. the non-exponent part) as a big.Int.
bi, ok := new(big.Int).SetString(intPart+decPart, 10)
if !ok {
return dErr(s)
@ -360,6 +361,19 @@ func ParseDecimal128FromBigInt(bi *big.Int, exp int) (Decimal128, bool) {
q := new(big.Int)
r := new(big.Int)
// If the significand is zero, the logical value will always be zero, independent of the
// exponent. However, the loops for handling out-of-range exponent values below may be extremely
// slow for zero values because the significand never changes. Limit the exponent value to the
// supported range here to prevent entering the loops below.
if bi.Cmp(zero) == 0 {
if exp > MaxDecimal128Exp {
exp = MaxDecimal128Exp
}
if exp < MinDecimal128Exp {
exp = MinDecimal128Exp
}
}
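// Illustrative note, not part of this change: with the clamp above, a call such
// as ParseDecimal128FromBigInt(big.NewInt(0), 1_000_000) now returns a
// representable zero immediately instead of iterating on the exponent in the
// loops below.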
for bigIntCmpAbs(bi, maxS) == 1 {
bi, _ = q.QuoRem(bi, ten, r)
if r.Cmp(zero) != 0 {

View File

@ -82,18 +82,18 @@ func ObjectIDFromHex(s string) (ObjectID, error) {
return NilObjectID, ErrInvalidHex
}
b, err := hex.DecodeString(s)
var oid [12]byte
_, err := hex.Decode(oid[:], []byte(s))
if err != nil {
return NilObjectID, err
}
var oid [12]byte
copy(oid[:], b)
return oid, nil
}
// IsValidObjectID returns true if the provided hex string represents a valid ObjectID and false if not.
//
// Deprecated: Use ObjectIDFromHex and check the error instead.
func IsValidObjectID(s string) bool {
_, err := ObjectIDFromHex(s)
return err == nil

View File

@ -45,7 +45,7 @@ var _ json.Unmarshaler = (*DateTime)(nil)
// MarshalJSON marshal to time type.
func (d DateTime) MarshalJSON() ([]byte, error) {
return json.Marshal(d.Time())
return json.Marshal(d.Time().UTC())
}
// UnmarshalJSON creates a primitive.DateTime from a JSON string.
@ -141,6 +141,16 @@ type Timestamp struct {
I uint32
}
// After reports whether the time instant tp is after tp2.
func (tp Timestamp) After(tp2 Timestamp) bool {
return tp.T > tp2.T || (tp.T == tp2.T && tp.I > tp2.I)
}
// Before reports whether the time instant tp is before tp2.
func (tp Timestamp) Before(tp2 Timestamp) bool {
return tp.T < tp2.T || (tp.T == tp2.T && tp.I < tp2.I)
}
// Equal compares tp to tp2 and returns true if they are equal.
func (tp Timestamp) Equal(tp2 Timestamp) bool {
return tp.T == tp2.T && tp.I == tp2.I
@ -151,24 +161,25 @@ func (tp Timestamp) IsZero() bool {
return tp.T == 0 && tp.I == 0
}
// CompareTimestamp returns an integer comparing two Timestamps, where T is compared first, followed by I.
// Returns 0 if tp = tp2, 1 if tp > tp2, -1 if tp < tp2.
func CompareTimestamp(tp, tp2 Timestamp) int {
if tp.Equal(tp2) {
// Compare compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after
// tp2, it returns +1; if they're the same, it returns 0.
func (tp Timestamp) Compare(tp2 Timestamp) int {
switch {
case tp.Equal(tp2):
return 0
}
if tp.T > tp2.T {
return 1
}
if tp.T < tp2.T {
case tp.Before(tp2):
return -1
default:
return +1
}
// Compare I values because T values are equal
if tp.I > tp2.I {
return 1
}
return -1
}
// CompareTimestamp compares the time instant tp with tp2. If tp is before tp2, it returns -1; if tp is after
// tp2, it returns +1; if they're the same, it returns 0.
//
// Deprecated: Use Timestamp.Compare instead.
func CompareTimestamp(tp, tp2 Timestamp) int {
return tp.Compare(tp2)
}
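
A brief sketch of the new ordering helpers; T is compared first and I breaks ties, matching the Compare implementation above. The values are arbitrary:

// Sketch only. Assumes imports: fmt, go.mongodb.org/mongo-driver/bson/primitive.
func timestampOrderingExample() {
	a := primitive.Timestamp{T: 100, I: 1}
	b := primitive.Timestamp{T: 100, I: 2}

	fmt.Println(a.Before(b))                              // true: equal T, smaller I
	fmt.Println(b.After(a))                               // true
	fmt.Println(a.Compare(b), b.Compare(a), a.Compare(a)) // -1 1 0
}
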
// MinKey represents the BSON minkey value.
@ -186,6 +197,9 @@ type MaxKey struct{}
type D []E
// Map creates a map from the elements of the D.
//
// Deprecated: Converting directly from a D to an M will not be supported in Go Driver 2.0. Instead,
// users should marshal the D to BSON using bson.Marshal and unmarshal it to M using bson.Unmarshal.
func (d D) Map() M {
m := make(M, len(d))
for _, e := range d {

View File

@ -8,6 +8,7 @@ package bson
import (
"errors"
"fmt"
"reflect"
"go.mongodb.org/mongo-driver/bson/bsoncodec"
@ -21,10 +22,16 @@ var primitiveCodecs PrimitiveCodecs
// PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types
// defined in this package.
//
// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders
// registered.
type PrimitiveCodecs struct{}
// RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs
// with the provided RegistryBuilder. If rb is nil, a new empty RegistryBuilder will be created.
//
// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders
// registered.
func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) {
if rb == nil {
panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil"))
@ -38,18 +45,35 @@ func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder)
}
// RawValueEncodeValue is the ValueEncoderFunc for RawValue.
func (PrimitiveCodecs) RawValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// If the RawValue's Type is "invalid" and the RawValue's Value is not empty or
// nil, then this method will return an error.
//
// Deprecated: Use bson.NewRegistry to get a registry with all primitive
// encoders and decoders registered.
func (PrimitiveCodecs) RawValueEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tRawValue {
return bsoncodec.ValueEncoderError{Name: "RawValueEncodeValue", Types: []reflect.Type{tRawValue}, Received: val}
return bsoncodec.ValueEncoderError{
Name: "RawValueEncodeValue",
Types: []reflect.Type{tRawValue},
Received: val,
}
}
rawvalue := val.Interface().(RawValue)
if !rawvalue.Type.IsValid() {
return fmt.Errorf("the RawValue Type specifies an invalid BSON type: %#x", byte(rawvalue.Type))
}
return bsonrw.Copier{}.CopyValueFromBytes(vw, rawvalue.Type, rawvalue.Value)
}
// RawValueDecodeValue is the ValueDecoderFunc for RawValue.
func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
//
// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders
// registered.
func (PrimitiveCodecs) RawValueDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tRawValue {
return bsoncodec.ValueDecoderError{Name: "RawValueDecodeValue", Types: []reflect.Type{tRawValue}, Received: val}
}
@ -64,7 +88,10 @@ func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw
}
// RawEncodeValue is the ValueEncoderFunc for Reader.
func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
//
// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders
// registered.
func (PrimitiveCodecs) RawEncodeValue(_ bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tRaw {
return bsoncodec.ValueEncoderError{Name: "RawEncodeValue", Types: []reflect.Type{tRaw}, Received: val}
}
@ -75,7 +102,10 @@ func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.Valu
}
// RawDecodeValue is the ValueDecoderFunc for Reader.
func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
//
// Deprecated: Use bson.NewRegistry to get a registry with all primitive encoders and decoders
// registered.
func (PrimitiveCodecs) RawDecodeValue(_ bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tRaw {
return bsoncodec.ValueDecoderError{Name: "RawDecodeValue", Types: []reflect.Type{tRaw}, Received: val}
}

View File

@ -16,18 +16,27 @@ import (
// ErrNilReader indicates that an operation was attempted on a nil bson.Reader.
var ErrNilReader = errors.New("nil reader")
// Raw is a wrapper around a byte slice. It will interpret the slice as a
// BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the
// methods on this type and associated types come from the bsoncore package.
// Raw is a raw encoded BSON document. It can be used to delay BSON document decoding or precompute
// a BSON encoded document.
//
// A Raw must be a full BSON document. Use the RawValue type for individual BSON values.
type Raw []byte
// NewFromIOReader reads in a document from the given io.Reader and constructs a Raw from
// it.
func NewFromIOReader(r io.Reader) (Raw, error) {
// ReadDocument reads a BSON document from the io.Reader and returns it as a bson.Raw. If the
// reader contains multiple BSON documents, only the first document is read.
func ReadDocument(r io.Reader) (Raw, error) {
doc, err := bsoncore.NewDocumentFromReader(r)
return Raw(doc), err
}
// NewFromIOReader reads a BSON document from the io.Reader and returns it as a bson.Raw. If the
// reader contains multiple BSON documents, only the first document is read.
//
// Deprecated: Use ReadDocument instead.
func NewFromIOReader(r io.Reader) (Raw, error) {
return ReadDocument(r)
}
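
A minimal sketch of ReadDocument, the preferred replacement for NewFromIOReader; the document content is illustrative:

// Sketch only. Assumes imports: bytes, fmt, go.mongodb.org/mongo-driver/bson.
func readDocumentExample() {
	data, err := bson.Marshal(bson.D{{Key: "name", Value: "podman"}})
	if err != nil {
		panic(err)
	}
	raw, err := bson.ReadDocument(bytes.NewReader(data))
	if err != nil {
		panic(err)
	}
	fmt.Println(raw.Lookup("name").StringValue()) // podman
}
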
// Validate validates the document. This method only validates the first document in
// the slice, to validate other documents, the slice must be resliced.
func (r Raw) Validate() (err error) { return bsoncore.Document(r).Validate() }
@ -51,12 +60,19 @@ func (r Raw) LookupErr(key ...string) (RawValue, error) {
// elements. If the document is not valid, the elements up to the invalid point will be returned
// along with an error.
func (r Raw) Elements() ([]RawElement, error) {
elems, err := bsoncore.Document(r).Elements()
doc := bsoncore.Document(r)
if len(doc) == 0 {
return nil, nil
}
elems, err := doc.Elements()
if err != nil {
return nil, err
}
relems := make([]RawElement, 0, len(elems))
for _, elem := range elems {
relems = append(relems, RawElement(elem))
}
return relems, err
return relems, nil
}
// Values returns this document as a slice of values. The returned slice will contain valid values.
@ -81,5 +97,5 @@ func (r Raw) IndexErr(index uint) (RawElement, error) {
return RawElement(elem), err
}
// String implements the fmt.Stringer interface.
// String returns the BSON document encoded as Extended JSON.
func (r Raw) String() string { return bsoncore.Document(r).String() }

View File

@ -10,10 +10,7 @@ import (
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)
// RawElement represents a BSON element in byte form. This type provides a simple way to
// transform a slice of bytes into a BSON element and extract information from it.
//
// RawElement is a thin wrapper around a bsoncore.Element.
// RawElement is a raw encoded BSON document or array element.
type RawElement []byte
// Key returns the key for this element. If the element is not valid, this method returns an empty
@ -36,7 +33,7 @@ func (re RawElement) ValueErr() (RawValue, error) {
// Validate ensures re is a valid BSON element.
func (re RawElement) Validate() error { return bsoncore.Element(re).Validate() }
// String implements the fmt.Stringer interface. The output will be in extended JSON format.
// String returns the BSON element encoded as Extended JSON.
func (re RawElement) String() string {
doc := bsoncore.BuildDocument(nil, re)
j, err := MarshalExtJSON(Raw(doc), true, false)

View File

@ -26,11 +26,10 @@ var ErrNilContext = errors.New("DecodeContext cannot be nil")
// ErrNilRegistry is returned when the provided registry is nil.
var ErrNilRegistry = errors.New("Registry cannot be nil")
// RawValue represents a BSON value in byte form. It can be used to hold unprocessed BSON or to
// defer processing of BSON. Type is the BSON type of the value and Value are the raw bytes that
// represent the element.
// RawValue is a raw encoded BSON value. It can be used to delay BSON value decoding or precompute
// BSON encoded value. Type is the BSON type of the value and Value is the raw encoded BSON value.
//
// This type wraps bsoncore.Value for most of it's functionality.
// A RawValue must be an individual BSON value. Use the Raw type for full BSON documents.
type RawValue struct {
Type bsontype.Type
Value []byte
@ -38,6 +37,12 @@ type RawValue struct {
r *bsoncodec.Registry
}
// IsZero reports whether the RawValue is zero, i.e. no data is present on
// the RawValue. It returns true if Type is 0 and Value is empty or nil.
func (rv RawValue) IsZero() bool {
return rv.Type == 0x00 && len(rv.Value) == 0
}
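
A tiny sketch of the new IsZero helper; MarshalValue is used only to produce a populated RawValue for contrast:

// Sketch only. Assumes imports: fmt, go.mongodb.org/mongo-driver/bson.
func rawValueIsZeroExample() {
	var empty bson.RawValue
	fmt.Println(empty.IsZero()) // true

	t, data, err := bson.MarshalValue("hello")
	if err != nil {
		panic(err)
	}
	populated := bson.RawValue{Type: t, Value: data}
	fmt.Println(populated.IsZero()) // false
}
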
// Unmarshal deserializes BSON into the provided val. If RawValue cannot be unmarshaled into val, an
// error is returned. This method will use the registry used to create the RawValue, if the RawValue
// was created from partial BSON processing, or it will use the default registry. Users wishing to
@ -268,10 +273,16 @@ func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32
// AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method
// will panic.
//
// Deprecated: Use AsInt64 instead. If an int32 is required, convert the returned value to an int32
// and perform any required overflow/underflow checking.
func (rv RawValue) AsInt32() int32 { return convertToCoreValue(rv).AsInt32() }
// AsInt32OK is the same as AsInt32, except that it returns a boolean instead of
// panicking.
//
// Deprecated: Use AsInt64OK instead. If an int32 is required, convert the returned value to an
// int32 and perform any required overflow/underflow checking.
func (rv RawValue) AsInt32OK() (int32, bool) { return convertToCoreValue(rv).AsInt32OK() }
// Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a

View File

@ -6,15 +6,19 @@
package bson
import "go.mongodb.org/mongo-driver/bson/bsoncodec"
import (
"go.mongodb.org/mongo-driver/bson/bsoncodec"
)
// DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the
// primitive codecs.
var DefaultRegistry = NewRegistryBuilder().Build()
var DefaultRegistry = NewRegistry()
// NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and
// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the
// PrimitiveCodecs type in this package.
//
// Deprecated: Use NewRegistry instead.
func NewRegistryBuilder() *bsoncodec.RegistryBuilder {
rb := bsoncodec.NewRegistryBuilder()
bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
@ -22,3 +26,10 @@ func NewRegistryBuilder() *bsoncodec.RegistryBuilder {
primitiveCodecs.RegisterPrimitiveCodecs(rb)
return rb
}
// NewRegistry creates a new Registry configured with the default encoders and decoders from the
// bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the PrimitiveCodecs
// type in this package.
func NewRegistry() *bsoncodec.Registry {
return NewRegistryBuilder().Build()
}
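
A short sketch pairing NewRegistry with a Decoder, mirroring the deprecation guidance elsewhere in this change; the function name and parameters are illustrative:

// Sketch only. Assumes imports: go.mongodb.org/mongo-driver/bson, go.mongodb.org/mongo-driver/bson/bsonrw.
func decodeWithDefaultRegistry(data []byte, out interface{}) error {
	reg := bson.NewRegistry() // default encoders, decoders, and primitive codecs

	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
	if err != nil {
		return err
	}
	if err := dec.SetRegistry(reg); err != nil {
		return err
	}
	return dec.Decode(out)
}
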

View File

@ -10,7 +10,7 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype"
)
// These constants uniquely refer to each BSON type.
// BSON element types as described in https://bsonspec.org/spec.html.
const (
TypeDouble = bsontype.Double
TypeString = bsontype.String
@ -34,3 +34,17 @@ const (
TypeMinKey = bsontype.MinKey
TypeMaxKey = bsontype.MaxKey
)
// BSON binary element subtypes as described in https://bsonspec.org/spec.html.
const (
TypeBinaryGeneric = bsontype.BinaryGeneric
TypeBinaryFunction = bsontype.BinaryFunction
TypeBinaryBinaryOld = bsontype.BinaryBinaryOld
TypeBinaryUUIDOld = bsontype.BinaryUUIDOld
TypeBinaryUUID = bsontype.BinaryUUID
TypeBinaryMD5 = bsontype.BinaryMD5
TypeBinaryEncrypted = bsontype.BinaryEncrypted
TypeBinaryColumn = bsontype.BinaryColumn
TypeBinarySensitive = bsontype.BinarySensitive
TypeBinaryUserDefined = bsontype.BinaryUserDefined
)
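
A small sketch using one of the new subtype constants, assuming they remain plain byte values re-exported from bsontype; the field name and UUID bytes are placeholders:

// Sketch only. Assumes imports: go.mongodb.org/mongo-driver/bson, go.mongodb.org/mongo-driver/bson/primitive.
func binarySubtypeExample(uuidBytes []byte) bson.D {
	return bson.D{{Key: "session_id", Value: primitive.Binary{
		Subtype: bson.TypeBinaryUUID, // re-exported byte constant
		Data:    uuidBytes,
	}}}
}
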

View File

@ -14,18 +14,26 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype"
)
// Unmarshaler is an interface implemented by types that can unmarshal a BSON
// document representation of themselves. The BSON bytes can be assumed to be
// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
// after returning.
// Unmarshaler is the interface implemented by types that can unmarshal a BSON
// document representation of themselves. The input can be assumed to be a valid
// encoding of a BSON document. UnmarshalBSON must copy the BSON data if it
// wishes to retain the data after returning.
//
// Unmarshaler is only used to unmarshal full BSON documents. To create custom
// BSON unmarshaling behavior for individual values in a BSON document,
// implement the ValueUnmarshaler interface instead.
type Unmarshaler interface {
UnmarshalBSON([]byte) error
}
// ValueUnmarshaler is an interface implemented by types that can unmarshal a
// BSON value representation of themselves. The BSON bytes and type can be
// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
// wishes to retain the data after returning.
// ValueUnmarshaler is the interface implemented by types that can unmarshal a
// BSON value representation of themselves. The input can be assumed to be a
// valid encoding of a BSON value. UnmarshalBSONValue must copy the BSON value
// bytes if it wishes to retain the data after returning.
//
// ValueUnmarshaler is only used to unmarshal individual values in a BSON
// document. To create custom BSON unmarshaling behavior for an entire BSON
// document, implement the Unmarshaler interface instead.
type ValueUnmarshaler interface {
UnmarshalBSONValue(bsontype.Type, []byte) error
}
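
An illustrative implementation of ValueUnmarshaler for a single string value, as opposed to Unmarshaler, which handles whole documents; the lower-casing behavior and type name are arbitrary:

// Sketch only. Assumes imports: errors, strings,
// go.mongodb.org/mongo-driver/bson/bsontype, go.mongodb.org/mongo-driver/x/bsonx/bsoncore.
type lowercaseString string

func (s *lowercaseString) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
	if t != bsontype.String {
		return errors.New("lowercaseString: expected a BSON string")
	}
	str, _, ok := bsoncore.ReadString(data)
	if !ok {
		return errors.New("lowercaseString: invalid BSON string bytes")
	}
	*s = lowercaseString(strings.ToLower(str))
	return nil
}
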
@ -40,6 +48,16 @@ func Unmarshal(data []byte, val interface{}) error {
// UnmarshalWithRegistry parses the BSON-encoded data using Registry r and
// stores the result in the value pointed to by val. If val is nil or not
// a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError.
//
// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead:
//
// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
// if err != nil {
// panic(err)
// }
// dec.SetRegistry(reg)
//
// See [Decoder] for more examples.
func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) error {
vr := bsonrw.NewBSONDocumentReader(data)
return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val)
@ -48,11 +66,40 @@ func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{})
// UnmarshalWithContext parses the BSON-encoded data using DecodeContext dc and
// stores the result in the value pointed to by val. If val is nil or not
// a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError.
//
// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal
// behavior instead:
//
// dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
// if err != nil {
// panic(err)
// }
// dec.DefaultDocumentM()
//
// See [Decoder] for more examples.
func UnmarshalWithContext(dc bsoncodec.DecodeContext, data []byte, val interface{}) error {
vr := bsonrw.NewBSONDocumentReader(data)
return unmarshalFromReader(dc, vr, val)
}
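
A brief sketch of the Decoder-based replacement named in the deprecation notices above, using DefaultDocumentM so documents decoded into interface{} become bson.M; the function name is illustrative:

// Sketch only. Assumes imports: go.mongodb.org/mongo-driver/bson, go.mongodb.org/mongo-driver/bson/bsonrw.
func decodeToM(data []byte) (interface{}, error) {
	dec, err := bson.NewDecoder(bsonrw.NewBSONDocumentReader(data))
	if err != nil {
		return nil, err
	}
	dec.DefaultDocumentM() // interface{} targets get bson.M instead of bson.D

	var out interface{}
	if err := dec.Decode(&out); err != nil {
		return nil, err
	}
	return out, nil
}
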
// UnmarshalValue parses the BSON value of type t with bson.DefaultRegistry and
// stores the result in the value pointed to by val. If val is nil or not a pointer,
// UnmarshalValue returns an error.
func UnmarshalValue(t bsontype.Type, data []byte, val interface{}) error {
return UnmarshalValueWithRegistry(DefaultRegistry, t, data, val)
}
// UnmarshalValueWithRegistry parses the BSON value of type t with registry r and
// stores the result in the value pointed to by val. If val is nil or not a pointer,
// UnmarshalValue returns an error.
//
// Deprecated: Using a custom registry to unmarshal individual BSON values will not be supported in
// Go Driver 2.0.
func UnmarshalValueWithRegistry(r *bsoncodec.Registry, t bsontype.Type, data []byte, val interface{}) error {
vr := bsonrw.NewBSONValueReader(t, data)
return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val)
}
// UnmarshalExtJSON parses the extended JSON-encoded data and stores the result
// in the value pointed to by val. If val is nil or not a pointer, Unmarshal
// returns InvalidUnmarshalError.
@ -63,6 +110,20 @@ func UnmarshalExtJSON(data []byte, canonical bool, val interface{}) error {
// UnmarshalExtJSONWithRegistry parses the extended JSON-encoded data using
// Registry r and stores the result in the value pointed to by val. If val is
// nil or not a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError.
//
// Deprecated: Use [NewDecoder] and specify the Registry by calling [Decoder.SetRegistry] instead:
//
// vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true)
// if err != nil {
// panic(err)
// }
// dec, err := bson.NewDecoder(vr)
// if err != nil {
// panic(err)
// }
// dec.SetRegistry(reg)
//
// See [Decoder] for more examples.
func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical bool, val interface{}) error {
ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
if err != nil {
@ -75,6 +136,21 @@ func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical
// UnmarshalExtJSONWithContext parses the extended JSON-encoded data using
// DecodeContext dc and stores the result in the value pointed to by val. If val is
// nil or not a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError.
//
// Deprecated: Use [NewDecoder] and use the Decoder configuration methods to set the desired unmarshal
// behavior instead:
//
// vr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), true)
// if err != nil {
// panic(err)
// }
// dec, err := bson.NewDecoder(vr)
// if err != nil {
// panic(err)
// }
// dec.DefaultDocumentM()
//
// See [Decoder] for more examples.
func UnmarshalExtJSONWithContext(dc bsoncodec.DecodeContext, data []byte, canonical bool, val interface{}) error {
ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
if err != nil {

View File

@ -7,10 +7,10 @@
package bsoncore
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
// NewArrayLengthError creates and returns an error for when the length of an array exceeds the
@ -53,7 +53,7 @@ func (a Array) DebugString() string {
if len(a) < 5 {
return "<malformed>"
}
var buf bytes.Buffer
var buf strings.Builder
buf.WriteString("Array")
length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length
buf.WriteByte('(')
@ -69,7 +69,7 @@ func (a Array) DebugString() string {
buf.WriteString(fmt.Sprintf("<malformed (%d)>", length))
break
}
fmt.Fprintf(&buf, "%s", elem.Value().DebugString())
buf.WriteString(elem.Value().DebugString())
if length != 1 {
buf.WriteByte(',')
}
@ -85,7 +85,7 @@ func (a Array) String() string {
if len(a) < 5 {
return ""
}
var buf bytes.Buffer
var buf strings.Builder
buf.WriteByte('[')
length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length
@ -100,7 +100,7 @@ func (a Array) String() string {
if !ok {
return ""
}
fmt.Fprintf(&buf, "%s", elem.Value().String())
buf.WriteString(elem.Value().String())
if length > 1 {
buf.WriteByte(',')
}

View File

@ -4,25 +4,6 @@
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
// Package bsoncore contains functions that can be used to encode and decode BSON
// elements and values to or from a slice of bytes. These functions are aimed at
// allowing low level manipulation of BSON and can be used to build a higher
// level BSON library.
//
// The Read* functions within this package return the values of the element and
// a boolean indicating if the values are valid. A boolean was used instead of
// an error because any error that would be returned would be the same: not
// enough bytes. This library attempts to do no validation, it will only return
// false if there are not enough bytes for an item to be read. For example, the
// ReadDocument function checks the length, if that length is larger than the
// number of bytes available, it will return false, if there are enough bytes, it
// will return those bytes and true. It is the consumers responsibility to
// validate those bytes.
//
// The Append* functions within this package will append the type value to the
// given dst slice. If the slice has enough capacity, it will not grow the
// slice. The Append*Element functions within this package operate in the same
// way, but additionally append the BSON type and the key before the value.
package bsoncore // import "go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
import (
@ -254,7 +235,7 @@ func BuildDocumentValue(elems ...[]byte) Value {
return Value{Type: bsontype.EmbeddedDocument, Data: BuildDocument(nil, elems...)}
}
// BuildDocumentElement will append a BSON embedded document elemnt using key and the provided
// BuildDocumentElement will append a BSON embedded document element using key and the provided
// elements and return the extended buffer.
func BuildDocumentElement(dst []byte, key string, elems ...[]byte) []byte {
return BuildDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), elems...)
@ -844,6 +825,9 @@ func readLengthBytes(src []byte) ([]byte, []byte, bool) {
if !ok {
return nil, src, false
}
if l < 4 {
return nil, src, false
}
if len(src) < int(l) {
return nil, src, false
}

View File

@ -0,0 +1,29 @@
// Copyright (C) MongoDB, Inc. 2022-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
// Package bsoncore contains functions that can be used to encode and decode BSON
// elements and values to or from a slice of bytes. These functions are aimed at
// allowing low level manipulation of BSON and can be used to build a higher
// level BSON library.
//
// The Read* functions within this package return the values of the element and
// a boolean indicating if the values are valid. A boolean was used instead of
// an error because any error that would be returned would be the same: not
// enough bytes. This library attempts to do no validation, it will only return
// false if there are not enough bytes for an item to be read. For example, the
// ReadDocument function checks the length, if that length is larger than the
// number of bytes available, it will return false, if there are enough bytes, it
// will return those bytes and true. It is the consumers responsibility to
// validate those bytes.
//
// The Append* functions within this package will append the type value to the
// given dst slice. If the slice has enough capacity, it will not grow the
// slice. The Append*Element functions within this package operate in the same
// way, but additionally append the BSON type and the key before the value.
//
// Warning: Package bsoncore is unstable and there is no backward compatibility
// guarantee. It is experimental and subject to change.
package bsoncore
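
A compact sketch of the Append*/Read* flow this package documentation describes; the keys and values are arbitrary:

// Sketch only. Assumes imports: fmt, go.mongodb.org/mongo-driver/x/bsonx/bsoncore.
func bsoncoreExample() {
	idx, doc := bsoncore.AppendDocumentStart(nil)
	doc = bsoncore.AppendStringElement(doc, "name", "podman")
	doc = bsoncore.AppendInt32Element(doc, "count", 3)
	doc, err := bsoncore.AppendDocumentEnd(doc, idx)
	if err != nil {
		panic(err)
	}

	// Lookup walks the raw bytes; no intermediate decoding is performed.
	fmt.Println(bsoncore.Document(doc).Lookup("name").StringValue()) // podman
	fmt.Println(bsoncore.Document(doc).Lookup("count").Int32())      // 3
}
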

View File

@ -7,11 +7,11 @@
package bsoncore
import (
"bytes"
"errors"
"fmt"
"io"
"strconv"
"strings"
"go.mongodb.org/mongo-driver/bson/bsontype"
)
@ -237,7 +237,7 @@ func (d Document) DebugString() string {
if len(d) < 5 {
return "<malformed>"
}
var buf bytes.Buffer
var buf strings.Builder
buf.WriteString("Document")
length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
buf.WriteByte('(')
@ -253,7 +253,7 @@ func (d Document) DebugString() string {
buf.WriteString(fmt.Sprintf("<malformed (%d)>", length))
break
}
fmt.Fprintf(&buf, "%s ", elem.DebugString())
buf.WriteString(elem.DebugString())
}
buf.WriteByte('}')
@ -266,7 +266,7 @@ func (d Document) String() string {
if len(d) < 5 {
return ""
}
var buf bytes.Buffer
var buf strings.Builder
buf.WriteByte('{')
length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
@ -285,7 +285,7 @@ func (d Document) String() string {
if !ok {
return ""
}
fmt.Fprintf(&buf, "%s", elem.String())
buf.WriteString(elem.String())
first = false
}
buf.WriteByte('}')

View File

@ -129,7 +129,7 @@ func (e Element) String() string {
if !valid {
return ""
}
return fmt.Sprintf(`"%s": %v`, key, val)
return "\"" + string(key) + "\": " + val.String()
}
// DebugString outputs a human readable version of RawElement. It will attempt to stringify the

View File

@ -59,8 +59,6 @@ func (v Value) IsNumber() bool {
// AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method
// will panic.
//
// TODO(skriptble): Add support for Decimal128.
func (v Value) AsInt32() int32 {
if !v.IsNumber() {
panic(ElementTypeError{"bsoncore.Value.AsInt32", v.Type})
@ -93,8 +91,6 @@ func (v Value) AsInt32() int32 {
// AsInt32OK functions the same as AsInt32 but returns a boolean instead of panicking. False
// indicates an error.
//
// TODO(skriptble): Add support for Decimal128.
func (v Value) AsInt32OK() (int32, bool) {
if !v.IsNumber() {
return 0, false
@ -127,8 +123,6 @@ func (v Value) AsInt32OK() (int32, bool) {
// AsInt64 returns a BSON number as an int64. If the BSON type is not a numeric one, this method
// will panic.
//
// TODO(skriptble): Add support for Decimal128.
func (v Value) AsInt64() int64 {
if !v.IsNumber() {
panic(ElementTypeError{"bsoncore.Value.AsInt64", v.Type})
@ -162,8 +156,6 @@ func (v Value) AsInt64() int64 {
// AsInt64OK functions the same as AsInt64 but returns a boolean instead of panicking. False
// indicates an error.
//
// TODO(skriptble): Add support for Decimal128.
func (v Value) AsInt64OK() (int64, bool) {
if !v.IsNumber() {
return 0, false
@ -198,21 +190,14 @@ func (v Value) AsInt64OK() (int64, bool) {
// AsFloat64 returns a BSON number as an float64. If the BSON type is not a numeric one, this method
// will panic.
//
// TODO(skriptble): Add support for Decimal128.
func (v Value) AsFloat64() float64 { return 0 }
// TODO(GODRIVER-2751): Implement AsFloat64.
// func (v Value) AsFloat64() float64
// AsFloat64OK functions the same as AsFloat64 but returns a boolean instead of panicking. False
// indicates an error.
//
// TODO(skriptble): Add support for Decimal128.
func (v Value) AsFloat64OK() (float64, bool) { return 0, false }
// Add will add this value to another. This is currently only implemented for strings and numbers.
// If either value is a string, the other type is coerced into a string and added to the other.
//
// This method will alter v and will attempt to reuse the []byte of v. If the []byte is too small,
// it will be expanded.
func (v *Value) Add(v2 Value) error { return nil }
// TODO(GODRIVER-2751): Implement AsFloat64OK.
// func (v Value) AsFloat64OK() (float64, bool)
// Equal compares v to v2 and returns true if they are equal.
func (v Value) Equal(v2 Value) bool {

vendor/modules.txt (vendored, 9 changed lines)
View File

@ -236,7 +236,7 @@ github.com/containers/conmon/runner/config
# github.com/containers/gvisor-tap-vsock v0.7.1
## explicit; go 1.20
github.com/containers/gvisor-tap-vsock/pkg/types
# github.com/containers/image/v5 v5.29.1-0.20231201205726-671ab94a09ea
# github.com/containers/image/v5 v5.29.1-0.20231214202217-8eabe0f6b3eb
## explicit; go 1.19
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@ -367,6 +367,7 @@ github.com/containers/storage/pkg/chunked
github.com/containers/storage/pkg/chunked/compressor
github.com/containers/storage/pkg/chunked/dump
github.com/containers/storage/pkg/chunked/internal
github.com/containers/storage/pkg/chunked/toc
github.com/containers/storage/pkg/config
github.com/containers/storage/pkg/devicemapper
github.com/containers/storage/pkg/directory
@ -582,7 +583,7 @@ github.com/go-openapi/runtime/yamlpc
# github.com/go-openapi/spec v0.20.9
## explicit; go 1.13
github.com/go-openapi/spec
# github.com/go-openapi/strfmt v0.21.7
# github.com/go-openapi/strfmt v0.21.9
## explicit; go 1.19
github.com/go-openapi/strfmt
# github.com/go-openapi/swag v0.22.4
@ -975,7 +976,7 @@ github.com/sigstore/rekor/pkg/generated/client/pubkey
github.com/sigstore/rekor/pkg/generated/client/tlog
github.com/sigstore/rekor/pkg/generated/models
github.com/sigstore/rekor/pkg/util
# github.com/sigstore/sigstore v1.7.5
# github.com/sigstore/sigstore v1.7.6
## explicit; go 1.20
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/oauth
@ -1076,7 +1077,7 @@ github.com/yusufpapurcu/wmi
# go.etcd.io/bbolt v1.3.8
## explicit; go 1.17
go.etcd.io/bbolt
# go.mongodb.org/mongo-driver v1.11.3
# go.mongodb.org/mongo-driver v1.13.0
## explicit; go 1.13
go.mongodb.org/mongo-driver/bson
go.mongodb.org/mongo-driver/bson/bsoncodec