mirror of https://github.com/knative/pkg.git
Bump k8s.io/code-generator from 0.33.4 to 0.34.1
Bumps [k8s.io/code-generator](https://github.com/kubernetes/code-generator) from 0.33.4 to 0.34.1. - [Commits](https://github.com/kubernetes/code-generator/compare/v0.33.4...v0.34.1) --- updated-dependencies: - dependency-name: k8s.io/code-generator dependency-version: 0.34.1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
parent
8c9c1d368e
commit
da186d7ea0
21
go.mod
21
go.mod
|
|
@ -40,12 +40,12 @@ require (
|
|||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/api v0.33.4
|
||||
k8s.io/apiextensions-apiserver v0.33.4
|
||||
k8s.io/apimachinery v0.33.4
|
||||
k8s.io/apimachinery v0.34.1
|
||||
k8s.io/client-go v0.33.4
|
||||
k8s.io/code-generator v0.33.4
|
||||
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7
|
||||
k8s.io/code-generator v0.34.1
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f
|
||||
k8s.io/klog/v2 v2.130.1
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
|
||||
knative.dev/hack v0.0.0-20250902153942-1499de21e119
|
||||
sigs.k8s.io/randfill v1.0.0
|
||||
sigs.k8s.io/yaml v1.6.0
|
||||
|
|
@ -55,17 +55,17 @@ require (
|
|||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect
|
||||
github.com/influxdata/tdigest v0.0.1 // indirect
|
||||
|
|
@ -73,9 +73,10 @@ require (
|
|||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/otlptranslator v0.0.0-20250717125610-8549f4ab4f8f // indirect
|
||||
github.com/prometheus/procfs v0.17.0 // indirect
|
||||
|
|
@ -86,6 +87,7 @@ require (
|
|||
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
|
|
@ -100,7 +102,8 @@ require (
|
|||
google.golang.org/protobuf v1.36.8 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
)
|
||||
|
|
|
|||
41
go.sum
41
go.sum
|
|
@ -16,16 +16,16 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
|||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654 h1:XOPLOMn/zT4jIgxfxSsoXPxkrzz0FaCHwp33x5POJ+Q=
|
||||
github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU=
|
||||
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
|
||||
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
|
|
@ -47,8 +47,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
|||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
|
||||
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
|
|
@ -89,8 +89,9 @@ github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUt
|
|||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
|
||||
|
|
@ -184,8 +185,8 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
|
|||
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=
|
||||
go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
|
|
@ -264,20 +265,20 @@ k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk=
|
|||
k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc=
|
||||
k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU=
|
||||
k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs=
|
||||
k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s=
|
||||
k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||
k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
|
||||
k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
|
||||
k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw=
|
||||
k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY=
|
||||
k8s.io/code-generator v0.33.4 h1:DiA801QxqApRIBh3OWULasVAUA237XnYvFNMh+E34Y8=
|
||||
k8s.io/code-generator v0.33.4/go.mod h1:ifWxKWhEl/Z1K7WmWAyOBEf3ex/i546ingCzLC8YVIY=
|
||||
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 h1:2OX19X59HxDprNCVrWi6jb7LW1PoqTlYqEq5H2oetog=
|
||||
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/code-generator v0.34.1 h1:WpphT26E+j7tEgIUfFr5WfbJrktCGzB3JoJH9149xYc=
|
||||
k8s.io/code-generator v0.34.1/go.mod h1:DeWjekbDnJWRwpw3s0Jat87c+e0TgkxoR4ar608yqvg=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f h1:SLb+kxmzfA87x4E4brQzB33VBbT2+x7Zq9ROIHmGn9Q=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
|
||||
k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
knative.dev/hack v0.0.0-20250902153942-1499de21e119 h1:NbQvjnFK1tL489LN0qAybWy0E17Jpziwcv/XIHwfp6M=
|
||||
knative.dev/hack v0.0.0-20250902153942-1499de21e119/go.mod h1:R0ritgYtjLDO9527h5vb5X6gfvt5LCrJ55BNbVDsWiY=
|
||||
pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw=
|
||||
|
|
@ -289,6 +290,8 @@ sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
|||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
|
|
|
|||
|
|
@ -1,5 +1,8 @@
|
|||
# Change history of go-restful
|
||||
|
||||
## [v3.12.2] - 2025-02-21
|
||||
|
||||
- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt)
|
||||
|
||||
## [v3.12.1] - 2024-05-28
|
||||
|
||||
|
|
@ -18,7 +21,7 @@
|
|||
|
||||
- fix by restoring custom JSON handler functions (Mike Beaumont #540)
|
||||
|
||||
## [v3.12.0] - 2023-08-19
|
||||
## [v3.11.0] - 2023-08-19
|
||||
|
||||
- restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
|
||||
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ go-restful
|
|||
package for building REST-style Web Services using Google Go
|
||||
|
||||
[](https://goreportcard.com/report/github.com/emicklei/go-restful)
|
||||
[](https://pkg.go.dev/github.com/emicklei/go-restful)
|
||||
[](https://pkg.go.dev/github.com/emicklei/go-restful/v3)
|
||||
[](https://codecov.io/gh/emicklei/go-restful)
|
||||
|
||||
- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
|
||||
|
|
|
|||
|
|
@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma
|
|||
return params
|
||||
}
|
||||
|
||||
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
|
||||
// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/
|
||||
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
|
||||
candidates := make([]*Route, 0, 8)
|
||||
for i, each := range routes {
|
||||
|
|
@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
|
|||
if trace {
|
||||
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
|
||||
}
|
||||
if httpRequest.ContentLength > 0 {
|
||||
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
|
||||
}
|
||||
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
|
||||
}
|
||||
|
||||
// accept
|
||||
|
|
@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
|
|||
for _, candidate := range previous {
|
||||
available = append(available, candidate.Produces...)
|
||||
}
|
||||
// if POST,PUT,PATCH without body
|
||||
method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
|
||||
if (method == http.MethodPost ||
|
||||
method == http.MethodPut ||
|
||||
method == http.MethodPatch) && (length == "" || length == "0") {
|
||||
return nil, NewError(
|
||||
http.StatusUnsupportedMediaType,
|
||||
fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
|
||||
)
|
||||
}
|
||||
return nil, NewError(
|
||||
http.StatusNotAcceptable,
|
||||
fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
|
||||
)
|
||||
fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")))
|
||||
}
|
||||
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
|
||||
return candidates[0], nil
|
||||
|
|
|
|||
|
|
@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
|
|||
}
|
||||
|
||||
// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
|
||||
// If the route does not specify Consumes then return true (*/*).
|
||||
// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE.
|
||||
func (r Route) matchesContentType(mimeTypes string) bool {
|
||||
|
||||
if len(r.Consumes) == 0 {
|
||||
|
|
|
|||
|
|
@ -1,30 +1,31 @@
|
|||
# CBOR Codec in Go
|
||||
|
||||
<!-- [](#cbor-library-in-go) -->
|
||||
<h1>CBOR Codec <a href="https://pkg.go.dev/github.com/fxamacker/cbor/v2"><img src="https://raw.githubusercontent.com/fxamacker/images/refs/heads/master/cbor/go-logo-blue.svg" alt="Go logo" style="height: 1em;" align="right"></a></h1>
|
||||
|
||||
[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
|
||||
|
||||
CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.
|
||||
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes[*](https://github.com/search?q=org%3Akubernetes%20fxamacker%2Fcbor&type=code), Let's Encrypt, Linux Foundation, Microsoft, Oasis Protocol, Red Hat[*](https://github.com/search?q=org%3Aopenshift+fxamacker%2Fcbor&type=code), Tailscale[*](https://github.com/search?q=org%3Atailscale+fxamacker%2Fcbor&type=code), Veraison[*](https://github.com/search?q=org%3Averaison+fxamacker%2Fcbor&type=code), [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
|
||||
|
||||
See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer.
|
||||
See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accepts user-specified buffer.
|
||||
|
||||
## fxamacker/cbor
|
||||
|
||||
[](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
|
||||
[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
|
||||
[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22)
|
||||
[](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
|
||||
[](#fuzzing-and-code-coverage)
|
||||
[](https://goreportcard.com/report/github.com/fxamacker/cbor)
|
||||
[](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage)
|
||||
|
||||
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
|
||||
|
||||
Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
|
||||
|
||||
API is mostly same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options.
|
||||
|
||||
Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
|
||||
|
||||
<details><summary>Highlights</summary><p/>
|
||||
<details><summary> 🔎 Highlights</summary><p/>
|
||||
|
||||
__🚀 Speed__
|
||||
|
||||
|
|
@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili
|
|||
|
||||
__🗜️ Data Size__
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
|
||||
Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) and field tag "-" automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.
|
||||
|
||||
__:jigsaw: Usability__
|
||||
|
||||
|
|
@ -58,164 +59,205 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.
|
|||
|
||||
`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.
|
||||
|
||||
By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||
Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
|
||||
|
||||
<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p/>
|
||||
> [!NOTE]
|
||||
> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`:
|
||||
>
|
||||
> | Codec | Speed (ns/op) | Memory | Allocs |
|
||||
> | :---- | ------------: | -----: | -----: |
|
||||
> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
|
||||
> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op |
|
||||
>
|
||||
> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
|
||||
>
|
||||
> <details><summary> 🔎 Benchmark details </summary><p/>
|
||||
>
|
||||
> Latest comparison for decoding CBOR data to Go `[]byte`:
|
||||
> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
|
||||
> - go test -bench=. -benchmem -count=20
|
||||
>
|
||||
> #### Prior comparisons
|
||||
>
|
||||
> | Codec | Speed (ns/op) | Memory | Allocs |
|
||||
> | :---- | ------------: | -----: | -----: |
|
||||
> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
|
||||
> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
|
||||
> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
|
||||
> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
|
||||
>
|
||||
> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
> - go1.19.6, linux/amd64, i5-13600K (DDR4)
|
||||
> - go test -bench=. -benchmem -count=20
|
||||
>
|
||||
> </details>
|
||||
|
||||
```Go
|
||||
// Example of encoding/gob having "fatal error: runtime: out of memory"
|
||||
// while decoding 181 bytes.
|
||||
package main
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
)
|
||||
In contrast, some codecs can crash or use excessive resources while decoding bad data.
|
||||
|
||||
// Example data is from https://github.com/golang/go/issues/24446
|
||||
// (shortened to 181 bytes).
|
||||
const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
|
||||
"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
|
||||
"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
|
||||
"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
|
||||
"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
|
||||
"303030303030303030303030303030303030303030303030303030303030" +
|
||||
"30"
|
||||
> [!WARNING]
|
||||
> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
|
||||
>
|
||||
> <details><summary> 🔎 gob fatal error (out of memory) 💥 decoding 181 bytes</summary><p/>
|
||||
>
|
||||
> ```Go
|
||||
> // Example of encoding/gob having "fatal error: runtime: out of memory"
|
||||
> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024).
|
||||
> package main
|
||||
> import (
|
||||
> "bytes"
|
||||
> "encoding/gob"
|
||||
> "encoding/hex"
|
||||
> "fmt"
|
||||
> )
|
||||
>
|
||||
> // Example data is from https://github.com/golang/go/issues/24446
|
||||
> // (shortened to 181 bytes).
|
||||
> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
|
||||
> "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
|
||||
> "860001013001ff860001013001ffb80000001eff850401010e3030303030" +
|
||||
> "30303030303030303001ff3000010c0104000016ffb70201010830303030" +
|
||||
> "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
|
||||
> "303030303030303030303030303030303030303030303030303030303030" +
|
||||
> "30"
|
||||
>
|
||||
> type X struct {
|
||||
> J *X
|
||||
> K map[string]int
|
||||
> }
|
||||
>
|
||||
> func main() {
|
||||
> raw, _ := hex.DecodeString(data)
|
||||
> decoder := gob.NewDecoder(bytes.NewReader(raw))
|
||||
>
|
||||
> var x X
|
||||
> decoder.Decode(&x) // fatal error: runtime: out of memory
|
||||
> fmt.Println("Decoding finished.")
|
||||
> }
|
||||
> ```
|
||||
>
|
||||
>
|
||||
> </details>
|
||||
|
||||
type X struct {
|
||||
J *X
|
||||
K map[string]int
|
||||
}
|
||||
### Smaller Encodings with Struct Tag Options
|
||||
|
||||
func main() {
|
||||
raw, _ := hex.DecodeString(data)
|
||||
decoder := gob.NewDecoder(bytes.NewReader(raw))
|
||||
Struct tags automatically reduce encoded size of structs and improve speed.
|
||||
|
||||
var x X
|
||||
decoder.Decode(&x) // fatal error: runtime: out of memory
|
||||
fmt.Println("Decoding finished.")
|
||||
}
|
||||
```
|
||||
We can write less code by using struct tag options:
|
||||
- `toarray`: encode without field names (decode back to original struct)
|
||||
- `keyasint`: encode field names as integers (decode back to original struct)
|
||||
- `omitempty`: omit empty field when encoding
|
||||
- `omitzero`: omit zero-value field when encoding
|
||||
|
||||
<hr/>
|
||||
As a special case, struct field tag "-" omits the field.
|
||||
|
||||
</details>
|
||||
|
||||
`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
|
||||
decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):
|
||||
|
||||
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||
| :---- | ------------: | -----: | -----: |
|
||||
| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
|
||||
| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |
|
||||
|
||||
<details><summary>Benchmark details</summary><p/>
|
||||
|
||||
Latest comparison used:
|
||||
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
|
||||
- go test -bench=. -benchmem -count=20
|
||||
|
||||
#### Prior comparisons
|
||||
|
||||
| Codec | Speed (ns/op) | Memory | Allocs |
|
||||
| :---- | ------------: | -----: | -----: |
|
||||
| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
|
||||
| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
|
||||
| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
|
||||
| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
|
||||
|
||||
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
|
||||
- go1.19.6, linux/amd64, i5-13600K (DDR4)
|
||||
- go test -bench=. -benchmem -count=20
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
### Smaller Encodings with Struct Tags
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||
|
||||
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||
|
||||
https://go.dev/play/p/YxwvfPdFQG2
|
||||
|
||||
```Go
|
||||
// Example encoding nested struct (with omitempty tag)
|
||||
// - encoding/json: 18 byte JSON
|
||||
// - fxamacker/cbor: 1 byte CBOR
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
type GrandChild struct {
|
||||
Quux int `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Child struct {
|
||||
Baz int `json:",omitempty"`
|
||||
Qux GrandChild `json:",omitempty"`
|
||||
}
|
||||
|
||||
type Parent struct {
|
||||
Foo Child `json:",omitempty"`
|
||||
Bar int `json:",omitempty"`
|
||||
}
|
||||
|
||||
func cb() {
|
||||
results, _ := cbor.Marshal(Parent{})
|
||||
fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||
|
||||
text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||
fmt.Println("DN: " + text)
|
||||
}
|
||||
|
||||
func js() {
|
||||
results, _ := json.Marshal(Parent{})
|
||||
fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||
|
||||
text := string(results) // JSON
|
||||
fmt.Println("JSON: " + text)
|
||||
}
|
||||
|
||||
func main() {
|
||||
cb()
|
||||
fmt.Println("-------------")
|
||||
js()
|
||||
}
|
||||
```
|
||||
|
||||
Output (DN is Diagnostic Notation):
|
||||
```
|
||||
hex(CBOR): a0
|
||||
DN: {}
|
||||
-------------
|
||||
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||
JSON: {"Foo":{"Qux":{}}}
|
||||
```
|
||||
|
||||
<hr/>
|
||||
|
||||
</details>
|
||||
|
||||
Example using different struct tags together:
|
||||
NOTE: When a struct uses `toarray`, the encoder will ignore `omitempty` and `omitzero` to prevent position of encoded array elements from changing. This allows decoder to match encoded elements to their Go struct field.
|
||||
|
||||

|
||||
|
||||
API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
|
||||
> [!NOTE]
|
||||
> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte!
|
||||
> - `encoding/json`: 18 bytes of JSON
|
||||
> - `fxamacker/cbor`: 1 byte of CBOR
|
||||
>
|
||||
> <details><summary> 🔎 Encoding 3-level nested Go struct with omitempty</summary><p/>
|
||||
>
|
||||
> https://go.dev/play/p/YxwvfPdFQG2
|
||||
>
|
||||
> ```Go
|
||||
> // Example encoding nested struct (with omitempty tag)
|
||||
> // - encoding/json: 18 byte JSON
|
||||
> // - fxamacker/cbor: 1 byte CBOR
|
||||
>
|
||||
> package main
|
||||
>
|
||||
> import (
|
||||
> "encoding/hex"
|
||||
> "encoding/json"
|
||||
> "fmt"
|
||||
>
|
||||
> "github.com/fxamacker/cbor/v2"
|
||||
> )
|
||||
>
|
||||
> type GrandChild struct {
|
||||
> Quux int `json:",omitempty"`
|
||||
> }
|
||||
>
|
||||
> type Child struct {
|
||||
> Baz int `json:",omitempty"`
|
||||
> Qux GrandChild `json:",omitempty"`
|
||||
> }
|
||||
>
|
||||
> type Parent struct {
|
||||
> Foo Child `json:",omitempty"`
|
||||
> Bar int `json:",omitempty"`
|
||||
> }
|
||||
>
|
||||
> func cb() {
|
||||
> results, _ := cbor.Marshal(Parent{})
|
||||
> fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
|
||||
>
|
||||
> text, _ := cbor.Diagnose(results) // Diagnostic Notation
|
||||
> fmt.Println("DN: " + text)
|
||||
> }
|
||||
>
|
||||
> func js() {
|
||||
> results, _ := json.Marshal(Parent{})
|
||||
> fmt.Println("hex(JSON): " + hex.EncodeToString(results))
|
||||
>
|
||||
> text := string(results) // JSON
|
||||
> fmt.Println("JSON: " + text)
|
||||
> }
|
||||
>
|
||||
> func main() {
|
||||
> cb()
|
||||
> fmt.Println("-------------")
|
||||
> js()
|
||||
> }
|
||||
> ```
|
||||
>
|
||||
> Output (DN is Diagnostic Notation):
|
||||
> ```
|
||||
> hex(CBOR): a0
|
||||
> DN: {}
|
||||
> -------------
|
||||
> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
|
||||
> JSON: {"Foo":{"Qux":{}}}
|
||||
> ```
|
||||
>
|
||||
> </details>
|
||||
|
||||
|
||||
## Quick Start
|
||||
|
||||
__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.
|
||||
|
||||
> [!TIP]
|
||||
>
|
||||
> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta).
|
||||
>
|
||||
> <details><summary> 🔎 More about tinygo feature branch</summary>
|
||||
>
|
||||
> ### Tinygo
|
||||
>
|
||||
> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go).
|
||||
>
|
||||
> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
|
||||
>
|
||||
> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet.
|
||||
>
|
||||
> Changes in this feature branch only affect tinygo compiled software. Summary of changes:
|
||||
> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33.
|
||||
> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature.
|
||||
> - encoding error message can be different when encoding function type.
|
||||
>
|
||||
> Related tinygo issues:
|
||||
> - https://github.com/tinygo-org/tinygo/issues/4277
|
||||
> - https://github.com/tinygo-org/tinygo/issues/4458
|
||||
>
|
||||
> </details>
|
||||
|
||||
|
||||
### Key Points
|
||||
|
||||
This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).
|
||||
|
|
@ -252,16 +294,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
|
|||
// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
|
||||
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text
|
||||
|
||||
// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
|
||||
// but new funcs UnmarshalFirst and DiagnoseFirst do not.
|
||||
// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but
|
||||
// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes.
|
||||
```
|
||||
|
||||
__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.
|
||||
|
||||
- Different CBOR libraries may use different default settings.
|
||||
- CBOR-based formats or protocols usually require specific settings.
|
||||
|
||||
For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
|
||||
> [!IMPORTANT]
|
||||
> CBOR settings allow trade-offs between speed, security, encoding size, etc.
|
||||
>
|
||||
> - Different CBOR libraries may use different default settings.
|
||||
> - CBOR-based formats or protocols usually require specific settings.
|
||||
>
|
||||
> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.
|
||||
|
||||
### Presets
|
||||
|
||||
|
|
@ -312,9 +355,63 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
|
|||
|
||||
### Struct Tags
|
||||
|
||||
Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
|
||||
Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs.
|
||||
|
||||
<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||
As a special case, struct field tag "-" omits the field.
|
||||
|
||||
<details><summary> 🔎 Example encoding with struct field tag "-"</summary><p/>
|
||||
|
||||
https://go.dev/play/p/aWEIFxd7InX
|
||||
|
||||
```Go
|
||||
// https://github.com/fxamacker/cbor/issues/652
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
// The `cbor:"-"` tag omits the Type field when encoding to CBOR.
|
||||
type Entity struct {
|
||||
_ struct{} `cbor:",toarray"`
|
||||
ID uint64 `json:"id"`
|
||||
Type string `cbor:"-" json:"typeOf"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
entity := Entity{
|
||||
ID: 1,
|
||||
Type: "int64",
|
||||
Name: "Identifier",
|
||||
}
|
||||
|
||||
c, _ := cbor.Marshal(entity)
|
||||
diag, _ := cbor.Diagnose(c)
|
||||
fmt.Printf("CBOR in hex: %x\n", c)
|
||||
fmt.Printf("CBOR in edn: %s\n", diag)
|
||||
|
||||
j, _ := json.Marshal(entity)
|
||||
fmt.Printf("JSON: %s\n", string(j))
|
||||
|
||||
fmt.Printf("JSON encoding is %d bytes\n", len(j))
|
||||
fmt.Printf("CBOR encoding is %d bytes\n", len(c))
|
||||
|
||||
// Output:
|
||||
// CBOR in hex: 82016a4964656e746966696572
|
||||
// CBOR in edn: [1, "Identifier"]
|
||||
// JSON: {"id":1,"typeOf":"int64","name":"Identifier"}
|
||||
// JSON encoding is 45 bytes
|
||||
// CBOR encoding is 13 bytes
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
<details><summary> 🔎 Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>
|
||||
|
||||
https://go.dev/play/p/YxwvfPdFQG2
|
||||
|
||||
|
|
@ -382,13 +479,13 @@ JSON: {"Foo":{"Qux":{}}}
|
|||
|
||||
</details>
|
||||
|
||||
<details><summary>Example using several struct tags</summary><p/>
|
||||
<details><summary> 🔎 Example using struct tag options</summary><p/>
|
||||
|
||||

|
||||
|
||||
</details>
|
||||
|
||||
Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
|
||||
Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.
|
||||
|
||||
### CBOR Tags
|
||||
|
||||
|
|
@ -404,7 +501,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
|
|||
|
||||
`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.
|
||||
|
||||
<details><summary>Example using TagSet and TagOptions</summary><p/>
|
||||
<details><summary> 🔎 Example using TagSet and TagOptions</summary><p/>
|
||||
|
||||
```go
|
||||
// Use signedCWT struct defined in "Decoding CWT" example.
|
||||
|
|
@ -430,16 +527,149 @@ if err := dm.Unmarshal(data, &v); err != nil {
|
|||
em, _ := cbor.EncOptions{}.EncModeWithTags(tags)
|
||||
|
||||
// Marshal signedCWT with tag number.
|
||||
if data, err := cbor.Marshal(v); err != nil {
|
||||
if data, err := em.Marshal(v); err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
👉 `fxamacker/cbor` allows user apps to use almost any current or future CBOR tag number by implementing `cbor.Marshaler` and `cbor.Unmarshaler` interfaces.
|
||||
|
||||
Basically, `MarshalCBOR` and `UnmarshalCBOR` functions can be implemented by user apps and those functions will automatically be called by this CBOR codec's `Marshal`, `Unmarshal`, etc.
|
||||
|
||||
The following [example](https://github.com/fxamacker/cbor/blob/master/example_embedded_json_tag_for_cbor_test.go) shows how to encode and decode a tagged CBOR data item with tag number 262. The tag content is a JSON object "embedded" as a CBOR byte string (major type 2).
|
||||
|
||||
<details><summary> 🔎 Example using Embedded JSON Tag for CBOR (tag 262)</summary>
|
||||
|
||||
```go
|
||||
// https://github.com/fxamacker/cbor/issues/657
|
||||
|
||||
package cbor_test
|
||||
|
||||
// NOTE: RFC 8949 does not mention tag number 262. IANA assigned
|
||||
// CBOR tag number 262 as "Embedded JSON Object" specified by the
|
||||
// document Embedded JSON Tag for CBOR:
|
||||
//
|
||||
// "Tag 262 can be applied to a byte string (major type 2) to indicate
|
||||
// that the byte string is a JSON Object. The length of the byte string
|
||||
// indicates the content."
|
||||
//
|
||||
// For more info, see Embedded JSON Tag for CBOR at:
|
||||
// https://github.com/toravir/CBOR-Tag-Specs/blob/master/embeddedJSON.md
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
// cborTagNumForEmbeddedJSON is the CBOR tag number 262.
|
||||
const cborTagNumForEmbeddedJSON = 262
|
||||
|
||||
// EmbeddedJSON represents a Go value to be encoded as a tagged CBOR data item
|
||||
// with tag number 262 and the tag content is a JSON object "embedded" as a
|
||||
// CBOR byte string (major type 2).
|
||||
type EmbeddedJSON struct {
|
||||
any
|
||||
}
|
||||
|
||||
func NewEmbeddedJSON(val any) EmbeddedJSON {
|
||||
return EmbeddedJSON{val}
|
||||
}
|
||||
|
||||
// MarshalCBOR encodes EmbeddedJSON to a tagged CBOR data item with the
|
||||
// tag number 262 and the tag content is a JSON object that is
|
||||
// "embedded" as a CBOR byte string.
|
||||
func (v EmbeddedJSON) MarshalCBOR() ([]byte, error) {
|
||||
// Encode v to JSON object.
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create cbor.Tag representing a tagged CBOR data item.
|
||||
tag := cbor.Tag{
|
||||
Number: cborTagNumForEmbeddedJSON,
|
||||
Content: data,
|
||||
}
|
||||
|
||||
// Marshal to a tagged CBOR data item.
|
||||
return cbor.Marshal(tag)
|
||||
}
|
||||
|
||||
// UnmarshalCBOR decodes a tagged CBOR data item to EmbeddedJSON.
|
||||
// The byte slice provided to this function must contain a single
|
||||
// tagged CBOR data item with the tag number 262 and tag content
|
||||
// must be a JSON object "embedded" as a CBOR byte string.
|
||||
func (v *EmbeddedJSON) UnmarshalCBOR(b []byte) error {
|
||||
// Unmarshal tagged CBOR data item.
|
||||
var tag cbor.Tag
|
||||
if err := cbor.Unmarshal(b, &tag); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check tag number.
|
||||
if tag.Number != cborTagNumForEmbeddedJSON {
|
||||
return fmt.Errorf("got tag number %d, expect tag number %d", tag.Number, cborTagNumForEmbeddedJSON)
|
||||
}
|
||||
|
||||
// Check tag content.
|
||||
jsonData, isByteString := tag.Content.([]byte)
|
||||
if !isByteString {
|
||||
return fmt.Errorf("got tag content type %T, expect tag content []byte", tag.Content)
|
||||
}
|
||||
|
||||
// Unmarshal JSON object.
|
||||
return json.Unmarshal(jsonData, v)
|
||||
}
|
||||
|
||||
// MarshalJSON encodes EmbeddedJSON to a JSON object.
|
||||
func (v EmbeddedJSON) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(v.any)
|
||||
}
|
||||
|
||||
// UnmarshalJSON decodes a JSON object.
|
||||
func (v *EmbeddedJSON) UnmarshalJSON(b []byte) error {
|
||||
dec := json.NewDecoder(bytes.NewReader(b))
|
||||
dec.UseNumber()
|
||||
return dec.Decode(&v.any)
|
||||
}
|
||||
|
||||
func Example_embeddedJSONTagForCBOR() {
|
||||
value := NewEmbeddedJSON(map[string]any{
|
||||
"name": "gopher",
|
||||
"id": json.Number("42"),
|
||||
})
|
||||
|
||||
data, err := cbor.Marshal(value)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("cbor: %x\n", data)
|
||||
|
||||
var v EmbeddedJSON
|
||||
err = cbor.Unmarshal(data, &v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("%+v\n", v.any)
|
||||
for k, v := range v.any.(map[string]any) {
|
||||
fmt.Printf(" %s: %v (%T)\n", k, v, v)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
### Functions and Interfaces
|
||||
|
||||
<details><summary>Functions and interfaces at a glance</summary><p/>
|
||||
<details><summary> 🔎 Functions and interfaces at a glance</summary><p/>
|
||||
|
||||
Common functions with same API as `encoding/json`:
|
||||
- `Marshal`, `Unmarshal`
|
||||
|
|
@ -453,7 +683,7 @@ because RFC 8949 treats CBOR data item with remaining bytes as malformed.
|
|||
Other useful functions:
|
||||
- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
|
||||
- `UnmarshalFirst` decodes first CBOR data item and return any remaining bytes.
|
||||
- `Wellformed` returns true if the the CBOR data item is well-formed.
|
||||
- `Wellformed` returns true if the CBOR data item is well-formed.
|
||||
|
||||
Interfaces identical or comparable to Go `encoding` packages include:
|
||||
`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.
|
||||
|
|
@ -472,15 +702,28 @@ Default limits may need to be increased for systems handling very large data (e.
|
|||
|
||||
## Status
|
||||
|
||||
v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
|
||||
[v2.9.0](https://github.com/fxamacker/cbor/releases/tag/v2.9.0) (Jul 13, 2025) improved interoperability/transcoding between CBOR & JSON, refactored tests, and improved docs.
|
||||
- Add opt-in support for `encoding.TextMarshaler` and `encoding.TextUnmarshaler` to encode and decode from CBOR text string.
|
||||
- Add opt-in support for `json.Marshaler` and `json.Unmarshaler` via user-provided transcoding function.
|
||||
- Update docs for TimeMode, Tag, RawTag, and add example for Embedded JSON Tag for CBOR.
|
||||
|
||||
v2.9.0 passed fuzz tests and is production quality.
|
||||
|
||||
The minimum version of Go required to build:
|
||||
- v2.8.0 and newer releases require go 1.20+.
|
||||
- v2.7.1 and older releases require go 1.17+.
|
||||
|
||||
For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
|
||||
|
||||
### Prior Release
|
||||
### Prior Releases
|
||||
|
||||
[v2.8.0](https://github.com/fxamacker/cbor/releases/tag/v2.8.0) (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality.
|
||||
|
||||
[v2.7.0](https://github.com/fxamacker/cbor/releases/tag/v2.7.0) (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
|
||||
|
||||
[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
|
||||
|
||||
v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||
[v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||
|
||||
__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
|
||||
|
||||
|
|
@ -489,7 +732,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0
|
|||
See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
|
||||
|
||||
<!--
|
||||
<details><summary>👉 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
|
||||
<details><summary> 🔎 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
|
||||
|
||||
TODO: Update to v2.4.0 vs 2.5.0 (not beta2).
|
||||
|
||||
|
|
@ -549,9 +792,9 @@ geomean 2.782
|
|||
|
||||
## Who uses fxamacker/cbor
|
||||
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Confidential Computing Consortium, ConsenSys, EdgeX Foundry, F5, Flow Foundation, Fraunhofer‑AISEC, IBM, Kubernetes, Let's Encrypt (ISRG), Linaro, Linux Foundation, Matrix.org, Microsoft, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Red Hat OpenShift, Smallstep, Tailscale, Taurus SA, TIBCO, Veraison, and others.
|
||||
|
||||
`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
|
||||
`fxamacker/cbor` passed multiple confidential security assessments in 2022. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) assessed a subset of fxamacker/cbor v2.4.
|
||||
|
||||
## Standards
|
||||
|
||||
|
|
@ -588,7 +831,7 @@ By default, decoder treats time values of floating-point NaN and Infinity as if
|
|||
__Click to expand topic:__
|
||||
|
||||
<details>
|
||||
<summary>Duplicate Map Keys</summary><p>
|
||||
<summary> 🔎 Duplicate Map Keys</summary><p>
|
||||
|
||||
This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.
|
||||
|
||||
|
|
@ -601,7 +844,7 @@ APF suffix means "Allow Partial Fill" so the destination map or struct can conta
|
|||
</details>
|
||||
|
||||
<details>
|
||||
<summary>Tag Validity</summary><p>
|
||||
<summary> 🔎 Tag Validity</summary><p>
|
||||
|
||||
This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):
|
||||
|
||||
|
|
|
|||
|
|
@ -38,11 +38,38 @@ func (bs ByteString) MarshalCBOR() ([]byte, error) {
|
|||
|
||||
// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
|
||||
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
|
||||
//
|
||||
// Deprecated: No longer used by this codec; kept for compatibility
|
||||
// with user apps that directly call this function.
|
||||
func (bs *ByteString) UnmarshalCBOR(data []byte) error {
|
||||
if bs == nil {
|
||||
return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
d := decoder{data: data, dm: defaultDecMode}
|
||||
|
||||
// Check well-formedness of CBOR data item.
|
||||
// ByteString.UnmarshalCBOR() is exported, so
|
||||
// the codec needs to support same behavior for:
|
||||
// - Unmarshal(data, *ByteString)
|
||||
// - ByteString.UnmarshalCBOR(data)
|
||||
err := d.wellformed(false, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return bs.unmarshalCBOR(data)
|
||||
}
|
||||
|
||||
// unmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
|
||||
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
|
||||
// This function assumes data is well-formed, and does not perform bounds checking.
|
||||
// This function is called by Unmarshal().
|
||||
func (bs *ByteString) unmarshalCBOR(data []byte) error {
|
||||
if bs == nil {
|
||||
return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
// Decoding CBOR null and CBOR undefined to ByteString resets data.
|
||||
// This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
|
||||
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ import (
|
|||
type encodeFuncs struct {
|
||||
ef encodeFunc
|
||||
ief isEmptyFunc
|
||||
izf isZeroFunc
|
||||
}
|
||||
|
||||
var (
|
||||
|
|
@ -31,10 +32,12 @@ type specialType int
|
|||
const (
|
||||
specialTypeNone specialType = iota
|
||||
specialTypeUnmarshalerIface
|
||||
specialTypeUnexportedUnmarshalerIface
|
||||
specialTypeEmptyIface
|
||||
specialTypeIface
|
||||
specialTypeTag
|
||||
specialTypeTime
|
||||
specialTypeJSONUnmarshalerIface
|
||||
)
|
||||
|
||||
type typeInfo struct {
|
||||
|
|
@ -50,7 +53,7 @@ type typeInfo struct {
|
|||
func newTypeInfo(t reflect.Type) *typeInfo {
|
||||
tInfo := typeInfo{typ: t, kind: t.Kind()}
|
||||
|
||||
for t.Kind() == reflect.Ptr {
|
||||
for t.Kind() == reflect.Pointer {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
|
|
@ -69,8 +72,12 @@ func newTypeInfo(t reflect.Type) *typeInfo {
|
|||
tInfo.spclType = specialTypeTag
|
||||
} else if t == typeTime {
|
||||
tInfo.spclType = specialTypeTime
|
||||
} else if reflect.PtrTo(t).Implements(typeUnmarshaler) {
|
||||
} else if reflect.PointerTo(t).Implements(typeUnexportedUnmarshaler) {
|
||||
tInfo.spclType = specialTypeUnexportedUnmarshalerIface
|
||||
} else if reflect.PointerTo(t).Implements(typeUnmarshaler) {
|
||||
tInfo.spclType = specialTypeUnmarshalerIface
|
||||
} else if reflect.PointerTo(t).Implements(typeJSONUnmarshaler) {
|
||||
tInfo.spclType = specialTypeJSONUnmarshalerIface
|
||||
}
|
||||
|
||||
switch k {
|
||||
|
|
@ -237,7 +244,7 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
|
|||
e := getEncodeBuffer()
|
||||
for i := 0; i < len(flds); i++ {
|
||||
// Get field's encodeFunc
|
||||
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
|
||||
flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
|
||||
if flds[i].ef == nil {
|
||||
err = &UnsupportedTypeError{t}
|
||||
break
|
||||
|
|
@ -321,7 +328,7 @@ func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
|
|||
func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
|
||||
for i := 0; i < len(flds); i++ {
|
||||
// Get field's encodeFunc
|
||||
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
|
||||
flds[i].ef, flds[i].ief, flds[i].izf = getEncodeFunc(flds[i].typ)
|
||||
if flds[i].ef == nil {
|
||||
structType := &encodingStructType{err: &UnsupportedTypeError{t}}
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
|
|
@ -337,14 +344,14 @@ func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructT
|
|||
return structType, structType.err
|
||||
}
|
||||
|
||||
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) {
|
||||
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc, isZeroFunc) {
|
||||
if v, _ := encodeFuncCache.Load(t); v != nil {
|
||||
fs := v.(encodeFuncs)
|
||||
return fs.ef, fs.ief
|
||||
return fs.ef, fs.ief, fs.izf
|
||||
}
|
||||
ef, ief := getEncodeFuncInternal(t)
|
||||
encodeFuncCache.Store(t, encodeFuncs{ef, ief})
|
||||
return ef, ief
|
||||
ef, ief, izf := getEncodeFuncInternal(t)
|
||||
encodeFuncCache.Store(t, encodeFuncs{ef, ief, izf})
|
||||
return ef, ief, izf
|
||||
}
|
||||
|
||||
func getTypeInfo(t reflect.Type) *typeInfo {
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ package cbor
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
|
|
@ -180,3 +181,11 @@ func validBuiltinTag(tagNum uint64, contentHead byte) error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Transcoder is a scheme for transcoding a single CBOR encoded data item to or from a different
|
||||
// data format.
|
||||
type Transcoder interface {
|
||||
// Transcode reads the data item in its source format from a Reader and writes a
|
||||
// corresponding representation in its destination format to a Writer.
|
||||
Transcode(dst io.Writer, src io.Reader) error
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@
|
|||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
|
|
@ -94,7 +95,7 @@ import (
|
|||
//
|
||||
// To unmarshal CBOR null (0xf6) and undefined (0xf7) values into a
|
||||
// slice/map/pointer, Unmarshal sets Go value to nil. Because null is often
|
||||
// used to mean "not present", unmarshalling CBOR null and undefined value
|
||||
// used to mean "not present", unmarshaling CBOR null and undefined value
|
||||
// into any other Go type has no effect and returns no error.
|
||||
//
|
||||
// Unmarshal supports CBOR tag 55799 (self-describe CBOR), tag 0 and 1 (time),
|
||||
|
|
@ -104,7 +105,7 @@ import (
|
|||
// if there are any remaining bytes following the first valid CBOR data item.
|
||||
// See UnmarshalFirst, if you want to unmarshal only the first
|
||||
// CBOR data item without ExtraneousDataError caused by remaining bytes.
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
func Unmarshal(data []byte, v any) error {
|
||||
return defaultDecMode.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
|
|
@ -114,7 +115,7 @@ func Unmarshal(data []byte, v interface{}) error {
|
|||
// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
|
||||
//
|
||||
// See the documentation for Unmarshal for details.
|
||||
func UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
|
||||
func UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
|
||||
return defaultDecMode.UnmarshalFirst(data, v)
|
||||
}
|
||||
|
||||
|
|
@ -151,6 +152,10 @@ type Unmarshaler interface {
|
|||
UnmarshalCBOR([]byte) error
|
||||
}
|
||||
|
||||
type unmarshaler interface {
|
||||
unmarshalCBOR([]byte) error
|
||||
}
|
||||
|
||||
// InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
|
||||
type InvalidUnmarshalError struct {
|
||||
s string
|
||||
|
|
@ -193,12 +198,12 @@ func (e *InvalidMapKeyTypeError) Error() string {
|
|||
|
||||
// DupMapKeyError describes detected duplicate map key in CBOR map.
|
||||
type DupMapKeyError struct {
|
||||
Key interface{}
|
||||
Key any
|
||||
Index int
|
||||
}
|
||||
|
||||
func (e *DupMapKeyError) Error() string {
|
||||
return fmt.Sprintf("cbor: found duplicate map key \"%v\" at map element index %d", e.Key, e.Index)
|
||||
return fmt.Sprintf("cbor: found duplicate map key %#v at map element index %d", e.Key, e.Index)
|
||||
}
|
||||
|
||||
// UnknownFieldError describes detected unknown field in CBOR map when decoding to Go struct.
|
||||
|
|
@ -383,7 +388,7 @@ const (
|
|||
// - return UnmarshalTypeError if value doesn't fit into int64
|
||||
IntDecConvertSignedOrFail
|
||||
|
||||
// IntDecConvertSigned affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
|
||||
// IntDecConvertSignedOrBigInt affects how CBOR integers (major type 0 and 1) decode to Go interface{}.
|
||||
// It makes CBOR integers (major type 0 and 1) decode to:
|
||||
// - int64 if value fits
|
||||
// - big.Int or *big.Int (see BigIntDecMode) if value doesn't fit into int64
|
||||
|
|
@ -489,11 +494,11 @@ type BigIntDecMode int
|
|||
|
||||
const (
|
||||
// BigIntDecodeValue makes CBOR bignum decode to big.Int (instead of *big.Int)
|
||||
// when unmarshalling into a Go interface{}.
|
||||
// when unmarshaling into a Go interface{}.
|
||||
BigIntDecodeValue BigIntDecMode = iota
|
||||
|
||||
// BigIntDecodePointer makes CBOR bignum decode to *big.Int when
|
||||
// unmarshalling into a Go interface{}.
|
||||
// unmarshaling into a Go interface{}.
|
||||
BigIntDecodePointer
|
||||
|
||||
maxBigIntDecMode
|
||||
|
|
@ -745,6 +750,25 @@ func (bum BinaryUnmarshalerMode) valid() bool {
|
|||
return bum >= 0 && bum < maxBinaryUnmarshalerMode
|
||||
}
|
||||
|
||||
// TextUnmarshalerMode specifies how to decode into types that implement
|
||||
// encoding.TextUnmarshaler.
|
||||
type TextUnmarshalerMode int
|
||||
|
||||
const (
|
||||
// TextUnmarshalerNone does not recognize TextUnmarshaler implementations during decode.
|
||||
TextUnmarshalerNone TextUnmarshalerMode = iota
|
||||
|
||||
// TextUnmarshalerTextString will invoke UnmarshalText on the contents of a CBOR text
|
||||
// string when decoding into a value that implements TextUnmarshaler.
|
||||
TextUnmarshalerTextString
|
||||
|
||||
maxTextUnmarshalerMode
|
||||
)
|
||||
|
||||
func (tum TextUnmarshalerMode) valid() bool {
|
||||
return tum >= 0 && tum < maxTextUnmarshalerMode
|
||||
}
|
||||
|
||||
// DecOptions specifies decoding options.
|
||||
type DecOptions struct {
|
||||
// DupMapKey specifies whether to enforce duplicate map key.
|
||||
|
|
@ -793,7 +817,7 @@ type DecOptions struct {
|
|||
// TagsMd specifies whether to allow CBOR tags (major type 6).
|
||||
TagsMd TagsMode
|
||||
|
||||
// IntDec specifies which Go integer type (int64 or uint64) to use
|
||||
// IntDec specifies which Go integer type (int64, uint64, or [big.Int]) to use
|
||||
// when decoding CBOR int (major type 0 and 1) to Go interface{}.
|
||||
IntDec IntDecMode
|
||||
|
||||
|
|
@ -807,7 +831,7 @@ type DecOptions struct {
|
|||
ExtraReturnErrors ExtraDecErrorCond
|
||||
|
||||
// DefaultMapType specifies Go map type to create and decode to
|
||||
// when unmarshalling CBOR into an empty interface value.
|
||||
// when unmarshaling CBOR into an empty interface value.
|
||||
// By default, unmarshal uses map[interface{}]interface{}.
|
||||
DefaultMapType reflect.Type
|
||||
|
||||
|
|
@ -879,6 +903,15 @@ type DecOptions struct {
|
|||
// BinaryUnmarshaler specifies how to decode into types that implement
|
||||
// encoding.BinaryUnmarshaler.
|
||||
BinaryUnmarshaler BinaryUnmarshalerMode
|
||||
|
||||
// TextUnmarshaler specifies how to decode into types that implement
|
||||
// encoding.TextUnmarshaler.
|
||||
TextUnmarshaler TextUnmarshalerMode
|
||||
|
||||
// JSONUnmarshalerTranscoder sets the transcoding scheme used to unmarshal types that
|
||||
// implement json.Unmarshaler but do not also implement cbor.Unmarshaler. If nil, decoding
|
||||
// behavior is not influenced by whether or not a type implements json.Unmarshaler.
|
||||
JSONUnmarshalerTranscoder Transcoder
|
||||
}
|
||||
|
||||
// DecMode returns DecMode with immutable options and no tags (safe for concurrency).
|
||||
|
|
@ -1091,33 +1124,39 @@ func (opts DecOptions) decMode() (*decMode, error) { //nolint:gocritic // ignore
|
|||
return nil, errors.New("cbor: invalid BinaryUnmarshaler " + strconv.Itoa(int(opts.BinaryUnmarshaler)))
|
||||
}
|
||||
|
||||
if !opts.TextUnmarshaler.valid() {
|
||||
return nil, errors.New("cbor: invalid TextUnmarshaler " + strconv.Itoa(int(opts.TextUnmarshaler)))
|
||||
}
|
||||
|
||||
dm := decMode{
|
||||
dupMapKey: opts.DupMapKey,
|
||||
timeTag: opts.TimeTag,
|
||||
maxNestedLevels: opts.MaxNestedLevels,
|
||||
maxArrayElements: opts.MaxArrayElements,
|
||||
maxMapPairs: opts.MaxMapPairs,
|
||||
indefLength: opts.IndefLength,
|
||||
tagsMd: opts.TagsMd,
|
||||
intDec: opts.IntDec,
|
||||
mapKeyByteString: opts.MapKeyByteString,
|
||||
extraReturnErrors: opts.ExtraReturnErrors,
|
||||
defaultMapType: opts.DefaultMapType,
|
||||
utf8: opts.UTF8,
|
||||
fieldNameMatching: opts.FieldNameMatching,
|
||||
bigIntDec: opts.BigIntDec,
|
||||
defaultByteStringType: opts.DefaultByteStringType,
|
||||
byteStringToString: opts.ByteStringToString,
|
||||
fieldNameByteString: opts.FieldNameByteString,
|
||||
unrecognizedTagToAny: opts.UnrecognizedTagToAny,
|
||||
timeTagToAny: opts.TimeTagToAny,
|
||||
simpleValues: simpleValues,
|
||||
nanDec: opts.NaN,
|
||||
infDec: opts.Inf,
|
||||
byteStringToTime: opts.ByteStringToTime,
|
||||
byteStringExpectedFormat: opts.ByteStringExpectedFormat,
|
||||
bignumTag: opts.BignumTag,
|
||||
binaryUnmarshaler: opts.BinaryUnmarshaler,
|
||||
dupMapKey: opts.DupMapKey,
|
||||
timeTag: opts.TimeTag,
|
||||
maxNestedLevels: opts.MaxNestedLevels,
|
||||
maxArrayElements: opts.MaxArrayElements,
|
||||
maxMapPairs: opts.MaxMapPairs,
|
||||
indefLength: opts.IndefLength,
|
||||
tagsMd: opts.TagsMd,
|
||||
intDec: opts.IntDec,
|
||||
mapKeyByteString: opts.MapKeyByteString,
|
||||
extraReturnErrors: opts.ExtraReturnErrors,
|
||||
defaultMapType: opts.DefaultMapType,
|
||||
utf8: opts.UTF8,
|
||||
fieldNameMatching: opts.FieldNameMatching,
|
||||
bigIntDec: opts.BigIntDec,
|
||||
defaultByteStringType: opts.DefaultByteStringType,
|
||||
byteStringToString: opts.ByteStringToString,
|
||||
fieldNameByteString: opts.FieldNameByteString,
|
||||
unrecognizedTagToAny: opts.UnrecognizedTagToAny,
|
||||
timeTagToAny: opts.TimeTagToAny,
|
||||
simpleValues: simpleValues,
|
||||
nanDec: opts.NaN,
|
||||
infDec: opts.Inf,
|
||||
byteStringToTime: opts.ByteStringToTime,
|
||||
byteStringExpectedFormat: opts.ByteStringExpectedFormat,
|
||||
bignumTag: opts.BignumTag,
|
||||
binaryUnmarshaler: opts.BinaryUnmarshaler,
|
||||
textUnmarshaler: opts.TextUnmarshaler,
|
||||
jsonUnmarshalerTranscoder: opts.JSONUnmarshalerTranscoder,
|
||||
}
|
||||
|
||||
return &dm, nil
|
||||
|
|
@ -1130,7 +1169,7 @@ type DecMode interface {
|
|||
// Unmarshal returns an error.
|
||||
//
|
||||
// See the documentation for Unmarshal for details.
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
Unmarshal(data []byte, v any) error
|
||||
|
||||
// UnmarshalFirst parses the first CBOR data item into the value pointed to by v
|
||||
// using the decoding mode. Any remaining bytes are returned in rest.
|
||||
|
|
@ -1138,7 +1177,7 @@ type DecMode interface {
|
|||
// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
|
||||
//
|
||||
// See the documentation for Unmarshal for details.
|
||||
UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error)
|
||||
UnmarshalFirst(data []byte, v any) (rest []byte, err error)
|
||||
|
||||
// Valid checks whether data is a well-formed encoded CBOR data item and
|
||||
// that it complies with configurable restrictions such as MaxNestedLevels,
|
||||
|
|
@ -1170,33 +1209,35 @@ type DecMode interface {
|
|||
}
|
||||
|
||||
type decMode struct {
|
||||
tags tagProvider
|
||||
dupMapKey DupMapKeyMode
|
||||
timeTag DecTagMode
|
||||
maxNestedLevels int
|
||||
maxArrayElements int
|
||||
maxMapPairs int
|
||||
indefLength IndefLengthMode
|
||||
tagsMd TagsMode
|
||||
intDec IntDecMode
|
||||
mapKeyByteString MapKeyByteStringMode
|
||||
extraReturnErrors ExtraDecErrorCond
|
||||
defaultMapType reflect.Type
|
||||
utf8 UTF8Mode
|
||||
fieldNameMatching FieldNameMatchingMode
|
||||
bigIntDec BigIntDecMode
|
||||
defaultByteStringType reflect.Type
|
||||
byteStringToString ByteStringToStringMode
|
||||
fieldNameByteString FieldNameByteStringMode
|
||||
unrecognizedTagToAny UnrecognizedTagToAnyMode
|
||||
timeTagToAny TimeTagToAnyMode
|
||||
simpleValues *SimpleValueRegistry
|
||||
nanDec NaNMode
|
||||
infDec InfMode
|
||||
byteStringToTime ByteStringToTimeMode
|
||||
byteStringExpectedFormat ByteStringExpectedFormatMode
|
||||
bignumTag BignumTagMode
|
||||
binaryUnmarshaler BinaryUnmarshalerMode
|
||||
tags tagProvider
|
||||
dupMapKey DupMapKeyMode
|
||||
timeTag DecTagMode
|
||||
maxNestedLevels int
|
||||
maxArrayElements int
|
||||
maxMapPairs int
|
||||
indefLength IndefLengthMode
|
||||
tagsMd TagsMode
|
||||
intDec IntDecMode
|
||||
mapKeyByteString MapKeyByteStringMode
|
||||
extraReturnErrors ExtraDecErrorCond
|
||||
defaultMapType reflect.Type
|
||||
utf8 UTF8Mode
|
||||
fieldNameMatching FieldNameMatchingMode
|
||||
bigIntDec BigIntDecMode
|
||||
defaultByteStringType reflect.Type
|
||||
byteStringToString ByteStringToStringMode
|
||||
fieldNameByteString FieldNameByteStringMode
|
||||
unrecognizedTagToAny UnrecognizedTagToAnyMode
|
||||
timeTagToAny TimeTagToAnyMode
|
||||
simpleValues *SimpleValueRegistry
|
||||
nanDec NaNMode
|
||||
infDec InfMode
|
||||
byteStringToTime ByteStringToTimeMode
|
||||
byteStringExpectedFormat ByteStringExpectedFormatMode
|
||||
bignumTag BignumTagMode
|
||||
binaryUnmarshaler BinaryUnmarshalerMode
|
||||
textUnmarshaler TextUnmarshalerMode
|
||||
jsonUnmarshalerTranscoder Transcoder
|
||||
}
|
||||
|
||||
var defaultDecMode, _ = DecOptions{}.decMode()
|
||||
|
|
@ -1211,32 +1252,34 @@ func (dm *decMode) DecOptions() DecOptions {
|
|||
}
|
||||
|
||||
return DecOptions{
|
||||
DupMapKey: dm.dupMapKey,
|
||||
TimeTag: dm.timeTag,
|
||||
MaxNestedLevels: dm.maxNestedLevels,
|
||||
MaxArrayElements: dm.maxArrayElements,
|
||||
MaxMapPairs: dm.maxMapPairs,
|
||||
IndefLength: dm.indefLength,
|
||||
TagsMd: dm.tagsMd,
|
||||
IntDec: dm.intDec,
|
||||
MapKeyByteString: dm.mapKeyByteString,
|
||||
ExtraReturnErrors: dm.extraReturnErrors,
|
||||
DefaultMapType: dm.defaultMapType,
|
||||
UTF8: dm.utf8,
|
||||
FieldNameMatching: dm.fieldNameMatching,
|
||||
BigIntDec: dm.bigIntDec,
|
||||
DefaultByteStringType: dm.defaultByteStringType,
|
||||
ByteStringToString: dm.byteStringToString,
|
||||
FieldNameByteString: dm.fieldNameByteString,
|
||||
UnrecognizedTagToAny: dm.unrecognizedTagToAny,
|
||||
TimeTagToAny: dm.timeTagToAny,
|
||||
SimpleValues: simpleValues,
|
||||
NaN: dm.nanDec,
|
||||
Inf: dm.infDec,
|
||||
ByteStringToTime: dm.byteStringToTime,
|
||||
ByteStringExpectedFormat: dm.byteStringExpectedFormat,
|
||||
BignumTag: dm.bignumTag,
|
||||
BinaryUnmarshaler: dm.binaryUnmarshaler,
|
||||
DupMapKey: dm.dupMapKey,
|
||||
TimeTag: dm.timeTag,
|
||||
MaxNestedLevels: dm.maxNestedLevels,
|
||||
MaxArrayElements: dm.maxArrayElements,
|
||||
MaxMapPairs: dm.maxMapPairs,
|
||||
IndefLength: dm.indefLength,
|
||||
TagsMd: dm.tagsMd,
|
||||
IntDec: dm.intDec,
|
||||
MapKeyByteString: dm.mapKeyByteString,
|
||||
ExtraReturnErrors: dm.extraReturnErrors,
|
||||
DefaultMapType: dm.defaultMapType,
|
||||
UTF8: dm.utf8,
|
||||
FieldNameMatching: dm.fieldNameMatching,
|
||||
BigIntDec: dm.bigIntDec,
|
||||
DefaultByteStringType: dm.defaultByteStringType,
|
||||
ByteStringToString: dm.byteStringToString,
|
||||
FieldNameByteString: dm.fieldNameByteString,
|
||||
UnrecognizedTagToAny: dm.unrecognizedTagToAny,
|
||||
TimeTagToAny: dm.timeTagToAny,
|
||||
SimpleValues: simpleValues,
|
||||
NaN: dm.nanDec,
|
||||
Inf: dm.infDec,
|
||||
ByteStringToTime: dm.byteStringToTime,
|
||||
ByteStringExpectedFormat: dm.byteStringExpectedFormat,
|
||||
BignumTag: dm.bignumTag,
|
||||
BinaryUnmarshaler: dm.binaryUnmarshaler,
|
||||
TextUnmarshaler: dm.textUnmarshaler,
|
||||
JSONUnmarshalerTranscoder: dm.jsonUnmarshalerTranscoder,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1245,7 +1288,7 @@ func (dm *decMode) DecOptions() DecOptions {
|
|||
// Unmarshal returns an error.
|
||||
//
|
||||
// See the documentation for Unmarshal for details.
|
||||
func (dm *decMode) Unmarshal(data []byte, v interface{}) error {
|
||||
func (dm *decMode) Unmarshal(data []byte, v any) error {
|
||||
d := decoder{data: data, dm: dm}
|
||||
|
||||
// Check well-formedness.
|
||||
|
|
@ -1265,7 +1308,7 @@ func (dm *decMode) Unmarshal(data []byte, v interface{}) error {
|
|||
// If v is nil, not a pointer, or a nil pointer, UnmarshalFirst returns an error.
|
||||
//
|
||||
// See the documentation for Unmarshal for details.
|
||||
func (dm *decMode) UnmarshalFirst(data []byte, v interface{}) (rest []byte, err error) {
|
||||
func (dm *decMode) UnmarshalFirst(data []byte, v any) (rest []byte, err error) {
|
||||
d := decoder{data: data, dm: dm}
|
||||
|
||||
// check well-formedness.
|
||||
|
|
@ -1341,13 +1384,13 @@ type decoder struct {
|
|||
// If CBOR data item fails to be decoded into v,
|
||||
// error is returned and offset is moved to the next CBOR data item.
|
||||
// Precondition: d.data contains at least one well-formed CBOR data item.
|
||||
func (d *decoder) value(v interface{}) error {
|
||||
func (d *decoder) value(v any) error {
|
||||
// v can't be nil, non-pointer, or nil pointer value.
|
||||
if v == nil {
|
||||
return &InvalidUnmarshalError{"cbor: Unmarshal(nil)"}
|
||||
}
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
if rv.Kind() != reflect.Pointer {
|
||||
return &InvalidUnmarshalError{"cbor: Unmarshal(non-pointer " + rv.Type().String() + ")"}
|
||||
} else if rv.IsNil() {
|
||||
return &InvalidUnmarshalError{"cbor: Unmarshal(nil " + rv.Type().String() + ")"}
|
||||
|
|
@ -1361,9 +1404,9 @@ func (d *decoder) value(v interface{}) error {
|
|||
func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolint:gocyclo
|
||||
|
||||
// Decode CBOR nil or CBOR undefined to pointer value by setting pointer value to nil.
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Ptr {
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Pointer {
|
||||
d.skip()
|
||||
v.Set(reflect.Zero(v.Type()))
|
||||
v.SetZero()
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -1387,7 +1430,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
|||
registeredType := d.dm.tags.getTypeFromTagNum(tagNums)
|
||||
if registeredType != nil {
|
||||
if registeredType.Implements(tInfo.nonPtrType) ||
|
||||
reflect.PtrTo(registeredType).Implements(tInfo.nonPtrType) {
|
||||
reflect.PointerTo(registeredType).Implements(tInfo.nonPtrType) {
|
||||
v.Set(reflect.New(registeredType))
|
||||
v = v.Elem()
|
||||
tInfo = getTypeInfo(registeredType)
|
||||
|
|
@ -1399,7 +1442,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
|||
|
||||
// Create new value for the pointer v to point to.
|
||||
// At this point, CBOR value is not nil/undefined if v is a pointer.
|
||||
for v.Kind() == reflect.Ptr {
|
||||
for v.Kind() == reflect.Pointer {
|
||||
if v.IsNil() {
|
||||
if !v.CanSet() {
|
||||
d.skip()
|
||||
|
|
@ -1460,6 +1503,17 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
|||
|
||||
case specialTypeUnmarshalerIface:
|
||||
return d.parseToUnmarshaler(v)
|
||||
|
||||
case specialTypeUnexportedUnmarshalerIface:
|
||||
return d.parseToUnexportedUnmarshaler(v)
|
||||
|
||||
case specialTypeJSONUnmarshalerIface:
|
||||
// This special type implies that the type does not also implement
|
||||
// cbor.Umarshaler.
|
||||
if d.dm.jsonUnmarshalerTranscoder == nil {
|
||||
break
|
||||
}
|
||||
return d.parseToJSONUnmarshaler(v)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1516,14 +1570,14 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
|||
return err
|
||||
}
|
||||
copied = copied || converted
|
||||
return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler)
|
||||
return fillByteString(t, b, !copied, v, d.dm.byteStringToString, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
|
||||
|
||||
case cborTypeTextString:
|
||||
b, err := d.parseTextString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return fillTextString(t, b, v)
|
||||
return fillTextString(t, b, v, d.dm.textUnmarshaler)
|
||||
|
||||
case cborTypePrimitives:
|
||||
_, ai, val := d.getHead()
|
||||
|
|
@ -1575,7 +1629,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
|||
return nil
|
||||
}
|
||||
if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
|
||||
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler)
|
||||
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
|
||||
}
|
||||
if bi.IsUint64() {
|
||||
return fillPositiveInt(t, bi.Uint64(), v)
|
||||
|
|
@ -1598,7 +1652,7 @@ func (d *decoder) parseToValue(v reflect.Value, tInfo *typeInfo) error { //nolin
|
|||
return nil
|
||||
}
|
||||
if tInfo.nonPtrKind == reflect.Slice || tInfo.nonPtrKind == reflect.Array {
|
||||
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler)
|
||||
return fillByteString(t, b, !copied, v, ByteStringToStringForbidden, d.dm.binaryUnmarshaler, d.dm.textUnmarshaler)
|
||||
}
|
||||
if bi.IsInt64() {
|
||||
return fillNegativeInt(t, bi.Int64(), v)
|
||||
|
|
@ -1788,12 +1842,12 @@ func (d *decoder) parseToTime() (time.Time, bool, error) {
|
|||
// parseToUnmarshaler parses CBOR data to value implementing Unmarshaler interface.
|
||||
// It assumes data is well-formed, and does not perform bounds checking.
|
||||
func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
d.skip()
|
||||
return nil
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Ptr && v.CanAddr() {
|
||||
if v.Kind() != reflect.Pointer && v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
if u, ok := v.Interface().(Unmarshaler); ok {
|
||||
|
|
@ -1805,9 +1859,55 @@ func (d *decoder) parseToUnmarshaler(v reflect.Value) error {
|
|||
return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.Unmarshaler")
|
||||
}
|
||||
|
||||
// parseToUnexportedUnmarshaler parses CBOR data to value implementing unmarshaler interface.
|
||||
// It assumes data is well-formed, and does not perform bounds checking.
|
||||
func (d *decoder) parseToUnexportedUnmarshaler(v reflect.Value) error {
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
d.skip()
|
||||
return nil
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Pointer && v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
if u, ok := v.Interface().(unmarshaler); ok {
|
||||
start := d.off
|
||||
d.skip()
|
||||
return u.unmarshalCBOR(d.data[start:d.off])
|
||||
}
|
||||
d.skip()
|
||||
return errors.New("cbor: failed to assert " + v.Type().String() + " as cbor.unmarshaler")
|
||||
}
|
||||
|
||||
// parseToJSONUnmarshaler parses CBOR data to be transcoded to JSON and passed to the value's
|
||||
// implementation of the json.Unmarshaler interface. It assumes data is well-formed, and does not
|
||||
// perform bounds checking.
|
||||
func (d *decoder) parseToJSONUnmarshaler(v reflect.Value) error {
|
||||
if d.nextCBORNil() && v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
d.skip()
|
||||
return nil
|
||||
}
|
||||
|
||||
if v.Kind() != reflect.Pointer && v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
if u, ok := v.Interface().(jsonUnmarshaler); ok {
|
||||
start := d.off
|
||||
d.skip()
|
||||
e := getEncodeBuffer()
|
||||
defer putEncodeBuffer(e)
|
||||
if err := d.dm.jsonUnmarshalerTranscoder.Transcode(e, bytes.NewReader(d.data[start:d.off])); err != nil {
|
||||
return &TranscodeError{err: err, rtype: v.Type(), sourceFormat: "cbor", targetFormat: "json"}
|
||||
}
|
||||
return u.UnmarshalJSON(e.Bytes())
|
||||
}
|
||||
d.skip()
|
||||
return errors.New("cbor: failed to assert " + v.Type().String() + " as json.Unmarshaler")
|
||||
}
|
||||
|
||||
// parse parses CBOR data and returns value in default Go type.
|
||||
// It assumes data is well-formed, and does not perform bounds checking.
|
||||
func (d *decoder) parse(skipSelfDescribedTag bool) (interface{}, error) { //nolint:gocyclo
|
||||
func (d *decoder) parse(skipSelfDescribedTag bool) (any, error) { //nolint:gocyclo
|
||||
// Strip self-described CBOR tag number.
|
||||
if skipSelfDescribedTag {
|
||||
for d.nextCBORType() == cborTypeTag {
|
||||
|
|
@ -2224,15 +2324,15 @@ func (d *decoder) parseTextString() ([]byte, error) {
|
|||
return b, nil
|
||||
}
|
||||
|
||||
func (d *decoder) parseArray() ([]interface{}, error) {
|
||||
func (d *decoder) parseArray() ([]any, error) {
|
||||
_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
|
||||
hasSize := !indefiniteLength
|
||||
count := int(val)
|
||||
if !hasSize {
|
||||
count = d.numOfItemsUntilBreak() // peek ahead to get array size to preallocate slice for better performance
|
||||
}
|
||||
v := make([]interface{}, count)
|
||||
var e interface{}
|
||||
v := make([]any, count)
|
||||
var e any
|
||||
var err, lastErr error
|
||||
for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
|
||||
if e, lastErr = d.parse(true); lastErr != nil {
|
||||
|
|
@ -2290,20 +2390,19 @@ func (d *decoder) parseArrayToArray(v reflect.Value, tInfo *typeInfo) error {
|
|||
}
|
||||
// Set remaining Go array elements to zero values.
|
||||
if gi < vLen {
|
||||
zeroV := reflect.Zero(tInfo.elemTypeInfo.typ)
|
||||
for ; gi < vLen; gi++ {
|
||||
v.Index(gi).Set(zeroV)
|
||||
v.Index(gi).SetZero()
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *decoder) parseMap() (interface{}, error) {
|
||||
func (d *decoder) parseMap() (any, error) {
|
||||
_, _, val, indefiniteLength := d.getHeadWithIndefiniteLengthFlag()
|
||||
hasSize := !indefiniteLength
|
||||
count := int(val)
|
||||
m := make(map[interface{}]interface{})
|
||||
var k, e interface{}
|
||||
m := make(map[any]any)
|
||||
var k, e any
|
||||
var err, lastErr error
|
||||
keyCount := 0
|
||||
for i := 0; (hasSize && i < count) || (!hasSize && !d.foundBreak()); i++ {
|
||||
|
|
@ -2376,13 +2475,13 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
|
|||
}
|
||||
keyType, eleType := tInfo.keyTypeInfo.typ, tInfo.elemTypeInfo.typ
|
||||
reuseKey, reuseEle := isImmutableKind(tInfo.keyTypeInfo.kind), isImmutableKind(tInfo.elemTypeInfo.kind)
|
||||
var keyValue, eleValue, zeroKeyValue, zeroEleValue reflect.Value
|
||||
var keyValue, eleValue reflect.Value
|
||||
keyIsInterfaceType := keyType == typeIntf // If key type is interface{}, need to check if key value is hashable.
|
||||
var err, lastErr error
|
||||
keyCount := v.Len()
|
||||
var existingKeys map[interface{}]bool // Store existing map keys, used for detecting duplicate map key.
|
||||
var existingKeys map[any]bool // Store existing map keys, used for detecting duplicate map key.
|
||||
if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
|
||||
existingKeys = make(map[interface{}]bool, keyCount)
|
||||
existingKeys = make(map[any]bool, keyCount)
|
||||
if keyCount > 0 {
|
||||
vKeys := v.MapKeys()
|
||||
for i := 0; i < len(vKeys); i++ {
|
||||
|
|
@ -2395,10 +2494,7 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
|
|||
if !keyValue.IsValid() {
|
||||
keyValue = reflect.New(keyType).Elem()
|
||||
} else if !reuseKey {
|
||||
if !zeroKeyValue.IsValid() {
|
||||
zeroKeyValue = reflect.Zero(keyType)
|
||||
}
|
||||
keyValue.Set(zeroKeyValue)
|
||||
keyValue.SetZero()
|
||||
}
|
||||
if lastErr = d.parseToValue(keyValue, tInfo.keyTypeInfo); lastErr != nil {
|
||||
if err == nil {
|
||||
|
|
@ -2413,7 +2509,7 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
|
|||
if !isHashableValue(keyValue.Elem()) {
|
||||
var converted bool
|
||||
if d.dm.mapKeyByteString == MapKeyByteStringAllowed {
|
||||
var k interface{}
|
||||
var k any
|
||||
k, converted = convertByteSliceToByteString(keyValue.Elem().Interface())
|
||||
if converted {
|
||||
keyValue.Set(reflect.ValueOf(k))
|
||||
|
|
@ -2433,10 +2529,7 @@ func (d *decoder) parseMapToMap(v reflect.Value, tInfo *typeInfo) error { //noli
|
|||
if !eleValue.IsValid() {
|
||||
eleValue = reflect.New(eleType).Elem()
|
||||
} else if !reuseEle {
|
||||
if !zeroEleValue.IsValid() {
|
||||
zeroEleValue = reflect.Zero(eleType)
|
||||
}
|
||||
eleValue.Set(zeroEleValue)
|
||||
eleValue.SetZero()
|
||||
}
|
||||
if lastErr := d.parseToValue(eleValue, tInfo.elemTypeInfo); lastErr != nil {
|
||||
if err == nil {
|
||||
|
|
@ -2584,7 +2677,7 @@ func (d *decoder) parseMapToStruct(v reflect.Value, tInfo *typeInfo) error { //n
|
|||
|
||||
// Keeps track of CBOR map keys to detect duplicate map key
|
||||
keyCount := 0
|
||||
var mapKeys map[interface{}]struct{}
|
||||
var mapKeys map[any]struct{}
|
||||
|
||||
errOnUnknownField := (d.dm.extraReturnErrors & ExtraDecErrorUnknownField) > 0
|
||||
|
||||
|
|
@ -2594,7 +2687,7 @@ MapEntryLoop:
|
|||
|
||||
// If duplicate field detection is enabled and the key at index j did not match any
|
||||
// field, k will hold the map key.
|
||||
var k interface{}
|
||||
var k any
|
||||
|
||||
t := d.nextCBORType()
|
||||
if t == cborTypeTextString || (t == cborTypeByteString && d.dm.fieldNameByteString == FieldNameByteStringAllowed) {
|
||||
|
|
@ -2764,7 +2857,7 @@ MapEntryLoop:
|
|||
// check is never reached.
|
||||
if d.dm.dupMapKey == DupMapKeyEnforcedAPF {
|
||||
if mapKeys == nil {
|
||||
mapKeys = make(map[interface{}]struct{}, 1)
|
||||
mapKeys = make(map[any]struct{}, 1)
|
||||
}
|
||||
mapKeys[k] = struct{}{}
|
||||
newKeyCount := len(mapKeys)
|
||||
|
|
@ -2968,20 +3061,25 @@ func (d *decoder) nextCBORNil() bool {
|
|||
return d.data[d.off] == 0xf6 || d.data[d.off] == 0xf7
|
||||
}
|
||||
|
||||
type jsonUnmarshaler interface{ UnmarshalJSON([]byte) error }
|
||||
|
||||
var (
|
||||
typeIntf = reflect.TypeOf([]interface{}(nil)).Elem()
|
||||
typeTime = reflect.TypeOf(time.Time{})
|
||||
typeBigInt = reflect.TypeOf(big.Int{})
|
||||
typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
|
||||
typeString = reflect.TypeOf("")
|
||||
typeByteSlice = reflect.TypeOf([]byte(nil))
|
||||
typeIntf = reflect.TypeOf([]any(nil)).Elem()
|
||||
typeTime = reflect.TypeOf(time.Time{})
|
||||
typeBigInt = reflect.TypeOf(big.Int{})
|
||||
typeUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
typeUnexportedUnmarshaler = reflect.TypeOf((*unmarshaler)(nil)).Elem()
|
||||
typeBinaryUnmarshaler = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
|
||||
typeTextUnmarshaler = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
|
||||
typeJSONUnmarshaler = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
|
||||
typeString = reflect.TypeOf("")
|
||||
typeByteSlice = reflect.TypeOf([]byte(nil))
|
||||
)
|
||||
|
||||
func fillNil(_ cborType, v reflect.Value) error {
|
||||
switch v.Kind() {
|
||||
case reflect.Slice, reflect.Map, reflect.Interface, reflect.Ptr:
|
||||
v.Set(reflect.Zero(v.Type()))
|
||||
case reflect.Slice, reflect.Map, reflect.Interface, reflect.Pointer:
|
||||
v.SetZero()
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
|
|
@ -3082,8 +3180,8 @@ func fillFloat(t cborType, val float64, v reflect.Value) error {
|
|||
return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
|
||||
}
|
||||
|
||||
func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode) error {
|
||||
if bum == BinaryUnmarshalerByteString && reflect.PtrTo(v.Type()).Implements(typeBinaryUnmarshaler) {
|
||||
func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts ByteStringToStringMode, bum BinaryUnmarshalerMode, tum TextUnmarshalerMode) error {
|
||||
if bum == BinaryUnmarshalerByteString && reflect.PointerTo(v.Type()).Implements(typeBinaryUnmarshaler) {
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
if u, ok := v.Interface().(encoding.BinaryUnmarshaler); ok {
|
||||
|
|
@ -3095,9 +3193,26 @@ func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts B
|
|||
}
|
||||
return errors.New("cbor: cannot set new value for " + v.Type().String())
|
||||
}
|
||||
if bsts != ByteStringToStringForbidden && v.Kind() == reflect.String {
|
||||
v.SetString(string(val))
|
||||
return nil
|
||||
if bsts != ByteStringToStringForbidden {
|
||||
if tum == TextUnmarshalerTextString && reflect.PointerTo(v.Type()).Implements(typeTextUnmarshaler) {
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
|
||||
// The contract of TextUnmarshaler forbids retaining the input
|
||||
// bytes, so no copying is required even if val is shared.
|
||||
if err := u.UnmarshalText(val); err != nil {
|
||||
return fmt.Errorf("cbor: cannot unmarshal text for %s: %w", v.Type(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.New("cbor: cannot set new value for " + v.Type().String())
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.String {
|
||||
v.SetString(string(val))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
|
||||
src := val
|
||||
|
|
@ -3117,9 +3232,8 @@ func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts B
|
|||
}
|
||||
// Set remaining Go array elements to zero values.
|
||||
if i < vLen {
|
||||
zeroV := reflect.Zero(reflect.TypeOf(byte(0)))
|
||||
for ; i < vLen; i++ {
|
||||
v.Index(i).Set(zeroV)
|
||||
v.Index(i).SetZero()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
|
@ -3127,11 +3241,28 @@ func fillByteString(t cborType, val []byte, shared bool, v reflect.Value, bsts B
|
|||
return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
|
||||
}
|
||||
|
||||
func fillTextString(t cborType, val []byte, v reflect.Value) error {
|
||||
func fillTextString(t cborType, val []byte, v reflect.Value, tum TextUnmarshalerMode) error {
|
||||
// Check if the value implements TextUnmarshaler and the mode allows it
|
||||
if tum == TextUnmarshalerTextString && reflect.PointerTo(v.Type()).Implements(typeTextUnmarshaler) {
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
|
||||
// The contract of TextUnmarshaler forbids retaining the input
|
||||
// bytes, so no copying is required even if val is shared.
|
||||
if err := u.UnmarshalText(val); err != nil {
|
||||
return fmt.Errorf("cbor: cannot unmarshal text for %s: %w", v.Type(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return errors.New("cbor: cannot set new value for " + v.Type().String())
|
||||
}
|
||||
|
||||
if v.Kind() == reflect.String {
|
||||
v.SetString(string(val))
|
||||
return nil
|
||||
}
|
||||
|
||||
return &UnmarshalTypeError{CBORType: t.String(), GoType: v.Type().String()}
|
||||
}
|
||||
|
||||
|
|
@ -3172,7 +3303,7 @@ func isHashableValue(rv reflect.Value) bool {
|
|||
// This function also handles nested tags.
|
||||
// CBOR data is already verified to be well-formed before this function is used,
|
||||
// so the recursion won't exceed max nested levels.
|
||||
func convertByteSliceToByteString(v interface{}) (interface{}, bool) {
|
||||
func convertByteSliceToByteString(v any) (any, bool) {
|
||||
switch v := v.(type) {
|
||||
case []byte:
|
||||
return ByteString(v), true
|
||||
|
|
|
|||
|
|
@ -2,15 +2,15 @@
|
|||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
/*
|
||||
Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
|
||||
Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
|
||||
Package cbor is a modern CBOR codec (RFC 8949 & RFC 8742) with CBOR tags,
|
||||
Go struct tag options (toarray/keyasint/omitempty/omitzero), Core Deterministic Encoding,
|
||||
CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.
|
||||
|
||||
Encoding options allow "preferred serialization" by encoding integers and floats
|
||||
to their smallest forms (e.g. float16) when values fit.
|
||||
|
||||
Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
|
||||
and easier to use with structs.
|
||||
Struct tag options "keyasint", "toarray", "omitempty", and "omitzero" reduce encoding size
|
||||
and reduce programming effort.
|
||||
|
||||
For example, "toarray" tag makes struct fields encode to CBOR array elements. And
|
||||
"keyasint" makes a field encode to an element of CBOR map with specified int key.
|
||||
|
|
@ -23,11 +23,19 @@ The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start
|
|||
|
||||
Function signatures identical to encoding/json include:
|
||||
|
||||
Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.
|
||||
Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode
|
||||
|
||||
Standard interfaces include:
|
||||
|
||||
BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.
|
||||
BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler
|
||||
|
||||
Diagnostic functions translate CBOR data item into Diagnostic Notation:
|
||||
|
||||
Diagnose, DiagnoseFirst
|
||||
|
||||
Functions that simplify using CBOR Sequences (RFC 8742) include:
|
||||
|
||||
UnmarshalFirst
|
||||
|
||||
Custom encoding and decoding is possible by implementing standard interfaces for
|
||||
user-defined Go types.
|
||||
|
|
@ -50,19 +58,19 @@ Modes are intended to be reused and are safe for concurrent use.
|
|||
|
||||
EncMode and DecMode Interfaces
|
||||
|
||||
// EncMode interface uses immutable options and is safe for concurrent use.
|
||||
type EncMode interface {
|
||||
// EncMode interface uses immutable options and is safe for concurrent use.
|
||||
type EncMode interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
NewEncoder(w io.Writer) *Encoder
|
||||
EncOptions() EncOptions // returns copy of options
|
||||
}
|
||||
}
|
||||
|
||||
// DecMode interface uses immutable options and is safe for concurrent use.
|
||||
type DecMode interface {
|
||||
// DecMode interface uses immutable options and is safe for concurrent use.
|
||||
type DecMode interface {
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
NewDecoder(r io.Reader) *Decoder
|
||||
DecOptions() DecOptions // returns copy of options
|
||||
}
|
||||
}
|
||||
|
||||
Using Default Encoding Mode
|
||||
|
||||
|
|
@ -78,6 +86,16 @@ Using Default Decoding Mode
|
|||
decoder := cbor.NewDecoder(r)
|
||||
err = decoder.Decode(&v)
|
||||
|
||||
Using Default Mode of UnmarshalFirst to Decode CBOR Sequences
|
||||
|
||||
// Decode the first CBOR data item and return remaining bytes:
|
||||
rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v
|
||||
|
||||
Using Extended Diagnostic Notation (EDN) to represent CBOR data
|
||||
|
||||
// Translate the first CBOR data item into text and return remaining bytes.
|
||||
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to text
|
||||
|
||||
Creating and Using Encoding Modes
|
||||
|
||||
// Create EncOptions using either struct literal or a function.
|
||||
|
|
@ -111,15 +129,20 @@ Decoding Options: https://github.com/fxamacker/cbor#decoding-options
|
|||
Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
|
||||
If both struct tags are specified then `cbor` is used.
|
||||
|
||||
Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
|
||||
Struct tag options like "keyasint", "toarray", "omitempty", and "omitzero" make it easy to use
|
||||
very compact formats like COSE and CWT (CBOR Web Tokens) with structs.
|
||||
|
||||
The "omitzero" option omits zero values from encoding, matching
|
||||
[stdlib encoding/json behavior](https://pkg.go.dev/encoding/json#Marshal).
|
||||
When specified in the `cbor` tag, the option is always honored.
|
||||
When specified in the `json` tag, the option is honored when building with Go 1.24+.
|
||||
|
||||
For example, "toarray" makes struct fields encode to array elements. And "keyasint"
|
||||
makes struct fields encode to elements of CBOR map with int keys.
|
||||
|
||||
https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png
|
||||
|
||||
Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1
|
||||
Struct tag options are listed at https://github.com/fxamacker/cbor#struct-tags-1
|
||||
|
||||
# Tests and Fuzzing
|
||||
|
||||
|
|
|
|||
|
|
@ -58,8 +58,10 @@ import (
|
|||
//
|
||||
// Marshal supports format string stored under the "cbor" key in the struct
|
||||
// field's tag. CBOR format string can specify the name of the field,
|
||||
// "omitempty" and "keyasint" options, and special case "-" for field omission.
|
||||
// If "cbor" key is absent, Marshal uses "json" key.
|
||||
// "omitempty", "omitzero" and "keyasint" options, and special case "-" for
|
||||
// field omission. If "cbor" key is absent, Marshal uses "json" key.
|
||||
// When using the "json" key, the "omitzero" option is honored when building
|
||||
// with Go 1.24+ to match stdlib encoding/json behavior.
|
||||
//
|
||||
// Struct field name is treated as integer if it has "keyasint" option in
|
||||
// its format string. The format string must specify an integer as its
|
||||
|
|
@ -67,8 +69,8 @@ import (
|
|||
//
|
||||
// Special struct field "_" is used to specify struct level options, such as
|
||||
// "toarray". "toarray" option enables Go struct to be encoded as CBOR array.
|
||||
// "omitempty" is disabled by "toarray" to ensure that the same number
|
||||
// of elements are encoded every time.
|
||||
// "omitempty" and "omitzero" are disabled by "toarray" to ensure that the
|
||||
// same number of elements are encoded every time.
|
||||
//
|
||||
// Anonymous struct fields are marshaled as if their exported fields
|
||||
// were fields in the outer struct. Marshal follows the same struct fields
|
||||
|
|
@ -92,7 +94,7 @@ import (
|
|||
//
|
||||
// Values of other types cannot be encoded in CBOR. Attempting
|
||||
// to encode such a value causes Marshal to return an UnsupportedTypeError.
|
||||
func Marshal(v interface{}) ([]byte, error) {
|
||||
func Marshal(v any) ([]byte, error) {
|
||||
return defaultEncMode.Marshal(v)
|
||||
}
|
||||
|
||||
|
|
@ -103,7 +105,7 @@ func Marshal(v interface{}) ([]byte, error) {
|
|||
// partially encoded data if error is returned.
|
||||
//
|
||||
// See Marshal for more details.
|
||||
func MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
|
||||
func MarshalToBuffer(v any, buf *bytes.Buffer) error {
|
||||
return defaultEncMode.MarshalToBuffer(v, buf)
|
||||
}
|
||||
|
||||
|
|
@ -130,6 +132,20 @@ func (e *MarshalerError) Unwrap() error {
|
|||
return e.err
|
||||
}
|
||||
|
||||
type TranscodeError struct {
|
||||
err error
|
||||
rtype reflect.Type
|
||||
sourceFormat, targetFormat string
|
||||
}
|
||||
|
||||
func (e TranscodeError) Error() string {
|
||||
return "cbor: cannot transcode from " + e.sourceFormat + " to " + e.targetFormat + ": " + e.err.Error()
|
||||
}
|
||||
|
||||
func (e TranscodeError) Unwrap() error {
|
||||
return e.err
|
||||
}
|
||||
|
||||
// UnsupportedTypeError is returned by Marshal when attempting to encode value
|
||||
// of an unsupported type.
|
||||
type UnsupportedTypeError struct {
|
||||
|
|
@ -291,24 +307,51 @@ func (icm InfConvertMode) valid() bool {
|
|||
return icm >= 0 && icm < maxInfConvert
|
||||
}
|
||||
|
||||
// TimeMode specifies how to encode time.Time values.
|
||||
// TimeMode specifies how to encode time.Time values in compliance with RFC 8949 (CBOR):
|
||||
// - Section 3.4.1: Standard Date/Time String
|
||||
// - Section 3.4.2: Epoch-Based Date/Time
|
||||
// For more info, see:
|
||||
// - https://www.rfc-editor.org/rfc/rfc8949.html
|
||||
// NOTE: User applications that prefer to encode time with fractional seconds to an integer
|
||||
// (instead of floating point or text string) can use a CBOR tag number not assigned by IANA:
|
||||
// 1. Define a user-defined type in Go with just a time.Time or int64 as its data.
|
||||
// 2. Implement the cbor.Marshaler and cbor.Unmarshaler interface for that user-defined type
|
||||
// to encode or decode the tagged data item with an enclosed integer content.
|
||||
type TimeMode int
|
||||
|
||||
const (
|
||||
// TimeUnix causes time.Time to be encoded as epoch time in integer with second precision.
|
||||
// TimeUnix causes time.Time to encode to a CBOR time (tag 1) with an integer content
|
||||
// representing seconds elapsed (with 1-second precision) since UNIX Epoch UTC.
|
||||
// The TimeUnix option is location independent and has a clear precision guarantee.
|
||||
TimeUnix TimeMode = iota
|
||||
|
||||
// TimeUnixMicro causes time.Time to be encoded as epoch time in float-point rounded to microsecond precision.
|
||||
// TimeUnixMicro causes time.Time to encode to a CBOR time (tag 1) with a floating point content
|
||||
// representing seconds elapsed (with up to 1-microsecond precision) since UNIX Epoch UTC.
|
||||
// NOTE: The floating point content is encoded to the shortest floating-point encoding that preserves
|
||||
// the 64-bit floating point value. I.e., the floating point encoding can be IEEE 764:
|
||||
// binary64, binary32, or binary16 depending on the content's value.
|
||||
TimeUnixMicro
|
||||
|
||||
// TimeUnixDynamic causes time.Time to be encoded as integer if time.Time doesn't have fractional seconds,
|
||||
// otherwise float-point rounded to microsecond precision.
|
||||
// TimeUnixDynamic causes time.Time to encode to a CBOR time (tag 1) with either an integer content or
|
||||
// a floating point content, depending on the content's value. This option is equivalent to dynamically
|
||||
// choosing TimeUnix if time.Time doesn't have fractional seconds, and using TimeUnixMicro if time.Time
|
||||
// has fractional seconds.
|
||||
TimeUnixDynamic
|
||||
|
||||
// TimeRFC3339 causes time.Time to be encoded as RFC3339 formatted string with second precision.
|
||||
// TimeRFC3339 causes time.Time to encode to a CBOR time (tag 0) with a text string content
|
||||
// representing the time using 1-second precision in RFC3339 format. If the time.Time has a
|
||||
// non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339.
|
||||
// NOTE: User applications can avoid including the RFC3339 numeric offset by:
|
||||
// - providing a time.Time value set to UTC, or
|
||||
// - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339.
|
||||
TimeRFC3339
|
||||
|
||||
// TimeRFC3339Nano causes time.Time to be encoded as RFC3339 formatted string with nanosecond precision.
|
||||
// TimeRFC3339Nano causes time.Time to encode to a CBOR time (tag 0) with a text string content
|
||||
// representing the time using 1-nanosecond precision in RFC3339 format. If the time.Time has a
|
||||
// non-UTC timezone then a "localtime - UTC" numeric offset will be included as specified in RFC3339.
|
||||
// NOTE: User applications can avoid including the RFC3339 numeric offset by:
|
||||
// - providing a time.Time value set to UTC, or
|
||||
// - using the TimeUnix, TimeUnixMicro, or TimeUnixDynamic option instead of TimeRFC3339Nano.
|
||||
TimeRFC3339Nano
|
||||
|
||||
maxTimeMode
|
||||
|
|
@ -481,6 +524,24 @@ func (bmm BinaryMarshalerMode) valid() bool {
|
|||
return bmm >= 0 && bmm < maxBinaryMarshalerMode
|
||||
}
|
||||
|
||||
// TextMarshalerMode specifies how to encode types that implement encoding.TextMarshaler.
|
||||
type TextMarshalerMode int
|
||||
|
||||
const (
|
||||
// TextMarshalerNone does not recognize TextMarshaler implementations during encode.
|
||||
// This is the default behavior.
|
||||
TextMarshalerNone TextMarshalerMode = iota
|
||||
|
||||
// TextMarshalerTextString encodes the output of MarshalText to a CBOR text string.
|
||||
TextMarshalerTextString
|
||||
|
||||
maxTextMarshalerMode
|
||||
)
|
||||
|
||||
func (tmm TextMarshalerMode) valid() bool {
|
||||
return tmm >= 0 && tmm < maxTextMarshalerMode
|
||||
}
|
||||
|
||||
// EncOptions specifies encoding options.
|
||||
type EncOptions struct {
|
||||
// Sort specifies sorting order.
|
||||
|
|
@ -538,6 +599,14 @@ type EncOptions struct {
|
|||
|
||||
// BinaryMarshaler specifies how to encode types that implement encoding.BinaryMarshaler.
|
||||
BinaryMarshaler BinaryMarshalerMode
|
||||
|
||||
// TextMarshaler specifies how to encode types that implement encoding.TextMarshaler.
|
||||
TextMarshaler TextMarshalerMode
|
||||
|
||||
// JSONMarshalerTranscoder sets the transcoding scheme used to marshal types that implement
|
||||
// json.Marshaler but do not also implement cbor.Marshaler. If nil, encoding behavior is not
|
||||
// influenced by whether or not a type implements json.Marshaler.
|
||||
JSONMarshalerTranscoder Transcoder
|
||||
}
|
||||
|
||||
// CanonicalEncOptions returns EncOptions for "Canonical CBOR" encoding,
|
||||
|
|
@ -748,6 +817,9 @@ func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore
|
|||
if !opts.BinaryMarshaler.valid() {
|
||||
return nil, errors.New("cbor: invalid BinaryMarshaler " + strconv.Itoa(int(opts.BinaryMarshaler)))
|
||||
}
|
||||
if !opts.TextMarshaler.valid() {
|
||||
return nil, errors.New("cbor: invalid TextMarshaler " + strconv.Itoa(int(opts.TextMarshaler)))
|
||||
}
|
||||
em := encMode{
|
||||
sort: opts.Sort,
|
||||
shortestFloat: opts.ShortestFloat,
|
||||
|
|
@ -767,13 +839,15 @@ func (opts EncOptions) encMode() (*encMode, error) { //nolint:gocritic // ignore
|
|||
byteSliceLaterEncodingTag: byteSliceLaterEncodingTag,
|
||||
byteArray: opts.ByteArray,
|
||||
binaryMarshaler: opts.BinaryMarshaler,
|
||||
textMarshaler: opts.TextMarshaler,
|
||||
jsonMarshalerTranscoder: opts.JSONMarshalerTranscoder,
|
||||
}
|
||||
return &em, nil
|
||||
}
|
||||
|
||||
// EncMode is the main interface for CBOR encoding.
|
||||
type EncMode interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
Marshal(v any) ([]byte, error)
|
||||
NewEncoder(w io.Writer) *Encoder
|
||||
EncOptions() EncOptions
|
||||
}
|
||||
|
|
@ -783,7 +857,7 @@ type EncMode interface {
|
|||
// into the built-in buffer pool.
|
||||
type UserBufferEncMode interface {
|
||||
EncMode
|
||||
MarshalToBuffer(v interface{}, buf *bytes.Buffer) error
|
||||
MarshalToBuffer(v any, buf *bytes.Buffer) error
|
||||
|
||||
// This private method is to prevent users implementing
|
||||
// this interface and so future additions to it will
|
||||
|
|
@ -812,6 +886,8 @@ type encMode struct {
|
|||
byteSliceLaterEncodingTag uint64
|
||||
byteArray ByteArrayMode
|
||||
binaryMarshaler BinaryMarshalerMode
|
||||
textMarshaler TextMarshalerMode
|
||||
jsonMarshalerTranscoder Transcoder
|
||||
}
|
||||
|
||||
var defaultEncMode, _ = EncOptions{}.encMode()
|
||||
|
|
@ -888,22 +964,24 @@ func getMarshalerDecMode(indefLength IndefLengthMode, tagsMd TagsMode) *decMode
|
|||
// EncOptions returns user specified options used to create this EncMode.
|
||||
func (em *encMode) EncOptions() EncOptions {
|
||||
return EncOptions{
|
||||
Sort: em.sort,
|
||||
ShortestFloat: em.shortestFloat,
|
||||
NaNConvert: em.nanConvert,
|
||||
InfConvert: em.infConvert,
|
||||
BigIntConvert: em.bigIntConvert,
|
||||
Time: em.time,
|
||||
TimeTag: em.timeTag,
|
||||
IndefLength: em.indefLength,
|
||||
NilContainers: em.nilContainers,
|
||||
TagsMd: em.tagsMd,
|
||||
OmitEmpty: em.omitEmpty,
|
||||
String: em.stringType,
|
||||
FieldName: em.fieldName,
|
||||
ByteSliceLaterFormat: em.byteSliceLaterFormat,
|
||||
ByteArray: em.byteArray,
|
||||
BinaryMarshaler: em.binaryMarshaler,
|
||||
Sort: em.sort,
|
||||
ShortestFloat: em.shortestFloat,
|
||||
NaNConvert: em.nanConvert,
|
||||
InfConvert: em.infConvert,
|
||||
BigIntConvert: em.bigIntConvert,
|
||||
Time: em.time,
|
||||
TimeTag: em.timeTag,
|
||||
IndefLength: em.indefLength,
|
||||
NilContainers: em.nilContainers,
|
||||
TagsMd: em.tagsMd,
|
||||
OmitEmpty: em.omitEmpty,
|
||||
String: em.stringType,
|
||||
FieldName: em.fieldName,
|
||||
ByteSliceLaterFormat: em.byteSliceLaterFormat,
|
||||
ByteArray: em.byteArray,
|
||||
BinaryMarshaler: em.binaryMarshaler,
|
||||
TextMarshaler: em.textMarshaler,
|
||||
JSONMarshalerTranscoder: em.jsonMarshalerTranscoder,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -921,7 +999,7 @@ func (em *encMode) encTagBytes(t reflect.Type) []byte {
|
|||
// Marshal returns the CBOR encoding of v using em encoding mode.
|
||||
//
|
||||
// See the documentation for Marshal for details.
|
||||
func (em *encMode) Marshal(v interface{}) ([]byte, error) {
|
||||
func (em *encMode) Marshal(v any) ([]byte, error) {
|
||||
e := getEncodeBuffer()
|
||||
|
||||
if err := encode(e, em, reflect.ValueOf(v)); err != nil {
|
||||
|
|
@ -943,7 +1021,7 @@ func (em *encMode) Marshal(v interface{}) ([]byte, error) {
|
|||
// partially encoded data if error is returned.
|
||||
//
|
||||
// See Marshal for more details.
|
||||
func (em *encMode) MarshalToBuffer(v interface{}, buf *bytes.Buffer) error {
|
||||
func (em *encMode) MarshalToBuffer(v any, buf *bytes.Buffer) error {
|
||||
if buf == nil {
|
||||
return fmt.Errorf("cbor: encoding buffer provided by user is nil")
|
||||
}
|
||||
|
|
@ -957,7 +1035,7 @@ func (em *encMode) NewEncoder(w io.Writer) *Encoder {
|
|||
|
||||
// encodeBufferPool caches unused bytes.Buffer objects for later reuse.
|
||||
var encodeBufferPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
New: func() any {
|
||||
e := new(bytes.Buffer)
|
||||
e.Grow(32) // TODO: make this configurable
|
||||
return e
|
||||
|
|
@ -975,6 +1053,7 @@ func putEncodeBuffer(e *bytes.Buffer) {
|
|||
|
||||
type encodeFunc func(e *bytes.Buffer, em *encMode, v reflect.Value) error
|
||||
type isEmptyFunc func(em *encMode, v reflect.Value) (empty bool, err error)
|
||||
type isZeroFunc func(v reflect.Value) (zero bool, err error)
|
||||
|
||||
func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
if !v.IsValid() {
|
||||
|
|
@ -983,7 +1062,7 @@ func encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
vt := v.Type()
|
||||
f, _ := getEncodeFunc(vt)
|
||||
f, _, _ := getEncodeFunc(vt)
|
||||
if f == nil {
|
||||
return &UnsupportedTypeError{vt}
|
||||
}
|
||||
|
|
@ -1483,6 +1562,15 @@ func encodeStruct(e *bytes.Buffer, em *encMode, v reflect.Value) (err error) {
|
|||
continue
|
||||
}
|
||||
}
|
||||
if f.omitZero {
|
||||
zero, err := f.izf(fv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if zero {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !f.keyAsInt && em.fieldName == FieldNameToByteString {
|
||||
e.Write(f.cborNameByteString)
|
||||
|
|
@ -1665,6 +1753,107 @@ func (bme binaryMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, e
|
|||
return len(data) == 0, nil
|
||||
}
|
||||
|
||||
type textMarshalerEncoder struct {
|
||||
alternateEncode encodeFunc
|
||||
alternateIsEmpty isEmptyFunc
|
||||
}
|
||||
|
||||
func (tme textMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
if em.textMarshaler == TextMarshalerNone {
|
||||
return tme.alternateEncode(e, em, v)
|
||||
}
|
||||
|
||||
vt := v.Type()
|
||||
m, ok := v.Interface().(encoding.TextMarshaler)
|
||||
if !ok {
|
||||
pv := reflect.New(vt)
|
||||
pv.Elem().Set(v)
|
||||
m = pv.Interface().(encoding.TextMarshaler)
|
||||
}
|
||||
data, err := m.MarshalText()
|
||||
if err != nil {
|
||||
return fmt.Errorf("cbor: cannot marshal text for %s: %w", vt, err)
|
||||
}
|
||||
if b := em.encTagBytes(vt); b != nil {
|
||||
e.Write(b)
|
||||
}
|
||||
|
||||
encodeHead(e, byte(cborTypeTextString), uint64(len(data)))
|
||||
e.Write(data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tme textMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
|
||||
if em.textMarshaler == TextMarshalerNone {
|
||||
return tme.alternateIsEmpty(em, v)
|
||||
}
|
||||
|
||||
m, ok := v.Interface().(encoding.TextMarshaler)
|
||||
if !ok {
|
||||
pv := reflect.New(v.Type())
|
||||
pv.Elem().Set(v)
|
||||
m = pv.Interface().(encoding.TextMarshaler)
|
||||
}
|
||||
data, err := m.MarshalText()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("cbor: cannot marshal text for %s: %w", v.Type(), err)
|
||||
}
|
||||
return len(data) == 0, nil
|
||||
}
|
||||
|
||||
type jsonMarshalerEncoder struct {
|
||||
alternateEncode encodeFunc
|
||||
alternateIsEmpty isEmptyFunc
|
||||
}
|
||||
|
||||
func (jme jsonMarshalerEncoder) encode(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
if em.jsonMarshalerTranscoder == nil {
|
||||
return jme.alternateEncode(e, em, v)
|
||||
}
|
||||
|
||||
vt := v.Type()
|
||||
m, ok := v.Interface().(jsonMarshaler)
|
||||
if !ok {
|
||||
pv := reflect.New(vt)
|
||||
pv.Elem().Set(v)
|
||||
m = pv.Interface().(jsonMarshaler)
|
||||
}
|
||||
|
||||
json, err := m.MarshalJSON()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
offset := e.Len()
|
||||
|
||||
if b := em.encTagBytes(vt); b != nil {
|
||||
e.Write(b)
|
||||
}
|
||||
|
||||
if err := em.jsonMarshalerTranscoder.Transcode(e, bytes.NewReader(json)); err != nil {
|
||||
return &TranscodeError{err: err, rtype: vt, sourceFormat: "json", targetFormat: "cbor"}
|
||||
}
|
||||
|
||||
// Validate that the transcode function has written exactly one well-formed data item.
|
||||
d := decoder{data: e.Bytes()[offset:], dm: getMarshalerDecMode(em.indefLength, em.tagsMd)}
|
||||
if err := d.wellformed(false, true); err != nil {
|
||||
e.Truncate(offset)
|
||||
return &TranscodeError{err: err, rtype: vt, sourceFormat: "json", targetFormat: "cbor"}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (jme jsonMarshalerEncoder) isEmpty(em *encMode, v reflect.Value) (bool, error) {
|
||||
if em.jsonMarshalerTranscoder == nil {
|
||||
return jme.alternateIsEmpty(em, v)
|
||||
}
|
||||
|
||||
// As with types implementing cbor.Marshaler, transcoded json.Marshaler values always encode
|
||||
// as exactly one complete CBOR data item.
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func encodeMarshalerType(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
if em.tagsMd == TagsForbidden && v.Type() == typeRawTag {
|
||||
return errors.New("cbor: cannot encode cbor.RawTag when TagsMd is TagsForbidden")
|
||||
|
|
@ -1768,41 +1957,45 @@ func encodeHead(e *bytes.Buffer, t byte, n uint64) int {
|
|||
return headSize
|
||||
}
|
||||
|
||||
type jsonMarshaler interface{ MarshalJSON() ([]byte, error) }
|
||||
|
||||
var (
|
||||
typeMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
typeBinaryMarshaler = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
|
||||
typeTextMarshaler = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
|
||||
typeJSONMarshaler = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
|
||||
typeRawMessage = reflect.TypeOf(RawMessage(nil))
|
||||
typeByteString = reflect.TypeOf(ByteString(""))
|
||||
)
|
||||
|
||||
func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
|
||||
func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc, izf isZeroFunc) {
|
||||
k := t.Kind()
|
||||
if k == reflect.Ptr {
|
||||
return getEncodeIndirectValueFunc(t), isEmptyPtr
|
||||
if k == reflect.Pointer {
|
||||
return getEncodeIndirectValueFunc(t), isEmptyPtr, getIsZeroFunc(t)
|
||||
}
|
||||
switch t {
|
||||
case typeSimpleValue:
|
||||
return encodeMarshalerType, isEmptyUint
|
||||
return encodeMarshalerType, isEmptyUint, getIsZeroFunc(t)
|
||||
|
||||
case typeTag:
|
||||
return encodeTag, alwaysNotEmpty
|
||||
return encodeTag, alwaysNotEmpty, getIsZeroFunc(t)
|
||||
|
||||
case typeTime:
|
||||
return encodeTime, alwaysNotEmpty
|
||||
return encodeTime, alwaysNotEmpty, getIsZeroFunc(t)
|
||||
|
||||
case typeBigInt:
|
||||
return encodeBigInt, alwaysNotEmpty
|
||||
return encodeBigInt, alwaysNotEmpty, getIsZeroFunc(t)
|
||||
|
||||
case typeRawMessage:
|
||||
return encodeMarshalerType, isEmptySlice
|
||||
return encodeMarshalerType, isEmptySlice, getIsZeroFunc(t)
|
||||
|
||||
case typeByteString:
|
||||
return encodeMarshalerType, isEmptyString
|
||||
return encodeMarshalerType, isEmptyString, getIsZeroFunc(t)
|
||||
}
|
||||
if reflect.PtrTo(t).Implements(typeMarshaler) {
|
||||
return encodeMarshalerType, alwaysNotEmpty
|
||||
if reflect.PointerTo(t).Implements(typeMarshaler) {
|
||||
return encodeMarshalerType, alwaysNotEmpty, getIsZeroFunc(t)
|
||||
}
|
||||
if reflect.PtrTo(t).Implements(typeBinaryMarshaler) {
|
||||
if reflect.PointerTo(t).Implements(typeBinaryMarshaler) {
|
||||
defer func() {
|
||||
// capture encoding method used for modes that disable BinaryMarshaler
|
||||
bme := binaryMarshalerEncoder{
|
||||
|
|
@ -1813,41 +2006,65 @@ func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
|
|||
ief = bme.isEmpty
|
||||
}()
|
||||
}
|
||||
if reflect.PointerTo(t).Implements(typeTextMarshaler) {
|
||||
defer func() {
|
||||
// capture encoding method used for modes that disable TextMarshaler
|
||||
tme := textMarshalerEncoder{
|
||||
alternateEncode: ef,
|
||||
alternateIsEmpty: ief,
|
||||
}
|
||||
ef = tme.encode
|
||||
ief = tme.isEmpty
|
||||
}()
|
||||
}
|
||||
if reflect.PointerTo(t).Implements(typeJSONMarshaler) {
|
||||
defer func() {
|
||||
// capture encoding method used for modes that don't support transcoding
|
||||
// from types that implement json.Marshaler.
|
||||
jme := jsonMarshalerEncoder{
|
||||
alternateEncode: ef,
|
||||
alternateIsEmpty: ief,
|
||||
}
|
||||
ef = jme.encode
|
||||
ief = jme.isEmpty
|
||||
}()
|
||||
}
|
||||
|
||||
switch k {
|
||||
case reflect.Bool:
|
||||
return encodeBool, isEmptyBool
|
||||
return encodeBool, isEmptyBool, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return encodeInt, isEmptyInt
|
||||
return encodeInt, isEmptyInt, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return encodeUint, isEmptyUint
|
||||
return encodeUint, isEmptyUint, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return encodeFloat, isEmptyFloat
|
||||
return encodeFloat, isEmptyFloat, getIsZeroFunc(t)
|
||||
|
||||
case reflect.String:
|
||||
return encodeString, isEmptyString
|
||||
return encodeString, isEmptyString, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Slice:
|
||||
if t.Elem().Kind() == reflect.Uint8 {
|
||||
return encodeByteString, isEmptySlice
|
||||
return encodeByteString, isEmptySlice, getIsZeroFunc(t)
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
f, _ := getEncodeFunc(t.Elem())
|
||||
f, _, _ := getEncodeFunc(t.Elem())
|
||||
if f == nil {
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
return arrayEncodeFunc{f: f}.encode, isEmptySlice
|
||||
return arrayEncodeFunc{f: f}.encode, isEmptySlice, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Map:
|
||||
f := getEncodeMapFunc(t)
|
||||
if f == nil {
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
return f, isEmptyMap
|
||||
return f, isEmptyMap, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Struct:
|
||||
// Get struct's special field "_" tag options
|
||||
|
|
@ -1855,31 +2072,31 @@ func getEncodeFuncInternal(t reflect.Type) (ef encodeFunc, ief isEmptyFunc) {
|
|||
tag := f.Tag.Get("cbor")
|
||||
if tag != "-" {
|
||||
if hasToArrayOption(tag) {
|
||||
return encodeStructToArray, isEmptyStruct
|
||||
return encodeStructToArray, isEmptyStruct, isZeroFieldStruct
|
||||
}
|
||||
}
|
||||
}
|
||||
return encodeStruct, isEmptyStruct
|
||||
return encodeStruct, isEmptyStruct, getIsZeroFunc(t)
|
||||
|
||||
case reflect.Interface:
|
||||
return encodeIntf, isEmptyIntf
|
||||
return encodeIntf, isEmptyIntf, getIsZeroFunc(t)
|
||||
}
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func getEncodeIndirectValueFunc(t reflect.Type) encodeFunc {
|
||||
for t.Kind() == reflect.Ptr {
|
||||
for t.Kind() == reflect.Pointer {
|
||||
t = t.Elem()
|
||||
}
|
||||
f, _ := getEncodeFunc(t)
|
||||
f, _, _ := getEncodeFunc(t)
|
||||
if f == nil {
|
||||
return nil
|
||||
}
|
||||
return func(e *bytes.Buffer, em *encMode, v reflect.Value) error {
|
||||
for v.Kind() == reflect.Ptr && !v.IsNil() {
|
||||
for v.Kind() == reflect.Pointer && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
if v.Kind() == reflect.Ptr && v.IsNil() {
|
||||
if v.Kind() == reflect.Pointer && v.IsNil() {
|
||||
e.Write(cborNil)
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1987,3 +2204,96 @@ func float32NaNFromReflectValue(v reflect.Value) float32 {
|
|||
f32 := p.Convert(reflect.TypeOf((*float32)(nil))).Elem().Interface().(float32)
|
||||
return f32
|
||||
}
|
||||
|
||||
type isZeroer interface {
|
||||
IsZero() bool
|
||||
}
|
||||
|
||||
var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem()
|
||||
|
||||
// getIsZeroFunc returns a function for the given type that can be called to determine if a given value is zero.
|
||||
// Types that implement `IsZero() bool` are delegated to for non-nil values.
|
||||
// Types that do not implement `IsZero() bool` use the reflect.Value#IsZero() implementation.
|
||||
// The returned function matches behavior of stdlib encoding/json behavior in Go 1.24+.
|
||||
func getIsZeroFunc(t reflect.Type) isZeroFunc {
|
||||
// Provide a function that uses a type's IsZero method if defined.
|
||||
switch {
|
||||
case t == nil:
|
||||
return isZeroDefault
|
||||
case t.Kind() == reflect.Interface && t.Implements(isZeroerType):
|
||||
return isZeroInterfaceCustom
|
||||
case t.Kind() == reflect.Pointer && t.Implements(isZeroerType):
|
||||
return isZeroPointerCustom
|
||||
case t.Implements(isZeroerType):
|
||||
return isZeroCustom
|
||||
case reflect.PointerTo(t).Implements(isZeroerType):
|
||||
return isZeroAddrCustom
|
||||
default:
|
||||
return isZeroDefault
|
||||
}
|
||||
}
|
||||
|
||||
// isZeroInterfaceCustom returns true for nil or pointer-to-nil values,
|
||||
// and delegates to the custom IsZero() implementation otherwise.
|
||||
func isZeroInterfaceCustom(v reflect.Value) (bool, error) {
|
||||
kind := v.Kind()
|
||||
|
||||
switch kind {
|
||||
case reflect.Chan, reflect.Func, reflect.Map, reflect.Pointer, reflect.Interface, reflect.Slice:
|
||||
if v.IsNil() {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Interface, reflect.Pointer:
|
||||
if elem := v.Elem(); elem.Kind() == reflect.Pointer && elem.IsNil() {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return v.Interface().(isZeroer).IsZero(), nil
|
||||
}
|
||||
|
||||
// isZeroPointerCustom returns true for nil values,
|
||||
// and delegates to the custom IsZero() implementation otherwise.
|
||||
func isZeroPointerCustom(v reflect.Value) (bool, error) {
|
||||
if v.IsNil() {
|
||||
return true, nil
|
||||
}
|
||||
return v.Interface().(isZeroer).IsZero(), nil
|
||||
}
|
||||
|
||||
// isZeroCustom delegates to the custom IsZero() implementation.
|
||||
func isZeroCustom(v reflect.Value) (bool, error) {
|
||||
return v.Interface().(isZeroer).IsZero(), nil
|
||||
}
|
||||
|
||||
// isZeroAddrCustom delegates to the custom IsZero() implementation of the addr of the value.
|
||||
func isZeroAddrCustom(v reflect.Value) (bool, error) {
|
||||
if !v.CanAddr() {
|
||||
// Temporarily box v so we can take the address.
|
||||
v2 := reflect.New(v.Type()).Elem()
|
||||
v2.Set(v)
|
||||
v = v2
|
||||
}
|
||||
return v.Addr().Interface().(isZeroer).IsZero(), nil
|
||||
}
|
||||
|
||||
// isZeroDefault calls reflect.Value#IsZero()
|
||||
func isZeroDefault(v reflect.Value) (bool, error) {
|
||||
if !v.IsValid() {
|
||||
// v is zero value
|
||||
return true, nil
|
||||
}
|
||||
return v.IsZero(), nil
|
||||
}
|
||||
|
||||
// isZeroFieldStruct is used to determine whether to omit toarray structs
|
||||
func isZeroFieldStruct(v reflect.Value) (bool, error) {
|
||||
structType, err := getEncodingStructType(v.Type())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(structType.fields) == 0, nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,8 +1,6 @@
|
|||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
//go:build go1.20
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
|
|
@ -67,8 +65,8 @@ func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v
|
|||
}
|
||||
|
||||
func getEncodeMapFunc(t reflect.Type) encodeFunc {
|
||||
kf, _ := getEncodeFunc(t.Key())
|
||||
ef, _ := getEncodeFunc(t.Elem())
|
||||
kf, _, _ := getEncodeFunc(t.Key())
|
||||
ef, _, _ := getEncodeFunc(t.Elem())
|
||||
if kf == nil || ef == nil {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -76,13 +74,13 @@ func getEncodeMapFunc(t reflect.Type) encodeFunc {
|
|||
kf: kf,
|
||||
ef: ef,
|
||||
kpool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
New: func() any {
|
||||
rk := reflect.New(t.Key()).Elem()
|
||||
return &rk
|
||||
},
|
||||
},
|
||||
vpool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
New: func() any {
|
||||
rv := reflect.New(t.Elem()).Elem()
|
||||
return &rv
|
||||
},
|
||||
|
|
|
|||
|
|
@ -1,60 +0,0 @@
|
|||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
//go:build !go1.20
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type mapKeyValueEncodeFunc struct {
|
||||
kf, ef encodeFunc
|
||||
}
|
||||
|
||||
func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
|
||||
if kvs == nil {
|
||||
for i, iter := 0, v.MapRange(); iter.Next(); i++ {
|
||||
if err := me.kf(e, em, iter.Key()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := me.ef(e, em, iter.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
initial := e.Len()
|
||||
for i, iter := 0, v.MapRange(); iter.Next(); i++ {
|
||||
offset := e.Len()
|
||||
if err := me.kf(e, em, iter.Key()); err != nil {
|
||||
return err
|
||||
}
|
||||
valueOffset := e.Len()
|
||||
if err := me.ef(e, em, iter.Value()); err != nil {
|
||||
return err
|
||||
}
|
||||
kvs[i] = keyValue{
|
||||
offset: offset - initial,
|
||||
valueOffset: valueOffset - initial,
|
||||
nextOffset: e.Len() - initial,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getEncodeMapFunc(t reflect.Type) encodeFunc {
|
||||
kf, _ := getEncodeFunc(t.Key())
|
||||
ef, _ := getEncodeFunc(t.Elem())
|
||||
if kf == nil || ef == nil {
|
||||
return nil
|
||||
}
|
||||
mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
|
||||
return mapEncodeFunc{
|
||||
e: mkv.encodeKeyValues,
|
||||
}.encode
|
||||
}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
//go:build go1.24
|
||||
|
||||
package cbor
|
||||
|
||||
var jsonStdlibSupportsOmitzero = true
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
//go:build !go1.24
|
||||
|
||||
package cbor
|
||||
|
||||
var jsonStdlibSupportsOmitzero = false
|
||||
|
|
@ -1,3 +1,6 @@
|
|||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
|
|
@ -45,6 +48,9 @@ func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
|
|||
}
|
||||
|
||||
// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
|
||||
//
|
||||
// Deprecated: No longer used by this codec; kept for compatibility
|
||||
// with user apps that directly call this function.
|
||||
func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
|
||||
if sv == nil {
|
||||
return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
|
||||
|
|
@ -52,6 +58,29 @@ func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
|
|||
|
||||
d := decoder{data: data, dm: defaultDecMode}
|
||||
|
||||
// Check well-formedness of CBOR data item.
|
||||
// SimpleValue.UnmarshalCBOR() is exported, so
|
||||
// the codec needs to support same behavior for:
|
||||
// - Unmarshal(data, *SimpleValue)
|
||||
// - SimpleValue.UnmarshalCBOR(data)
|
||||
err := d.wellformed(false, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sv.unmarshalCBOR(data)
|
||||
}
|
||||
|
||||
// unmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
|
||||
// This function assumes data is well-formed, and does not perform bounds checking.
|
||||
// This function is called by Unmarshal().
|
||||
func (sv *SimpleValue) unmarshalCBOR(data []byte) error {
|
||||
if sv == nil {
|
||||
return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
d := decoder{data: data, dm: defaultDecMode}
|
||||
|
||||
typ, ai, val := d.getHead()
|
||||
|
||||
if typ != cborTypePrimitives {
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ func NewDecoder(r io.Reader) *Decoder {
|
|||
}
|
||||
|
||||
// Decode reads CBOR value and decodes it into the value pointed to by v.
|
||||
func (dec *Decoder) Decode(v interface{}) error {
|
||||
func (dec *Decoder) Decode(v any) error {
|
||||
_, err := dec.readNext()
|
||||
if err != nil {
|
||||
// Return validation error or read error.
|
||||
|
|
@ -170,7 +170,7 @@ func NewEncoder(w io.Writer) *Encoder {
|
|||
}
|
||||
|
||||
// Encode writes the CBOR encoding of v.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
func (enc *Encoder) Encode(v any) error {
|
||||
if len(enc.indefTypes) > 0 && v != nil {
|
||||
indefType := enc.indefTypes[len(enc.indefTypes)-1]
|
||||
if indefType == cborTypeTextString {
|
||||
|
|
|
|||
|
|
@ -18,9 +18,11 @@ type field struct {
|
|||
typ reflect.Type
|
||||
ef encodeFunc
|
||||
ief isEmptyFunc
|
||||
izf isZeroFunc
|
||||
typInfo *typeInfo // used to decoder to reuse type info
|
||||
tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields)
|
||||
omitEmpty bool // used to skip empty field
|
||||
omitZero bool // used to skip zero field
|
||||
keyAsInt bool // used to encode/decode field name as int
|
||||
}
|
||||
|
||||
|
|
@ -157,7 +159,7 @@ func appendFields(
|
|||
f := t.Field(i)
|
||||
|
||||
ft := f.Type
|
||||
for ft.Kind() == reflect.Ptr {
|
||||
for ft.Kind() == reflect.Pointer {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
|
|
@ -165,9 +167,11 @@ func appendFields(
|
|||
continue
|
||||
}
|
||||
|
||||
cborTag := true
|
||||
tag := f.Tag.Get("cbor")
|
||||
if tag == "" {
|
||||
tag = f.Tag.Get("json")
|
||||
cborTag = false
|
||||
}
|
||||
if tag == "-" {
|
||||
continue
|
||||
|
|
@ -177,7 +181,7 @@ func appendFields(
|
|||
|
||||
// Parse field tag options
|
||||
var tagFieldName string
|
||||
var omitempty, keyasint bool
|
||||
var omitempty, omitzero, keyasint bool
|
||||
for j := 0; tag != ""; j++ {
|
||||
var token string
|
||||
idx := strings.IndexByte(tag, ',')
|
||||
|
|
@ -192,6 +196,10 @@ func appendFields(
|
|||
switch token {
|
||||
case "omitempty":
|
||||
omitempty = true
|
||||
case "omitzero":
|
||||
if cborTag || jsonStdlibSupportsOmitzero {
|
||||
omitzero = true
|
||||
}
|
||||
case "keyasint":
|
||||
keyasint = true
|
||||
}
|
||||
|
|
@ -213,6 +221,7 @@ func appendFields(
|
|||
idx: fIdx,
|
||||
typ: f.Type,
|
||||
omitEmpty: omitempty,
|
||||
omitZero: omitzero,
|
||||
keyAsInt: keyasint,
|
||||
tagged: tagged})
|
||||
} else {
|
||||
|
|
@ -230,8 +239,7 @@ func appendFields(
|
|||
// a nonexportable anonymous field of struct type.
|
||||
// Nonexportable anonymous field of struct type can contain exportable fields.
|
||||
func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
|
||||
exportable := f.PkgPath == ""
|
||||
return exportable || (f.Anonymous && fk == reflect.Struct)
|
||||
return f.IsExported() || (f.Anonymous && fk == reflect.Struct)
|
||||
}
|
||||
|
||||
type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)
|
||||
|
|
@ -244,7 +252,7 @@ func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv r
|
|||
fv = fv.Field(n)
|
||||
|
||||
if i < len(idx)-1 {
|
||||
if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
|
||||
if fv.Kind() == reflect.Pointer && fv.Type().Elem().Kind() == reflect.Struct {
|
||||
if fv.IsNil() {
|
||||
// Null pointer to embedded struct field
|
||||
fv, err = f(fv)
|
||||
|
|
|
|||
|
|
@ -1,3 +1,6 @@
|
|||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
|
|
@ -7,27 +10,54 @@ import (
|
|||
"sync"
|
||||
)
|
||||
|
||||
// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and
|
||||
// unmarshaling of tag content is subject to any encode and decode options that would apply to
|
||||
// enclosed data item if it were to appear outside of a tag.
|
||||
// Tag represents a tagged data item (CBOR major type 6), comprising a tag number and the unmarshaled tag content.
|
||||
// NOTE: The same encoding and decoding options that apply to untagged CBOR data items also applies to tag content
|
||||
// during encoding and decoding.
|
||||
type Tag struct {
|
||||
Number uint64
|
||||
Content interface{}
|
||||
Content any
|
||||
}
|
||||
|
||||
// RawTag represents CBOR tag data, including tag number and raw tag content.
|
||||
// RawTag implements Unmarshaler and Marshaler interfaces.
|
||||
// RawTag represents a tagged data item (CBOR major type 6), comprising a tag number and the raw tag content.
|
||||
// The raw tag content (enclosed data item) is a CBOR-encoded data item.
|
||||
// RawTag can be used to delay decoding a CBOR data item or precompute encoding a CBOR data item.
|
||||
type RawTag struct {
|
||||
Number uint64
|
||||
Content RawMessage
|
||||
}
|
||||
|
||||
// UnmarshalCBOR sets *t with tag number and raw tag content copied from data.
|
||||
// UnmarshalCBOR sets *t with the tag number and the raw tag content copied from data.
|
||||
//
|
||||
// Deprecated: No longer used by this codec; kept for compatibility
|
||||
// with user apps that directly call this function.
|
||||
func (t *RawTag) UnmarshalCBOR(data []byte) error {
|
||||
if t == nil {
|
||||
return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
d := decoder{data: data, dm: defaultDecMode}
|
||||
|
||||
// Check if data is a well-formed CBOR data item.
|
||||
// RawTag.UnmarshalCBOR() is exported, so
|
||||
// the codec needs to support same behavior for:
|
||||
// - Unmarshal(data, *RawTag)
|
||||
// - RawTag.UnmarshalCBOR(data)
|
||||
err := d.wellformed(false, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return t.unmarshalCBOR(data)
|
||||
}
|
||||
|
||||
// unmarshalCBOR sets *t with the tag number and the raw tag content copied from data.
|
||||
// This function assumes data is well-formed, and does not perform bounds checking.
|
||||
// This function is called by Unmarshal().
|
||||
func (t *RawTag) unmarshalCBOR(data []byte) error {
|
||||
if t == nil {
|
||||
return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
// Decoding CBOR null and undefined to cbor.RawTag is no-op.
|
||||
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
|
||||
return nil
|
||||
|
|
@ -193,7 +223,7 @@ func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64,
|
|||
if contentType == nil {
|
||||
return errors.New("cbor: cannot add nil content type to TagSet")
|
||||
}
|
||||
for contentType.Kind() == reflect.Ptr {
|
||||
for contentType.Kind() == reflect.Pointer {
|
||||
contentType = contentType.Elem()
|
||||
}
|
||||
tag, err := newTagItem(opts, contentType, num, nestedNum...)
|
||||
|
|
@ -216,7 +246,7 @@ func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64,
|
|||
|
||||
// Remove removes given tag content type from TagSet.
|
||||
func (t *syncTagSet) Remove(contentType reflect.Type) {
|
||||
for contentType.Kind() == reflect.Ptr {
|
||||
for contentType.Kind() == reflect.Pointer {
|
||||
contentType = contentType.Elem()
|
||||
}
|
||||
t.Lock()
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@
|
|||
package compiler
|
||||
|
||||
import (
|
||||
yaml "gopkg.in/yaml.v3"
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
)
|
||||
|
||||
// Context contains state of the compiler as it traverses a document.
|
||||
|
|
|
|||
|
|
@ -20,9 +20,9 @@ import (
|
|||
"os/exec"
|
||||
"strings"
|
||||
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
yaml "gopkg.in/yaml.v3"
|
||||
|
||||
extensions "github.com/google/gnostic-models/extensions"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ import (
|
|||
"sort"
|
||||
"strconv"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
|
||||
"github.com/google/gnostic-models/jsonschema"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
|
||||
yaml "gopkg.in/yaml.v3"
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
)
|
||||
|
||||
var verboseReader = false
|
||||
|
|
|
|||
|
|
@ -16,7 +16,7 @@
|
|||
// of JSON Schemas.
|
||||
package jsonschema
|
||||
|
||||
import "gopkg.in/yaml.v3"
|
||||
import "go.yaml.in/yaml/v3"
|
||||
|
||||
// The Schema struct models a JSON Schema and, because schemas are
|
||||
// defined hierarchically, contains many references to itself.
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ import (
|
|||
"io/ioutil"
|
||||
"strconv"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
)
|
||||
|
||||
// This is a global map of all known Schemas.
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@ package jsonschema
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
)
|
||||
|
||||
const indentation = " "
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ import (
|
|||
"regexp"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
|
||||
"github.com/google/gnostic-models/compiler"
|
||||
)
|
||||
|
|
@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem")
|
||||
message := "contains an invalid AdditionalPropertiesItem"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -2543,7 +2543,7 @@ func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyPara
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid NonBodyParameter")
|
||||
message := "contains an invalid NonBodyParameter"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -3271,7 +3271,7 @@ func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error)
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid Parameter")
|
||||
message := "contains an invalid Parameter"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -3345,7 +3345,7 @@ func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersIte
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid ParametersItem")
|
||||
message := "contains an invalid ParametersItem"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -4561,7 +4561,7 @@ func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue,
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid ResponseValue")
|
||||
message := "contains an invalid ResponseValue"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -5030,7 +5030,7 @@ func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid SchemaItem")
|
||||
message := "contains an invalid SchemaItem"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -5160,7 +5160,7 @@ func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*Secu
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid SecurityDefinitionsItem")
|
||||
message := "contains an invalid SecurityDefinitionsItem"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -6930,7 +6930,7 @@ func (m *BodyParameter) ToRawInfo() *yaml.Node {
|
|||
// always include this required field.
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
|
||||
if m.Required != false {
|
||||
if m.Required {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
|
||||
}
|
||||
|
|
@ -7149,7 +7149,7 @@ func (m *FileSchema) ToRawInfo() *yaml.Node {
|
|||
// always include this required field.
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
|
||||
if m.ReadOnly != false {
|
||||
if m.ReadOnly {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
|
||||
}
|
||||
|
|
@ -7176,7 +7176,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
if m == nil {
|
||||
return info
|
||||
}
|
||||
if m.Required != false {
|
||||
if m.Required {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
|
||||
}
|
||||
|
|
@ -7192,7 +7192,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
|
||||
}
|
||||
if m.AllowEmptyValue != false {
|
||||
if m.AllowEmptyValue {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
|
||||
}
|
||||
|
|
@ -7220,7 +7220,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
|
||||
}
|
||||
if m.ExclusiveMaximum != false {
|
||||
if m.ExclusiveMaximum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
|
||||
}
|
||||
|
|
@ -7228,7 +7228,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
|
||||
}
|
||||
if m.ExclusiveMinimum != false {
|
||||
if m.ExclusiveMinimum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
|
||||
}
|
||||
|
|
@ -7252,7 +7252,7 @@ func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
|
||||
}
|
||||
if m.UniqueItems != false {
|
||||
if m.UniqueItems {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
|
||||
}
|
||||
|
|
@ -7306,7 +7306,7 @@ func (m *Header) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
|
||||
}
|
||||
if m.ExclusiveMaximum != false {
|
||||
if m.ExclusiveMaximum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
|
||||
}
|
||||
|
|
@ -7314,7 +7314,7 @@ func (m *Header) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
|
||||
}
|
||||
if m.ExclusiveMinimum != false {
|
||||
if m.ExclusiveMinimum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
|
||||
}
|
||||
|
|
@ -7338,7 +7338,7 @@ func (m *Header) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
|
||||
}
|
||||
if m.UniqueItems != false {
|
||||
if m.UniqueItems {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
|
||||
}
|
||||
|
|
@ -7373,7 +7373,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
if m == nil {
|
||||
return info
|
||||
}
|
||||
if m.Required != false {
|
||||
if m.Required {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
|
||||
}
|
||||
|
|
@ -7413,7 +7413,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
|
||||
}
|
||||
if m.ExclusiveMaximum != false {
|
||||
if m.ExclusiveMaximum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
|
||||
}
|
||||
|
|
@ -7421,7 +7421,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
|
||||
}
|
||||
if m.ExclusiveMinimum != false {
|
||||
if m.ExclusiveMinimum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
|
||||
}
|
||||
|
|
@ -7445,7 +7445,7 @@ func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
|
||||
}
|
||||
if m.UniqueItems != false {
|
||||
if m.UniqueItems {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
|
||||
}
|
||||
|
|
@ -7940,7 +7940,7 @@ func (m *Operation) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes"))
|
||||
info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes))
|
||||
}
|
||||
if m.Deprecated != false {
|
||||
if m.Deprecated {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated))
|
||||
}
|
||||
|
|
@ -8110,7 +8110,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
|
||||
}
|
||||
if m.ExclusiveMaximum != false {
|
||||
if m.ExclusiveMaximum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
|
||||
}
|
||||
|
|
@ -8118,7 +8118,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
|
||||
}
|
||||
if m.ExclusiveMinimum != false {
|
||||
if m.ExclusiveMinimum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
|
||||
}
|
||||
|
|
@ -8142,7 +8142,7 @@ func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
|
||||
}
|
||||
if m.UniqueItems != false {
|
||||
if m.UniqueItems {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
|
||||
}
|
||||
|
|
@ -8218,7 +8218,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
|
||||
}
|
||||
if m.ExclusiveMaximum != false {
|
||||
if m.ExclusiveMaximum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
|
||||
}
|
||||
|
|
@ -8226,7 +8226,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
|
||||
}
|
||||
if m.ExclusiveMinimum != false {
|
||||
if m.ExclusiveMinimum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
|
||||
}
|
||||
|
|
@ -8250,7 +8250,7 @@ func (m *PrimitivesItems) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
|
||||
}
|
||||
if m.UniqueItems != false {
|
||||
if m.UniqueItems {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
|
||||
}
|
||||
|
|
@ -8296,7 +8296,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
if m == nil {
|
||||
return info
|
||||
}
|
||||
if m.Required != false {
|
||||
if m.Required {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
|
||||
}
|
||||
|
|
@ -8312,7 +8312,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
|
||||
}
|
||||
if m.AllowEmptyValue != false {
|
||||
if m.AllowEmptyValue {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
|
||||
}
|
||||
|
|
@ -8340,7 +8340,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
|
||||
}
|
||||
if m.ExclusiveMaximum != false {
|
||||
if m.ExclusiveMaximum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
|
||||
}
|
||||
|
|
@ -8348,7 +8348,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
|
||||
}
|
||||
if m.ExclusiveMinimum != false {
|
||||
if m.ExclusiveMinimum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
|
||||
}
|
||||
|
|
@ -8372,7 +8372,7 @@ func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
|
||||
}
|
||||
if m.UniqueItems != false {
|
||||
if m.UniqueItems {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
|
||||
}
|
||||
|
|
@ -8514,7 +8514,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
|
||||
}
|
||||
if m.ExclusiveMaximum != false {
|
||||
if m.ExclusiveMaximum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
|
||||
}
|
||||
|
|
@ -8522,7 +8522,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
|
||||
}
|
||||
if m.ExclusiveMinimum != false {
|
||||
if m.ExclusiveMinimum {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
|
||||
}
|
||||
|
|
@ -8546,7 +8546,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
|
||||
}
|
||||
if m.UniqueItems != false {
|
||||
if m.UniqueItems {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
|
||||
}
|
||||
|
|
@ -8610,7 +8610,7 @@ func (m *Schema) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator))
|
||||
}
|
||||
if m.ReadOnly != false {
|
||||
if m.ReadOnly {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
|
||||
}
|
||||
|
|
@ -8796,11 +8796,11 @@ func (m *Xml) ToRawInfo() *yaml.Node {
|
|||
info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix))
|
||||
}
|
||||
if m.Attribute != false {
|
||||
if m.Attribute {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute))
|
||||
}
|
||||
if m.Wrapped != false {
|
||||
if m.Wrapped {
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped"))
|
||||
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@
|
|||
package openapi_v2
|
||||
|
||||
import (
|
||||
"gopkg.in/yaml.v3"
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
|
||||
"github.com/google/gnostic-models/compiler"
|
||||
)
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ import (
|
|||
"regexp"
|
||||
"strings"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
|
||||
"github.com/google/gnostic-models/compiler"
|
||||
)
|
||||
|
|
@ -60,7 +60,7 @@ func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*Add
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem")
|
||||
message := "contains an invalid AdditionalPropertiesItem"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -113,7 +113,7 @@ func NewAnyOrExpression(in *yaml.Node, context *compiler.Context) (*AnyOrExpress
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid AnyOrExpression")
|
||||
message := "contains an invalid AnyOrExpression"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -227,7 +227,7 @@ func NewCallbackOrReference(in *yaml.Node, context *compiler.Context) (*Callback
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid CallbackOrReference")
|
||||
message := "contains an invalid CallbackOrReference"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -979,7 +979,7 @@ func NewExampleOrReference(in *yaml.Node, context *compiler.Context) (*ExampleOr
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid ExampleOrReference")
|
||||
message := "contains an invalid ExampleOrReference"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -1320,7 +1320,7 @@ func NewHeaderOrReference(in *yaml.Node, context *compiler.Context) (*HeaderOrRe
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid HeaderOrReference")
|
||||
message := "contains an invalid HeaderOrReference"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -1713,7 +1713,7 @@ func NewLinkOrReference(in *yaml.Node, context *compiler.Context) (*LinkOrRefere
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid LinkOrReference")
|
||||
message := "contains an invalid LinkOrReference"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -3090,7 +3090,7 @@ func NewParameterOrReference(in *yaml.Node, context *compiler.Context) (*Paramet
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid ParameterOrReference")
|
||||
message := "contains an invalid ParameterOrReference"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -3606,7 +3606,7 @@ func NewRequestBodyOrReference(in *yaml.Node, context *compiler.Context) (*Reque
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid RequestBodyOrReference")
|
||||
message := "contains an invalid RequestBodyOrReference"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -3743,7 +3743,7 @@ func NewResponseOrReference(in *yaml.Node, context *compiler.Context) (*Response
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid ResponseOrReference")
|
||||
message := "contains an invalid ResponseOrReference"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -4310,7 +4310,7 @@ func NewSchemaOrReference(in *yaml.Node, context *compiler.Context) (*SchemaOrRe
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid SchemaOrReference")
|
||||
message := "contains an invalid SchemaOrReference"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
@ -4543,7 +4543,7 @@ func NewSecuritySchemeOrReference(in *yaml.Node, context *compiler.Context) (*Se
|
|||
// since the oneof matched one of its possibilities, discard any matching errors
|
||||
errors = make([]error, 0)
|
||||
} else {
|
||||
message := fmt.Sprintf("contains an invalid SecuritySchemeOrReference")
|
||||
message := "contains an invalid SecuritySchemeOrReference"
|
||||
err := compiler.NewError(context, message)
|
||||
errors = []error{err}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@
|
|||
package openapi_v3
|
||||
|
||||
import (
|
||||
"gopkg.in/yaml.v3"
|
||||
yaml "go.yaml.in/yaml/v3"
|
||||
|
||||
"github.com/google/gnostic-models/compiler"
|
||||
)
|
||||
|
|
|
|||
35
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
generated
vendored
Normal file
35
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
generated
vendored
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
go_library(
|
||||
name = "httprule",
|
||||
srcs = [
|
||||
"compile.go",
|
||||
"parse.go",
|
||||
"types.go",
|
||||
],
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
|
||||
deps = ["//utilities"],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "httprule_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"compile_test.go",
|
||||
"parse_test.go",
|
||||
"types_test.go",
|
||||
],
|
||||
embed = [":httprule"],
|
||||
deps = [
|
||||
"//utilities",
|
||||
"@org_golang_google_grpc//grpclog",
|
||||
],
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "go_default_library",
|
||||
actual = ":httprule",
|
||||
visibility = ["//:__subpackages__"],
|
||||
)
|
||||
97
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
generated
vendored
Normal file
97
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
generated
vendored
Normal file
|
|
@ -0,0 +1,97 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
go_library(
|
||||
name = "runtime",
|
||||
srcs = [
|
||||
"context.go",
|
||||
"convert.go",
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"fieldmask.go",
|
||||
"handler.go",
|
||||
"marshal_httpbodyproto.go",
|
||||
"marshal_json.go",
|
||||
"marshal_jsonpb.go",
|
||||
"marshal_proto.go",
|
||||
"marshaler.go",
|
||||
"marshaler_registry.go",
|
||||
"mux.go",
|
||||
"pattern.go",
|
||||
"proto2_convert.go",
|
||||
"query.go",
|
||||
],
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
|
||||
deps = [
|
||||
"//internal/httprule",
|
||||
"//utilities",
|
||||
"@org_golang_google_genproto_googleapis_api//httpbody",
|
||||
"@org_golang_google_grpc//codes",
|
||||
"@org_golang_google_grpc//grpclog",
|
||||
"@org_golang_google_grpc//health/grpc_health_v1",
|
||||
"@org_golang_google_grpc//metadata",
|
||||
"@org_golang_google_grpc//status",
|
||||
"@org_golang_google_protobuf//encoding/protojson",
|
||||
"@org_golang_google_protobuf//proto",
|
||||
"@org_golang_google_protobuf//reflect/protoreflect",
|
||||
"@org_golang_google_protobuf//reflect/protoregistry",
|
||||
"@org_golang_google_protobuf//types/known/durationpb",
|
||||
"@org_golang_google_protobuf//types/known/fieldmaskpb",
|
||||
"@org_golang_google_protobuf//types/known/structpb",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb",
|
||||
"@org_golang_google_protobuf//types/known/wrapperspb",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "runtime_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"context_test.go",
|
||||
"convert_test.go",
|
||||
"errors_test.go",
|
||||
"fieldmask_test.go",
|
||||
"handler_test.go",
|
||||
"marshal_httpbodyproto_test.go",
|
||||
"marshal_json_test.go",
|
||||
"marshal_jsonpb_test.go",
|
||||
"marshal_proto_test.go",
|
||||
"marshaler_registry_test.go",
|
||||
"mux_internal_test.go",
|
||||
"mux_test.go",
|
||||
"pattern_test.go",
|
||||
"query_fuzz_test.go",
|
||||
"query_test.go",
|
||||
],
|
||||
embed = [":runtime"],
|
||||
deps = [
|
||||
"//runtime/internal/examplepb",
|
||||
"//utilities",
|
||||
"@com_github_google_go_cmp//cmp",
|
||||
"@com_github_google_go_cmp//cmp/cmpopts",
|
||||
"@org_golang_google_genproto_googleapis_api//httpbody",
|
||||
"@org_golang_google_genproto_googleapis_rpc//errdetails",
|
||||
"@org_golang_google_genproto_googleapis_rpc//status",
|
||||
"@org_golang_google_grpc//:grpc",
|
||||
"@org_golang_google_grpc//codes",
|
||||
"@org_golang_google_grpc//health/grpc_health_v1",
|
||||
"@org_golang_google_grpc//metadata",
|
||||
"@org_golang_google_grpc//status",
|
||||
"@org_golang_google_protobuf//encoding/protojson",
|
||||
"@org_golang_google_protobuf//proto",
|
||||
"@org_golang_google_protobuf//testing/protocmp",
|
||||
"@org_golang_google_protobuf//types/known/durationpb",
|
||||
"@org_golang_google_protobuf//types/known/emptypb",
|
||||
"@org_golang_google_protobuf//types/known/fieldmaskpb",
|
||||
"@org_golang_google_protobuf//types/known/structpb",
|
||||
"@org_golang_google_protobuf//types/known/timestamppb",
|
||||
"@org_golang_google_protobuf//types/known/wrapperspb",
|
||||
],
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "go_default_library",
|
||||
actual = ":runtime",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
31
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
generated
vendored
Normal file
31
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
generated
vendored
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
go_library(
|
||||
name = "utilities",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"pattern.go",
|
||||
"readerfactory.go",
|
||||
"string_array_flag.go",
|
||||
"trie.go",
|
||||
],
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "utilities_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"string_array_flag_test.go",
|
||||
"trie_test.go",
|
||||
],
|
||||
deps = [":utilities"],
|
||||
)
|
||||
|
||||
alias(
|
||||
name = "go_default_library",
|
||||
actual = ":utilities",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
|
@ -6,10 +6,12 @@ import (
|
|||
)
|
||||
|
||||
type safeType struct {
|
||||
reflect.Type
|
||||
cfg *frozenConfig
|
||||
Type reflect.Type
|
||||
cfg *frozenConfig
|
||||
}
|
||||
|
||||
var _ Type = &safeType{}
|
||||
|
||||
func (type2 *safeType) New() interface{} {
|
||||
return reflect.New(type2.Type).Interface()
|
||||
}
|
||||
|
|
@ -18,6 +20,22 @@ func (type2 *safeType) UnsafeNew() unsafe.Pointer {
|
|||
panic("does not support unsafe operation")
|
||||
}
|
||||
|
||||
func (type2 *safeType) Kind() reflect.Kind {
|
||||
return type2.Type.Kind()
|
||||
}
|
||||
|
||||
func (type2 *safeType) Len() int {
|
||||
return type2.Type.Len()
|
||||
}
|
||||
|
||||
func (type2 *safeType) NumField() int {
|
||||
return type2.Type.NumField()
|
||||
}
|
||||
|
||||
func (type2 *safeType) String() string {
|
||||
return type2.Type.String()
|
||||
}
|
||||
|
||||
func (type2 *safeType) Elem() Type {
|
||||
return type2.cfg.Type2(type2.Type.Elem())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2013, Patrick Mezard
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
The names of its contributors may not be used to endorse or promote
|
||||
products derived from this software without specific prior written
|
||||
permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
|
@ -0,0 +1,772 @@
|
|||
// Package difflib is a partial port of Python difflib module.
|
||||
//
|
||||
// It provides tools to compare sequences of strings and generate textual diffs.
|
||||
//
|
||||
// The following class and functions have been ported:
|
||||
//
|
||||
// - SequenceMatcher
|
||||
//
|
||||
// - unified_diff
|
||||
//
|
||||
// - context_diff
|
||||
//
|
||||
// Getting unified diffs was the main goal of the port. Keep in mind this code
|
||||
// is mostly suitable to output text differences in a human friendly way, there
|
||||
// are no guarantees generated diffs are consumable by patch(1).
|
||||
package difflib
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func calculateRatio(matches, length int) float64 {
|
||||
if length > 0 {
|
||||
return 2.0 * float64(matches) / float64(length)
|
||||
}
|
||||
return 1.0
|
||||
}
|
||||
|
||||
type Match struct {
|
||||
A int
|
||||
B int
|
||||
Size int
|
||||
}
|
||||
|
||||
type OpCode struct {
|
||||
Tag byte
|
||||
I1 int
|
||||
I2 int
|
||||
J1 int
|
||||
J2 int
|
||||
}
|
||||
|
||||
// SequenceMatcher compares sequence of strings. The basic
|
||||
// algorithm predates, and is a little fancier than, an algorithm
|
||||
// published in the late 1980's by Ratcliff and Obershelp under the
|
||||
// hyperbolic name "gestalt pattern matching". The basic idea is to find
|
||||
// the longest contiguous matching subsequence that contains no "junk"
|
||||
// elements (R-O doesn't address junk). The same idea is then applied
|
||||
// recursively to the pieces of the sequences to the left and to the right
|
||||
// of the matching subsequence. This does not yield minimal edit
|
||||
// sequences, but does tend to yield matches that "look right" to people.
|
||||
//
|
||||
// SequenceMatcher tries to compute a "human-friendly diff" between two
|
||||
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
|
||||
// longest *contiguous* & junk-free matching subsequence. That's what
|
||||
// catches peoples' eyes. The Windows(tm) windiff has another interesting
|
||||
// notion, pairing up elements that appear uniquely in each sequence.
|
||||
// That, and the method here, appear to yield more intuitive difference
|
||||
// reports than does diff. This method appears to be the least vulnerable
|
||||
// to synching up on blocks of "junk lines", though (like blank lines in
|
||||
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
|
||||
// because this is the only method of the 3 that has a *concept* of
|
||||
// "junk" <wink>.
|
||||
//
|
||||
// Timing: Basic R-O is cubic time worst case and quadratic time expected
|
||||
// case. SequenceMatcher is quadratic time for the worst case and has
|
||||
// expected-case behavior dependent in a complicated way on how many
|
||||
// elements the sequences have in common; best case time is linear.
|
||||
type SequenceMatcher struct {
|
||||
a []string
|
||||
b []string
|
||||
b2j map[string][]int
|
||||
IsJunk func(string) bool
|
||||
autoJunk bool
|
||||
bJunk map[string]struct{}
|
||||
matchingBlocks []Match
|
||||
fullBCount map[string]int
|
||||
bPopular map[string]struct{}
|
||||
opCodes []OpCode
|
||||
}
|
||||
|
||||
func NewMatcher(a, b []string) *SequenceMatcher {
|
||||
m := SequenceMatcher{autoJunk: true}
|
||||
m.SetSeqs(a, b)
|
||||
return &m
|
||||
}
|
||||
|
||||
func NewMatcherWithJunk(a, b []string, autoJunk bool,
|
||||
isJunk func(string) bool) *SequenceMatcher {
|
||||
|
||||
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
|
||||
m.SetSeqs(a, b)
|
||||
return &m
|
||||
}
|
||||
|
||||
// Set two sequences to be compared.
|
||||
func (m *SequenceMatcher) SetSeqs(a, b []string) {
|
||||
m.SetSeq1(a)
|
||||
m.SetSeq2(b)
|
||||
}
|
||||
|
||||
// Set the first sequence to be compared. The second sequence to be compared is
|
||||
// not changed.
|
||||
//
|
||||
// SequenceMatcher computes and caches detailed information about the second
|
||||
// sequence, so if you want to compare one sequence S against many sequences,
|
||||
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
|
||||
// sequences.
|
||||
//
|
||||
// See also SetSeqs() and SetSeq2().
|
||||
func (m *SequenceMatcher) SetSeq1(a []string) {
|
||||
if &a == &m.a {
|
||||
return
|
||||
}
|
||||
m.a = a
|
||||
m.matchingBlocks = nil
|
||||
m.opCodes = nil
|
||||
}
|
||||
|
||||
// Set the second sequence to be compared. The first sequence to be compared is
|
||||
// not changed.
|
||||
func (m *SequenceMatcher) SetSeq2(b []string) {
|
||||
if &b == &m.b {
|
||||
return
|
||||
}
|
||||
m.b = b
|
||||
m.matchingBlocks = nil
|
||||
m.opCodes = nil
|
||||
m.fullBCount = nil
|
||||
m.chainB()
|
||||
}
|
||||
|
||||
func (m *SequenceMatcher) chainB() {
|
||||
// Populate line -> index mapping
|
||||
b2j := map[string][]int{}
|
||||
for i, s := range m.b {
|
||||
indices := b2j[s]
|
||||
indices = append(indices, i)
|
||||
b2j[s] = indices
|
||||
}
|
||||
|
||||
// Purge junk elements
|
||||
m.bJunk = map[string]struct{}{}
|
||||
if m.IsJunk != nil {
|
||||
junk := m.bJunk
|
||||
for s, _ := range b2j {
|
||||
if m.IsJunk(s) {
|
||||
junk[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for s, _ := range junk {
|
||||
delete(b2j, s)
|
||||
}
|
||||
}
|
||||
|
||||
// Purge remaining popular elements
|
||||
popular := map[string]struct{}{}
|
||||
n := len(m.b)
|
||||
if m.autoJunk && n >= 200 {
|
||||
ntest := n/100 + 1
|
||||
for s, indices := range b2j {
|
||||
if len(indices) > ntest {
|
||||
popular[s] = struct{}{}
|
||||
}
|
||||
}
|
||||
for s, _ := range popular {
|
||||
delete(b2j, s)
|
||||
}
|
||||
}
|
||||
m.bPopular = popular
|
||||
m.b2j = b2j
|
||||
}
|
||||
|
||||
func (m *SequenceMatcher) isBJunk(s string) bool {
|
||||
_, ok := m.bJunk[s]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
|
||||
//
|
||||
// If IsJunk is not defined:
|
||||
//
|
||||
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
|
||||
// alo <= i <= i+k <= ahi
|
||||
// blo <= j <= j+k <= bhi
|
||||
// and for all (i',j',k') meeting those conditions,
|
||||
// k >= k'
|
||||
// i <= i'
|
||||
// and if i == i', j <= j'
|
||||
//
|
||||
// In other words, of all maximal matching blocks, return one that
|
||||
// starts earliest in a, and of all those maximal matching blocks that
|
||||
// start earliest in a, return the one that starts earliest in b.
|
||||
//
|
||||
// If IsJunk is defined, first the longest matching block is
|
||||
// determined as above, but with the additional restriction that no
|
||||
// junk element appears in the block. Then that block is extended as
|
||||
// far as possible by matching (only) junk elements on both sides. So
|
||||
// the resulting block never matches on junk except as identical junk
|
||||
// happens to be adjacent to an "interesting" match.
|
||||
//
|
||||
// If no blocks match, return (alo, blo, 0).
|
||||
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
|
||||
// CAUTION: stripping common prefix or suffix would be incorrect.
|
||||
// E.g.,
|
||||
// ab
|
||||
// acab
|
||||
// Longest matching block is "ab", but if common prefix is
|
||||
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
|
||||
// strip, so ends up claiming that ab is changed to acab by
|
||||
// inserting "ca" in the middle. That's minimal but unintuitive:
|
||||
// "it's obvious" that someone inserted "ac" at the front.
|
||||
// Windiff ends up at the same place as diff, but by pairing up
|
||||
// the unique 'b's and then matching the first two 'a's.
|
||||
besti, bestj, bestsize := alo, blo, 0
|
||||
|
||||
// find longest junk-free match
|
||||
// during an iteration of the loop, j2len[j] = length of longest
|
||||
// junk-free match ending with a[i-1] and b[j]
|
||||
j2len := map[int]int{}
|
||||
for i := alo; i != ahi; i++ {
|
||||
// look at all instances of a[i] in b; note that because
|
||||
// b2j has no junk keys, the loop is skipped if a[i] is junk
|
||||
newj2len := map[int]int{}
|
||||
for _, j := range m.b2j[m.a[i]] {
|
||||
// a[i] matches b[j]
|
||||
if j < blo {
|
||||
continue
|
||||
}
|
||||
if j >= bhi {
|
||||
break
|
||||
}
|
||||
k := j2len[j-1] + 1
|
||||
newj2len[j] = k
|
||||
if k > bestsize {
|
||||
besti, bestj, bestsize = i-k+1, j-k+1, k
|
||||
}
|
||||
}
|
||||
j2len = newj2len
|
||||
}
|
||||
|
||||
// Extend the best by non-junk elements on each end. In particular,
|
||||
// "popular" non-junk elements aren't in b2j, which greatly speeds
|
||||
// the inner loop above, but also means "the best" match so far
|
||||
// doesn't contain any junk *or* popular non-junk elements.
|
||||
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
|
||||
m.a[besti-1] == m.b[bestj-1] {
|
||||
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||
}
|
||||
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||
!m.isBJunk(m.b[bestj+bestsize]) &&
|
||||
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||
bestsize += 1
|
||||
}
|
||||
|
||||
// Now that we have a wholly interesting match (albeit possibly
|
||||
// empty!), we may as well suck up the matching junk on each
|
||||
// side of it too. Can't think of a good reason not to, and it
|
||||
// saves post-processing the (possibly considerable) expense of
|
||||
// figuring out what to do with it. In the case of an empty
|
||||
// interesting match, this is clearly the right thing to do,
|
||||
// because no other kind of match is possible in the regions.
|
||||
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
|
||||
m.a[besti-1] == m.b[bestj-1] {
|
||||
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||
}
|
||||
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||
m.isBJunk(m.b[bestj+bestsize]) &&
|
||||
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||
bestsize += 1
|
||||
}
|
||||
|
||||
return Match{A: besti, B: bestj, Size: bestsize}
|
||||
}
|
||||
|
||||
// Return list of triples describing matching subsequences.
|
||||
//
|
||||
// Each triple is of the form (i, j, n), and means that
|
||||
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
|
||||
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
|
||||
// adjacent triples in the list, and the second is not the last triple in the
|
||||
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
|
||||
// adjacent equal blocks.
|
||||
//
|
||||
// The last triple is a dummy, (len(a), len(b), 0), and is the only
|
||||
// triple with n==0.
|
||||
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
|
||||
if m.matchingBlocks != nil {
|
||||
return m.matchingBlocks
|
||||
}
|
||||
|
||||
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
|
||||
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
|
||||
match := m.findLongestMatch(alo, ahi, blo, bhi)
|
||||
i, j, k := match.A, match.B, match.Size
|
||||
if match.Size > 0 {
|
||||
if alo < i && blo < j {
|
||||
matched = matchBlocks(alo, i, blo, j, matched)
|
||||
}
|
||||
matched = append(matched, match)
|
||||
if i+k < ahi && j+k < bhi {
|
||||
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
|
||||
}
|
||||
}
|
||||
return matched
|
||||
}
|
||||
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
|
||||
|
||||
// It's possible that we have adjacent equal blocks in the
|
||||
// matching_blocks list now.
|
||||
nonAdjacent := []Match{}
|
||||
i1, j1, k1 := 0, 0, 0
|
||||
for _, b := range matched {
|
||||
// Is this block adjacent to i1, j1, k1?
|
||||
i2, j2, k2 := b.A, b.B, b.Size
|
||||
if i1+k1 == i2 && j1+k1 == j2 {
|
||||
// Yes, so collapse them -- this just increases the length of
|
||||
// the first block by the length of the second, and the first
|
||||
// block so lengthened remains the block to compare against.
|
||||
k1 += k2
|
||||
} else {
|
||||
// Not adjacent. Remember the first block (k1==0 means it's
|
||||
// the dummy we started with), and make the second block the
|
||||
// new block to compare against.
|
||||
if k1 > 0 {
|
||||
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||
}
|
||||
i1, j1, k1 = i2, j2, k2
|
||||
}
|
||||
}
|
||||
if k1 > 0 {
|
||||
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||
}
|
||||
|
||||
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
|
||||
m.matchingBlocks = nonAdjacent
|
||||
return m.matchingBlocks
|
||||
}
|
||||
|
||||
// Return list of 5-tuples describing how to turn a into b.
|
||||
//
|
||||
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
|
||||
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
|
||||
// tuple preceding it, and likewise for j1 == the previous j2.
|
||||
//
|
||||
// The tags are characters, with these meanings:
|
||||
//
|
||||
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
|
||||
//
|
||||
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
|
||||
//
|
||||
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
|
||||
//
|
||||
// 'e' (equal): a[i1:i2] == b[j1:j2]
|
||||
func (m *SequenceMatcher) GetOpCodes() []OpCode {
|
||||
if m.opCodes != nil {
|
||||
return m.opCodes
|
||||
}
|
||||
i, j := 0, 0
|
||||
matching := m.GetMatchingBlocks()
|
||||
opCodes := make([]OpCode, 0, len(matching))
|
||||
for _, m := range matching {
|
||||
// invariant: we've pumped out correct diffs to change
|
||||
// a[:i] into b[:j], and the next matching block is
|
||||
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
|
||||
// out a diff to change a[i:ai] into b[j:bj], pump out
|
||||
// the matching block, and move (i,j) beyond the match
|
||||
ai, bj, size := m.A, m.B, m.Size
|
||||
tag := byte(0)
|
||||
if i < ai && j < bj {
|
||||
tag = 'r'
|
||||
} else if i < ai {
|
||||
tag = 'd'
|
||||
} else if j < bj {
|
||||
tag = 'i'
|
||||
}
|
||||
if tag > 0 {
|
||||
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
|
||||
}
|
||||
i, j = ai+size, bj+size
|
||||
// the list of matching blocks is terminated by a
|
||||
// sentinel with size 0
|
||||
if size > 0 {
|
||||
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
|
||||
}
|
||||
}
|
||||
m.opCodes = opCodes
|
||||
return m.opCodes
|
||||
}
|
||||
|
||||
// Isolate change clusters by eliminating ranges with no changes.
|
||||
//
|
||||
// Return a generator of groups with up to n lines of context.
|
||||
// Each group is in the same format as returned by GetOpCodes().
|
||||
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
|
||||
if n < 0 {
|
||||
n = 3
|
||||
}
|
||||
codes := m.GetOpCodes()
|
||||
if len(codes) == 0 {
|
||||
codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
|
||||
}
|
||||
// Fixup leading and trailing groups if they show no changes.
|
||||
if codes[0].Tag == 'e' {
|
||||
c := codes[0]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
|
||||
}
|
||||
if codes[len(codes)-1].Tag == 'e' {
|
||||
c := codes[len(codes)-1]
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
|
||||
}
|
||||
nn := n + n
|
||||
groups := [][]OpCode{}
|
||||
group := []OpCode{}
|
||||
for _, c := range codes {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
// End the current group and start a new one whenever
|
||||
// there is a large range with no changes.
|
||||
if c.Tag == 'e' && i2-i1 > nn {
|
||||
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
|
||||
j1, min(j2, j1+n)})
|
||||
groups = append(groups, group)
|
||||
group = []OpCode{}
|
||||
i1, j1 = max(i1, i2-n), max(j1, j2-n)
|
||||
}
|
||||
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
|
||||
}
|
||||
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
|
||||
groups = append(groups, group)
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// Return a measure of the sequences' similarity (float in [0,1]).
|
||||
//
|
||||
// Where T is the total number of elements in both sequences, and
|
||||
// M is the number of matches, this is 2.0*M / T.
|
||||
// Note that this is 1 if the sequences are identical, and 0 if
|
||||
// they have nothing in common.
|
||||
//
|
||||
// .Ratio() is expensive to compute if you haven't already computed
|
||||
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
|
||||
// want to try .QuickRatio() or .RealQuickRation() first to get an
|
||||
// upper bound.
|
||||
func (m *SequenceMatcher) Ratio() float64 {
|
||||
matches := 0
|
||||
for _, m := range m.GetMatchingBlocks() {
|
||||
matches += m.Size
|
||||
}
|
||||
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||
}
|
||||
|
||||
// Return an upper bound on ratio() relatively quickly.
|
||||
//
|
||||
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||
// is faster to compute.
|
||||
func (m *SequenceMatcher) QuickRatio() float64 {
|
||||
// viewing a and b as multisets, set matches to the cardinality
|
||||
// of their intersection; this counts the number of matches
|
||||
// without regard to order, so is clearly an upper bound
|
||||
if m.fullBCount == nil {
|
||||
m.fullBCount = map[string]int{}
|
||||
for _, s := range m.b {
|
||||
m.fullBCount[s] = m.fullBCount[s] + 1
|
||||
}
|
||||
}
|
||||
|
||||
// avail[x] is the number of times x appears in 'b' less the
|
||||
// number of times we've seen it in 'a' so far ... kinda
|
||||
avail := map[string]int{}
|
||||
matches := 0
|
||||
for _, s := range m.a {
|
||||
n, ok := avail[s]
|
||||
if !ok {
|
||||
n = m.fullBCount[s]
|
||||
}
|
||||
avail[s] = n - 1
|
||||
if n > 0 {
|
||||
matches += 1
|
||||
}
|
||||
}
|
||||
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||
}
|
||||
|
||||
// Return an upper bound on ratio() very quickly.
|
||||
//
|
||||
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||
// is faster to compute than either .Ratio() or .QuickRatio().
|
||||
func (m *SequenceMatcher) RealQuickRatio() float64 {
|
||||
la, lb := len(m.a), len(m.b)
|
||||
return calculateRatio(min(la, lb), la+lb)
|
||||
}
|
||||
|
||||
// Convert range to the "ed" format
|
||||
func formatRangeUnified(start, stop int) string {
|
||||
// Per the diff spec at http://www.unix.org/single_unix_specification/
|
||||
beginning := start + 1 // lines start numbering with one
|
||||
length := stop - start
|
||||
if length == 1 {
|
||||
return fmt.Sprintf("%d", beginning)
|
||||
}
|
||||
if length == 0 {
|
||||
beginning -= 1 // empty ranges begin at line just before the range
|
||||
}
|
||||
return fmt.Sprintf("%d,%d", beginning, length)
|
||||
}
|
||||
|
||||
// Unified diff parameters
|
||||
type UnifiedDiff struct {
|
||||
A []string // First sequence lines
|
||||
FromFile string // First file name
|
||||
FromDate string // First file time
|
||||
B []string // Second sequence lines
|
||||
ToFile string // Second file name
|
||||
ToDate string // Second file time
|
||||
Eol string // Headers end of line, defaults to LF
|
||||
Context int // Number of context lines
|
||||
}
|
||||
|
||||
// Compare two sequences of lines; generate the delta as a unified diff.
|
||||
//
|
||||
// Unified diffs are a compact way of showing line changes and a few
|
||||
// lines of context. The number of context lines is set by 'n' which
|
||||
// defaults to three.
|
||||
//
|
||||
// By default, the diff control lines (those with ---, +++, or @@) are
|
||||
// created with a trailing newline. This is helpful so that inputs
|
||||
// created from file.readlines() result in diffs that are suitable for
|
||||
// file.writelines() since both the inputs and outputs have trailing
|
||||
// newlines.
|
||||
//
|
||||
// For inputs that do not have trailing newlines, set the lineterm
|
||||
// argument to "" so that the output will be uniformly newline free.
|
||||
//
|
||||
// The unidiff format normally has a header for filenames and modification
|
||||
// times. Any or all of these may be specified using strings for
|
||||
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
|
||||
// The modification times are normally expressed in the ISO 8601 format.
|
||||
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
|
||||
buf := bufio.NewWriter(writer)
|
||||
defer buf.Flush()
|
||||
wf := func(format string, args ...interface{}) error {
|
||||
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||
return err
|
||||
}
|
||||
ws := func(s string) error {
|
||||
_, err := buf.WriteString(s)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(diff.Eol) == 0 {
|
||||
diff.Eol = "\n"
|
||||
}
|
||||
|
||||
started := false
|
||||
m := NewMatcher(diff.A, diff.B)
|
||||
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||
if !started {
|
||||
started = true
|
||||
fromDate := ""
|
||||
if len(diff.FromDate) > 0 {
|
||||
fromDate = "\t" + diff.FromDate
|
||||
}
|
||||
toDate := ""
|
||||
if len(diff.ToDate) > 0 {
|
||||
toDate = "\t" + diff.ToDate
|
||||
}
|
||||
if diff.FromFile != "" || diff.ToFile != "" {
|
||||
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
first, last := g[0], g[len(g)-1]
|
||||
range1 := formatRangeUnified(first.I1, last.I2)
|
||||
range2 := formatRangeUnified(first.J1, last.J2)
|
||||
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, c := range g {
|
||||
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||
if c.Tag == 'e' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := ws(" " + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'd' {
|
||||
for _, line := range diff.A[i1:i2] {
|
||||
if err := ws("-" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if c.Tag == 'r' || c.Tag == 'i' {
|
||||
for _, line := range diff.B[j1:j2] {
|
||||
if err := ws("+" + line); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Like WriteUnifiedDiff but returns the diff a string.
|
||||
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := WriteUnifiedDiff(w, diff)
|
||||
return string(w.Bytes()), err
|
||||
}
|
||||
|
||||
// Convert range to the "ed" format.
|
||||
func formatRangeContext(start, stop int) string {
|
||||
// Per the diff spec at http://www.unix.org/single_unix_specification/
|
||||
beginning := start + 1 // lines start numbering with one
|
||||
length := stop - start
|
||||
if length == 0 {
|
||||
beginning -= 1 // empty ranges begin at line just before the range
|
||||
}
|
||||
if length <= 1 {
|
||||
return fmt.Sprintf("%d", beginning)
|
||||
}
|
||||
return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
|
||||
}
|
||||
|
||||
type ContextDiff UnifiedDiff
|
||||
|
||||
// Compare two sequences of lines; generate the delta as a context diff.
//
// Context diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by diff.Context
// which defaults to three.
//
// By default, the diff control lines (those with *** or ---) are
// created with a trailing newline.
//
// For inputs that do not have trailing newlines, set the diff.Eol
// argument to "" so that the output will be uniformly newline free.
//
// The context diff format normally has a header for filenames and
// modification times. Any or all of these may be specified using
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
// The modification times are normally expressed in the ISO 8601 format.
// If not specified, the strings default to blanks.
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
	buf := bufio.NewWriter(writer)
	// NOTE(review): the error from this deferred Flush is discarded.
	defer buf.Flush()
	// wf/ws latch the first write error into diffErr instead of returning
	// it, which keeps the emission code below free of error plumbing; the
	// latched error is returned at the end.
	var diffErr error
	wf := func(format string, args ...interface{}) {
		_, err := buf.WriteString(fmt.Sprintf(format, args...))
		if diffErr == nil && err != nil {
			diffErr = err
		}
	}
	ws := func(s string) {
		_, err := buf.WriteString(s)
		if diffErr == nil && err != nil {
			diffErr = err
		}
	}

	if len(diff.Eol) == 0 {
		diff.Eol = "\n"
	}

	// Per-opcode line prefixes in context-diff notation:
	// insert, delete, replace ("changed"), and equal (context).
	prefix := map[byte]string{
		'i': "+ ",
		'd': "- ",
		'r': "! ",
		'e': "  ",
	}

	started := false
	m := NewMatcher(diff.A, diff.B)
	for _, g := range m.GetGroupedOpCodes(diff.Context) {
		// Emit the ***/--- file header once, before the first hunk, and only
		// when at least one filename was supplied.
		if !started {
			started = true
			fromDate := ""
			if len(diff.FromDate) > 0 {
				fromDate = "\t" + diff.FromDate
			}
			toDate := ""
			if len(diff.ToDate) > 0 {
				toDate = "\t" + diff.ToDate
			}
			if diff.FromFile != "" || diff.ToFile != "" {
				wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
				wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
			}
		}

		first, last := g[0], g[len(g)-1]
		ws("***************" + diff.Eol)

		range1 := formatRangeContext(first.I1, last.I2)
		wf("*** %s ****%s", range1, diff.Eol)
		// Old-file section: emitted only if this group deletes or replaces
		// something; pure-insert opcodes contribute no old-file lines.
		for _, c := range g {
			if c.Tag == 'r' || c.Tag == 'd' {
				for _, cc := range g {
					if cc.Tag == 'i' {
						continue
					}
					for _, line := range diff.A[cc.I1:cc.I2] {
						ws(prefix[cc.Tag] + line)
					}
				}
				break
			}
		}

		range2 := formatRangeContext(first.J1, last.J2)
		wf("--- %s ----%s", range2, diff.Eol)
		// New-file section: emitted only if this group inserts or replaces;
		// pure-delete opcodes contribute no new-file lines.
		for _, c := range g {
			if c.Tag == 'r' || c.Tag == 'i' {
				for _, cc := range g {
					if cc.Tag == 'd' {
						continue
					}
					for _, line := range diff.B[cc.J1:cc.J2] {
						ws(prefix[cc.Tag] + line)
					}
				}
				break
			}
		}
	}
	return diffErr
}
|
||||
|
||||
// Like WriteContextDiff but returns the diff a string.
|
||||
func GetContextDiffString(diff ContextDiff) (string, error) {
|
||||
w := &bytes.Buffer{}
|
||||
err := WriteContextDiff(w, diff)
|
||||
return string(w.Bytes()), err
|
||||
}
|
||||
|
||||
// Split a string on "\n" while preserving them. The output can be used
|
||||
// as input for UnifiedDiff and ContextDiff structures.
|
||||
func SplitLines(s string) []string {
|
||||
lines := strings.SplitAfter(s, "\n")
|
||||
lines[len(lines)-1] += "\n"
|
||||
return lines
|
||||
}
|
||||
|
|
@ -0,0 +1,50 @@
|
|||
|
||||
This project is covered by two different licenses: MIT and Apache.
|
||||
|
||||
#### MIT License ####
|
||||
|
||||
The following files were ported to Go from C files of libyaml, and thus
|
||||
are still covered by their original MIT license, with the additional
|
||||
copyright starting in 2011 when the project was ported over:
|
||||
|
||||
apic.go emitterc.go parserc.go readerc.go scannerc.go
|
||||
writerc.go yamlh.go yamlprivateh.go
|
||||
|
||||
Copyright (c) 2006-2010 Kirill Simonov
|
||||
Copyright (c) 2006-2011 Kirill Simonov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
### Apache License ###
|
||||
|
||||
All the remaining project files are covered by the Apache license:
|
||||
|
||||
Copyright (c) 2011-2019 Canonical Ltd
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
Copyright 2011-2016 Canonical Ltd.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
|
@ -0,0 +1,171 @@
|
|||
go.yaml.in/yaml
|
||||
===============
|
||||
|
||||
YAML Support for the Go Language
|
||||
|
||||
|
||||
## Introduction
|
||||
|
||||
The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode
|
||||
and decode [YAML](https://yaml.org/) values.
|
||||
|
||||
It was originally developed within [Canonical](https://www.canonical.com) as
|
||||
part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go
|
||||
port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to
|
||||
parse and generate YAML data quickly and reliably.
|
||||
|
||||
|
||||
## Project Status
|
||||
|
||||
This project started as a fork of the extremely popular [go-yaml](
|
||||
https://github.com/go-yaml/yaml/)
|
||||
project, and is being maintained by the official [YAML organization](
|
||||
https://github.com/yaml/).
|
||||
|
||||
The YAML team took over ongoing maintenance and development of the project after
|
||||
discussion with go-yaml's author, @niemeyer, following his decision to
|
||||
[label the project repository as "unmaintained"](
|
||||
https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025.
|
||||
|
||||
We have put together a team of dedicated maintainers including representatives
|
||||
of go-yaml's most important downstream projects.
|
||||
|
||||
We will strive to earn the trust of the various go-yaml forks to switch back to
|
||||
this repository as their upstream.
|
||||
|
||||
Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you
|
||||
would like to contribute or be involved.
|
||||
|
||||
|
||||
## Compatibility
|
||||
|
||||
The `yaml` package supports most of YAML 1.2, but preserves some behavior from
|
||||
1.1 for backwards compatibility.
|
||||
|
||||
Specifically, v3 of the `yaml` package:
|
||||
|
||||
* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being
|
||||
decoded into a typed bool value.
|
||||
Otherwise they behave as a string.
|
||||
Booleans in YAML 1.2 are `true`/`false` only.
|
||||
* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than
|
||||
`0o777` as specified in YAML 1.2, because most parsers still use the old
|
||||
format.
|
||||
Octals in the `0o777` format are supported though, so new files work.
|
||||
* Does not support base-60 floats.
|
||||
These are gone from YAML 1.2, and were actually never supported by this
|
||||
package as it's clearly a poor choice.
|
||||
|
||||
|
||||
## Installation and Usage
|
||||
|
||||
The import path for the package is *go.yaml.in/yaml/v3*.
|
||||
|
||||
To install it, run:
|
||||
|
||||
```bash
|
||||
go get go.yaml.in/yaml/v3
|
||||
```
|
||||
|
||||
|
||||
## API Documentation
|
||||
|
||||
See: <https://pkg.go.dev/go.yaml.in/yaml/v3>
|
||||
|
||||
|
||||
## API Stability
|
||||
|
||||
The package API for yaml v3 will remain stable as described in [gopkg.in](
|
||||
https://gopkg.in).
|
||||
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"go.yaml.in/yaml/v3"
|
||||
)
|
||||
|
||||
var data = `
|
||||
a: Easy!
|
||||
b:
|
||||
c: 2
|
||||
d: [3, 4]
|
||||
`
|
||||
|
||||
// Note: struct fields must be public in order for unmarshal to
|
||||
// correctly populate the data.
|
||||
type T struct {
|
||||
A string
|
||||
B struct {
|
||||
RenamedC int `yaml:"c"`
|
||||
D []int `yaml:",flow"`
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
t := T{}
|
||||
|
||||
err := yaml.Unmarshal([]byte(data), &t)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- t:\n%v\n\n", t)
|
||||
|
||||
d, err := yaml.Marshal(&t)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- t dump:\n%s\n\n", string(d))
|
||||
|
||||
m := make(map[interface{}]interface{})
|
||||
|
||||
err = yaml.Unmarshal([]byte(data), &m)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- m:\n%v\n\n", m)
|
||||
|
||||
d, err = yaml.Marshal(&m)
|
||||
if err != nil {
|
||||
log.Fatalf("error: %v", err)
|
||||
}
|
||||
fmt.Printf("--- m dump:\n%s\n\n", string(d))
|
||||
}
|
||||
```
|
||||
|
||||
This example will generate the following output:
|
||||
|
||||
```
|
||||
--- t:
|
||||
{Easy! {2 [3 4]}}
|
||||
|
||||
--- t dump:
|
||||
a: Easy!
|
||||
b:
|
||||
c: 2
|
||||
d: [3, 4]
|
||||
|
||||
|
||||
--- m:
|
||||
map[a:Easy! b:map[c:2 d:[3 4]]]
|
||||
|
||||
--- m dump:
|
||||
a: Easy!
|
||||
b:
|
||||
c: 2
|
||||
d:
|
||||
- 3
|
||||
- 4
|
||||
```
|
||||
|
||||
|
||||
## License
|
||||
|
||||
The yaml package is licensed under the MIT and Apache License 2.0 licenses.
|
||||
Please see the LICENSE file for details.
|
||||
|
|
@ -0,0 +1,747 @@
|
|||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
// Copyright (c) 2006-2010 Kirill Simonov
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
// of the Software, and to permit persons to whom the Software is furnished to do
|
||||
// so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// yaml_insert_token inserts token into the parser's token queue at position
// pos, counted from parser.tokens_head; a negative pos appends to the tail.
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))

	// Check if we can move the queue at the beginning of the buffer.
	// Compacting here reclaims the already-consumed head space instead of
	// forcing append to grow the backing array.
	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
		if parser.tokens_head != len(parser.tokens) {
			copy(parser.tokens, parser.tokens[parser.tokens_head:])
		}
		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
		parser.tokens_head = 0
	}
	// Grow the queue by one; for an append (pos < 0) the token is already in
	// its final slot.
	parser.tokens = append(parser.tokens, *token)
	if pos < 0 {
		return
	}
	// Shift the tail right by one and drop the token into the gap.
	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
	parser.tokens[parser.tokens_head+pos] = *token
}
|
||||
|
||||
// Create a new parser object.
|
||||
func yaml_parser_initialize(parser *yaml_parser_t) bool {
|
||||
*parser = yaml_parser_t{
|
||||
raw_buffer: make([]byte, 0, input_raw_buffer_size),
|
||||
buffer: make([]byte, 0, input_buffer_size),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Destroy a parser object.
|
||||
func yaml_parser_delete(parser *yaml_parser_t) {
|
||||
*parser = yaml_parser_t{}
|
||||
}
|
||||
|
||||
// String read handler.
|
||||
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||||
if parser.input_pos == len(parser.input) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n = copy(buffer, parser.input[parser.input_pos:])
|
||||
parser.input_pos += n
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Reader read handler.
|
||||
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
|
||||
return parser.input_reader.Read(buffer)
|
||||
}
|
||||
|
||||
// Set a string input.
|
||||
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
|
||||
if parser.read_handler != nil {
|
||||
panic("must set the input source only once")
|
||||
}
|
||||
parser.read_handler = yaml_string_read_handler
|
||||
parser.input = input
|
||||
parser.input_pos = 0
|
||||
}
|
||||
|
||||
// Set a file input.
|
||||
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
|
||||
if parser.read_handler != nil {
|
||||
panic("must set the input source only once")
|
||||
}
|
||||
parser.read_handler = yaml_reader_read_handler
|
||||
parser.input_reader = r
|
||||
}
|
||||
|
||||
// Set the source encoding.
|
||||
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
|
||||
if parser.encoding != yaml_ANY_ENCODING {
|
||||
panic("must set the encoding only once")
|
||||
}
|
||||
parser.encoding = encoding
|
||||
}
|
||||
|
||||
// Create a new emitter object.
|
||||
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
|
||||
*emitter = yaml_emitter_t{
|
||||
buffer: make([]byte, output_buffer_size),
|
||||
raw_buffer: make([]byte, 0, output_raw_buffer_size),
|
||||
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
|
||||
events: make([]yaml_event_t, 0, initial_queue_size),
|
||||
best_width: -1,
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy an emitter object.
|
||||
func yaml_emitter_delete(emitter *yaml_emitter_t) {
|
||||
*emitter = yaml_emitter_t{}
|
||||
}
|
||||
|
||||
// String write handler.
|
||||
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||||
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// yaml_writer_write_handler uses emitter.output_writer to write the
|
||||
// emitted text.
|
||||
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
|
||||
_, err := emitter.output_writer.Write(buffer)
|
||||
return err
|
||||
}
|
||||
|
||||
// Set a string output.
|
||||
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
|
||||
if emitter.write_handler != nil {
|
||||
panic("must set the output target only once")
|
||||
}
|
||||
emitter.write_handler = yaml_string_write_handler
|
||||
emitter.output_buffer = output_buffer
|
||||
}
|
||||
|
||||
// Set a file output.
|
||||
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
|
||||
if emitter.write_handler != nil {
|
||||
panic("must set the output target only once")
|
||||
}
|
||||
emitter.write_handler = yaml_writer_write_handler
|
||||
emitter.output_writer = w
|
||||
}
|
||||
|
||||
// Set the output encoding.
|
||||
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
|
||||
if emitter.encoding != yaml_ANY_ENCODING {
|
||||
panic("must set the output encoding only once")
|
||||
}
|
||||
emitter.encoding = encoding
|
||||
}
|
||||
|
||||
// yaml_emitter_set_canonical toggles canonical output style for the emitter.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
	emitter.canonical = canonical
}
|
||||
|
||||
// Set the indentation increment.
|
||||
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
|
||||
if indent < 2 || indent > 9 {
|
||||
indent = 2
|
||||
}
|
||||
emitter.best_indent = indent
|
||||
}
|
||||
|
||||
// Set the preferred line width.
|
||||
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
|
||||
if width < 0 {
|
||||
width = -1
|
||||
}
|
||||
emitter.best_width = width
|
||||
}
|
||||
|
||||
// yaml_emitter_set_unicode sets whether unescaped non-ASCII characters are
// allowed in the output.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
	emitter.unicode = unicode
}
|
||||
|
||||
// yaml_emitter_set_break sets the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
	emitter.line_break = line_break
}
|
||||
|
||||
///*
|
||||
// * Destroy a token object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(void)
|
||||
//yaml_token_delete(yaml_token_t *token)
|
||||
//{
|
||||
// assert(token); // Non-NULL token object expected.
|
||||
//
|
||||
// switch (token.type)
|
||||
// {
|
||||
// case YAML_TAG_DIRECTIVE_TOKEN:
|
||||
// yaml_free(token.data.tag_directive.handle);
|
||||
// yaml_free(token.data.tag_directive.prefix);
|
||||
// break;
|
||||
//
|
||||
// case YAML_ALIAS_TOKEN:
|
||||
// yaml_free(token.data.alias.value);
|
||||
// break;
|
||||
//
|
||||
// case YAML_ANCHOR_TOKEN:
|
||||
// yaml_free(token.data.anchor.value);
|
||||
// break;
|
||||
//
|
||||
// case YAML_TAG_TOKEN:
|
||||
// yaml_free(token.data.tag.handle);
|
||||
// yaml_free(token.data.tag.suffix);
|
||||
// break;
|
||||
//
|
||||
// case YAML_SCALAR_TOKEN:
|
||||
// yaml_free(token.data.scalar.value);
|
||||
// break;
|
||||
//
|
||||
// default:
|
||||
// break;
|
||||
// }
|
||||
//
|
||||
// memset(token, 0, sizeof(yaml_token_t));
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Check if a string is a valid UTF-8 sequence.
|
||||
// *
|
||||
// * Check 'reader.c' for more details on UTF-8 encoding.
|
||||
// */
|
||||
//
|
||||
//static int
|
||||
//yaml_check_utf8(yaml_char_t *start, size_t length)
|
||||
//{
|
||||
// yaml_char_t *end = start+length;
|
||||
// yaml_char_t *pointer = start;
|
||||
//
|
||||
// while (pointer < end) {
|
||||
// unsigned char octet;
|
||||
// unsigned int width;
|
||||
// unsigned int value;
|
||||
// size_t k;
|
||||
//
|
||||
// octet = pointer[0];
|
||||
// width = (octet & 0x80) == 0x00 ? 1 :
|
||||
// (octet & 0xE0) == 0xC0 ? 2 :
|
||||
// (octet & 0xF0) == 0xE0 ? 3 :
|
||||
// (octet & 0xF8) == 0xF0 ? 4 : 0;
|
||||
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
|
||||
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
|
||||
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
|
||||
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
|
||||
// if (!width) return 0;
|
||||
// if (pointer+width > end) return 0;
|
||||
// for (k = 1; k < width; k ++) {
|
||||
// octet = pointer[k];
|
||||
// if ((octet & 0xC0) != 0x80) return 0;
|
||||
// value = (value << 6) + (octet & 0x3F);
|
||||
// }
|
||||
// if (!((width == 1) ||
|
||||
// (width == 2 && value >= 0x80) ||
|
||||
// (width == 3 && value >= 0x800) ||
|
||||
// (width == 4 && value >= 0x10000))) return 0;
|
||||
//
|
||||
// pointer += width;
|
||||
// }
|
||||
//
|
||||
// return 1;
|
||||
//}
|
||||
//
|
||||
|
||||
// Create STREAM-START.
|
||||
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_STREAM_START_EVENT,
|
||||
encoding: encoding,
|
||||
}
|
||||
}
|
||||
|
||||
// Create STREAM-END.
|
||||
func yaml_stream_end_event_initialize(event *yaml_event_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_STREAM_END_EVENT,
|
||||
}
|
||||
}
|
||||
|
||||
// Create DOCUMENT-START.
|
||||
func yaml_document_start_event_initialize(
|
||||
event *yaml_event_t,
|
||||
version_directive *yaml_version_directive_t,
|
||||
tag_directives []yaml_tag_directive_t,
|
||||
implicit bool,
|
||||
) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_DOCUMENT_START_EVENT,
|
||||
version_directive: version_directive,
|
||||
tag_directives: tag_directives,
|
||||
implicit: implicit,
|
||||
}
|
||||
}
|
||||
|
||||
// Create DOCUMENT-END.
|
||||
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_DOCUMENT_END_EVENT,
|
||||
implicit: implicit,
|
||||
}
|
||||
}
|
||||
|
||||
// Create ALIAS.
|
||||
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_ALIAS_EVENT,
|
||||
anchor: anchor,
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create SCALAR.
|
||||
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SCALAR_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
value: value,
|
||||
implicit: plain_implicit,
|
||||
quoted_implicit: quoted_implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create SEQUENCE-START.
|
||||
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SEQUENCE_START_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
implicit: implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create SEQUENCE-END.
|
||||
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_SEQUENCE_END_EVENT,
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Create MAPPING-START.
|
||||
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_MAPPING_START_EVENT,
|
||||
anchor: anchor,
|
||||
tag: tag,
|
||||
implicit: implicit,
|
||||
style: yaml_style_t(style),
|
||||
}
|
||||
}
|
||||
|
||||
// Create MAPPING-END.
|
||||
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
|
||||
*event = yaml_event_t{
|
||||
typ: yaml_MAPPING_END_EVENT,
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy an event object.
|
||||
func yaml_event_delete(event *yaml_event_t) {
|
||||
*event = yaml_event_t{}
|
||||
}
|
||||
|
||||
///*
|
||||
// * Create a document object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_initialize(document *yaml_document_t,
|
||||
// version_directive *yaml_version_directive_t,
|
||||
// tag_directives_start *yaml_tag_directive_t,
|
||||
// tag_directives_end *yaml_tag_directive_t,
|
||||
// start_implicit int, end_implicit int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// struct {
|
||||
// start *yaml_node_t
|
||||
// end *yaml_node_t
|
||||
// top *yaml_node_t
|
||||
// } nodes = { NULL, NULL, NULL }
|
||||
// version_directive_copy *yaml_version_directive_t = NULL
|
||||
// struct {
|
||||
// start *yaml_tag_directive_t
|
||||
// end *yaml_tag_directive_t
|
||||
// top *yaml_tag_directive_t
|
||||
// } tag_directives_copy = { NULL, NULL, NULL }
|
||||
// value yaml_tag_directive_t = { NULL, NULL }
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
// assert((tag_directives_start && tag_directives_end) ||
|
||||
// (tag_directives_start == tag_directives_end))
|
||||
// // Valid tag directives are expected.
|
||||
//
|
||||
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// if (version_directive) {
|
||||
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
|
||||
// if (!version_directive_copy) goto error
|
||||
// version_directive_copy.major = version_directive.major
|
||||
// version_directive_copy.minor = version_directive.minor
|
||||
// }
|
||||
//
|
||||
// if (tag_directives_start != tag_directives_end) {
|
||||
// tag_directive *yaml_tag_directive_t
|
||||
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
|
||||
// goto error
|
||||
// for (tag_directive = tag_directives_start
|
||||
// tag_directive != tag_directives_end; tag_directive ++) {
|
||||
// assert(tag_directive.handle)
|
||||
// assert(tag_directive.prefix)
|
||||
// if (!yaml_check_utf8(tag_directive.handle,
|
||||
// strlen((char *)tag_directive.handle)))
|
||||
// goto error
|
||||
// if (!yaml_check_utf8(tag_directive.prefix,
|
||||
// strlen((char *)tag_directive.prefix)))
|
||||
// goto error
|
||||
// value.handle = yaml_strdup(tag_directive.handle)
|
||||
// value.prefix = yaml_strdup(tag_directive.prefix)
|
||||
// if (!value.handle || !value.prefix) goto error
|
||||
// if (!PUSH(&context, tag_directives_copy, value))
|
||||
// goto error
|
||||
// value.handle = NULL
|
||||
// value.prefix = NULL
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
|
||||
// tag_directives_copy.start, tag_directives_copy.top,
|
||||
// start_implicit, end_implicit, mark, mark)
|
||||
//
|
||||
// return 1
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, nodes)
|
||||
// yaml_free(version_directive_copy)
|
||||
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
|
||||
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
|
||||
// yaml_free(value.handle)
|
||||
// yaml_free(value.prefix)
|
||||
// }
|
||||
// STACK_DEL(&context, tag_directives_copy)
|
||||
// yaml_free(value.handle)
|
||||
// yaml_free(value.prefix)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Destroy a document object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(void)
|
||||
//yaml_document_delete(document *yaml_document_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// tag_directive *yaml_tag_directive_t
|
||||
//
|
||||
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// while (!STACK_EMPTY(&context, document.nodes)) {
|
||||
// node yaml_node_t = POP(&context, document.nodes)
|
||||
// yaml_free(node.tag)
|
||||
// switch (node.type) {
|
||||
// case YAML_SCALAR_NODE:
|
||||
// yaml_free(node.data.scalar.value)
|
||||
// break
|
||||
// case YAML_SEQUENCE_NODE:
|
||||
// STACK_DEL(&context, node.data.sequence.items)
|
||||
// break
|
||||
// case YAML_MAPPING_NODE:
|
||||
// STACK_DEL(&context, node.data.mapping.pairs)
|
||||
// break
|
||||
// default:
|
||||
// assert(0) // Should not happen.
|
||||
// }
|
||||
// }
|
||||
// STACK_DEL(&context, document.nodes)
|
||||
//
|
||||
// yaml_free(document.version_directive)
|
||||
// for (tag_directive = document.tag_directives.start
|
||||
// tag_directive != document.tag_directives.end
|
||||
// tag_directive++) {
|
||||
// yaml_free(tag_directive.handle)
|
||||
// yaml_free(tag_directive.prefix)
|
||||
// }
|
||||
// yaml_free(document.tag_directives.start)
|
||||
//
|
||||
// memset(document, 0, sizeof(yaml_document_t))
|
||||
//}
|
||||
//
|
||||
///**
|
||||
// * Get a document node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(yaml_node_t *)
|
||||
//yaml_document_get_node(document *yaml_document_t, index int)
|
||||
//{
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
|
||||
// return document.nodes.start + index - 1
|
||||
// }
|
||||
// return NULL
|
||||
//}
|
||||
//
|
||||
///**
|
||||
// * Get the root object.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(yaml_node_t *)
|
||||
//yaml_document_get_root_node(document *yaml_document_t)
|
||||
//{
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (document.nodes.top != document.nodes.start) {
|
||||
// return document.nodes.start
|
||||
// }
|
||||
// return NULL
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a scalar node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_scalar(document *yaml_document_t,
|
||||
// tag *yaml_char_t, value *yaml_char_t, length int,
|
||||
// style yaml_scalar_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// value_copy *yaml_char_t = NULL
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
// assert(value) // Non-NULL value is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (length < 0) {
|
||||
// length = strlen((char *)value)
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(value, length)) goto error
|
||||
// value_copy = yaml_malloc(length+1)
|
||||
// if (!value_copy) goto error
|
||||
// memcpy(value_copy, value, length)
|
||||
// value_copy[length] = '\0'
|
||||
//
|
||||
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// yaml_free(tag_copy)
|
||||
// yaml_free(value_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a sequence node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_sequence(document *yaml_document_t,
|
||||
// tag *yaml_char_t, style yaml_sequence_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// struct {
|
||||
// start *yaml_node_item_t
|
||||
// end *yaml_node_item_t
|
||||
// top *yaml_node_item_t
|
||||
// } items = { NULL, NULL, NULL }
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
|
||||
// style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, items)
|
||||
// yaml_free(tag_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Add a mapping node to a document.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_add_mapping(document *yaml_document_t,
|
||||
// tag *yaml_char_t, style yaml_mapping_style_t)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
// mark yaml_mark_t = { 0, 0, 0 }
|
||||
// tag_copy *yaml_char_t = NULL
|
||||
// struct {
|
||||
// start *yaml_node_pair_t
|
||||
// end *yaml_node_pair_t
|
||||
// top *yaml_node_pair_t
|
||||
// } pairs = { NULL, NULL, NULL }
|
||||
// node yaml_node_t
|
||||
//
|
||||
// assert(document) // Non-NULL document object is expected.
|
||||
//
|
||||
// if (!tag) {
|
||||
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
|
||||
// }
|
||||
//
|
||||
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
|
||||
// tag_copy = yaml_strdup(tag)
|
||||
// if (!tag_copy) goto error
|
||||
//
|
||||
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
|
||||
//
|
||||
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
|
||||
// style, mark, mark)
|
||||
// if (!PUSH(&context, document.nodes, node)) goto error
|
||||
//
|
||||
// return document.nodes.top - document.nodes.start
|
||||
//
|
||||
//error:
|
||||
// STACK_DEL(&context, pairs)
|
||||
// yaml_free(tag_copy)
|
||||
//
|
||||
// return 0
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Append an item to a sequence node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_append_sequence_item(document *yaml_document_t,
|
||||
// sequence int, item int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
//
|
||||
// assert(document) // Non-NULL document is required.
|
||||
// assert(sequence > 0
|
||||
// && document.nodes.start + sequence <= document.nodes.top)
|
||||
// // Valid sequence id is required.
|
||||
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
|
||||
// // A sequence node is required.
|
||||
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
|
||||
// // Valid item id is required.
|
||||
//
|
||||
// if (!PUSH(&context,
|
||||
// document.nodes.start[sequence-1].data.sequence.items, item))
|
||||
// return 0
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
//
|
||||
///*
|
||||
// * Append a pair of a key and a value to a mapping node.
|
||||
// */
|
||||
//
|
||||
//YAML_DECLARE(int)
|
||||
//yaml_document_append_mapping_pair(document *yaml_document_t,
|
||||
// mapping int, key int, value int)
|
||||
//{
|
||||
// struct {
|
||||
// error yaml_error_type_t
|
||||
// } context
|
||||
//
|
||||
// pair yaml_node_pair_t
|
||||
//
|
||||
// assert(document) // Non-NULL document is required.
|
||||
// assert(mapping > 0
|
||||
// && document.nodes.start + mapping <= document.nodes.top)
|
||||
// // Valid mapping id is required.
|
||||
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
|
||||
// // A mapping node is required.
|
||||
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
|
||||
// // Valid key id is required.
|
||||
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
|
||||
// // Valid value id is required.
|
||||
//
|
||||
// pair.key = key
|
||||
// pair.value = value
|
||||
//
|
||||
// if (!PUSH(&context,
|
||||
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
|
||||
// return 0
|
||||
//
|
||||
// return 1
|
||||
//}
|
||||
//
|
||||
//
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,577 @@
|
|||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// encoder holds the state used to translate Go values into a stream of
// YAML events fed to the libyaml-derived emitter.
type encoder struct {
	emitter yaml_emitter_t // low-level emitter state
	event   yaml_event_t   // scratch event; consumed by each emit() call
	out     []byte         // accumulated output when emitting to a string
	flow    bool           // one-shot flag: next collection uses flow style
	indent  int            // indentation width; 0 means "use the default"
	doneInit bool          // whether init() has emitted the stream-start event
}
|
||||
|
||||
func newEncoder() *encoder {
|
||||
e := &encoder{}
|
||||
yaml_emitter_initialize(&e.emitter)
|
||||
yaml_emitter_set_output_string(&e.emitter, &e.out)
|
||||
yaml_emitter_set_unicode(&e.emitter, true)
|
||||
return e
|
||||
}
|
||||
|
||||
func newEncoderWithWriter(w io.Writer) *encoder {
|
||||
e := &encoder{}
|
||||
yaml_emitter_initialize(&e.emitter)
|
||||
yaml_emitter_set_output_writer(&e.emitter, w)
|
||||
yaml_emitter_set_unicode(&e.emitter, true)
|
||||
return e
|
||||
}
|
||||
|
||||
func (e *encoder) init() {
|
||||
if e.doneInit {
|
||||
return
|
||||
}
|
||||
if e.indent == 0 {
|
||||
e.indent = 4
|
||||
}
|
||||
e.emitter.best_indent = e.indent
|
||||
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
|
||||
e.emit()
|
||||
e.doneInit = true
|
||||
}
|
||||
|
||||
// finish closes the YAML stream by emitting the stream-end event.
func (e *encoder) finish() {
	e.emitter.open_ended = false
	yaml_stream_end_event_initialize(&e.event)
	e.emit()
}
|
||||
|
||||
// destroy releases the resources held by the underlying emitter.
func (e *encoder) destroy() {
	yaml_emitter_delete(&e.emitter)
}
|
||||
|
||||
// emit feeds the pending e.event to the emitter, aborting via must on failure.
func (e *encoder) emit() {
	// This will internally delete the e.event value.
	e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
|
||||
|
||||
func (e *encoder) must(ok bool) {
|
||||
if !ok {
|
||||
msg := e.emitter.problem
|
||||
if msg == "" {
|
||||
msg = "unknown problem generating YAML content"
|
||||
}
|
||||
failf("%s", msg)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
|
||||
e.init()
|
||||
var node *Node
|
||||
if in.IsValid() {
|
||||
node, _ = in.Interface().(*Node)
|
||||
}
|
||||
if node != nil && node.Kind == DocumentNode {
|
||||
e.nodev(in)
|
||||
} else {
|
||||
yaml_document_start_event_initialize(&e.event, nil, nil, true)
|
||||
e.emit()
|
||||
e.marshal(tag, in)
|
||||
yaml_document_end_event_initialize(&e.event, true)
|
||||
e.emit()
|
||||
}
|
||||
}
|
||||
|
||||
// marshal encodes an arbitrary Go value as YAML events. It first checks for
// special interface types (*Node, Node, time values, Marshaler,
// encoding.TextMarshaler), then dispatches on the reflect.Kind of the value.
// The tag is normalized to its short form before use.
func (e *encoder) marshal(tag string, in reflect.Value) {
	tag = shortTag(tag)
	// Invalid values and nil pointers both encode as a null scalar.
	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
		e.nilv()
		return
	}
	iface := in.Interface()
	switch value := iface.(type) {
	case *Node:
		e.nodev(in)
		return
	case Node:
		// nodev needs an addressable *Node; copy the value if necessary.
		if !in.CanAddr() {
			var n = reflect.New(in.Type()).Elem()
			n.Set(in)
			in = n
		}
		e.nodev(in.Addr())
		return
	case time.Time:
		e.timev(tag, in)
		return
	case *time.Time:
		e.timev(tag, in.Elem())
		return
	case time.Duration:
		// Durations render via their String form (e.g. "1h30m").
		e.stringv(tag, reflect.ValueOf(value.String()))
		return
	case Marshaler:
		v, err := value.MarshalYAML()
		if err != nil {
			fail(err)
		}
		if v == nil {
			e.nilv()
			return
		}
		// Recurse on whatever the custom marshaler produced.
		e.marshal(tag, reflect.ValueOf(v))
		return
	case encoding.TextMarshaler:
		text, err := value.MarshalText()
		if err != nil {
			fail(err)
		}
		// Fall through to the string path below with the marshaled text.
		in = reflect.ValueOf(string(text))
	case nil:
		e.nilv()
		return
	}
	switch in.Kind() {
	case reflect.Interface:
		e.marshal(tag, in.Elem())
	case reflect.Map:
		e.mapv(tag, in)
	case reflect.Ptr:
		e.marshal(tag, in.Elem())
	case reflect.Struct:
		e.structv(tag, in)
	case reflect.Slice, reflect.Array:
		e.slicev(tag, in)
	case reflect.String:
		e.stringv(tag, in)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		e.intv(tag, in)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		e.uintv(tag, in)
	case reflect.Float32, reflect.Float64:
		e.floatv(tag, in)
	case reflect.Bool:
		e.boolv(tag, in)
	default:
		panic("cannot marshal type: " + in.Type().String())
	}
}
|
||||
|
||||
func (e *encoder) mapv(tag string, in reflect.Value) {
|
||||
e.mappingv(tag, func() {
|
||||
keys := keyList(in.MapKeys())
|
||||
sort.Sort(keys)
|
||||
for _, k := range keys {
|
||||
e.marshal("", k)
|
||||
e.marshal("", in.MapIndex(k))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
|
||||
for _, num := range index {
|
||||
for {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
if v.IsNil() {
|
||||
return reflect.Value{}
|
||||
}
|
||||
v = v.Elem()
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
v = v.Field(num)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// structv encodes a Go struct as a YAML mapping. Regular and inlined-struct
// fields come first in declaration order, honoring omitempty and per-field
// flow style; an inlined map (sinfo.InlineMap), if present, is emitted last
// with sorted keys, and keys that collide with struct fields cause a panic.
func (e *encoder) structv(tag string, in reflect.Value) {
	sinfo, err := getStructInfo(in.Type())
	if err != nil {
		panic(err)
	}
	e.mappingv(tag, func() {
		for _, info := range sinfo.FieldsList {
			var value reflect.Value
			if info.Inline == nil {
				value = in.Field(info.Num)
			} else {
				value = e.fieldByIndex(in, info.Inline)
				// A nil pointer along the inline path means the field
				// is absent; skip it entirely.
				if !value.IsValid() {
					continue
				}
			}
			if info.OmitEmpty && isZero(value) {
				continue
			}
			e.marshal("", reflect.ValueOf(info.Key))
			// Flow applies to the value only, and is consumed by marshal.
			e.flow = info.Flow
			e.marshal("", value)
		}
		if sinfo.InlineMap >= 0 {
			m := in.Field(sinfo.InlineMap)
			if m.Len() > 0 {
				e.flow = false
				keys := keyList(m.MapKeys())
				sort.Sort(keys)
				for _, k := range keys {
					if _, found := sinfo.FieldsMap[k.String()]; found {
						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
					}
					e.marshal("", k)
					e.flow = false
					e.marshal("", m.MapIndex(k))
				}
			}
		}
	})
}
|
||||
|
||||
func (e *encoder) mappingv(tag string, f func()) {
|
||||
implicit := tag == ""
|
||||
style := yaml_BLOCK_MAPPING_STYLE
|
||||
if e.flow {
|
||||
e.flow = false
|
||||
style = yaml_FLOW_MAPPING_STYLE
|
||||
}
|
||||
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
|
||||
e.emit()
|
||||
f()
|
||||
yaml_mapping_end_event_initialize(&e.event)
|
||||
e.emit()
|
||||
}
|
||||
|
||||
func (e *encoder) slicev(tag string, in reflect.Value) {
|
||||
implicit := tag == ""
|
||||
style := yaml_BLOCK_SEQUENCE_STYLE
|
||||
if e.flow {
|
||||
e.flow = false
|
||||
style = yaml_FLOW_SEQUENCE_STYLE
|
||||
}
|
||||
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
|
||||
e.emit()
|
||||
n := in.Len()
|
||||
for i := 0; i < n; i++ {
|
||||
e.marshal("", in.Index(i))
|
||||
}
|
||||
e.must(yaml_sequence_end_event_initialize(&e.event))
|
||||
e.emit()
|
||||
}
|
||||
|
||||
// isBase60Float reports whether s is in the YAML 1.1 base-60 float notation
// (e.g. "1:20:30.5").
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but these should be marshalled quoted for
// the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
	// Cheap pre-filter before running the regular expression: the string
	// must start with a sign or digit and contain at least one colon.
	if len(s) == 0 {
		return false
	}
	switch c := s[0]; {
	case c == '+' || c == '-':
	case c >= '0' && c <= '9':
	default:
		return false
	}
	if !strings.Contains(s, ":") {
		return false
	}
	// Do the full match.
	return base60float.MatchString(s)
}

// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
|
||||
|
||||
// isOldBool reports whether s is one of the YAML 1.1 boolean literals.
//
// We continue to force strings that YAML 1.1 would interpret as booleans to be
// rendered as quoted strings so that the marshalled output remains valid for
// YAML 1.1 parsing.
func isOldBool(s string) (result bool) {
	for _, lit := range []string{
		"y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
		"n", "N", "no", "No", "NO", "off", "Off", "OFF",
	} {
		if s == lit {
			return true
		}
	}
	return false
}
|
||||
|
||||
// stringv encodes a Go string as a YAML scalar, choosing a style that keeps
// the round trip faithful: invalid UTF-8 is base64-encoded under !!binary,
// strings that would resolve to a non-string tag (or look like YAML 1.1
// base-60 floats or booleans) are quoted, and multi-line strings use
// literal style outside flow context.
func (e *encoder) stringv(tag string, in reflect.Value) {
	var style yaml_scalar_style_t
	s := in.String()
	canUsePlain := true
	switch {
	case !utf8.ValidString(s):
		if tag == binaryTag {
			failf("explicitly tagged !!binary data must be base64-encoded")
		}
		if tag != "" {
			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
		}
		// It can't be encoded directly as YAML so use a binary tag
		// and encode it as base64.
		tag = binaryTag
		s = encodeBase64(s)
	case tag == "":
		// Check to see if it would resolve to a specific
		// tag when encoded unquoted. If it doesn't,
		// there's no need to quote it.
		rtag, _ := resolve("", s)
		canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
	}
	// Note: it's possible for user code to emit invalid YAML
	// if they explicitly specify a tag and a string containing
	// text that's incompatible with that tag.
	switch {
	case strings.Contains(s, "\n"):
		// Literal style is not representable inside flow collections,
		// so fall back to double quoting there.
		if e.flow {
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		} else {
			style = yaml_LITERAL_SCALAR_STYLE
		}
	case canUsePlain:
		style = yaml_PLAIN_SCALAR_STYLE
	default:
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
}
|
||||
|
||||
func (e *encoder) boolv(tag string, in reflect.Value) {
|
||||
var s string
|
||||
if in.Bool() {
|
||||
s = "true"
|
||||
} else {
|
||||
s = "false"
|
||||
}
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
|
||||
}
|
||||
|
||||
// intv encodes a signed integer as a plain decimal scalar.
func (e *encoder) intv(tag string, in reflect.Value) {
	s := strconv.FormatInt(in.Int(), 10)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
|
||||
|
||||
// uintv encodes an unsigned integer as a plain decimal scalar.
func (e *encoder) uintv(tag string, in reflect.Value) {
	s := strconv.FormatUint(in.Uint(), 10)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
|
||||
|
||||
// timev encodes a time.Time as a plain scalar in RFC 3339 format with
// nanosecond precision.
func (e *encoder) timev(tag string, in reflect.Value) {
	t := in.Interface().(time.Time)
	s := t.Format(time.RFC3339Nano)
	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
|
||||
|
||||
func (e *encoder) floatv(tag string, in reflect.Value) {
|
||||
// Issue #352: When formatting, use the precision of the underlying value
|
||||
precision := 64
|
||||
if in.Kind() == reflect.Float32 {
|
||||
precision = 32
|
||||
}
|
||||
|
||||
s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
|
||||
switch s {
|
||||
case "+Inf":
|
||||
s = ".inf"
|
||||
case "-Inf":
|
||||
s = "-.inf"
|
||||
case "NaN":
|
||||
s = ".nan"
|
||||
}
|
||||
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
|
||||
}
|
||||
|
||||
// nilv emits a plain "null" scalar.
func (e *encoder) nilv() {
	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
}
|
||||
|
||||
// emitScalar emits a single scalar event carrying the given value, anchor,
// tag, style, and comment attachments. An empty tag is emitted as implicit;
// otherwise it is expanded to its long form first.
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
	// TODO Kill this function. Replace all initialize calls by their underlining Go literals.
	implicit := tag == ""
	if !implicit {
		tag = longTag(tag)
	}
	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
	// Comments must be attached after initialization, which resets the event.
	e.event.head_comment = head
	e.event.line_comment = line
	e.event.foot_comment = foot
	e.event.tail_comment = tail
	e.emit()
}
|
||||
|
||||
// nodev encodes the *Node held by in, with no pending tail comment.
func (e *encoder) nodev(in reflect.Value) {
	e.node(in.Interface().(*Node), "")
}
|
||||
|
||||
// node encodes a single *Node and its children as YAML events. tail is a
// foot comment carried over from a preceding mapping key, attached to this
// node's start event. Tags that would be implied anyway are dropped from
// the presentation, and dropped string tags may force quoting instead.
func (e *encoder) node(node *Node, tail string) {
	// Zero nodes behave as nil.
	if node.Kind == 0 && node.IsZero() {
		e.nilv()
		return
	}

	// If the tag was not explicitly requested, and dropping it won't change the
	// implicit tag of the value, don't include it in the presentation.
	var tag = node.Tag
	var stag = shortTag(tag)
	var forceQuoting bool
	if tag != "" && node.Style&TaggedStyle == 0 {
		if node.Kind == ScalarNode {
			// A quoted/block scalar is unambiguously a string already.
			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
				tag = ""
			} else {
				rtag, _ := resolve("", node.Value)
				if rtag == stag {
					tag = ""
				} else if stag == strTag {
					// Plain rendering would resolve to a non-string tag,
					// so drop the tag but quote the value instead.
					tag = ""
					forceQuoting = true
				}
			}
		} else {
			var rtag string
			switch node.Kind {
			case MappingNode:
				rtag = mapTag
			case SequenceNode:
				rtag = seqTag
			}
			if rtag == stag {
				tag = ""
			}
		}
	}

	switch node.Kind {
	case DocumentNode:
		yaml_document_start_event_initialize(&e.event, nil, nil, true)
		e.event.head_comment = []byte(node.HeadComment)
		e.emit()
		for _, node := range node.Content {
			e.node(node, "")
		}
		yaml_document_end_event_initialize(&e.event, true)
		e.event.foot_comment = []byte(node.FootComment)
		e.emit()

	case SequenceNode:
		style := yaml_BLOCK_SEQUENCE_STYLE
		if node.Style&FlowStyle != 0 {
			style = yaml_FLOW_SEQUENCE_STYLE
		}
		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
		e.event.head_comment = []byte(node.HeadComment)
		e.emit()
		for _, node := range node.Content {
			e.node(node, "")
		}
		e.must(yaml_sequence_end_event_initialize(&e.event))
		e.event.line_comment = []byte(node.LineComment)
		e.event.foot_comment = []byte(node.FootComment)
		e.emit()

	case MappingNode:
		style := yaml_BLOCK_MAPPING_STYLE
		if node.Style&FlowStyle != 0 {
			style = yaml_FLOW_MAPPING_STYLE
		}
		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
		e.event.tail_comment = []byte(tail)
		e.event.head_comment = []byte(node.HeadComment)
		e.emit()

		// The tail logic below moves the foot comment of prior keys to the following key,
		// since the value for each key may be a nested structure and the foot needs to be
		// processed only the entirety of the value is streamed. The last tail is processed
		// with the mapping end event.
		var tail string // NOTE: deliberately shadows the parameter for the pair loop
		for i := 0; i+1 < len(node.Content); i += 2 {
			k := node.Content[i]
			foot := k.FootComment
			if foot != "" {
				// Strip the foot from the key itself; it is re-attached
				// as the next key's tail (or the mapping end's tail).
				kopy := *k
				kopy.FootComment = ""
				k = &kopy
			}
			e.node(k, tail)
			tail = foot

			v := node.Content[i+1]
			e.node(v, "")
		}

		yaml_mapping_end_event_initialize(&e.event)
		e.event.tail_comment = []byte(tail)
		e.event.line_comment = []byte(node.LineComment)
		e.event.foot_comment = []byte(node.FootComment)
		e.emit()

	case AliasNode:
		yaml_alias_event_initialize(&e.event, []byte(node.Value))
		e.event.head_comment = []byte(node.HeadComment)
		e.event.line_comment = []byte(node.LineComment)
		e.event.foot_comment = []byte(node.FootComment)
		e.emit()

	case ScalarNode:
		value := node.Value
		if !utf8.ValidString(value) {
			if stag == binaryTag {
				failf("explicitly tagged !!binary data must be base64-encoded")
			}
			if stag != "" {
				failf("cannot marshal invalid UTF-8 data as %s", stag)
			}
			// It can't be encoded directly as YAML so use a binary tag
			// and encode it as base64.
			tag = binaryTag
			value = encodeBase64(value)
		}

		// The node's explicit style wins; otherwise multi-line values use
		// literal style, and dropped string tags force double quoting.
		style := yaml_PLAIN_SCALAR_STYLE
		switch {
		case node.Style&DoubleQuotedStyle != 0:
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		case node.Style&SingleQuotedStyle != 0:
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		case node.Style&LiteralStyle != 0:
			style = yaml_LITERAL_SCALAR_STYLE
		case node.Style&FoldedStyle != 0:
			style = yaml_FOLDED_SCALAR_STYLE
		case strings.Contains(value, "\n"):
			style = yaml_LITERAL_SCALAR_STYLE
		case forceQuoting:
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		}

		e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
	default:
		failf("cannot encode node with unknown kind %d", node.Kind)
	}
}
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,434 @@
|
|||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
// Copyright (c) 2006-2010 Kirill Simonov
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
// of the Software, and to permit persons to whom the Software is furnished to do
|
||||
// so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// yaml_parser_set_reader_error records a reader-level error on the parser
// (problem text, byte offset, and offending value, or -1 when no single
// value applies) and returns false so callers can propagate the failure.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
	parser.error = yaml_READER_ERROR
	parser.problem = problem
	parser.problem_offset = offset
	parser.problem_value = value
	return false
}
|
||||
|
||||
// Byte order marks used to detect the input encoding.
const (
	bom_UTF8    = "\xef\xbb\xbf" // UTF-8 BOM (3 bytes)
	bom_UTF16LE = "\xff\xfe"     // UTF-16 little-endian BOM
	bom_UTF16BE = "\xfe\xff"     // UTF-16 big-endian BOM
)
|
||||
|
||||
// yaml_parser_determine_encoding determines the input stream encoding by
// checking for a BOM at the current read position. If no BOM is found, UTF-8
// is assumed. A recognized BOM is consumed (advancing raw_buffer_pos and
// offset past it). Returns true on success, false on a read failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
	// Ensure that we had enough bytes in the raw buffer (up to 3, the
	// length of the longest BOM, unless EOF comes first).
	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
		if !yaml_parser_update_raw_buffer(parser) {
			return false
		}
	}

	// Determine the encoding. UTF-16 BOMs are checked before UTF-8 since
	// they are shorter prefixes.
	buf := parser.raw_buffer
	pos := parser.raw_buffer_pos
	avail := len(buf) - pos
	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
		parser.encoding = yaml_UTF16LE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
		parser.encoding = yaml_UTF16BE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
		parser.encoding = yaml_UTF8_ENCODING
		parser.raw_buffer_pos += 3
		parser.offset += 3
	} else {
		// No BOM: default to UTF-8 without consuming anything.
		parser.encoding = yaml_UTF8_ENCODING
	}
	return true
}
|
||||
|
||||
// yaml_parser_update_raw_buffer refills the raw (undecoded) input buffer from
// the parser's read handler. Already-consumed bytes are shifted out first.
// Returns true on success or when nothing needs reading (buffer full, or
// EOF already seen); false only when the read handler reports a real error.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
	size_read := 0

	// Return if the raw buffer is full.
	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
		return true
	}

	// Return on EOF.
	if parser.eof {
		return true
	}

	// Move the remaining bytes in the raw buffer to the beginning.
	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
	}
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
	parser.raw_buffer_pos = 0

	// Call the read handler to fill the unused tail of the buffer, then
	// extend the slice to cover whatever was read.
	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
	if err == io.EOF {
		// EOF is not an error here; it is remembered so later calls
		// return early.
		parser.eof = true
	} else if err != nil {
		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
	}
	return true
}
|
||||
|
||||
// Ensure that the buffer contains at least `length` characters.
|
||||
// Return true on success, false on failure.
|
||||
//
|
||||
// The length is supposed to be significantly less that the buffer size.
|
||||
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
|
||||
if parser.read_handler == nil {
|
||||
panic("read handler must be set")
|
||||
}
|
||||
|
||||
// [Go] This function was changed to guarantee the requested length size at EOF.
|
||||
// The fact we need to do this is pretty awful, but the description above implies
|
||||
// for that to be the case, and there are tests
|
||||
|
||||
// If the EOF flag is set and the raw buffer is empty, do nothing.
|
||||
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
|
||||
// [Go] ACTUALLY! Read the documentation of this function above.
|
||||
// This is just broken. To return true, we need to have the
|
||||
// given length in the buffer. Not doing that means every single
|
||||
// check that calls this function to make sure the buffer has a
|
||||
// given length is Go) panicking; or C) accessing invalid memory.
|
||||
//return true
|
||||
}
|
||||
|
||||
// Return if the buffer contains enough characters.
|
||||
if parser.unread >= length {
|
||||
return true
|
||||
}
|
||||
|
||||
// Determine the input encoding if it is not known yet.
|
||||
if parser.encoding == yaml_ANY_ENCODING {
|
||||
if !yaml_parser_determine_encoding(parser) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Move the unread characters to the beginning of the buffer.
|
||||
buffer_len := len(parser.buffer)
|
||||
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
|
||||
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
|
||||
buffer_len -= parser.buffer_pos
|
||||
parser.buffer_pos = 0
|
||||
} else if parser.buffer_pos == buffer_len {
|
||||
buffer_len = 0
|
||||
parser.buffer_pos = 0
|
||||
}
|
||||
|
||||
// Open the whole buffer for writing, and cut it before returning.
|
||||
parser.buffer = parser.buffer[:cap(parser.buffer)]
|
||||
|
||||
// Fill the buffer until it has enough characters.
|
||||
first := true
|
||||
for parser.unread < length {
|
||||
|
||||
// Fill the raw buffer if necessary.
|
||||
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
|
||||
if !yaml_parser_update_raw_buffer(parser) {
|
||||
parser.buffer = parser.buffer[:buffer_len]
|
||||
return false
|
||||
}
|
||||
}
|
||||
first = false
|
||||
|
||||
// Decode the raw buffer.
|
||||
inner:
|
||||
for parser.raw_buffer_pos != len(parser.raw_buffer) {
|
||||
var value rune
|
||||
var width int
|
||||
|
||||
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
|
||||
|
||||
// Decode the next character.
|
||||
switch parser.encoding {
|
||||
case yaml_UTF8_ENCODING:
|
||||
// Decode a UTF-8 character. Check RFC 3629
|
||||
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
|
||||
//
|
||||
// The following table (taken from the RFC) is used for
|
||||
// decoding.
|
||||
//
|
||||
// Char. number range | UTF-8 octet sequence
|
||||
// (hexadecimal) | (binary)
|
||||
// --------------------+------------------------------------
|
||||
// 0000 0000-0000 007F | 0xxxxxxx
|
||||
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
|
||||
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
|
||||
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
||||
//
|
||||
// Additionally, the characters in the range 0xD800-0xDFFF
|
||||
// are prohibited as they are reserved for use with UTF-16
|
||||
// surrogate pairs.
|
||||
|
||||
// Determine the length of the UTF-8 sequence.
|
||||
octet := parser.raw_buffer[parser.raw_buffer_pos]
|
||||
switch {
|
||||
case octet&0x80 == 0x00:
|
||||
width = 1
|
||||
case octet&0xE0 == 0xC0:
|
||||
width = 2
|
||||
case octet&0xF0 == 0xE0:
|
||||
width = 3
|
||||
case octet&0xF8 == 0xF0:
|
||||
width = 4
|
||||
default:
|
||||
// The leading octet is invalid.
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid leading UTF-8 octet",
|
||||
parser.offset, int(octet))
|
||||
}
|
||||
|
||||
// Check if the raw buffer contains an incomplete character.
|
||||
if width > raw_unread {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-8 octet sequence",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Decode the leading octet.
|
||||
switch {
|
||||
case octet&0x80 == 0x00:
|
||||
value = rune(octet & 0x7F)
|
||||
case octet&0xE0 == 0xC0:
|
||||
value = rune(octet & 0x1F)
|
||||
case octet&0xF0 == 0xE0:
|
||||
value = rune(octet & 0x0F)
|
||||
case octet&0xF8 == 0xF0:
|
||||
value = rune(octet & 0x07)
|
||||
default:
|
||||
value = 0
|
||||
}
|
||||
|
||||
// Check and decode the trailing octets.
|
||||
for k := 1; k < width; k++ {
|
||||
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
|
||||
|
||||
// Check if the octet is valid.
|
||||
if (octet & 0xC0) != 0x80 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid trailing UTF-8 octet",
|
||||
parser.offset+k, int(octet))
|
||||
}
|
||||
|
||||
// Decode the octet.
|
||||
value = (value << 6) + rune(octet&0x3F)
|
||||
}
|
||||
|
||||
// Check the length of the sequence against the value.
|
||||
switch {
|
||||
case width == 1:
|
||||
case width == 2 && value >= 0x80:
|
||||
case width == 3 && value >= 0x800:
|
||||
case width == 4 && value >= 0x10000:
|
||||
default:
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid length of a UTF-8 sequence",
|
||||
parser.offset, -1)
|
||||
}
|
||||
|
||||
// Check the range of the value.
|
||||
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"invalid Unicode character",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
|
||||
var low, high int
|
||||
if parser.encoding == yaml_UTF16LE_ENCODING {
|
||||
low, high = 0, 1
|
||||
} else {
|
||||
low, high = 1, 0
|
||||
}
|
||||
|
||||
// The UTF-16 encoding is not as simple as one might
|
||||
// naively think. Check RFC 2781
|
||||
// (http://www.ietf.org/rfc/rfc2781.txt).
|
||||
//
|
||||
// Normally, two subsequent bytes describe a Unicode
|
||||
// character. However a special technique (called a
|
||||
// surrogate pair) is used for specifying character
|
||||
// values larger than 0xFFFF.
|
||||
//
|
||||
// A surrogate pair consists of two pseudo-characters:
|
||||
// high surrogate area (0xD800-0xDBFF)
|
||||
// low surrogate area (0xDC00-0xDFFF)
|
||||
//
|
||||
// The following formulas are used for decoding
|
||||
// and encoding characters using surrogate pairs:
|
||||
//
|
||||
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
|
||||
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
|
||||
// W1 = 110110yyyyyyyyyy
|
||||
// W2 = 110111xxxxxxxxxx
|
||||
//
|
||||
// where U is the character value, W1 is the high surrogate
|
||||
// area, W2 is the low surrogate area.
|
||||
|
||||
// Check for incomplete UTF-16 character.
|
||||
if raw_unread < 2 {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-16 character",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Get the character.
|
||||
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
|
||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
|
||||
|
||||
// Check for unexpected low surrogate area.
|
||||
if value&0xFC00 == 0xDC00 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"unexpected low surrogate area",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
// Check for a high surrogate area.
|
||||
if value&0xFC00 == 0xD800 {
|
||||
width = 4
|
||||
|
||||
// Check for incomplete surrogate pair.
|
||||
if raw_unread < 4 {
|
||||
if parser.eof {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"incomplete UTF-16 surrogate pair",
|
||||
parser.offset, -1)
|
||||
}
|
||||
break inner
|
||||
}
|
||||
|
||||
// Get the next character.
|
||||
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
|
||||
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
|
||||
|
||||
// Check for a low surrogate area.
|
||||
if value2&0xFC00 != 0xDC00 {
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"expected low surrogate area",
|
||||
parser.offset+2, int(value2))
|
||||
}
|
||||
|
||||
// Generate the value of the surrogate pair.
|
||||
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
|
||||
} else {
|
||||
width = 2
|
||||
}
|
||||
|
||||
default:
|
||||
panic("impossible")
|
||||
}
|
||||
|
||||
// Check if the character is in the allowed range:
|
||||
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
|
||||
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
|
||||
// | [#x10000-#x10FFFF] (32 bit)
|
||||
switch {
|
||||
case value == 0x09:
|
||||
case value == 0x0A:
|
||||
case value == 0x0D:
|
||||
case value >= 0x20 && value <= 0x7E:
|
||||
case value == 0x85:
|
||||
case value >= 0xA0 && value <= 0xD7FF:
|
||||
case value >= 0xE000 && value <= 0xFFFD:
|
||||
case value >= 0x10000 && value <= 0x10FFFF:
|
||||
default:
|
||||
return yaml_parser_set_reader_error(parser,
|
||||
"control characters are not allowed",
|
||||
parser.offset, int(value))
|
||||
}
|
||||
|
||||
// Move the raw pointers.
|
||||
parser.raw_buffer_pos += width
|
||||
parser.offset += width
|
||||
|
||||
// Finally put the character into the buffer.
|
||||
if value <= 0x7F {
|
||||
// 0000 0000-0000 007F . 0xxxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(value)
|
||||
buffer_len += 1
|
||||
} else if value <= 0x7FF {
|
||||
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 2
|
||||
} else if value <= 0xFFFF {
|
||||
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
|
||||
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 3
|
||||
} else {
|
||||
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
|
||||
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
|
||||
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
|
||||
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
|
||||
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
|
||||
buffer_len += 4
|
||||
}
|
||||
|
||||
parser.unread++
|
||||
}
|
||||
|
||||
// On EOF, put NUL into the buffer and return.
|
||||
if parser.eof {
|
||||
parser.buffer[buffer_len] = 0
|
||||
buffer_len++
|
||||
parser.unread++
|
||||
break
|
||||
}
|
||||
}
|
||||
// [Go] Read the documentation of this function above. To return true,
|
||||
// we need to have the given length in the buffer. Not doing that means
|
||||
// every single check that calls this function to make sure the buffer
|
||||
// has a given length is Go) panicking; or C) accessing invalid memory.
|
||||
// This happens here due to the EOF above breaking early.
|
||||
for buffer_len < length {
|
||||
parser.buffer[buffer_len] = 0
|
||||
buffer_len++
|
||||
}
|
||||
parser.buffer = parser.buffer[:buffer_len]
|
||||
return true
|
||||
}
|
||||
|
|
@ -0,0 +1,326 @@
|
|||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// resolveMapItem pairs a decoded Go value with the YAML tag it resolves to.
type resolveMapItem struct {
	value interface{}
	tag   string
}

// resolveTable maps the first byte of a plain scalar to a hint character
// used by resolve: 'S' sign, 'D' digit, 'M' possible map entry, '.' possible
// float. A zero entry means the scalar can only resolve as a string.
var resolveTable = make([]byte, 256)

// resolveMap maps exact scalar spellings (e.g. "true", "~", ".inf") to
// their decoded value and tag. Both tables are populated in an init func.
var resolveMap = make(map[string]resolveMapItem)
|
||||
|
||||
func init() {
|
||||
t := resolveTable
|
||||
t[int('+')] = 'S' // Sign
|
||||
t[int('-')] = 'S'
|
||||
for _, c := range "0123456789" {
|
||||
t[int(c)] = 'D' // Digit
|
||||
}
|
||||
for _, c := range "yYnNtTfFoO~" {
|
||||
t[int(c)] = 'M' // In map
|
||||
}
|
||||
t[int('.')] = '.' // Float (potentially in map)
|
||||
|
||||
var resolveMapList = []struct {
|
||||
v interface{}
|
||||
tag string
|
||||
l []string
|
||||
}{
|
||||
{true, boolTag, []string{"true", "True", "TRUE"}},
|
||||
{false, boolTag, []string{"false", "False", "FALSE"}},
|
||||
{nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
|
||||
{math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
|
||||
{math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
|
||||
{math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
|
||||
{math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
|
||||
{"<<", mergeTag, []string{"<<"}},
|
||||
}
|
||||
|
||||
m := resolveMap
|
||||
for _, item := range resolveMapList {
|
||||
for _, s := range item.l {
|
||||
m[s] = resolveMapItem{item.v, item.tag}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Short forms of the standard YAML tags used throughout the package.
const (
	nullTag      = "!!null"
	boolTag      = "!!bool"
	strTag       = "!!str"
	intTag       = "!!int"
	floatTag     = "!!float"
	timestampTag = "!!timestamp"
	seqTag       = "!!seq"
	mapTag       = "!!map"
	binaryTag    = "!!binary"
	mergeTag     = "!!merge"
)

// longTags and shortTags cache the translation between the "!!foo" short
// form and the "tag:yaml.org,2002:foo" long form for the standard tags.
var longTags = make(map[string]string)
var shortTags = make(map[string]string)

func init() {
	for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
		ltag := longTag(stag)
		longTags[stag] = ltag
		shortTags[ltag] = stag
	}
}

// longTagPrefix is the URI prefix of the long form of the standard tags.
const longTagPrefix = "tag:yaml.org,2002:"

// shortTag translates a long-form tag into its "!!" short form; any other
// tag is returned unchanged.
func shortTag(tag string) string {
	if !strings.HasPrefix(tag, longTagPrefix) {
		return tag
	}
	if stag, ok := shortTags[tag]; ok {
		return stag
	}
	return "!!" + tag[len(longTagPrefix):]
}

// longTag translates a "!!" short-form tag into its long form; any other
// tag is returned unchanged.
func longTag(tag string) string {
	if !strings.HasPrefix(tag, "!!") {
		return tag
	}
	if ltag, ok := longTags[tag]; ok {
		return ltag
	}
	return longTagPrefix + tag[2:]
}

// resolvableTag reports whether resolve knows how to interpret plain
// scalars carrying the given short tag (or no tag at all).
func resolvableTag(tag string) bool {
	switch tag {
	case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
		return true
	}
	return false
}
|
||||
|
||||
var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
|
||||
|
||||
func resolve(tag string, in string) (rtag string, out interface{}) {
|
||||
tag = shortTag(tag)
|
||||
if !resolvableTag(tag) {
|
||||
return tag, in
|
||||
}
|
||||
|
||||
defer func() {
|
||||
switch tag {
|
||||
case "", rtag, strTag, binaryTag:
|
||||
return
|
||||
case floatTag:
|
||||
if rtag == intTag {
|
||||
switch v := out.(type) {
|
||||
case int64:
|
||||
rtag = floatTag
|
||||
out = float64(v)
|
||||
return
|
||||
case int:
|
||||
rtag = floatTag
|
||||
out = float64(v)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
|
||||
}()
|
||||
|
||||
// Any data is accepted as a !!str or !!binary.
|
||||
// Otherwise, the prefix is enough of a hint about what it might be.
|
||||
hint := byte('N')
|
||||
if in != "" {
|
||||
hint = resolveTable[in[0]]
|
||||
}
|
||||
if hint != 0 && tag != strTag && tag != binaryTag {
|
||||
// Handle things we can lookup in a map.
|
||||
if item, ok := resolveMap[in]; ok {
|
||||
return item.tag, item.value
|
||||
}
|
||||
|
||||
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
|
||||
// are purposefully unsupported here. They're still quoted on
|
||||
// the way out for compatibility with other parser, though.
|
||||
|
||||
switch hint {
|
||||
case 'M':
|
||||
// We've already checked the map above.
|
||||
|
||||
case '.':
|
||||
// Not in the map, so maybe a normal float.
|
||||
floatv, err := strconv.ParseFloat(in, 64)
|
||||
if err == nil {
|
||||
return floatTag, floatv
|
||||
}
|
||||
|
||||
case 'D', 'S':
|
||||
// Int, float, or timestamp.
|
||||
// Only try values as a timestamp if the value is unquoted or there's an explicit
|
||||
// !!timestamp tag.
|
||||
if tag == "" || tag == timestampTag {
|
||||
t, ok := parseTimestamp(in)
|
||||
if ok {
|
||||
return timestampTag, t
|
||||
}
|
||||
}
|
||||
|
||||
plain := strings.Replace(in, "_", "", -1)
|
||||
intv, err := strconv.ParseInt(plain, 0, 64)
|
||||
if err == nil {
|
||||
if intv == int64(int(intv)) {
|
||||
return intTag, int(intv)
|
||||
} else {
|
||||
return intTag, intv
|
||||
}
|
||||
}
|
||||
uintv, err := strconv.ParseUint(plain, 0, 64)
|
||||
if err == nil {
|
||||
return intTag, uintv
|
||||
}
|
||||
if yamlStyleFloat.MatchString(plain) {
|
||||
floatv, err := strconv.ParseFloat(plain, 64)
|
||||
if err == nil {
|
||||
return floatTag, floatv
|
||||
}
|
||||
}
|
||||
if strings.HasPrefix(plain, "0b") {
|
||||
intv, err := strconv.ParseInt(plain[2:], 2, 64)
|
||||
if err == nil {
|
||||
if intv == int64(int(intv)) {
|
||||
return intTag, int(intv)
|
||||
} else {
|
||||
return intTag, intv
|
||||
}
|
||||
}
|
||||
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
|
||||
if err == nil {
|
||||
return intTag, uintv
|
||||
}
|
||||
} else if strings.HasPrefix(plain, "-0b") {
|
||||
intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
|
||||
if err == nil {
|
||||
if true || intv == int64(int(intv)) {
|
||||
return intTag, int(intv)
|
||||
} else {
|
||||
return intTag, intv
|
||||
}
|
||||
}
|
||||
}
|
||||
// Octals as introduced in version 1.2 of the spec.
|
||||
// Octals from the 1.1 spec, spelled as 0777, are still
|
||||
// decoded by default in v3 as well for compatibility.
|
||||
// May be dropped in v4 depending on how usage evolves.
|
||||
if strings.HasPrefix(plain, "0o") {
|
||||
intv, err := strconv.ParseInt(plain[2:], 8, 64)
|
||||
if err == nil {
|
||||
if intv == int64(int(intv)) {
|
||||
return intTag, int(intv)
|
||||
} else {
|
||||
return intTag, intv
|
||||
}
|
||||
}
|
||||
uintv, err := strconv.ParseUint(plain[2:], 8, 64)
|
||||
if err == nil {
|
||||
return intTag, uintv
|
||||
}
|
||||
} else if strings.HasPrefix(plain, "-0o") {
|
||||
intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
|
||||
if err == nil {
|
||||
if true || intv == int64(int(intv)) {
|
||||
return intTag, int(intv)
|
||||
} else {
|
||||
return intTag, intv
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
|
||||
}
|
||||
}
|
||||
return strTag, in
|
||||
}
|
||||
|
||||
// encodeBase64 encodes s as base64, broken into lines of at most 70
// characters when the encoding is long enough to wrap. Wrapped output ends
// with a trailing newline; short (single-line) output has none.
func encodeBase64(s string) string {
	const lineLen = 70
	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
	base64.StdEncoding.Encode(encoded, []byte(s))

	// Short results stay on one line, with no trailing newline.
	if len(encoded) < lineLen {
		return string(encoded)
	}

	var b strings.Builder
	b.Grow(len(encoded) + len(encoded)/lineLen + 1)
	for start := 0; start < len(encoded); start += lineLen {
		end := start + lineLen
		if end > len(encoded) {
			end = len(encoded)
		}
		b.Write(encoded[start:end])
		b.WriteByte('\n')
	}
	return b.String()
}
|
||||
|
||||
// allowedTimestampFormats lists the time.Parse layouts accepted for YAML
// timestamps. This is a subset of the formats allowed by the regular
// expression defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
	"2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
	"2006-1-2",                        // date only
	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
	// from the set of examples.
}

// parseTimestamp parses s as a timestamp string and reports whether it
// succeeded. Timestamp formats are defined at
// http://yaml.org/type/timestamp.html.
func parseTimestamp(s string) (time.Time, bool) {
	// TODO write code to check all the formats supported by
	// http://yaml.org/type/timestamp.html instead of using time.Parse.

	// Quick rejection: every accepted layout starts with a four-digit
	// year followed by '-'.
	digits := 0
	for digits < len(s) && s[digits] >= '0' && s[digits] <= '9' {
		digits++
	}
	if digits != 4 || digits == len(s) || s[digits] != '-' {
		return time.Time{}, false
	}
	for _, layout := range allowedTimestampFormats {
		if t, err := time.Parse(layout, s); err == nil {
			return t, true
		}
	}
	return time.Time{}, false
}
|
||||
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,134 @@
|
|||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// keyList sorts map keys for deterministic output. Numeric/bool keys sort
// first by value (then by kind), mixed kinds sort by kind, and string keys
// use a natural order in which embedded digit runs compare numerically.
type keyList []reflect.Value

func (l keyList) Len() int      { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l keyList) Less(i, j int) bool {
	a := l[i]
	b := l[j]
	ak := a.Kind()
	bk := b.Kind()
	// Unwrap interfaces and non-nil pointers so underlying values compare.
	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
		a = a.Elem()
		ak = a.Kind()
	}
	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
		b = b.Elem()
		bk = b.Kind()
	}
	af, aok := keyFloat(a)
	bf, bok := keyFloat(b)
	if aok && bok {
		// Both numeric/bool: order by float value, then kind, then exact
		// numeric value (numLess avoids float-conversion ties).
		if af != bf {
			return af < bf
		}
		if ak != bk {
			return ak < bk
		}
		return numLess(a, b)
	}
	// Non-string kinds (or mixed string/non-string) order by kind alone.
	if ak != reflect.String || bk != reflect.String {
		return ak < bk
	}
	// Natural string comparison over runes.
	ar, br := []rune(a.String()), []rune(b.String())
	// digits tracks whether the shared prefix ends in a digit.
	digits := false
	for i := 0; i < len(ar) && i < len(br); i++ {
		if ar[i] == br[i] {
			digits = unicode.IsDigit(ar[i])
			continue
		}
		al := unicode.IsLetter(ar[i])
		bl := unicode.IsLetter(br[i])
		if al && bl {
			// Two letters differ: plain rune order.
			return ar[i] < br[i]
		}
		if al || bl {
			// Letter vs non-letter at the first difference: after a
			// shared digit run the letter side sorts first, otherwise
			// the letter side sorts last.
			if digits {
				return al
			} else {
				return bl
			}
		}
		// Neither is a letter: compare the digit runs numerically.
		var ai, bi int
		var an, bn int64
		if ar[i] == '0' || br[i] == '0' {
			// Seed the accumulators when the shared prefix contains a
			// nonzero digit, so runs differing only in leading zeros
			// still compare consistently.
			for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
				if ar[j] != '0' {
					an = 1
					bn = 1
					break
				}
			}
		}
		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
			an = an*10 + int64(ar[ai]-'0')
		}
		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
			bn = bn*10 + int64(br[bi]-'0')
		}
		if an != bn {
			return an < bn
		}
		if ai != bi {
			// Equal numeric value but different run length (leading
			// zeros): the shorter run sorts first.
			return ai < bi
		}
		return ar[i] < br[i]
	}
	// One string is a prefix of the other: the shorter sorts first.
	return len(ar) < len(br)
}
|
||||
|
||||
// keyFloat returns v's value as a float64 when v holds a number or a bool,
// and reports whether such a conversion applies. Bools map to 0 and 1.
func keyFloat(v reflect.Value) (f float64, ok bool) {
	switch v.Kind() {
	case reflect.Bool:
		if v.Bool() {
			return 1, true
		}
		return 0, true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return float64(v.Int()), true
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return float64(v.Uint()), true
	case reflect.Float32, reflect.Float64:
		return v.Float(), true
	default:
		return 0, false
	}
}
|
||||
|
||||
// numLess reports whether a < b. Both values must have the same kind and
// be a number or a bool (false orders before true); any other kind panics.
func numLess(a, b reflect.Value) bool {
	switch a.Kind() {
	case reflect.Bool:
		return !a.Bool() && b.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return a.Int() < b.Int()
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return a.Uint() < b.Uint()
	case reflect.Float32, reflect.Float64:
		return a.Float() < b.Float()
	}
	panic("not a number")
}
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
// Copyright (c) 2006-2010 Kirill Simonov
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
// of the Software, and to permit persons to whom the Software is furnished to do
|
||||
// so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
|
||||
package yaml
|
||||
|
||||
// Set the writer error and return false.
|
||||
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
|
||||
emitter.error = yaml_WRITER_ERROR
|
||||
emitter.problem = problem
|
||||
return false
|
||||
}
|
||||
|
||||
// Flush the output buffer.
|
||||
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
|
||||
if emitter.write_handler == nil {
|
||||
panic("write handler not set")
|
||||
}
|
||||
|
||||
// Check if the buffer is empty.
|
||||
if emitter.buffer_pos == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
|
||||
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
|
||||
}
|
||||
emitter.buffer_pos = 0
|
||||
return true
|
||||
}
|
||||
|
|
@ -0,0 +1,703 @@
|
|||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package yaml implements YAML support for the Go language.
|
||||
//
|
||||
// Source code and other details for the project are available at GitHub:
|
||||
//
|
||||
// https://github.com/yaml/go-yaml
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document.
//
// UnmarshalYAML receives the *Node representing the value; returning a
// non-nil error aborts decoding of that value.
type Unmarshaler interface {
	UnmarshalYAML(value *Node) error
}

// obsoleteUnmarshaler is the older, callback-style unmarshaling interface,
// in which the value is obtained by calling the supplied unmarshal
// function. NOTE(review): the name marks it obsolete; presumably retained
// for compatibility with yaml.v2-style implementations — new code should
// implement Unmarshaler instead.
type obsoleteUnmarshaler interface {
	UnmarshalYAML(unmarshal func(interface{}) error) error
}

// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
	MarshalYAML() (interface{}, error)
}
|
||||
|
||||
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
//	type T struct {
//		F int `yaml:"a,omitempty"`
//		B int
//	}
//	var t T
//	yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
func Unmarshal(in []byte, out interface{}) (err error) {
	// The third argument (strict) is unused by unmarshal; false is passed
	// for historical reasons.
	return unmarshal(in, out, false)
}
|
||||
|
||||
// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
	parser *parser
	// knownFields, when set via KnownFields, is copied into each decode
	// so mapping keys must match fields in the target struct.
	knownFields bool
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{
		parser: newParserFromReader(r),
	}
}

// KnownFields ensures that the keys in decoded mappings
// exist as fields in the struct being decoded into.
func (dec *Decoder) KnownFields(enable bool) {
	dec.knownFields = enable
}
|
||||
|
||||
// Decode reads the next YAML-encoded value from its input
// and stores it in the value pointed to by v.
//
// Decode returns io.EOF when the input stream is exhausted.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (dec *Decoder) Decode(v interface{}) (err error) {
	d := newDecoder()
	d.knownFields = dec.knownFields
	// handleErr receives &err so failures during parse/unmarshal are
	// reported through the return value; it must be deferred before
	// parsing starts.
	defer handleErr(&err)
	node := dec.parser.parse()
	if node == nil {
		return io.EOF
	}
	out := reflect.ValueOf(v)
	if out.Kind() == reflect.Ptr && !out.IsNil() {
		// Decode into what the pointer references.
		out = out.Elem()
	}
	d.unmarshal(node, out)
	if len(d.terrors) > 0 {
		// Type mismatches accumulate during decoding; report them all.
		return &TypeError{d.terrors}
	}
	return nil
}
|
||||
|
||||
// Decode decodes the node and stores its data into the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (n *Node) Decode(v interface{}) (err error) {
	d := newDecoder()
	// handleErr receives &err so internal failures surface through the
	// return value rather than a panic.
	defer handleErr(&err)
	out := reflect.ValueOf(v)
	if out.Kind() == reflect.Ptr && !out.IsNil() {
		// Decode into what the pointer references.
		out = out.Elem()
	}
	d.unmarshal(n, out)
	if len(d.terrors) > 0 {
		// Type mismatches accumulate during decoding; report them all.
		return &TypeError{d.terrors}
	}
	return nil
}
|
||||
|
||||
// unmarshal is the implementation behind Unmarshal: it parses the first
// document in in and decodes it into out, collecting type errors.
//
// NOTE(review): the strict parameter is accepted but never read in this
// function; it looks like a leftover from an earlier strict mode
// (Unmarshal passes false).
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
	// Route internal parse/decode failures into err.
	defer handleErr(&err)
	d := newDecoder()
	p := newParser(in)
	defer p.destroy()
	// A nil node (no document) leaves out untouched.
	node := p.parse()
	if node != nil {
		v := reflect.ValueOf(out)
		if v.Kind() == reflect.Ptr && !v.IsNil() {
			// Decode into what the pointer references.
			v = v.Elem()
		}
		d.unmarshal(node, v)
	}
	if len(d.terrors) > 0 {
		// Type mismatches accumulate during decoding; report them all.
		return &TypeError{d.terrors}
	}
	return nil
}
|
||||
|
||||
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
//	`(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
//	omitempty    Only include the field if it's not set to the zero
//	             value for the type or to empty slices or maps.
//	             Zero valued structs will be omitted if all their public
//	             fields are zero, unless they implement an IsZero
//	             method (see the IsZeroer interface type), in which
//	             case the field will be excluded if IsZero returns true.
//
//	flow         Marshal using a flow style (useful for structs,
//	             sequences and maps).
//
//	inline       Inline the field, which must be a struct or a map,
//	             causing all of its fields or keys to be processed as if
//	             they were part of the outer struct. For maps, keys must
//	             not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
//	type T struct {
//		F int `yaml:"a,omitempty"`
//		B int
//	}
//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
func Marshal(in interface{}) (out []byte, err error) {
	// Route internal encoding failures into err.
	defer handleErr(&err)
	e := newEncoder()
	defer e.destroy()
	e.marshalDoc("", reflect.ValueOf(in))
	e.finish()
	out = e.out
	return
}
|
||||
|
||||
// An Encoder writes YAML values to an output stream.
type Encoder struct {
	// encoder is the low-level encoder carrying the emitter state
	// and the destination writer.
	encoder *encoder
}

// NewEncoder returns a new encoder that writes to w.
// The Encoder should be closed after use to flush all data
// to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{
		encoder: newEncoderWithWriter(w),
	}
}
|
||||
|
||||
// Encode writes the YAML encoding of v to the stream.
// If multiple items are encoded to the stream, the
// second and subsequent document will be preceded
// with a "---" document separator, but the first will not.
//
// See the documentation for Marshal for details about the conversion of Go
// values to YAML.
func (e *Encoder) Encode(v interface{}) (err error) {
	// Internal failures are raised as yamlError panics; handleErr
	// converts them into the returned err.
	defer handleErr(&err)
	e.encoder.marshalDoc("", reflect.ValueOf(v))
	return nil
}
|
||||
|
||||
// Encode encodes value v and stores its representation in n.
|
||||
//
|
||||
// See the documentation for Marshal for details about the
|
||||
// conversion of Go values into YAML.
|
||||
func (n *Node) Encode(v interface{}) (err error) {
|
||||
defer handleErr(&err)
|
||||
e := newEncoder()
|
||||
defer e.destroy()
|
||||
e.marshalDoc("", reflect.ValueOf(v))
|
||||
e.finish()
|
||||
p := newParser(e.out)
|
||||
p.textless = true
|
||||
defer p.destroy()
|
||||
doc := p.parse()
|
||||
*n = *doc.Content[0]
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetIndent changes the used indentation used when encoding.
|
||||
func (e *Encoder) SetIndent(spaces int) {
|
||||
if spaces < 0 {
|
||||
panic("yaml: cannot indent to a negative number of spaces")
|
||||
}
|
||||
e.encoder.indent = spaces
|
||||
}
|
||||
|
||||
// CompactSeqIndent makes it so that '- ' is considered part of the indentation.
// Sequences are then emitted flush with their parent key rather than indented
// beneath it.
func (e *Encoder) CompactSeqIndent() {
	e.encoder.emitter.compact_sequence_indent = true
}

// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation.
// This restores the emitter's default sequence layout.
func (e *Encoder) DefaultSeqIndent() {
	e.encoder.emitter.compact_sequence_indent = false
}
|
||||
|
||||
// Close closes the encoder by writing any remaining data.
// It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) {
	// Internal failures are raised as yamlError panics; handleErr
	// converts them into the returned err.
	defer handleErr(&err)
	e.encoder.finish()
	return nil
}
|
||||
|
||||
// handleErr recovers from a panic raised via fail or failf and stores the
// wrapped error into *err. Panics carrying any other value are re-raised
// untouched. It must be invoked through defer.
func handleErr(err *error) {
	v := recover()
	if v == nil {
		return
	}
	if e, ok := v.(yamlError); ok {
		*err = e.err
		return
	}
	panic(v)
}

// yamlError wraps an error so handleErr can tell this package's own
// failures apart from unrelated panics.
type yamlError struct {
	err error
}

// fail panics with err wrapped so that handleErr will return it.
func fail(err error) {
	panic(yamlError{err})
}

// failf panics with a formatted error prefixed with "yaml: ", to be
// recovered by handleErr.
func failf(format string, args ...interface{}) {
	fail(fmt.Errorf("yaml: "+format, args...))
}
|
||||
|
||||
// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
	// Errors holds one human-readable message per field that could
	// not be decoded.
	Errors []string
}

// Error implements the error interface, joining all collected messages
// under a single "yaml: unmarshal errors:" heading.
func (e *TypeError) Error() string {
	return "yaml: unmarshal errors:\n " + strings.Join(e.Errors, "\n ")
}
|
||||
|
||||
// Kind identifies which kind of YAML construct a Node represents within
// the document tree.
type Kind uint32

const (
	DocumentNode Kind = 1 << iota // a whole document; Content holds the root node
	SequenceNode                  // a YAML sequence (list)
	MappingNode                   // a YAML mapping (key/value pairs)
	ScalarNode                    // a single scalar value
	AliasNode                     // an alias (*anchor) referring to another node
)

// Style selects the presentation style of a node when it is encoded
// (quoting, block vs. flow layout, explicit tagging).
type Style uint32

const (
	TaggedStyle       Style = 1 << iota // render the tag explicitly
	DoubleQuotedStyle                   // "double quoted" scalar
	SingleQuotedStyle                   // 'single quoted' scalar
	LiteralStyle                        // literal block scalar (|)
	FoldedStyle                         // folded block scalar (>)
	FlowStyle                           // flow layout for sequences and mappings
)
|
||||
|
||||
// Node represents an element in the YAML document hierarchy. While documents
// are typically encoded and decoded into higher level types, such as structs
// and maps, Node is an intermediate representation that allows detailed
// control over the content being decoded or encoded.
//
// It's worth noting that although Node offers access into details such as
// line numbers, columns, and comments, the content when re-encoded will not
// have its original textual representation preserved. An effort is made to
// render the data pleasantly, and to preserve comments near the data they
// describe, though.
//
// Values that make use of the Node type interact with the yaml package in the
// same way any other type would do, by encoding and decoding yaml data
// directly or indirectly into them.
//
// For example:
//
//	var person struct {
//	        Name    string
//	        Address yaml.Node
//	}
//	err := yaml.Unmarshal(data, &person)
//
// Or by itself:
//
//	var person Node
//	err := yaml.Unmarshal(data, &person)
type Node struct {
	// Kind defines whether the node is a document, a mapping, a sequence,
	// a scalar value, or an alias to another node. The specific data type of
	// scalar nodes may be obtained via the ShortTag and LongTag methods.
	Kind Kind

	// Style allows customizing the appearance of the node in the tree.
	Style Style

	// Tag holds the YAML tag defining the data type for the value.
	// When decoding, this field will always be set to the resolved tag,
	// even when it wasn't explicitly provided in the YAML content.
	// When encoding, if this field is unset the value type will be
	// implied from the node properties, and if it is set, it will only
	// be serialized into the representation if TaggedStyle is used or
	// the implicit tag diverges from the provided one.
	Tag string

	// Value holds the unescaped and unquoted representation of the value.
	Value string

	// Anchor holds the anchor name for this node, which allows aliases to point to it.
	Anchor string

	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
	Alias *Node

	// Content holds contained nodes for documents, mappings, and sequences.
	Content []*Node

	// HeadComment holds any comments in the lines preceding the node and
	// not separated by an empty line.
	HeadComment string

	// LineComment holds any comments at the end of the line where the node is in.
	LineComment string

	// FootComment holds any comments following the node and before empty lines.
	FootComment string

	// Line and Column hold the node position in the decoded YAML text.
	// These fields are not respected when encoding the node.
	Line   int
	Column int
}
|
||||
|
||||
// IsZero returns whether the node has all of its fields unset.
|
||||
func (n *Node) IsZero() bool {
|
||||
return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
|
||||
n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
|
||||
}
|
||||
|
||||
// LongTag returns the long form of the tag that indicates the data type for
// the node. If the Tag field isn't explicitly defined, one will be computed
// based on the node properties.
func (n *Node) LongTag() string {
	// Expand the short form (e.g. "!!str") into its full namespaced form.
	return longTag(n.ShortTag())
}
|
||||
|
||||
// ShortTag returns the short form of the YAML tag that indicates data type for
// the node. If the Tag field isn't explicitly defined, one will be computed
// based on the node properties.
func (n *Node) ShortTag() string {
	// A quoted or block scalar without a meaningful tag is a string.
	if n.indicatedString() {
		return strTag
	}
	if n.Tag == "" || n.Tag == "!" {
		// No explicit tag: infer one from the node kind.
		switch n.Kind {
		case MappingNode:
			return mapTag
		case SequenceNode:
			return seqTag
		case AliasNode:
			// An alias reports the tag of the node it points to.
			if n.Alias != nil {
				return n.Alias.ShortTag()
			}
		case ScalarNode:
			// Resolve the scalar text into a concrete type tag.
			tag, _ := resolve("", n.Value)
			return tag
		case 0:
			// Special case to make the zero value convenient.
			if n.IsZero() {
				return nullTag
			}
		}
		return ""
	}
	return shortTag(n.Tag)
}
|
||||
|
||||
func (n *Node) indicatedString() bool {
|
||||
return n.Kind == ScalarNode &&
|
||||
(shortTag(n.Tag) == strTag ||
|
||||
(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
|
||||
}
|
||||
|
||||
// SetString is a convenience function that sets the node to a string value
|
||||
// and defines its style in a pleasant way depending on its content.
|
||||
func (n *Node) SetString(s string) {
|
||||
n.Kind = ScalarNode
|
||||
if utf8.ValidString(s) {
|
||||
n.Value = s
|
||||
n.Tag = strTag
|
||||
} else {
|
||||
n.Value = encodeBase64(s)
|
||||
n.Tag = binaryTag
|
||||
}
|
||||
if strings.Contains(n.Value, "\n") {
|
||||
n.Style = LiteralStyle
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes

// The code in this section was copied from mgo/bson.

// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
	// FieldsMap indexes field details by their yaml key.
	FieldsMap map[string]fieldInfo
	// FieldsList holds field details in declaration order.
	FieldsList []fieldInfo

	// InlineMap is the number of the field in the struct that
	// contains an ,inline map, or -1 if there's none.
	InlineMap int

	// InlineUnmarshalers holds indexes to inlined fields that
	// contain unmarshaler values.
	InlineUnmarshalers [][]int
}

// fieldInfo describes how a single struct field is serialized.
type fieldInfo struct {
	Key       string // the yaml key: the tag name, or the lowercased field name
	Num       int    // the field's index within its struct
	OmitEmpty bool   // set when the ,omitempty flag is present
	Flow      bool   // set when the ,flow flag is present
	// Id holds the unique field identifier, so we can cheaply
	// check for field duplicates without maintaining an extra map.
	Id int

	// Inline holds the field index if the field is part of an inlined struct.
	Inline []int
}
|
||||
|
||||
// structMap caches the computed structInfo per struct type; fieldMapMutex
// guards it so concurrent marshal/unmarshal calls can share the cache.
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex

// unmarshalerType holds the reflect.Type of the Unmarshaler interface,
// used to detect inlined fields with custom unmarshaling.
var unmarshalerType reflect.Type

func init() {
	var v Unmarshaler
	unmarshalerType = reflect.ValueOf(&v).Elem().Type()
}
|
||||
|
||||
func getStructInfo(st reflect.Type) (*structInfo, error) {
|
||||
fieldMapMutex.RLock()
|
||||
sinfo, found := structMap[st]
|
||||
fieldMapMutex.RUnlock()
|
||||
if found {
|
||||
return sinfo, nil
|
||||
}
|
||||
|
||||
n := st.NumField()
|
||||
fieldsMap := make(map[string]fieldInfo)
|
||||
fieldsList := make([]fieldInfo, 0, n)
|
||||
inlineMap := -1
|
||||
inlineUnmarshalers := [][]int(nil)
|
||||
for i := 0; i != n; i++ {
|
||||
field := st.Field(i)
|
||||
if field.PkgPath != "" && !field.Anonymous {
|
||||
continue // Private field
|
||||
}
|
||||
|
||||
info := fieldInfo{Num: i}
|
||||
|
||||
tag := field.Tag.Get("yaml")
|
||||
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
|
||||
tag = string(field.Tag)
|
||||
}
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
inline := false
|
||||
fields := strings.Split(tag, ",")
|
||||
if len(fields) > 1 {
|
||||
for _, flag := range fields[1:] {
|
||||
switch flag {
|
||||
case "omitempty":
|
||||
info.OmitEmpty = true
|
||||
case "flow":
|
||||
info.Flow = true
|
||||
case "inline":
|
||||
inline = true
|
||||
default:
|
||||
return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st))
|
||||
}
|
||||
}
|
||||
tag = fields[0]
|
||||
}
|
||||
|
||||
if inline {
|
||||
switch field.Type.Kind() {
|
||||
case reflect.Map:
|
||||
if inlineMap >= 0 {
|
||||
return nil, errors.New("multiple ,inline maps in struct " + st.String())
|
||||
}
|
||||
if field.Type.Key() != reflect.TypeOf("") {
|
||||
return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
|
||||
}
|
||||
inlineMap = info.Num
|
||||
case reflect.Struct, reflect.Ptr:
|
||||
ftype := field.Type
|
||||
for ftype.Kind() == reflect.Ptr {
|
||||
ftype = ftype.Elem()
|
||||
}
|
||||
if ftype.Kind() != reflect.Struct {
|
||||
return nil, errors.New("option ,inline may only be used on a struct or map field")
|
||||
}
|
||||
if reflect.PtrTo(ftype).Implements(unmarshalerType) {
|
||||
inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
|
||||
} else {
|
||||
sinfo, err := getStructInfo(ftype)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, index := range sinfo.InlineUnmarshalers {
|
||||
inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
|
||||
}
|
||||
for _, finfo := range sinfo.FieldsList {
|
||||
if _, found := fieldsMap[finfo.Key]; found {
|
||||
msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if finfo.Inline == nil {
|
||||
finfo.Inline = []int{i, finfo.Num}
|
||||
} else {
|
||||
finfo.Inline = append([]int{i}, finfo.Inline...)
|
||||
}
|
||||
finfo.Id = len(fieldsList)
|
||||
fieldsMap[finfo.Key] = finfo
|
||||
fieldsList = append(fieldsList, finfo)
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, errors.New("option ,inline may only be used on a struct or map field")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if tag != "" {
|
||||
info.Key = tag
|
||||
} else {
|
||||
info.Key = strings.ToLower(field.Name)
|
||||
}
|
||||
|
||||
if _, found = fieldsMap[info.Key]; found {
|
||||
msg := "duplicated key '" + info.Key + "' in struct " + st.String()
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
|
||||
info.Id = len(fieldsList)
|
||||
fieldsList = append(fieldsList, info)
|
||||
fieldsMap[info.Key] = info
|
||||
}
|
||||
|
||||
sinfo = &structInfo{
|
||||
FieldsMap: fieldsMap,
|
||||
FieldsList: fieldsList,
|
||||
InlineMap: inlineMap,
|
||||
InlineUnmarshalers: inlineUnmarshalers,
|
||||
}
|
||||
|
||||
fieldMapMutex.Lock()
|
||||
structMap[st] = sinfo
|
||||
fieldMapMutex.Unlock()
|
||||
return sinfo, nil
|
||||
}
|
||||
|
||||
// IsZeroer is used to check whether an object is zero to
|
||||
// determine whether it should be omitted when marshaling
|
||||
// with the omitempty flag. One notable implementation
|
||||
// is time.Time.
|
||||
type IsZeroer interface {
|
||||
IsZero() bool
|
||||
}
|
||||
|
||||
func isZero(v reflect.Value) bool {
|
||||
kind := v.Kind()
|
||||
if z, ok := v.Interface().(IsZeroer); ok {
|
||||
if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
|
||||
return true
|
||||
}
|
||||
return z.IsZero()
|
||||
}
|
||||
switch kind {
|
||||
case reflect.String:
|
||||
return len(v.String()) == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
case reflect.Slice:
|
||||
return v.Len() == 0
|
||||
case reflect.Map:
|
||||
return v.Len() == 0
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Struct:
|
||||
vt := v.Type()
|
||||
for i := v.NumField() - 1; i >= 0; i-- {
|
||||
if vt.Field(i).PkgPath != "" {
|
||||
continue // Private field
|
||||
}
|
||||
if !isZero(v.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
@ -0,0 +1,811 @@
|
|||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
// Copyright (c) 2006-2010 Kirill Simonov
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
// of the Software, and to permit persons to whom the Software is furnished to do
|
||||
// so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
|
||||
package yaml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// The version directive data (a %YAML <major>.<minor> directive).
type yaml_version_directive_t struct {
	major int8 // The major version number.
	minor int8 // The minor version number.
}

// The tag directive data (a %TAG <handle> <prefix> directive).
type yaml_tag_directive_t struct {
	handle []byte // The tag handle.
	prefix []byte // The tag prefix.
}
|
||||
|
||||
// yaml_encoding_t identifies the character encoding of the YAML stream.
type yaml_encoding_t int

// The stream encoding.
const (
	// Let the parser choose the encoding.
	yaml_ANY_ENCODING yaml_encoding_t = iota

	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)

// yaml_break_t identifies the line-break convention used by the stream.
type yaml_break_t int

// Line break types.
const (
	// Let the parser choose the break type.
	yaml_ANY_BREAK yaml_break_t = iota

	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)
|
||||
|
||||
// yaml_error_type_t classifies failures raised by the parser and emitter.
type yaml_error_type_t int

// Many bad things could happen with the parser and emitter.
const (
	// No error is produced.
	yaml_NO_ERROR yaml_error_type_t = iota

	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
	yaml_READER_ERROR   // Cannot read or decode the input stream.
	yaml_SCANNER_ERROR  // Cannot scan the input stream.
	yaml_PARSER_ERROR   // Cannot parse the input stream.
	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
	yaml_WRITER_ERROR   // Cannot write to the output stream.
	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
)

// The pointer position.
type yaml_mark_t struct {
	index  int // The position index.
	line   int // The position line.
	column int // The position column.
}
|
||||
|
||||
// Node Styles

// yaml_style_t is the shared storage type underlying the scalar, sequence,
// and mapping style enumerations below.
type yaml_style_t int8

// yaml_scalar_style_t selects how a scalar is rendered by the emitter.
type yaml_scalar_style_t yaml_style_t

// Scalar styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0

	yaml_PLAIN_SCALAR_STYLE         yaml_scalar_style_t = 1 << iota // The plain scalar style.
	yaml_SINGLE_QUOTED_SCALAR_STYLE                                 // The single-quoted scalar style.
	yaml_DOUBLE_QUOTED_SCALAR_STYLE                                 // The double-quoted scalar style.
	yaml_LITERAL_SCALAR_STYLE                                       // The literal scalar style.
	yaml_FOLDED_SCALAR_STYLE                                        // The folded scalar style.
)

// yaml_sequence_style_t selects how a sequence is rendered by the emitter.
type yaml_sequence_style_t yaml_style_t

// Sequence styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota

	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
)

// yaml_mapping_style_t selects how a mapping is rendered by the emitter.
type yaml_mapping_style_t yaml_style_t

// Mapping styles.
const (
	// Let the emitter choose the style.
	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota

	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
)
|
||||
|
||||
// Tokens

// yaml_token_type_t identifies the kind of a token produced by the scanner.
type yaml_token_type_t int

// Token types.
const (
	// An empty token.
	yaml_NO_TOKEN yaml_token_type_t = iota

	yaml_STREAM_START_TOKEN // A STREAM-START token.
	yaml_STREAM_END_TOKEN   // A STREAM-END token.

	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.

	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.

	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.

	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
	yaml_KEY_TOKEN         // A KEY token.
	yaml_VALUE_TOKEN       // A VALUE token.

	yaml_ALIAS_TOKEN  // An ALIAS token.
	yaml_ANCHOR_TOKEN // An ANCHOR token.
	yaml_TAG_TOKEN    // A TAG token.
	yaml_SCALAR_TOKEN // A SCALAR token.
)
|
||||
|
||||
func (tt yaml_token_type_t) String() string {
|
||||
switch tt {
|
||||
case yaml_NO_TOKEN:
|
||||
return "yaml_NO_TOKEN"
|
||||
case yaml_STREAM_START_TOKEN:
|
||||
return "yaml_STREAM_START_TOKEN"
|
||||
case yaml_STREAM_END_TOKEN:
|
||||
return "yaml_STREAM_END_TOKEN"
|
||||
case yaml_VERSION_DIRECTIVE_TOKEN:
|
||||
return "yaml_VERSION_DIRECTIVE_TOKEN"
|
||||
case yaml_TAG_DIRECTIVE_TOKEN:
|
||||
return "yaml_TAG_DIRECTIVE_TOKEN"
|
||||
case yaml_DOCUMENT_START_TOKEN:
|
||||
return "yaml_DOCUMENT_START_TOKEN"
|
||||
case yaml_DOCUMENT_END_TOKEN:
|
||||
return "yaml_DOCUMENT_END_TOKEN"
|
||||
case yaml_BLOCK_SEQUENCE_START_TOKEN:
|
||||
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
|
||||
case yaml_BLOCK_MAPPING_START_TOKEN:
|
||||
return "yaml_BLOCK_MAPPING_START_TOKEN"
|
||||
case yaml_BLOCK_END_TOKEN:
|
||||
return "yaml_BLOCK_END_TOKEN"
|
||||
case yaml_FLOW_SEQUENCE_START_TOKEN:
|
||||
return "yaml_FLOW_SEQUENCE_START_TOKEN"
|
||||
case yaml_FLOW_SEQUENCE_END_TOKEN:
|
||||
return "yaml_FLOW_SEQUENCE_END_TOKEN"
|
||||
case yaml_FLOW_MAPPING_START_TOKEN:
|
||||
return "yaml_FLOW_MAPPING_START_TOKEN"
|
||||
case yaml_FLOW_MAPPING_END_TOKEN:
|
||||
return "yaml_FLOW_MAPPING_END_TOKEN"
|
||||
case yaml_BLOCK_ENTRY_TOKEN:
|
||||
return "yaml_BLOCK_ENTRY_TOKEN"
|
||||
case yaml_FLOW_ENTRY_TOKEN:
|
||||
return "yaml_FLOW_ENTRY_TOKEN"
|
||||
case yaml_KEY_TOKEN:
|
||||
return "yaml_KEY_TOKEN"
|
||||
case yaml_VALUE_TOKEN:
|
||||
return "yaml_VALUE_TOKEN"
|
||||
case yaml_ALIAS_TOKEN:
|
||||
return "yaml_ALIAS_TOKEN"
|
||||
case yaml_ANCHOR_TOKEN:
|
||||
return "yaml_ANCHOR_TOKEN"
|
||||
case yaml_TAG_TOKEN:
|
||||
return "yaml_TAG_TOKEN"
|
||||
case yaml_SCALAR_TOKEN:
|
||||
return "yaml_SCALAR_TOKEN"
|
||||
}
|
||||
return "<unknown token>"
|
||||
}
|
||||
|
||||
// The token structure, as produced by the scanner. Which fields are
// meaningful depends on typ, as noted on each field.
type yaml_token_t struct {
	// The token type.
	typ yaml_token_type_t

	// The start/end of the token.
	start_mark, end_mark yaml_mark_t

	// The stream encoding (for yaml_STREAM_START_TOKEN).
	encoding yaml_encoding_t

	// The alias/anchor/scalar value or tag/tag directive handle
	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
	value []byte

	// The tag suffix (for yaml_TAG_TOKEN).
	suffix []byte

	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
	prefix []byte

	// The scalar style (for yaml_SCALAR_TOKEN).
	style yaml_scalar_style_t

	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
	major, minor int8
}
|
||||
|
||||
// Events

// yaml_event_type_t identifies the kind of an event produced by the parser
// or consumed by the emitter.
type yaml_event_type_t int8

// Event types.
const (
	// An empty event.
	yaml_NO_EVENT yaml_event_type_t = iota

	yaml_STREAM_START_EVENT   // A STREAM-START event.
	yaml_STREAM_END_EVENT     // A STREAM-END event.
	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
	yaml_ALIAS_EVENT          // An ALIAS event.
	yaml_SCALAR_EVENT         // A SCALAR event.
	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
	yaml_TAIL_COMMENT_EVENT
)

// eventStrings maps each event type to a human-readable name, indexed by
// the event type value.
var eventStrings = []string{
	yaml_NO_EVENT:             "none",
	yaml_STREAM_START_EVENT:   "stream start",
	yaml_STREAM_END_EVENT:     "stream end",
	yaml_DOCUMENT_START_EVENT: "document start",
	yaml_DOCUMENT_END_EVENT:   "document end",
	yaml_ALIAS_EVENT:          "alias",
	yaml_SCALAR_EVENT:         "scalar",
	yaml_SEQUENCE_START_EVENT: "sequence start",
	yaml_SEQUENCE_END_EVENT:   "sequence end",
	yaml_MAPPING_START_EVENT:  "mapping start",
	yaml_MAPPING_END_EVENT:    "mapping end",
	yaml_TAIL_COMMENT_EVENT:   "tail comment",
}
|
||||
|
||||
func (e yaml_event_type_t) String() string {
|
||||
if e < 0 || int(e) >= len(eventStrings) {
|
||||
return fmt.Sprintf("unknown event %d", e)
|
||||
}
|
||||
return eventStrings[e]
|
||||
}
|
||||
|
||||
// The event structure, exchanged between the parser and the emitter. Which
// fields are meaningful depends on typ, as noted on each field.
type yaml_event_t struct {

	// The event type.
	typ yaml_event_type_t

	// The start and end of the event.
	start_mark, end_mark yaml_mark_t

	// The document encoding (for yaml_STREAM_START_EVENT).
	encoding yaml_encoding_t

	// The version directive (for yaml_DOCUMENT_START_EVENT).
	version_directive *yaml_version_directive_t

	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
	tag_directives []yaml_tag_directive_t

	// The comments attached to the event.
	head_comment []byte
	line_comment []byte
	foot_comment []byte
	tail_comment []byte

	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
	anchor []byte

	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	tag []byte

	// The scalar value (for yaml_SCALAR_EVENT).
	value []byte

	// Is the document start/end indicator implicit, or the tag optional?
	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
	implicit bool

	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
	quoted_implicit bool

	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
	style yaml_style_t
}

// The accessors below reinterpret the shared style field as the style type
// matching the event kind.
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
|
||||
|
||||
// Nodes

// Well-known YAML tags resolved by this package.
const (
	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.

	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.

	// Not in original libyaml.
	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"

	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
)

// yaml_node_type_t identifies the kind of a composed document node.
type yaml_node_type_t int

// Node types.
const (
	// An empty node.
	yaml_NO_NODE yaml_node_type_t = iota

	yaml_SCALAR_NODE   // A scalar node.
	yaml_SEQUENCE_NODE // A sequence node.
	yaml_MAPPING_NODE  // A mapping node.
)

// An element of a sequence node.
type yaml_node_item_t int

// An element of a mapping node.
type yaml_node_pair_t struct {
	key   int // The key of the element.
	value int // The value of the element.
}
|
||||
|
||||
// The node structure. Only the member matching typ (scalar, sequence,
// or mapping) carries meaningful data.
type yaml_node_t struct {
	typ yaml_node_type_t // The node type.
	tag []byte           // The node tag.

	// The node data.

	// The scalar parameters (for yaml_SCALAR_NODE).
	scalar struct {
		value  []byte              // The scalar value.
		length int                 // The length of the scalar value.
		style  yaml_scalar_style_t // The scalar style.
	}

	// The sequence parameters (for YAML_SEQUENCE_NODE).
	sequence struct {
		items_data []yaml_node_item_t    // The stack of sequence items.
		style      yaml_sequence_style_t // The sequence style.
	}

	// The mapping parameters (for yaml_MAPPING_NODE).
	mapping struct {
		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
		pairs_start *yaml_node_pair_t    // The beginning of the stack.
		pairs_end   *yaml_node_pair_t    // The end of the stack.
		pairs_top   *yaml_node_pair_t    // The top of the stack.
		style       yaml_mapping_style_t // The mapping style.
	}

	start_mark yaml_mark_t // The beginning of the node.
	end_mark   yaml_mark_t // The end of the node.

}

// The document structure.
type yaml_document_t struct {

	// The document nodes.
	nodes []yaml_node_t

	// The version directive.
	version_directive *yaml_version_directive_t

	// The list of tag directives.
	tag_directives_data  []yaml_tag_directive_t
	tag_directives_start int // The beginning of the tag directives list.
	tag_directives_end   int // The end of the tag directives list.

	start_implicit int // Is the document start indicator implicit?
	end_implicit   int // Is the document end indicator implicit?

	// The start/end of the document.
	start_mark, end_mark yaml_mark_t
}
|
||||
|
||||
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write not more than size bytes to the buffer.
// The number of written bytes should be set to the size_read variable.
//
//	[in,out] data       A pointer to an application data specified by
//	                    yaml_parser_set_input().
//	[out]    buffer     The buffer to write the data from the source.
//	[in]     size       The size of the buffer.
//	[out]    size_read  The actual number of bytes read from the source.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0. On EOF, the handler should set the
// size_read to 0 and return 1.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
|
||||
|
||||
// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
	possible     bool        // Is a simple key possible?
	required     bool        // Is a simple key required?
	token_number int         // The number of the token.
	mark         yaml_mark_t // The position mark.
}
|
||||
|
||||
// The states of the parser.
type yaml_parser_state_t int

const (
	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota // Expect STREAM-START.

	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
	yaml_PARSE_END_STATE                               // Expect nothing.
)
|
||||
|
||||
func (ps yaml_parser_state_t) String() string {
|
||||
switch ps {
|
||||
case yaml_PARSE_STREAM_START_STATE:
|
||||
return "yaml_PARSE_STREAM_START_STATE"
|
||||
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
|
||||
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
|
||||
case yaml_PARSE_DOCUMENT_START_STATE:
|
||||
return "yaml_PARSE_DOCUMENT_START_STATE"
|
||||
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
|
||||
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
|
||||
case yaml_PARSE_DOCUMENT_END_STATE:
|
||||
return "yaml_PARSE_DOCUMENT_END_STATE"
|
||||
case yaml_PARSE_BLOCK_NODE_STATE:
|
||||
return "yaml_PARSE_BLOCK_NODE_STATE"
|
||||
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
|
||||
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
|
||||
case yaml_PARSE_FLOW_NODE_STATE:
|
||||
return "yaml_PARSE_FLOW_NODE_STATE"
|
||||
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
|
||||
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
|
||||
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
|
||||
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
|
||||
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
|
||||
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
|
||||
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
|
||||
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
|
||||
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
|
||||
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
|
||||
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
|
||||
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
|
||||
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
|
||||
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
|
||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
|
||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
|
||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
|
||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
|
||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
|
||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
|
||||
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
|
||||
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
|
||||
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
|
||||
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
|
||||
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
|
||||
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
|
||||
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
|
||||
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
|
||||
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
|
||||
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
|
||||
case yaml_PARSE_END_STATE:
|
||||
return "yaml_PARSE_END_STATE"
|
||||
}
|
||||
return "<unknown parser state>"
|
||||
}
|
||||
|
||||
// This structure holds aliases data.
type yaml_alias_data_t struct {
	anchor []byte      // The anchor.
	index  int         // The node id.
	mark   yaml_mark_t // The anchor mark.
}
|
||||
|
||||
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {

	// Error handling

	error yaml_error_type_t // Error type.

	problem string // Error description.

	// The byte about which the problem occurred.
	problem_offset int
	problem_value  int
	problem_mark   yaml_mark_t

	// The error context.
	context      string
	context_mark yaml_mark_t

	// Reader stuff

	read_handler yaml_read_handler_t // Read handler.

	input_reader io.Reader // File input data.
	input        []byte    // String input data.
	input_pos    int       // Current read position in input.

	eof bool // EOF flag

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	unread int // The number of unread characters in the buffer.

	newlines int // The number of line breaks since last non-break/non-blank character

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The input encoding.

	offset int         // The offset of the current position (in bytes).
	mark   yaml_mark_t // The mark of the current position.

	// Comments

	head_comment []byte // The current head comments
	line_comment []byte // The current line comments
	foot_comment []byte // The current foot comments
	tail_comment []byte // Foot comment that happens at the end of a block.
	stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc)

	comments      []yaml_comment_t // The folded comments for all parsed tokens
	comments_head int              // Index of the first unconsumed entry in comments.

	// Scanner stuff

	stream_start_produced bool // Have we started to scan the input stream?
	stream_end_produced   bool // Have we reached the end of the input stream?

	flow_level int // The number of unclosed '[' and '{' indicators.

	tokens          []yaml_token_t // The tokens queue.
	tokens_head     int            // The head of the tokens queue.
	tokens_parsed   int            // The number of tokens fetched from the queue.
	token_available bool           // Does the tokens queue contain a token ready for dequeueing.

	indent  int   // The current indentation level.
	indents []int // The indentation levels stack.

	simple_key_allowed bool                // May a simple key occur at the current position?
	simple_keys        []yaml_simple_key_t // The stack of simple keys.
	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number

	// Parser stuff

	state          yaml_parser_state_t    // The current parser state.
	states         []yaml_parser_state_t  // The parser states stack.
	marks          []yaml_mark_t          // The stack of marks.
	tag_directives []yaml_tag_directive_t // The list of TAG directives.

	// Dumper stuff

	aliases []yaml_alias_data_t // The alias data.

	document *yaml_document_t // The currently parsed document.
}
|
||||
|
||||
// yaml_comment_t holds one comment collected by the scanner, together with
// the positions needed to associate it with nearby tokens.
type yaml_comment_t struct {
	scan_mark  yaml_mark_t // Position where scanning for comments started
	token_mark yaml_mark_t // Position after which tokens will be associated with this comment
	start_mark yaml_mark_t // Position of '#' comment mark
	end_mark   yaml_mark_t // Position where comment terminated

	head []byte // Comment text placed above the associated token.
	line []byte // Comment text on the same line as the token.
	foot []byte // Comment text placed below the associated token.
}
|
||||
|
||||
// Emitter Definitions

// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write size bytes of the
// buffer to the output.
//
//	[in,out] data    A pointer to an application data specified by
//	                 yaml_emitter_set_output().
//	[in]     buffer  The buffer with bytes to be written.
//	[in]     size    The size of the buffer.
//
// On success, the handler should return 1. If the handler failed,
// the returned value should be 0.
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
|
||||
|
||||
type yaml_emitter_state_t int

// The emitter states.
const (
	// Expect STREAM-START.
	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota

	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE   // Expect the next item of a flow sequence, with the comma already written out
	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE     // Expect the next key of a flow mapping, with the comma already written out
	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
	yaml_EMIT_END_STATE                        // Expect nothing.
)
|
||||
|
||||
// The emitter structure.
//
// All members are internal. Manage the structure using the yaml_emitter_
// family of functions.
type yaml_emitter_t struct {

	// Error handling

	error   yaml_error_type_t // Error type.
	problem string            // Error description.

	// Writer stuff

	write_handler yaml_write_handler_t // Write handler.

	output_buffer *[]byte   // String output data.
	output_writer io.Writer // File output data.

	buffer     []byte // The working buffer.
	buffer_pos int    // The current position of the buffer.

	raw_buffer     []byte // The raw buffer.
	raw_buffer_pos int    // The current position of the buffer.

	encoding yaml_encoding_t // The stream encoding.

	// Emitter stuff

	canonical   bool         // If the output is in the canonical style?
	best_indent int          // The number of indentation spaces.
	best_width  int          // The preferred width of the output lines.
	unicode     bool         // Allow unescaped non-ASCII characters?
	line_break  yaml_break_t // The preferred line break.

	state  yaml_emitter_state_t   // The current emitter state.
	states []yaml_emitter_state_t // The stack of states.

	events      []yaml_event_t // The event queue.
	events_head int            // The head of the event queue.

	indents []int // The stack of indentation levels.

	tag_directives []yaml_tag_directive_t // The list of tag directives.

	indent int // The current indentation level.

	compact_sequence_indent bool // Is '- ' considered part of the indentation for sequence elements?

	flow_level int // The current flow level.

	root_context       bool // Is it the document root context?
	sequence_context   bool // Is it a sequence context?
	mapping_context    bool // Is it a mapping context?
	simple_key_context bool // Is it a simple mapping key context?

	line       int  // The current line.
	column     int  // The current column.
	whitespace bool // If the last character was a whitespace?
	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
	open_ended bool // If an explicit document end is required?

	space_above bool // Is there's an empty line above?
	foot_indent int  // The indent used to write the foot comment above, or -1 if none.

	// Anchor analysis.
	anchor_data struct {
		anchor []byte // The anchor value.
		alias  bool   // Is it an alias?
	}

	// Tag analysis.
	tag_data struct {
		handle []byte // The tag handle.
		suffix []byte // The tag suffix.
	}

	// Scalar analysis.
	scalar_data struct {
		value                 []byte              // The scalar value.
		multiline             bool                // Does the scalar contain line breaks?
		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
		style                 yaml_scalar_style_t // The output style.
	}

	// Comments

	head_comment []byte
	line_comment []byte
	foot_comment []byte
	tail_comment []byte

	key_line_comment []byte // Line comment attached to a mapping key.

	// Dumper stuff

	opened bool // If the stream was already opened?
	closed bool // If the stream was already closed?

	// The information associated with the document nodes.
	anchors *struct {
		references int  // The number of references.
		anchor     int  // The anchor id.
		serialized bool // If the node has been emitted?
	}

	last_anchor_id int // The last assigned anchor id.

	document *yaml_document_t // The currently emitted document.
}
|
||||
|
|
@ -0,0 +1,198 @@
|
|||
//
|
||||
// Copyright (c) 2011-2019 Canonical Ltd
|
||||
// Copyright (c) 2006-2010 Kirill Simonov
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
// of the Software, and to permit persons to whom the Software is furnished to do
|
||||
// so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
// SOFTWARE.
|
||||
|
||||
package yaml
|
||||
|
||||
const (
	// The size of the input raw buffer.
	input_raw_buffer_size = 512

	// The size of the input buffer.
	// It should be possible to decode the whole raw buffer.
	input_buffer_size = input_raw_buffer_size * 3

	// The size of the output buffer.
	output_buffer_size = 128

	// The size of the output raw buffer.
	// It should be possible to encode the whole output buffer.
	output_raw_buffer_size = (output_buffer_size*2 + 2)

	// The size of other stacks and queues.
	initial_stack_size  = 16
	initial_queue_size  = 16
	initial_string_size = 16
)
|
||||
|
||||
// is_alpha reports whether b[i] is an ASCII letter, a decimal digit,
// '_', or '-'.
func is_alpha(b []byte, i int) bool {
	switch c := b[i]; {
	case c >= '0' && c <= '9':
		return true
	case c >= 'A' && c <= 'Z':
		return true
	case c >= 'a' && c <= 'z':
		return true
	case c == '_' || c == '-':
		return true
	}
	return false
}
|
||||
|
||||
// is_digit reports whether b[i] is a decimal digit ('0' through '9').
func is_digit(b []byte, i int) bool {
	c := b[i]
	return '0' <= c && c <= '9'
}
|
||||
|
||||
// as_digit returns the numeric value of the decimal digit at b[i].
// The caller is expected to have validated the byte with is_digit first;
// for any other byte the result is meaningless.
func as_digit(b []byte, i int) int {
	const zero = '0'
	return int(b[i]) - zero
}
|
||||
|
||||
// is_hex reports whether b[i] is a hexadecimal digit
// ('0'-'9', 'A'-'F', or 'a'-'f').
func is_hex(b []byte, i int) bool {
	switch c := b[i]; {
	case c >= '0' && c <= '9':
		return true
	case c >= 'A' && c <= 'F':
		return true
	case c >= 'a' && c <= 'f':
		return true
	}
	return false
}
|
||||
|
||||
// as_hex returns the numeric value of the hexadecimal digit at b[i].
// The caller is expected to have validated the byte with is_hex first.
func as_hex(b []byte, i int) int {
	switch c := b[i]; {
	case c >= 'A' && c <= 'F':
		return int(c) - 'A' + 10
	case c >= 'a' && c <= 'f':
		return int(c) - 'a' + 10
	default:
		return int(c) - '0'
	}
}
|
||||
|
||||
// is_ascii reports whether b[i] is a 7-bit ASCII byte.
func is_ascii(b []byte, i int) bool {
	return b[i] < 0x80
}
|
||||
|
||||
// is_printable reports whether the UTF-8 encoded character starting at b[i]
// may be written to the output without escaping. It admits LF, printable
// ASCII, and most multi-byte characters, while rejecting C1 controls,
// surrogates, the BOM (#xFEFF), and the non-characters #xFFFE/#xFFFF.
// For multi-byte leading bytes it inspects b[i+1] (and b[i+2]), so the
// caller must ensure the full sequence is in the buffer.
func is_printable(b []byte, i int) bool {
	switch c := b[i]; {
	case c == 0x0A: // LF
		return true
	case c >= 0x20 && c <= 0x7E: // printable ASCII
		return true
	case c == 0xC2: // two-byte sequence: printable iff >= #xA0 (skip C1 controls)
		return b[i+1] >= 0xA0
	case c > 0xC2 && c < 0xED: // other two/three-byte leads below the surrogate range
		return true
	case c == 0xED: // three-byte lead covering #xD000-#xDFFF: exclude surrogates
		return b[i+1] < 0xA0
	case c == 0xEE: // #xE000-#xEFFF
		return true
	case c == 0xEF: // #xF000-#xFFFF: exclude #xFEFF, #xFFFE and #xFFFF
		if b[i+1] == 0xBB && b[i+2] == 0xBF {
			return false // #xFEFF (BOM)
		}
		if b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF) {
			return false // #xFFFE / #xFFFF
		}
		return true
	}
	return false
}
|
||||
|
||||
// is_z reports whether b[i] is the NUL byte.
func is_z(b []byte, i int) bool {
	return b[i] == 0
}
|
||||
|
||||
// Check if the beginning of the buffer is a BOM.
//
// NOTE(review): the i parameter is accepted for signature symmetry with the
// other is_* helpers but is ignored — this always inspects b[0..2].
// Presumably all callers pass i == 0; confirm before changing.
func is_bom(b []byte, i int) bool {
	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
|
||||
|
||||
// is_space reports whether b[i] is an ASCII space.
func is_space(b []byte, i int) bool {
	const space = ' '
	return b[i] == space
}
|
||||
|
||||
// is_tab reports whether b[i] is a horizontal tab.
func is_tab(b []byte, i int) bool {
	const tab = '\t'
	return b[i] == tab
}
|
||||
|
||||
// is_blank reports whether b[i] is a blank character (space or tab).
// The check is written inline rather than via is_space/is_tab so that it
// stays cheap on the scanner hot path.
func is_blank(b []byte, i int) bool {
	switch b[i] {
	case ' ', '\t':
		return true
	}
	return false
}
|
||||
|
||||
// is_break reports whether the character starting at b[i] is a YAML line
// break: CR, LF, NEL (#x85), LS (#x2028), or PS (#x2029). For the
// multi-byte breaks it inspects b[i+1] (and b[i+2]) only after matching
// the leading byte.
func is_break(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n': // CR (#xD), LF (#xA)
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
|
||||
|
||||
// is_crlf reports whether b[i] starts a CR LF pair.
func is_crlf(b []byte, i int) bool {
	if b[i] != '\r' {
		return false
	}
	return b[i+1] == '\n'
}
|
||||
|
||||
// is_breakz reports whether the character starting at b[i] is a line break
// (CR, LF, NEL, LS, PS) or NUL. Equivalent to is_break(b, i) || is_z(b, i),
// inlined to keep the scanner hot path cheap.
func is_breakz(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n', 0x00: // CR (#xD), LF (#xA), NUL
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
|
||||
|
||||
// is_spacez reports whether the character starting at b[i] is a space, a
// line break (CR, LF, NEL, LS, PS), or NUL. Equivalent to
// is_space(b, i) || is_breakz(b, i), inlined for the scanner hot path.
func is_spacez(b []byte, i int) bool {
	switch b[i] {
	case ' ', '\r', '\n', 0x00: // space, CR (#xD), LF (#xA), NUL
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
|
||||
|
||||
// is_blankz reports whether the character starting at b[i] is a space, tab,
// line break (CR, LF, NEL, LS, PS), or NUL. Equivalent to
// is_blank(b, i) || is_breakz(b, i), inlined for the scanner hot path.
func is_blankz(b []byte, i int) bool {
	switch b[i] {
	case ' ', '\t', '\r', '\n', 0x00: // space, tab, CR (#xD), LF (#xA), NUL
		return true
	case 0xC2: // NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
|
||||
|
||||
// Determine the width of the character.
//
// width returns the byte length of the UTF-8 sequence whose leading byte
// is b: 1 for ASCII, 2-4 for multi-byte leads, and 0 if b is not a valid
// leading byte (a continuation byte 10xxxxxx or an invalid 11111xxx prefix).
func width(b byte) int {
	// Don't replace these by a switch without first
	// confirming that it is being inlined.
	if b&0x80 == 0x00 { // 0xxxxxxx: ASCII
		return 1
	}
	if b&0xE0 == 0xC0 { // 110xxxxx: two-byte sequence
		return 2
	}
	if b&0xF0 == 0xE0 { // 1110xxxx: three-byte sequence
		return 3
	}
	if b&0xF8 == 0xF0 { // 11110xxx: four-byte sequence
		return 4
	}
	return 0

}
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apitesting
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Close closes c and fails the test (via t.Errorf) if Close returns an
// error. Intended for use in defer statements within tests.
func Close(t TestingT, c io.Closer) {
	t.Helper()
	assertNoError(t, c.Close())
}
|
||||
|
||||
// CloseNoOp does nothing. Use as a drop-in replacement for Close when you
// need to disable a deferred close without restructuring the test.
func CloseNoOp(TestingT, io.Closer) {}
|
||||
|
||||
// TestingT simulates assert.TestingT and assert.tHelper without adding
// testify as a non-test dependency. Both *testing.T and *testing.B
// satisfy it.
type TestingT interface {
	// Errorf records a test failure, like (*testing.T).Errorf.
	Errorf(format string, args ...interface{})
	// Helper marks the calling function as a test helper so failures
	// are reported at the caller's line.
	Helper()
}
|
||||
|
||||
// Compile-time checks that testing.T and testing.B satisfy the TestingT
// interface.
var _ TestingT = &testing.T{}
var _ TestingT = &testing.B{}
|
||||
|
||||
// assertNoError simulates assert.NoError without adding testify as a
|
||||
// non-test dependency.
|
||||
//
|
||||
// In test files, use github.com/stretchr/testify/assert instead.
|
||||
func assertNoError(t TestingT, err error) {
|
||||
t.Helper()
|
||||
if err != nil {
|
||||
t.Errorf("Received unexpected error:\n%+v", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -20,6 +20,7 @@ import (
|
|||
"bytes"
|
||||
gojson "encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
|
|
@ -28,7 +29,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp" //nolint:depguard
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
apiequality "k8s.io/apimachinery/pkg/api/equality"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
|
@ -115,7 +116,7 @@ func (c *CompatibilityTestOptions) Complete(t *testing.T) *CompatibilityTestOpti
|
|||
c.TestDataDir = "testdata"
|
||||
}
|
||||
if c.TestDataDirCurrentVersion == "" {
|
||||
c.TestDataDirCurrentVersion = filepath.Join(c.TestDataDir, "HEAD")
|
||||
c.TestDataDirCurrentVersion = filepath.Join(c.TestDataDir, http.MethodHead)
|
||||
}
|
||||
if c.TestDataDirsPreviousVersions == nil {
|
||||
dirs, err := filepath.Glob(filepath.Join(c.TestDataDir, "v*"))
|
||||
|
|
@ -199,7 +200,7 @@ func (c *CompatibilityTestOptions) Run(t *testing.T) {
|
|||
for _, gvk := range c.Kinds {
|
||||
t.Run(makeName(gvk), func(t *testing.T) {
|
||||
|
||||
t.Run("HEAD", func(t *testing.T) {
|
||||
t.Run(http.MethodHead, func(t *testing.T) {
|
||||
c.runCurrentVersionTest(t, gvk, usedHEADFixtures)
|
||||
})
|
||||
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ import (
|
|||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp" //nolint:depguard
|
||||
"github.com/google/go-cmp/cmp"
|
||||
flag "github.com/spf13/pflag"
|
||||
"sigs.k8s.io/randfill"
|
||||
|
||||
|
|
|
|||
|
|
@ -37,7 +37,7 @@ import (
|
|||
jsonserializer "k8s.io/apimachinery/pkg/runtime/serializer/json"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
|
||||
"github.com/google/go-cmp/cmp" //nolint:depguard
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
// RoundtripToUnstructured verifies the roundtrip faithfulness of all external types in a scheme
|
||||
|
|
|
|||
|
|
@ -0,0 +1,16 @@
|
|||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
reviewers:
|
||||
- thockin
|
||||
- smarterclayton
|
||||
- wojtek-t
|
||||
- deads2k
|
||||
- derekwaynecarr
|
||||
- caesarxuchao
|
||||
- mikedanese
|
||||
- liggitt
|
||||
- saad-ali
|
||||
- janetkuo
|
||||
- tallclair
|
||||
- dims
|
||||
- cjcullen
|
||||
|
|
@ -438,7 +438,7 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr
|
|||
message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code)
|
||||
switch code {
|
||||
case http.StatusConflict:
|
||||
if verb == "POST" {
|
||||
if verb == http.MethodPost {
|
||||
reason = metav1.StatusReasonAlreadyExists
|
||||
} else {
|
||||
reason = metav1.StatusReasonConflict
|
||||
|
|
|
|||
|
|
@ -0,0 +1,15 @@
|
|||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
reviewers:
|
||||
- thockin
|
||||
- smarterclayton
|
||||
- wojtek-t
|
||||
- deads2k
|
||||
- derekwaynecarr
|
||||
- caesarxuchao
|
||||
- mikedanese
|
||||
- liggitt
|
||||
- janetkuo
|
||||
- dims
|
||||
emeritus_reviewers:
|
||||
- ncdc
|
||||
|
|
@ -16,7 +16,10 @@ limitations under the License.
|
|||
|
||||
package operation
|
||||
|
||||
import "k8s.io/apimachinery/pkg/util/sets"
|
||||
import (
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Operation provides contextual information about a validation request and the API
|
||||
// operation being validated.
|
||||
|
|
@ -41,7 +44,50 @@ type Operation struct {
|
|||
// resource first began using the feature.
|
||||
//
|
||||
// Unset options are disabled/false.
|
||||
Options sets.Set[string]
|
||||
Options []string
|
||||
|
||||
// Request provides information about the request being validated.
|
||||
Request Request
|
||||
}
|
||||
|
||||
// HasOption returns true if the given string is in the Options slice.
|
||||
func (o Operation) HasOption(option string) bool {
|
||||
return slices.Contains(o.Options, option)
|
||||
}
|
||||
|
||||
// Request provides information about the request being validated.
|
||||
type Request struct {
|
||||
// Subresources identifies the subresource path components of the request. For
|
||||
// example, Subresources for a request to `/api/v1/pods/my-pod/status` would be
|
||||
// `["status"]`. For `/api/v1/widget/my-widget/x/y/z`, it would be `["x", "y",
|
||||
// "z"]`. For a root resource (`/api/v1/pods/my-pod`), Subresources will be an
|
||||
// empty slice.
|
||||
//
|
||||
// Validation logic should only consult this field if the validation rules for a
|
||||
// particular field differ depending on whether the main resource or a specific
|
||||
// subresource is being accessed. For example:
|
||||
//
|
||||
// Updates to a Pod resource (`/`) normally cannot change container resource
|
||||
// requests/limits after the Pod is created (they are immutable). However, when
|
||||
// accessing the Pod's "resize" subresource (`/resize`), these specific fields
|
||||
// are allowed to be modified. In this scenario, the validation logic for
|
||||
// `spec.container[*].resources` must check `Subresources` to permit changes only
|
||||
// when the request targets the "resize" subresource.
|
||||
//
|
||||
// Note: This field should not be used to control which fields a subresource
|
||||
// operation is allowed to write. This is the responsibility of "field wiping".
|
||||
// Field wiping logic is expected to be handled in resource strategies by
|
||||
// modifying the incoming object before it is validated.
|
||||
Subresources []string
|
||||
}
|
||||
|
||||
// SubresourcePath returns the path is a slash-separated list of subresource
|
||||
// names. For example, `/status`, `/resize`, or `/x/y/z`.
|
||||
func (r Request) SubresourcePath() string {
|
||||
if len(r.Subresources) == 0 {
|
||||
return "/"
|
||||
}
|
||||
return "/" + strings.Join(r.Subresources, "/")
|
||||
}
|
||||
|
||||
// Code is the request operation to be validated.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,10 @@
|
|||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
reviewers:
|
||||
- thockin
|
||||
- smarterclayton
|
||||
- wojtek-t
|
||||
- derekwaynecarr
|
||||
- mikedanese
|
||||
- saad-ali
|
||||
- janetkuo
|
||||
|
|
@ -0,0 +1,11 @@
|
|||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
# Disable inheritance as this is an api owners file
|
||||
options:
|
||||
no_parent_owners: true
|
||||
approvers:
|
||||
- api-approvers
|
||||
reviewers:
|
||||
- api-reviewers
|
||||
labels:
|
||||
- kind/api-change
|
||||
|
|
@ -74,13 +74,13 @@ func validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field
|
|||
allErrs = append(allErrs, field.Invalid(fldPath.Child("apiVersion"), ownerReference.APIVersion, "version must not be empty"))
|
||||
}
|
||||
if len(gvk.Kind) == 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "kind must not be empty"))
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("kind"), ownerReference.Kind, "must not be empty"))
|
||||
}
|
||||
if len(ownerReference.Name) == 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "name must not be empty"))
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), ownerReference.Name, "must not be empty"))
|
||||
}
|
||||
if len(ownerReference.UID) == 0 {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "uid must not be empty"))
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), ownerReference.UID, "must not be empty"))
|
||||
}
|
||||
if _, ok := BannedOwners[gvk]; ok {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, ownerReference, fmt.Sprintf("%s is disallowed from being an owner", gvk)))
|
||||
|
|
|
|||
|
|
@ -0,0 +1,17 @@
|
|||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
reviewers:
|
||||
- thockin
|
||||
- smarterclayton
|
||||
- wojtek-t
|
||||
- deads2k
|
||||
- caesarxuchao
|
||||
- liggitt
|
||||
- sttts
|
||||
- luxas
|
||||
- janetkuo
|
||||
- justinsb
|
||||
- soltysh
|
||||
- dims
|
||||
emeritus_reviewers:
|
||||
- ncdc
|
||||
|
|
@ -185,7 +185,7 @@ type ObjectMeta struct {
|
|||
// Null for lists.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
|
||||
CreationTimestamp Time `json:"creationTimestamp,omitempty,omitzero" protobuf:"bytes,8,opt,name=creationTimestamp"`
|
||||
|
||||
// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
|
||||
// field is set by the server when a graceful deletion is requested by the user, and is not
|
||||
|
|
@ -439,20 +439,6 @@ const (
|
|||
//
|
||||
// The annotation is added to a "Bookmark" event.
|
||||
InitialEventsAnnotationKey = "k8s.io/initial-events-end"
|
||||
|
||||
// InitialEventsListBlueprintAnnotationKey is the name of the key
|
||||
// where an empty, versioned list is encoded in the requested format
|
||||
// (e.g., protobuf, JSON, CBOR), then base64-encoded and stored as a string.
|
||||
//
|
||||
// This encoding matches the request encoding format, which may be
|
||||
// protobuf, JSON, CBOR, or others, depending on what the client requested.
|
||||
// This ensures that the reconstructed list can be processed through the
|
||||
// same decoder chain that would handle a standard LIST call response.
|
||||
//
|
||||
// The annotation is added to a "Bookmark" event and is used by clients
|
||||
// to guarantee the format consistency when reconstructing
|
||||
// the list during WatchList processing.
|
||||
InitialEventsListBlueprintAnnotationKey = "kubernetes.io/initial-events-list-blueprint"
|
||||
)
|
||||
|
||||
// resourceVersionMatch specifies how the resourceVersion parameter is applied. resourceVersionMatch
|
||||
|
|
|
|||
|
|
@ -328,11 +328,11 @@ func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.Er
|
|||
}
|
||||
|
||||
if condition.LastTransitionTime.IsZero() {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("lastTransitionTime"), "must be set"))
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("lastTransitionTime"), ""))
|
||||
}
|
||||
|
||||
if len(condition.Reason) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("reason"), "must be set"))
|
||||
allErrs = append(allErrs, field.Required(fldPath.Child("reason"), ""))
|
||||
} else {
|
||||
for _, currErr := range isValidConditionReason(condition.Reason) {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr))
|
||||
|
|
|
|||
|
|
@ -31,6 +31,9 @@ type Labels interface {
|
|||
|
||||
// Get returns the value for the provided label.
|
||||
Get(label string) (value string)
|
||||
|
||||
// Lookup returns the value for the provided label if it exists and whether the provided label exist
|
||||
Lookup(label string) (value string, exists bool)
|
||||
}
|
||||
|
||||
// Set is a map of label:value. It implements Labels.
|
||||
|
|
@ -59,6 +62,12 @@ func (ls Set) Get(label string) string {
|
|||
return ls[label]
|
||||
}
|
||||
|
||||
// Lookup returns the value for the provided label if it exists and whether the provided label exist
|
||||
func (ls Set) Lookup(label string) (string, bool) {
|
||||
val, exists := ls[label]
|
||||
return val, exists
|
||||
}
|
||||
|
||||
// AsSelector converts labels into a selectors. It does not
|
||||
// perform any validation, which means the server will reject
|
||||
// the request if the Set contains invalid values.
|
||||
|
|
|
|||
|
|
@ -23,11 +23,12 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/validation"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
@ -115,6 +116,15 @@ func Nothing() Selector {
|
|||
return sharedNothingSelector
|
||||
}
|
||||
|
||||
// MatchesNothing only returns true for selectors which are definitively determined to match no objects.
|
||||
// This currently only detects the `labels.Nothing()` selector, but may change over time to detect more selectors that match no objects.
|
||||
//
|
||||
// Note: The current implementation does not check for selector conflict scenarios (e.g., a=a,a!=a).
|
||||
// Support for detecting such cases can be added in the future.
|
||||
func MatchesNothing(selector Selector) bool {
|
||||
return selector == sharedNothingSelector
|
||||
}
|
||||
|
||||
// NewSelector returns a nil selector
|
||||
func NewSelector() Selector {
|
||||
return internalSelector(nil)
|
||||
|
|
@ -236,26 +246,29 @@ func (r *Requirement) hasValue(value string) bool {
|
|||
func (r *Requirement) Matches(ls Labels) bool {
|
||||
switch r.operator {
|
||||
case selection.In, selection.Equals, selection.DoubleEquals:
|
||||
if !ls.Has(r.key) {
|
||||
val, exists := ls.Lookup(r.key)
|
||||
if !exists {
|
||||
return false
|
||||
}
|
||||
return r.hasValue(ls.Get(r.key))
|
||||
return r.hasValue(val)
|
||||
case selection.NotIn, selection.NotEquals:
|
||||
if !ls.Has(r.key) {
|
||||
val, exists := ls.Lookup(r.key)
|
||||
if !exists {
|
||||
return true
|
||||
}
|
||||
return !r.hasValue(ls.Get(r.key))
|
||||
return !r.hasValue(val)
|
||||
case selection.Exists:
|
||||
return ls.Has(r.key)
|
||||
case selection.DoesNotExist:
|
||||
return !ls.Has(r.key)
|
||||
case selection.GreaterThan, selection.LessThan:
|
||||
if !ls.Has(r.key) {
|
||||
val, exists := ls.Lookup(r.key)
|
||||
if !exists {
|
||||
return false
|
||||
}
|
||||
lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64)
|
||||
lsValue, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil {
|
||||
klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
|
||||
klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", val, ls, err)
|
||||
return false
|
||||
}
|
||||
|
||||
|
|
@ -996,7 +1009,8 @@ type ValidatedSetSelector Set
|
|||
|
||||
func (s ValidatedSetSelector) Matches(labels Labels) bool {
|
||||
for k, v := range s {
|
||||
if !labels.Has(k) || v != labels.Get(k) {
|
||||
val, exists := labels.Lookup(k)
|
||||
if !exists || v != val {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -29,10 +29,11 @@ import (
|
|||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/structured-merge-diff/v6/value"
|
||||
|
||||
"k8s.io/apimachinery/pkg/conversion"
|
||||
"k8s.io/apimachinery/pkg/util/json"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/value"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
|
@ -53,6 +54,7 @@ type fieldInfo struct {
|
|||
name string
|
||||
nameValue reflect.Value
|
||||
omitempty bool
|
||||
omitzero func(dv reflect.Value) bool
|
||||
}
|
||||
|
||||
type fieldsCacheMap map[structField]*fieldInfo
|
||||
|
|
@ -376,19 +378,24 @@ func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo {
|
|||
typeField := structType.Field(field)
|
||||
jsonTag := typeField.Tag.Get("json")
|
||||
if len(jsonTag) == 0 {
|
||||
// Make the first character lowercase.
|
||||
if typeField.Name == "" {
|
||||
if !typeField.Anonymous {
|
||||
// match stdlib behavior for naming fields that don't specify a json tag name
|
||||
info.name = typeField.Name
|
||||
} else {
|
||||
info.name = strings.ToLower(typeField.Name[:1]) + typeField.Name[1:]
|
||||
}
|
||||
} else {
|
||||
items := strings.Split(jsonTag, ",")
|
||||
info.name = items[0]
|
||||
if len(info.name) == 0 && !typeField.Anonymous {
|
||||
// match stdlib behavior for naming fields that don't specify a json tag name
|
||||
info.name = typeField.Name
|
||||
}
|
||||
|
||||
for i := range items {
|
||||
if items[i] == "omitempty" {
|
||||
if i > 0 && items[i] == "omitempty" {
|
||||
info.omitempty = true
|
||||
break
|
||||
}
|
||||
if i > 0 && items[i] == "omitzero" {
|
||||
info.omitzero = value.OmitZeroFunc(typeField.Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -775,7 +782,7 @@ func pointerToUnstructured(sv, dv reflect.Value) error {
|
|||
return toUnstructured(sv.Elem(), dv)
|
||||
}
|
||||
|
||||
func isZero(v reflect.Value) bool {
|
||||
func isEmpty(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.String:
|
||||
return v.Len() == 0
|
||||
|
|
@ -816,10 +823,14 @@ func structToUnstructured(sv, dv reflect.Value) error {
|
|||
// This field should be skipped.
|
||||
continue
|
||||
}
|
||||
if fieldInfo.omitempty && isZero(fv) {
|
||||
if fieldInfo.omitempty && isEmpty(fv) {
|
||||
// omitempty fields should be ignored.
|
||||
continue
|
||||
}
|
||||
if fieldInfo.omitzero != nil && fieldInfo.omitzero(fv) {
|
||||
// omitzero fields should be ignored
|
||||
continue
|
||||
}
|
||||
if len(fieldInfo.name) == 0 {
|
||||
// This field is inlined.
|
||||
if err := toUnstructured(fv, dv); err != nil {
|
||||
|
|
|
|||
|
|
@ -385,3 +385,9 @@ type Unstructured interface {
|
|||
// If the items passed to fn are not retained, or are retained for the same duration, use EachListItem instead for memory efficiency.
|
||||
EachListItemWithAlloc(func(Object) error) error
|
||||
}
|
||||
|
||||
// ApplyConfiguration is an interface that root apply configuration types implement.
|
||||
type ApplyConfiguration interface {
|
||||
// IsApplyConfiguration is implemented if the object is the root of an apply configuration.
|
||||
IsApplyConfiguration()
|
||||
}
|
||||
|
|
|
|||
|
|
@ -75,7 +75,7 @@ type Scheme struct {
|
|||
// The provided object must be a pointer.
|
||||
// If oldObject is non-nil, update validation is performed and may perform additional
|
||||
// validation such as transition rules and immutability checks.
|
||||
validationFuncs map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList
|
||||
validationFuncs map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}) field.ErrorList
|
||||
|
||||
// converter stores all registered conversion functions. It also has
|
||||
// default converting behavior.
|
||||
|
|
@ -105,7 +105,7 @@ func NewScheme() *Scheme {
|
|||
unversionedKinds: map[string]reflect.Type{},
|
||||
fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{},
|
||||
defaulterFuncs: map[reflect.Type]func(interface{}){},
|
||||
validationFuncs: map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresource ...string) field.ErrorList{},
|
||||
validationFuncs: map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}) field.ErrorList{},
|
||||
versionPriority: map[string][]string{},
|
||||
schemeName: naming.GetNameFromCallsite(internalPackages...),
|
||||
}
|
||||
|
|
@ -362,16 +362,16 @@ func (s *Scheme) Default(src Object) {
|
|||
// is called. The function will never be called unless the validated object
|
||||
// matches srcType. If this function is invoked twice with the same srcType, the
|
||||
// fn passed to the later call will be used instead.
|
||||
func (s *Scheme) AddValidationFunc(srcType Object, fn func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList) {
|
||||
func (s *Scheme) AddValidationFunc(srcType Object, fn func(ctx context.Context, op operation.Operation, object, oldObject interface{}) field.ErrorList) {
|
||||
s.validationFuncs[reflect.TypeOf(srcType)] = fn
|
||||
}
|
||||
|
||||
// Validate validates the provided Object according to the generated declarative validation code.
|
||||
// WARNING: This does not validate all objects! The handwritten validation code in validation.go
|
||||
// is not run when this is called. Only the generated zz_generated.validations.go validation code is run.
|
||||
func (s *Scheme) Validate(ctx context.Context, options sets.Set[string], object Object, subresources ...string) field.ErrorList {
|
||||
func (s *Scheme) Validate(ctx context.Context, options []string, object Object, subresources ...string) field.ErrorList {
|
||||
if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok {
|
||||
return fn(ctx, operation.Operation{Type: operation.Create, Options: options}, object, nil, subresources...)
|
||||
return fn(ctx, operation.Operation{Type: operation.Create, Request: operation.Request{Subresources: subresources}, Options: options}, object, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -379,9 +379,9 @@ func (s *Scheme) Validate(ctx context.Context, options sets.Set[string], object
|
|||
// ValidateUpdate validates the provided object and oldObject according to the generated declarative validation code.
|
||||
// WARNING: This does not validate all objects! The handwritten validation code in validation.go
|
||||
// is not run when this is called. Only the generated zz_generated.validations.go validation code is run.
|
||||
func (s *Scheme) ValidateUpdate(ctx context.Context, options sets.Set[string], object, oldObject Object, subresources ...string) field.ErrorList {
|
||||
func (s *Scheme) ValidateUpdate(ctx context.Context, options []string, object, oldObject Object, subresources ...string) field.ErrorList {
|
||||
if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok {
|
||||
return fn(ctx, operation.Operation{Type: operation.Update, Options: options}, object, oldObject, subresources...)
|
||||
return fn(ctx, operation.Operation{Type: operation.Update, Request: operation.Request{Subresources: subresources}, Options: options}, object, oldObject)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -743,3 +743,67 @@ func (s *Scheme) Name() string {
|
|||
// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
|
||||
// call chains to NewReflector, so they'd be low entropy names for reflectors
|
||||
var internalPackages = []string{"k8s.io/apimachinery/pkg/runtime/scheme.go"}
|
||||
|
||||
// ToOpenAPIDefinitionName returns the REST-friendly OpenAPI definition name known type identified by groupVersionKind.
|
||||
// If the groupVersionKind does not identify a known type, an error is returned.
|
||||
// The Version field of groupVersionKind is required, and the Group and Kind fields are required for unstructured.Unstructured
|
||||
// types. If a required field is empty, an error is returned.
|
||||
//
|
||||
// The OpenAPI definition name is the canonical name of the type, with the group and version removed.
|
||||
// For example, the OpenAPI definition name of Pod is `io.k8s.api.core.v1.Pod`.
|
||||
//
|
||||
// A known type that is registered as an unstructured.Unstructured type is treated as a custom resource and
|
||||
// which has an OpenAPI definition name of the form `<reversed-group>.<version.<kind>`.
|
||||
// For example, the OpenAPI definition name of `group: stable.example.com, version: v1, kind: Pod` is
|
||||
// `com.example.stable.v1.Pod`.
|
||||
func (s *Scheme) ToOpenAPIDefinitionName(groupVersionKind schema.GroupVersionKind) (string, error) {
|
||||
if groupVersionKind.Version == "" { // Empty version is not allowed by New() so check it first to avoid a panic.
|
||||
return "", fmt.Errorf("version is required on all types: %v", groupVersionKind)
|
||||
}
|
||||
example, err := s.New(groupVersionKind)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if _, ok := example.(Unstructured); ok {
|
||||
if groupVersionKind.Group == "" || groupVersionKind.Kind == "" {
|
||||
return "", fmt.Errorf("unable to convert GroupVersionKind with empty fields to unstructured type to an OpenAPI definition name: %v", groupVersionKind)
|
||||
}
|
||||
return reverseParts(groupVersionKind.Group) + "." + groupVersionKind.Version + "." + groupVersionKind.Kind, nil
|
||||
}
|
||||
rtype := reflect.TypeOf(example).Elem()
|
||||
name := toOpenAPIDefinitionName(rtype.PkgPath() + "." + rtype.Name())
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// toOpenAPIDefinitionName converts Golang package/type canonical name into REST friendly OpenAPI name.
|
||||
// Input is expected to be `PkgPath + "." TypeName.
|
||||
//
|
||||
// Examples of REST friendly OpenAPI name:
|
||||
//
|
||||
// Input: k8s.io/api/core/v1.Pod
|
||||
// Output: io.k8s.api.core.v1.Pod
|
||||
//
|
||||
// Input: k8s.io/api/core/v1
|
||||
// Output: io.k8s.api.core.v1
|
||||
//
|
||||
// Input: csi.storage.k8s.io/v1alpha1.CSINodeInfo
|
||||
// Output: io.k8s.storage.csi.v1alpha1.CSINodeInfo
|
||||
//
|
||||
// Note that this is a copy of ToRESTFriendlyName from k8s.io/kube-openapi/pkg/util. It is duplicated here to avoid
|
||||
// a dependency on kube-openapi.
|
||||
func toOpenAPIDefinitionName(name string) string {
|
||||
nameParts := strings.Split(name, "/")
|
||||
// Reverse first part. e.g., io.k8s... instead of k8s.io...
|
||||
if len(nameParts) > 0 && strings.Contains(nameParts[0], ".") {
|
||||
nameParts[0] = reverseParts(nameParts[0])
|
||||
}
|
||||
return strings.Join(nameParts, ".")
|
||||
}
|
||||
|
||||
func reverseParts(dotSeparatedName string) string {
|
||||
parts := strings.Split(dotSeparatedName, ".")
|
||||
for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
|
||||
parts[i], parts[j] = parts[j], parts[i]
|
||||
}
|
||||
return strings.Join(parts, ".")
|
||||
}
|
||||
|
|
|
|||
|
|
@ -152,10 +152,6 @@ func (s *serializer) encode(mode modes.EncMode, obj runtime.Object, w io.Writer)
|
|||
v = u.UnstructuredContent()
|
||||
}
|
||||
|
||||
if err := modes.RejectCustomMarshalers(v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := w.Write(selfDescribedCBOR); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -270,8 +266,6 @@ func (s *serializer) unmarshal(data []byte, into interface{}) (strict, lax error
|
|||
}
|
||||
}()
|
||||
into = &content
|
||||
} else if err := modes.RejectCustomMarshalers(into); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !s.options.strict {
|
||||
|
|
|
|||
|
|
@ -25,31 +25,13 @@ import (
|
|||
|
||||
// Marshal serializes a value to CBOR. If there is more than one way to encode the value, it will
|
||||
// make the same choice as the CBOR implementation of runtime.Serializer.
|
||||
//
|
||||
// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its
|
||||
// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler,
|
||||
// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless
|
||||
// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be
|
||||
// removed in favor of automatic transcoding to CBOR.
|
||||
func Marshal(src interface{}) ([]byte, error) {
|
||||
if err := modes.RejectCustomMarshalers(src); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func Marshal(src any) ([]byte, error) {
|
||||
return modes.Encode.Marshal(src)
|
||||
}
|
||||
|
||||
// Unmarshal deserializes from CBOR into an addressable value. If there is more than one way to
|
||||
// unmarshal a value, it will make the same choice as the CBOR implementation of runtime.Serializer.
|
||||
//
|
||||
// Note: Support for CBOR is at an alpha stage. If the value (or, for composite types, any of its
|
||||
// nested values) implement any of the interfaces encoding.TextMarshaler, encoding.TextUnmarshaler,
|
||||
// encoding/json.Marshaler, or encoding/json.Unmarshaler, a non-nil error will be returned unless
|
||||
// the value also implements the corresponding CBOR interfaces. This limitation will ultimately be
|
||||
// removed in favor of automatic transcoding to CBOR.
|
||||
func Unmarshal(src []byte, dst interface{}) error {
|
||||
if err := modes.RejectCustomMarshalers(dst); err != nil {
|
||||
return err
|
||||
}
|
||||
func Unmarshal(src []byte, dst any) error {
|
||||
return modes.Decode.Unmarshal(src, dst)
|
||||
}
|
||||
|
||||
|
|
|
|||
422
vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go
generated
vendored
422
vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go
generated
vendored
|
|
@ -1,422 +0,0 @@
|
|||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package modes
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
// Returns a non-nil error if and only if the argument's type (or one of its component types, for
|
||||
// composite types) implements json.Marshaler or encoding.TextMarshaler without also implementing
|
||||
// cbor.Marshaler and likewise for the respective Unmarshaler interfaces.
|
||||
//
|
||||
// This is a temporary, graduation-blocking restriction and will be removed in favor of automatic
|
||||
// transcoding between CBOR and JSON/text for these types. This restriction allows CBOR to be
|
||||
// exercised for in-tree and unstructured types while mitigating the risk of mangling out-of-tree
|
||||
// types in client programs.
|
||||
func RejectCustomMarshalers(v interface{}) error {
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
rv := reflect.ValueOf(v)
|
||||
if err := marshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil {
|
||||
return fmt.Errorf("unable to serialize %T: %w", v, err)
|
||||
}
|
||||
if err := unmarshalerCache.getChecker(rv.Type()).check(rv, maxDepth); err != nil {
|
||||
return fmt.Errorf("unable to serialize %T: %w", v, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Recursion depth is limited as a basic mitigation against cyclic objects. Objects created by the
|
||||
// decoder shouldn't be able to contain cycles, but practically any object can be passed to the
|
||||
// encoder.
|
||||
var errMaxDepthExceeded = errors.New("object depth exceeds limit (possible cycle?)")
|
||||
|
||||
// The JSON encoder begins detecting cycles after depth 1000. Use a generous limit here, knowing
|
||||
// that it can might deeply nested acyclic objects. The limit will be removed along with the rest of
|
||||
// this mechanism.
|
||||
const maxDepth = 2048
|
||||
|
||||
var marshalerCache = checkers{
|
||||
cborInterface: reflect.TypeFor[cbor.Marshaler](),
|
||||
nonCBORInterfaces: []reflect.Type{
|
||||
reflect.TypeFor[json.Marshaler](),
|
||||
reflect.TypeFor[encoding.TextMarshaler](),
|
||||
},
|
||||
}
|
||||
|
||||
var unmarshalerCache = checkers{
|
||||
cborInterface: reflect.TypeFor[cbor.Unmarshaler](),
|
||||
nonCBORInterfaces: []reflect.Type{
|
||||
reflect.TypeFor[json.Unmarshaler](),
|
||||
reflect.TypeFor[encoding.TextUnmarshaler](),
|
||||
},
|
||||
assumeAddressableValues: true,
|
||||
}
|
||||
|
||||
// checker wraps a function for dynamically checking a value of a specific type for custom JSON
|
||||
// behaviors not matched by a custom CBOR behavior.
|
||||
type checker struct {
|
||||
// check returns a non-nil error if the given value might be marshalled to or from CBOR
|
||||
// using the default behavior for its kind, but marshalled to or from JSON using custom
|
||||
// behavior.
|
||||
check func(rv reflect.Value, depth int) error
|
||||
|
||||
// safe returns true if all values of this type are safe from mismatched custom marshalers.
|
||||
safe func() bool
|
||||
}
|
||||
|
||||
// TODO: stale
|
||||
// Having a single addressable checker for comparisons lets us prune and collapse parts of the
|
||||
// object traversal that are statically known to be safe. Depending on the type, it may be
|
||||
// unnecessary to inspect each value of that type. For example, no value of the built-in type bool
|
||||
// can implement json.Marshaler (a named type whose underlying type is bool could, but it is a
|
||||
// distinct type from bool).
|
||||
var noop = checker{
|
||||
safe: func() bool {
|
||||
return true
|
||||
},
|
||||
check: func(rv reflect.Value, depth int) error {
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
type checkers struct {
|
||||
m sync.Map // reflect.Type => *checker
|
||||
|
||||
cborInterface reflect.Type
|
||||
nonCBORInterfaces []reflect.Type
|
||||
|
||||
assumeAddressableValues bool
|
||||
}
|
||||
|
||||
func (cache *checkers) getChecker(rt reflect.Type) checker {
|
||||
if ptr, ok := cache.m.Load(rt); ok {
|
||||
return *ptr.(*checker)
|
||||
}
|
||||
|
||||
return cache.getCheckerInternal(rt, nil)
|
||||
}
|
||||
|
||||
// linked list node representing the path from a composite type to an element type
|
||||
type path struct {
|
||||
Type reflect.Type
|
||||
Parent *path
|
||||
}
|
||||
|
||||
func (p path) cyclic(rt reflect.Type) bool {
|
||||
for ancestor := &p; ancestor != nil; ancestor = ancestor.Parent {
|
||||
if ancestor.Type == rt {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c checker) {
|
||||
// Store a placeholder cache entry first to handle cyclic types.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
defer wg.Done()
|
||||
placeholder := checker{
|
||||
safe: func() bool {
|
||||
wg.Wait()
|
||||
return c.safe()
|
||||
},
|
||||
check: func(rv reflect.Value, depth int) error {
|
||||
wg.Wait()
|
||||
return c.check(rv, depth)
|
||||
},
|
||||
}
|
||||
if actual, loaded := cache.m.LoadOrStore(rt, &placeholder); loaded {
|
||||
// Someone else stored an entry for this type, use it.
|
||||
return *actual.(*checker)
|
||||
}
|
||||
|
||||
// Take a nonreflective path for the unstructured container types. They're common and
|
||||
// usually nested inside one another.
|
||||
switch rt {
|
||||
case reflect.TypeFor[map[string]interface{}](), reflect.TypeFor[[]interface{}]():
|
||||
return checker{
|
||||
safe: func() bool {
|
||||
return false
|
||||
},
|
||||
check: func(rv reflect.Value, depth int) error {
|
||||
return checkUnstructuredValue(cache, rv.Interface(), depth)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// It's possible that one of the relevant interfaces is implemented on a type with a pointer
|
||||
// receiver, but that a particular value of that type is not addressable. For example:
|
||||
//
|
||||
// func (Foo) MarshalText() ([]byte, error) { ... }
|
||||
// func (*Foo) MarshalCBOR() ([]byte, error) { ... }
|
||||
//
|
||||
// Both methods are in the method set of *Foo, but the method set of Foo contains only
|
||||
// MarshalText.
|
||||
//
|
||||
// Both the unmarshaler and marshaler checks assume that methods implementing a JSON or text
|
||||
// interface with a pointer receiver are always accessible. Only the unmarshaler check
|
||||
// assumes that CBOR methods with pointer receivers are accessible.
|
||||
|
||||
if rt.Implements(cache.cborInterface) {
|
||||
return noop
|
||||
}
|
||||
for _, unsafe := range cache.nonCBORInterfaces {
|
||||
if rt.Implements(unsafe) {
|
||||
err := fmt.Errorf("%v implements %v without corresponding cbor interface", rt, unsafe)
|
||||
return checker{
|
||||
safe: func() bool {
|
||||
return false
|
||||
},
|
||||
check: func(reflect.Value, int) error {
|
||||
return err
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cache.assumeAddressableValues && reflect.PointerTo(rt).Implements(cache.cborInterface) {
|
||||
return noop
|
||||
}
|
||||
for _, unsafe := range cache.nonCBORInterfaces {
|
||||
if reflect.PointerTo(rt).Implements(unsafe) {
|
||||
err := fmt.Errorf("%v implements %v without corresponding cbor interface", reflect.PointerTo(rt), unsafe)
|
||||
return checker{
|
||||
safe: func() bool {
|
||||
return false
|
||||
},
|
||||
check: func(reflect.Value, int) error {
|
||||
return err
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self := &path{Type: rt, Parent: parent}
|
||||
|
||||
switch rt.Kind() {
|
||||
case reflect.Array:
|
||||
ce := cache.getCheckerInternal(rt.Elem(), self)
|
||||
rtlen := rt.Len()
|
||||
if rtlen == 0 || (!self.cyclic(rt.Elem()) && ce.safe()) {
|
||||
return noop
|
||||
}
|
||||
return checker{
|
||||
safe: func() bool {
|
||||
return false
|
||||
},
|
||||
check: func(rv reflect.Value, depth int) error {
|
||||
if depth <= 0 {
|
||||
return errMaxDepthExceeded
|
||||
}
|
||||
for i := 0; i < rtlen; i++ {
|
||||
if err := ce.check(rv.Index(i), depth-1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
case reflect.Interface:
|
||||
// All interface values have to be checked because their dynamic type might
|
||||
// implement one of the interesting interfaces or be composed of another type that
|
||||
// does.
|
||||
return checker{
|
||||
safe: func() bool {
|
||||
return false
|
||||
},
|
||||
check: func(rv reflect.Value, depth int) error {
|
||||
if rv.IsNil() {
|
||||
return nil
|
||||
}
|
||||
// Unpacking interfaces must count against recursion depth,
|
||||
// consider this cycle:
|
||||
// > var i interface{}
|
||||
// > var p *interface{} = &i
|
||||
// > i = p
|
||||
// > rv := reflect.ValueOf(i)
|
||||
// > for {
|
||||
// > rv = rv.Elem()
|
||||
// > }
|
||||
if depth <= 0 {
|
||||
return errMaxDepthExceeded
|
||||
}
|
||||
rv = rv.Elem()
|
||||
return cache.getChecker(rv.Type()).check(rv, depth-1)
|
||||
},
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
rtk := rt.Key()
|
||||
ck := cache.getCheckerInternal(rtk, self)
|
||||
rte := rt.Elem()
|
||||
ce := cache.getCheckerInternal(rte, self)
|
||||
if !self.cyclic(rtk) && !self.cyclic(rte) && ck.safe() && ce.safe() {
|
||||
return noop
|
||||
}
|
||||
return checker{
|
||||
safe: func() bool {
|
||||
return false
|
||||
},
|
||||
check: func(rv reflect.Value, depth int) error {
|
||||
if depth <= 0 {
|
||||
return errMaxDepthExceeded
|
||||
}
|
||||
iter := rv.MapRange()
|
||||
rvk := reflect.New(rtk).Elem()
|
||||
rve := reflect.New(rte).Elem()
|
||||
for iter.Next() {
|
||||
rvk.SetIterKey(iter)
|
||||
if err := ck.check(rvk, depth-1); err != nil {
|
||||
return err
|
||||
}
|
||||
rve.SetIterValue(iter)
|
||||
if err := ce.check(rve, depth-1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
case reflect.Pointer:
|
||||
ce := cache.getCheckerInternal(rt.Elem(), self)
|
||||
if !self.cyclic(rt.Elem()) && ce.safe() {
|
||||
return noop
|
||||
}
|
||||
return checker{
|
||||
safe: func() bool {
|
||||
return false
|
||||
},
|
||||
check: func(rv reflect.Value, depth int) error {
|
||||
if rv.IsNil() {
|
||||
return nil
|
||||
}
|
||||
if depth <= 0 {
|
||||
return errMaxDepthExceeded
|
||||
}
|
||||
return ce.check(rv.Elem(), depth-1)
|
||||
},
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
ce := cache.getCheckerInternal(rt.Elem(), self)
|
||||
if !self.cyclic(rt.Elem()) && ce.safe() {
|
||||
return noop
|
||||
}
|
||||
return checker{
|
||||
safe: func() bool {
|
||||
return false
|
||||
},
|
||||
check: func(rv reflect.Value, depth int) error {
|
||||
if depth <= 0 {
|
||||
return errMaxDepthExceeded
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
if err := ce.check(rv.Index(i), depth-1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
case reflect.Struct:
|
||||
type field struct {
|
||||
Index int
|
||||
Checker checker
|
||||
}
|
||||
var fields []field
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
cf := cache.getCheckerInternal(f.Type, self)
|
||||
if !self.cyclic(f.Type) && cf.safe() {
|
||||
continue
|
||||
}
|
||||
fields = append(fields, field{Index: i, Checker: cf})
|
||||
}
|
||||
if len(fields) == 0 {
|
||||
return noop
|
||||
}
|
||||
return checker{
|
||||
safe: func() bool {
|
||||
return false
|
||||
},
|
||||
check: func(rv reflect.Value, depth int) error {
|
||||
if depth <= 0 {
|
||||
return errMaxDepthExceeded
|
||||
}
|
||||
for _, fi := range fields {
|
||||
if err := fi.Checker.check(rv.Field(fi.Index), depth-1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
default:
|
||||
// Not a serializable composite type (funcs and channels are composite types but are
|
||||
// rejected by JSON and CBOR serialization).
|
||||
return noop
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func checkUnstructuredValue(cache *checkers, v interface{}, depth int) error {
|
||||
switch v := v.(type) {
|
||||
case nil, bool, int64, float64, string:
|
||||
return nil
|
||||
case []interface{}:
|
||||
if depth <= 0 {
|
||||
return errMaxDepthExceeded
|
||||
}
|
||||
for _, element := range v {
|
||||
if err := checkUnstructuredValue(cache, element, depth-1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case map[string]interface{}:
|
||||
if depth <= 0 {
|
||||
return errMaxDepthExceeded
|
||||
}
|
||||
for _, element := range v {
|
||||
if err := checkUnstructuredValue(cache, element, depth-1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
// Unmarshaling an unstructured doesn't use other dynamic types, but nothing
|
||||
// prevents inserting values with arbitrary dynamic types into unstructured content,
|
||||
// as long as they can be marshalled.
|
||||
rv := reflect.ValueOf(v)
|
||||
return cache.getChecker(rv.Type()).check(rv, depth)
|
||||
}
|
||||
}
|
||||
22
vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go
generated
vendored
22
vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go
generated
vendored
|
|
@ -44,7 +44,10 @@ var simpleValues *cbor.SimpleValueRegistry = func() *cbor.SimpleValueRegistry {
|
|||
return simpleValues
|
||||
}()
|
||||
|
||||
var Decode cbor.DecMode = func() cbor.DecMode {
|
||||
// decode is the basis for the Decode mode, with no JSONUnmarshalerTranscoder
|
||||
// configured. TranscodeToJSON uses this directly rather than Decode to avoid an initialization
|
||||
// cycle between the two. Everything else should use one of the exported DecModes.
|
||||
var decode cbor.DecMode = func() cbor.DecMode {
|
||||
decode, err := cbor.DecOptions{
|
||||
// Maps with duplicate keys are well-formed but invalid according to the CBOR spec
|
||||
// and never acceptable. Unlike the JSON serializer, inputs containing duplicate map
|
||||
|
|
@ -139,6 +142,10 @@ var Decode cbor.DecMode = func() cbor.DecMode {
|
|||
// Disable default recognition of types implementing encoding.BinaryUnmarshaler,
|
||||
// which is not recognized for JSON decoding.
|
||||
BinaryUnmarshaler: cbor.BinaryUnmarshalerNone,
|
||||
|
||||
// Marshal types that implement encoding.TextMarshaler by calling their MarshalText
|
||||
// method and encoding the result to a CBOR text string.
|
||||
TextUnmarshaler: cbor.TextUnmarshalerTextString,
|
||||
}.DecMode()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
|
@ -146,6 +153,19 @@ var Decode cbor.DecMode = func() cbor.DecMode {
|
|||
return decode
|
||||
}()
|
||||
|
||||
var Decode cbor.DecMode = func() cbor.DecMode {
|
||||
opts := decode.DecOptions()
|
||||
// When decoding into a value of a type that implements json.Unmarshaler (and does not
|
||||
// implement cbor.Unmarshaler), transcode the input to JSON and pass it to the value's
|
||||
// UnmarshalJSON method.
|
||||
opts.JSONUnmarshalerTranscoder = TranscodeFunc(TranscodeToJSON)
|
||||
dm, err := opts.DecMode()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return dm
|
||||
}()
|
||||
|
||||
// DecodeLax is derived from Decode, but does not complain about unknown fields in the input.
|
||||
var DecodeLax cbor.DecMode = func() cbor.DecMode {
|
||||
opts := Decode.DecOptions()
|
||||
|
|
|
|||
24
vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
generated
vendored
24
vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go
generated
vendored
|
|
@ -22,7 +22,10 @@ import (
|
|||
"github.com/fxamacker/cbor/v2"
|
||||
)
|
||||
|
||||
var Encode = EncMode{
|
||||
// encode is the basis for the Encode mode, with no JSONMarshalerTranscoder
|
||||
// configured. TranscodeFromJSON uses this directly rather than Encode to avoid an initialization
|
||||
// cycle between the two. Everything else should use one of the exported EncModes.
|
||||
var encode = EncMode{
|
||||
delegate: func() cbor.UserBufferEncMode {
|
||||
encode, err := cbor.EncOptions{
|
||||
// Map keys need to be sorted to have deterministic output, and this is the order
|
||||
|
|
@ -94,6 +97,10 @@ var Encode = EncMode{
|
|||
// Disable default recognition of types implementing encoding.BinaryMarshaler, which
|
||||
// is not recognized for JSON encoding.
|
||||
BinaryMarshaler: cbor.BinaryMarshalerNone,
|
||||
|
||||
// Unmarshal into types that implement encoding.TextUnmarshaler by passing
|
||||
// the contents of a CBOR string to their UnmarshalText method.
|
||||
TextMarshaler: cbor.TextMarshalerTextString,
|
||||
}.UserBufferEncMode()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
|
@ -102,6 +109,21 @@ var Encode = EncMode{
|
|||
}(),
|
||||
}
|
||||
|
||||
var Encode = EncMode{
|
||||
delegate: func() cbor.UserBufferEncMode {
|
||||
opts := encode.options()
|
||||
// To encode a value of a type that implements json.Marshaler (and does not
|
||||
// implement cbor.Marshaler), transcode the result of calling its MarshalJSON method
|
||||
// directly to CBOR.
|
||||
opts.JSONMarshalerTranscoder = TranscodeFunc(TranscodeFromJSON)
|
||||
em, err := opts.UserBufferEncMode()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return em
|
||||
}(),
|
||||
}
|
||||
|
||||
var EncodeNondeterministic = EncMode{
|
||||
delegate: func() cbor.UserBufferEncMode {
|
||||
opts := Encode.options()
|
||||
|
|
|
|||
108
vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/transcoding.go
generated
vendored
Normal file
108
vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/transcoding.go
generated
vendored
Normal file
|
|
@ -0,0 +1,108 @@
|
|||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package modes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
kjson "sigs.k8s.io/json"
|
||||
)
|
||||
|
||||
type TranscodeFunc func(dst io.Writer, src io.Reader) error
|
||||
|
||||
func (f TranscodeFunc) Transcode(dst io.Writer, src io.Reader) error {
|
||||
return f(dst, src)
|
||||
}
|
||||
|
||||
func TranscodeFromJSON(dst io.Writer, src io.Reader) error {
|
||||
var tmp any
|
||||
dec := kjson.NewDecoderCaseSensitivePreserveInts(src)
|
||||
if err := dec.Decode(&tmp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := dec.Decode(&struct{}{}); !errors.Is(err, io.EOF) {
|
||||
return errors.New("extraneous data")
|
||||
}
|
||||
|
||||
return encode.MarshalTo(tmp, dst)
|
||||
}
|
||||
|
||||
func TranscodeToJSON(dst io.Writer, src io.Reader) error {
|
||||
var tmp any
|
||||
dec := decode.NewDecoder(src)
|
||||
if err := dec.Decode(&tmp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := dec.Decode(&struct{}{}); !errors.Is(err, io.EOF) {
|
||||
return errors.New("extraneous data")
|
||||
}
|
||||
|
||||
// Use an Encoder to avoid the extra []byte allocated by Marshal. Encode, unlike Marshal,
|
||||
// appends a trailing newline to separate consecutive encodings of JSON values that aren't
|
||||
// self-delimiting, like numbers. Strip the newline to avoid the assumption that every
|
||||
// json.Unmarshaler implementation will accept trailing whitespace.
|
||||
enc := json.NewEncoder(&trailingLinefeedSuppressor{delegate: dst})
|
||||
enc.SetIndent("", "")
|
||||
return enc.Encode(tmp)
|
||||
}
|
||||
|
||||
// trailingLinefeedSuppressor is an io.Writer that wraps another io.Writer, suppressing a single
|
||||
// trailing linefeed if it is the last byte written by the latest call to Write.
|
||||
type trailingLinefeedSuppressor struct {
|
||||
lf bool
|
||||
delegate io.Writer
|
||||
}
|
||||
|
||||
func (w *trailingLinefeedSuppressor) Write(p []byte) (int, error) {
|
||||
if len(p) == 0 {
|
||||
// Avoid flushing a buffered linefeeds on an empty write.
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if w.lf {
|
||||
// The previous write had a trailing linefeed that was buffered. That wasn't the
|
||||
// last Write call, so flush the buffered linefeed before continuing.
|
||||
n, err := w.delegate.Write([]byte{'\n'})
|
||||
if n > 0 {
|
||||
w.lf = false
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if p[len(p)-1] != '\n' {
|
||||
return w.delegate.Write(p)
|
||||
}
|
||||
|
||||
p = p[:len(p)-1]
|
||||
|
||||
if len(p) == 0 { // []byte{'\n'}
|
||||
w.lf = true
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
n, err := w.delegate.Write(p)
|
||||
if n == len(p) {
|
||||
// Everything up to the trailing linefeed has been flushed. Eat the linefeed.
|
||||
w.lf = true
|
||||
n++
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
|
@ -0,0 +1,31 @@
|
|||
//go:build usegocmp
|
||||
// +build usegocmp
|
||||
|
||||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"github.com/google/go-cmp/cmp" //nolint:depguard
|
||||
)
|
||||
|
||||
// Diff returns a string representation of the difference between two objects.
|
||||
// When built with the usegocmp tag, it uses go-cmp/cmp to generate a diff
|
||||
// between the objects.
|
||||
func Diff(a, b any) string {
|
||||
return cmp.Diff(a, b)
|
||||
}
|
||||
|
|
@ -1,5 +1,8 @@
|
|||
//go:build !usegocmp
|
||||
// +build !usegocmp
|
||||
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
|
@ -17,122 +20,43 @@ limitations under the License.
|
|||
package diff
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/google/go-cmp/cmp" //nolint:depguard
|
||||
"github.com/pmezard/go-difflib/difflib"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/dump"
|
||||
)
|
||||
|
||||
func legacyDiff(a, b interface{}) string {
|
||||
return cmp.Diff(a, b)
|
||||
}
|
||||
// Diff returns a string representation of the difference between two objects.
|
||||
// When built without the usegocmp tag, it uses go-difflib/difflib to generate a
|
||||
// unified diff of the objects. It attempts to use JSON serialization first,
|
||||
// falling back to an object dump via the dump package if JSON marshaling fails.
|
||||
func Diff(a, b any) string {
|
||||
|
||||
// StringDiff diffs a and b and returns a human readable diff.
|
||||
// DEPRECATED: use github.com/google/go-cmp/cmp.Diff
|
||||
func StringDiff(a, b string) string {
|
||||
return legacyDiff(a, b)
|
||||
}
|
||||
|
||||
// ObjectDiff prints the diff of two go objects and fails if the objects
|
||||
// contain unhandled unexported fields.
|
||||
// DEPRECATED: use github.com/google/go-cmp/cmp.Diff
|
||||
func ObjectDiff(a, b interface{}) string {
|
||||
return legacyDiff(a, b)
|
||||
}
|
||||
|
||||
// ObjectGoPrintDiff prints the diff of two go objects and fails if the objects
|
||||
// contain unhandled unexported fields.
|
||||
// DEPRECATED: use github.com/google/go-cmp/cmp.Diff
|
||||
func ObjectGoPrintDiff(a, b interface{}) string {
|
||||
return legacyDiff(a, b)
|
||||
}
|
||||
|
||||
// ObjectReflectDiff prints the diff of two go objects and fails if the objects
|
||||
// contain unhandled unexported fields.
|
||||
// DEPRECATED: use github.com/google/go-cmp/cmp.Diff
|
||||
func ObjectReflectDiff(a, b interface{}) string {
|
||||
return legacyDiff(a, b)
|
||||
}
|
||||
|
||||
// ObjectGoPrintSideBySide prints a and b as textual dumps side by side,
|
||||
// enabling easy visual scanning for mismatches.
|
||||
func ObjectGoPrintSideBySide(a, b interface{}) string {
|
||||
sA := dump.Pretty(a)
|
||||
sB := dump.Pretty(b)
|
||||
|
||||
linesA := strings.Split(sA, "\n")
|
||||
linesB := strings.Split(sB, "\n")
|
||||
width := 0
|
||||
for _, s := range linesA {
|
||||
l := len(s)
|
||||
if l > width {
|
||||
width = l
|
||||
}
|
||||
aStr, aErr := toPrettyJSON(a)
|
||||
bStr, bErr := toPrettyJSON(b)
|
||||
if aErr != nil || bErr != nil {
|
||||
aStr = dump.Pretty(a)
|
||||
bStr = dump.Pretty(b)
|
||||
}
|
||||
for _, s := range linesB {
|
||||
l := len(s)
|
||||
if l > width {
|
||||
width = l
|
||||
}
|
||||
|
||||
diff := difflib.UnifiedDiff{
|
||||
A: difflib.SplitLines(aStr),
|
||||
B: difflib.SplitLines(bStr),
|
||||
Context: 3,
|
||||
}
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0)
|
||||
max := len(linesA)
|
||||
if len(linesB) > max {
|
||||
max = len(linesB)
|
||||
|
||||
diffstr, err := difflib.GetUnifiedDiffString(diff)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("error generating diff: %v", err)
|
||||
}
|
||||
for i := 0; i < max; i++ {
|
||||
var a, b string
|
||||
if i < len(linesA) {
|
||||
a = linesA[i]
|
||||
}
|
||||
if i < len(linesB) {
|
||||
b = linesB[i]
|
||||
}
|
||||
fmt.Fprintf(w, "%s\t%s\n", a, b)
|
||||
}
|
||||
w.Flush()
|
||||
return buf.String()
|
||||
|
||||
return diffstr
|
||||
}
|
||||
|
||||
// IgnoreUnset is an option that ignores fields that are unset on the right
|
||||
// hand side of a comparison. This is useful in testing to assert that an
|
||||
// object is a derivative.
|
||||
func IgnoreUnset() cmp.Option {
|
||||
return cmp.Options{
|
||||
// ignore unset fields in v2
|
||||
cmp.FilterPath(func(path cmp.Path) bool {
|
||||
_, v2 := path.Last().Values()
|
||||
switch v2.Kind() {
|
||||
case reflect.Slice, reflect.Map:
|
||||
if v2.IsNil() || v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
case reflect.String:
|
||||
if v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
case reflect.Interface, reflect.Pointer:
|
||||
if v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}, cmp.Ignore()),
|
||||
// ignore map entries that aren't set in v2
|
||||
cmp.FilterPath(func(path cmp.Path) bool {
|
||||
switch i := path.Last().(type) {
|
||||
case cmp.MapIndex:
|
||||
if _, v2 := i.Values(); !v2.IsValid() {
|
||||
fmt.Println("E")
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}, cmp.Ignore()),
|
||||
}
|
||||
// toPrettyJSON converts an object to a pretty-printed JSON string.
|
||||
func toPrettyJSON(data any) (string, error) {
|
||||
jsonData, err := json.MarshalIndent(data, "", " ")
|
||||
return string(jsonData), err
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/dump"
|
||||
)
|
||||
|
||||
// ObjectGoPrintSideBySide prints a and b as textual dumps side by side,
|
||||
// enabling easy visual scanning for mismatches.
|
||||
func ObjectGoPrintSideBySide(a, b interface{}) string {
|
||||
sA := dump.Pretty(a)
|
||||
sB := dump.Pretty(b)
|
||||
|
||||
linesA := strings.Split(sA, "\n")
|
||||
linesB := strings.Split(sB, "\n")
|
||||
width := 0
|
||||
for _, s := range linesA {
|
||||
l := len(s)
|
||||
if l > width {
|
||||
width = l
|
||||
}
|
||||
}
|
||||
for _, s := range linesB {
|
||||
l := len(s)
|
||||
if l > width {
|
||||
width = l
|
||||
}
|
||||
}
|
||||
buf := &bytes.Buffer{}
|
||||
w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0)
|
||||
max := len(linesA)
|
||||
if len(linesB) > max {
|
||||
max = len(linesB)
|
||||
}
|
||||
for i := 0; i < max; i++ {
|
||||
var a, b string
|
||||
if i < len(linesA) {
|
||||
a = linesA[i]
|
||||
}
|
||||
if i < len(linesB) {
|
||||
b = linesB[i]
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\n", a, b)
|
||||
}
|
||||
_ = w.Flush()
|
||||
return buf.String()
|
||||
}
|
||||
|
|
@ -24,6 +24,7 @@ import (
|
|||
)
|
||||
|
||||
// MessageCountMap contains occurrence for each error message.
|
||||
// Deprecated: Not used anymore in the k8s.io codebase, use `errors.Join` instead.
|
||||
type MessageCountMap map[string]int
|
||||
|
||||
// Aggregate represents an object that contains multiple errors, but does not
|
||||
|
|
@ -199,6 +200,7 @@ func Flatten(agg Aggregate) Aggregate {
|
|||
}
|
||||
|
||||
// CreateAggregateFromMessageCountMap converts MessageCountMap Aggregate
|
||||
// Deprecated: Not used anymore in the k8s.io codebase, use `errors.Join` instead.
|
||||
func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate {
|
||||
if m == nil {
|
||||
return nil
|
||||
|
|
|
|||
|
|
@ -20,8 +20,8 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/typed"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/typed"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ package managedfields
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
|
|
|||
|
|
@ -22,8 +22,8 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/kube-openapi/pkg/schemaconv"
|
||||
"k8s.io/kube-openapi/pkg/util/proto"
|
||||
smdschema "sigs.k8s.io/structured-merge-diff/v4/schema"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/typed"
|
||||
smdschema "sigs.k8s.io/structured-merge-diff/v6/schema"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/typed"
|
||||
)
|
||||
|
||||
// groupVersionKindExtensionKey is the key used to lookup the
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ import (
|
|||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
)
|
||||
|
||||
type capManagersManager struct {
|
||||
|
|
|
|||
|
|
@ -25,8 +25,8 @@ import (
|
|||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/merge"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/merge"
|
||||
)
|
||||
|
||||
// NewConflictError returns an error including details on the requests apply conflicts
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/klog/v2"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/merge"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/merge"
|
||||
)
|
||||
|
||||
// DefaultMaxUpdateManagers defines the default maximum retained number of managedFields entries from updates
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ import (
|
|||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
)
|
||||
|
||||
// EmptyFields represents a set with no paths
|
||||
|
|
|
|||
4
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go
generated
vendored
4
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/lastappliedmanager.go
generated
vendored
|
|
@ -24,8 +24,8 @@ import (
|
|||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/merge"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/merge"
|
||||
)
|
||||
|
||||
type lastAppliedManager struct {
|
||||
|
|
|
|||
|
|
@ -24,7 +24,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
)
|
||||
|
||||
// ManagedInterface groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
|
||||
|
|
|
|||
2
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go
generated
vendored
2
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/managedfieldsupdater.go
generated
vendored
|
|
@ -21,7 +21,7 @@ import (
|
|||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
)
|
||||
|
||||
type managedFieldsUpdater struct {
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ package internal
|
|||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
)
|
||||
|
||||
// Managed groups a fieldpath.ManagedFields together with the timestamps associated with each operation.
|
||||
|
|
|
|||
|
|
@ -23,8 +23,8 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/value"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/value"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
|
|||
62
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/runtimetypeconverter.go
generated
vendored
Normal file
62
vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/runtimetypeconverter.go
generated
vendored
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
/*
|
||||
Copyright 2025 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/typed"
|
||||
)
|
||||
|
||||
type schemeTypeConverter struct {
|
||||
scheme *runtime.Scheme
|
||||
parser *typed.Parser
|
||||
}
|
||||
|
||||
var _ TypeConverter = &schemeTypeConverter{}
|
||||
|
||||
// NewSchemeTypeConverter creates a TypeConverter that uses the provided scheme to
|
||||
// convert between runtime.Objects and TypedValues.
|
||||
func NewSchemeTypeConverter(scheme *runtime.Scheme, parser *typed.Parser) TypeConverter {
|
||||
return &schemeTypeConverter{scheme: scheme, parser: parser}
|
||||
}
|
||||
|
||||
func (tc schemeTypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) {
|
||||
gvk := obj.GetObjectKind().GroupVersionKind()
|
||||
name, err := tc.scheme.ToOpenAPIDefinitionName(gvk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t := tc.parser.Type(name)
|
||||
switch o := obj.(type) {
|
||||
case *unstructured.Unstructured:
|
||||
return t.FromUnstructured(o.UnstructuredContent(), opts...)
|
||||
default:
|
||||
return t.FromStructured(obj, opts...)
|
||||
}
|
||||
}
|
||||
|
||||
func (tc schemeTypeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {
|
||||
vu := value.AsValue().Unstructured()
|
||||
switch o := vu.(type) {
|
||||
case map[string]interface{}:
|
||||
return &unstructured.Unstructured{Object: o}, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("failed to convert value to unstructured for type %T", vu)
|
||||
}
|
||||
}
|
||||
|
|
@ -80,6 +80,9 @@ func (f *skipNonAppliedManager) Apply(liveObj, appliedObj runtime.Object, manage
|
|||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create empty object of type %v: %v", gvk, err)
|
||||
}
|
||||
if unstructured, isUnstructured := emptyObj.(runtime.Unstructured); isUnstructured {
|
||||
unstructured.GetObjectKind().SetGroupVersionKind(gvk)
|
||||
}
|
||||
liveObj, managed, err = f.fieldManager.Update(emptyObj, liveObj, managed, f.beforeApplyManagerName)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create manager for existing fields: %v", err)
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
)
|
||||
|
||||
type stripMetaManager struct {
|
||||
|
|
|
|||
|
|
@ -19,9 +19,9 @@ package internal
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/merge"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/typed"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/fieldpath"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/merge"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/typed"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
|
|
|
|||
|
|
@ -24,9 +24,9 @@ import (
|
|||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/kube-openapi/pkg/schemaconv"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
smdschema "sigs.k8s.io/structured-merge-diff/v4/schema"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/typed"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/value"
|
||||
smdschema "sigs.k8s.io/structured-merge-diff/v6/schema"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/typed"
|
||||
"sigs.k8s.io/structured-merge-diff/v6/value"
|
||||
)
|
||||
|
||||
// TypeConverter allows you to convert from runtime.Object to
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue