Merge branch 'master' into keymutex
Kubernetes-commit: e72b32558c8e9ed16690ef5a8e909c12fcc47f87
@@ -85,7 +85,7 @@ func NewStorageCodec(opts StorageCodecConfig) (runtime.Codec, error) {
 	)
 	decoder := opts.StorageSerializer.DecoderToVersion(
 		recognizer.NewDecoder(decoders...),
-		runtime.NewMultiGroupVersioner(
+		runtime.NewCoercingMultiGroupVersioner(
 			opts.MemoryVersion,
 			schema.GroupKind{Group: opts.MemoryVersion.Group},
 			schema.GroupKind{Group: opts.StorageVersion.Group},
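For context on the one-line change above: runtime.NewCoercingMultiGroupVersioner takes the same arguments as runtime.NewMultiGroupVersioner, but as I read the apimachinery code it will also map kinds whose group is not in the listed GroupKinds onto the target version instead of reporting no match. A minimal sketch of that difference, with made-up group and kind names (not part of this commit):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Target version the storage codec decodes into; names are illustrative only.
	memory := schema.GroupVersion{Group: "example.k8s.io", Version: "__internal"}
	kinds := []schema.GroupKind{{Group: "example.k8s.io"}}

	strict := runtime.NewMultiGroupVersioner(memory, kinds...)
	coercing := runtime.NewCoercingMultiGroupVersioner(memory, kinds...)

	// A kind persisted under a group that is not in the list above.
	stored := schema.GroupVersionKind{Group: "legacy.k8s.io", Version: "v1beta1", Kind: "Widget"}

	_, ok := strict.KindForGroupVersionKinds([]schema.GroupVersionKind{stored})
	fmt.Println("strict match:", ok) // expected: false

	gvk, ok := coercing.KindForGroupVersionKinds([]schema.GroupVersionKind{stored})
	fmt.Println("coercing match:", ok, gvk) // expected: true, coerced to the memory group/version
}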
@@ -25,6 +25,23 @@ go get -u github.com/evanphx/json-patch
 * [Comparing JSON documents](#comparing-json-documents)
 * [Combine merge patches](#combine-merge-patches)
 
+
+# Configuration
+
+* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
+  This defaults to `true` and enables the non-standard practice of allowing
+  negative indices to mean indices starting at the end of an array. This
+  functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
+  false`.
+
+* There is a global configuration variable `jsonpatch.ArraySizeLimit`, which
+  limits the length of any array the patched object can have. It defaults to 0,
+  which means there is no limit.
+
+* There is a global configuration variable `jsonpatch.ArraySizeAdditionLimit`,
+  which limits the increase of array length caused by each operation. It
+  defaults to 0, which means there is no limit.
+
 ## Create and apply a merge patch
 Given both an original JSON document and a modified JSON document, you can create
 a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
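A short usage sketch of the knobs documented above, assuming the vendored json-patch copy updated by this commit (the document and expected outputs are illustrative, not taken from the commit):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"nums": [1, 2, 3]}`)

	// ArraySizeLimit caps how large any array in the patched document may become.
	jsonpatch.ArraySizeLimit = 3

	grow, err := jsonpatch.DecodePatch([]byte(`[{"op": "add", "path": "/nums/0", "value": 0}]`))
	if err != nil {
		panic(err)
	}

	if _, err := grow.Apply(doc); err != nil {
		fmt.Println(err) // expected: the array would reach size 4, exceeding the limit of 3
	}

	// With the limit back at 0 (unlimited), the same patch applies normally.
	jsonpatch.ArraySizeLimit = 0
	out, err := grow.Apply(doc)
	fmt.Println(string(out), err) // expected: {"nums":[0,1,2,3]} <nil>
}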
@@ -14,6 +14,10 @@ const (
 	eAry
 )
 
+var SupportNegativeIndices bool = true
+var ArraySizeLimit int = 0
+var ArraySizeAdditionLimit int = 0
+
 type lazyNode struct {
 	raw *json.RawMessage
 	doc partialDoc
@@ -61,6 +65,19 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+func deepCopy(src *lazyNode) (*lazyNode, error) {
+	if src == nil {
+		return nil, nil
+	}
+	a, err := src.MarshalJSON()
+	if err != nil {
+		return nil, err
+	}
+	ra := make(json.RawMessage, len(a))
+	copy(ra, a)
+	return newLazyNode(&ra), nil
+}
+
 func (n *lazyNode) intoDoc() (*partialDoc, error) {
 	if n.which == eDoc {
 		return &n.doc, nil
@@ -354,10 +371,19 @@ func (d *partialArray) set(key string, val *lazyNode) error {
 	}
 
 	sz := len(*d)
 
+	if diff := idx + 1 - sz; ArraySizeAdditionLimit > 0 && diff > ArraySizeAdditionLimit {
+		return fmt.Errorf("Unable to increase the array size by %d, the limit is %d", diff, ArraySizeAdditionLimit)
+	}
+
 	if idx+1 > sz {
 		sz = idx + 1
 	}
+
+	if ArraySizeLimit > 0 && sz > ArraySizeLimit {
+		return fmt.Errorf("Unable to create array of size %d, limit is %d", sz, ArraySizeLimit)
+	}
+
 	ary := make([]*lazyNode, sz)
 
 	cur := *d
@@ -385,17 +411,29 @@ func (d *partialArray) add(key string, val *lazyNode) error {
 		return err
 	}
 
-	ary := make([]*lazyNode, len(*d)+1)
+	sz := len(*d) + 1
+	if ArraySizeLimit > 0 && sz > ArraySizeLimit {
+		return fmt.Errorf("Unable to create array of size %d, limit is %d", sz, ArraySizeLimit)
+	}
+
+	ary := make([]*lazyNode, sz)
 
 	cur := *d
 
-	if idx < -len(ary) || idx >= len(ary) {
+	if idx >= len(ary) {
 		return fmt.Errorf("Unable to access invalid index: %d", idx)
 	}
 
-	if idx < 0 {
-		idx += len(ary)
+	if SupportNegativeIndices {
+		if idx < -len(ary) {
+			return fmt.Errorf("Unable to access invalid index: %d", idx)
+		}
+
+		if idx < 0 {
+			idx += len(ary)
+		}
 	}
+
 	copy(ary[0:idx], cur[0:idx])
 	ary[idx] = val
 	copy(ary[idx+1:], cur[idx:])
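The restructured bounds check above still rejects indices past the end of the array; only genuinely negative indices are routed through the SupportNegativeIndices branch. A quick illustration with a made-up document:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"nums": [1, 2, 3]}`)

	// Index 10 is beyond the end of the 3-element array, so the add is rejected.
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "add", "path": "/nums/10", "value": 9}]`))
	if err != nil {
		panic(err)
	}

	if _, err := patch.Apply(doc); err != nil {
		fmt.Println(err) // expected: "Unable to access invalid index: 10"
	}
}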
@@ -426,11 +464,18 @@ func (d *partialArray) remove(key string) error {
 
 	cur := *d
 
-	if idx < -len(cur) || idx >= len(cur) {
-		return fmt.Errorf("Unable to remove invalid index: %d", idx)
+	if idx >= len(cur) {
+		return fmt.Errorf("Unable to access invalid index: %d", idx)
 	}
-	if idx < 0 {
-		idx += len(cur)
+
+	if SupportNegativeIndices {
+		if idx < -len(cur) {
+			return fmt.Errorf("Unable to access invalid index: %d", idx)
+		}
+
+		if idx < 0 {
+			idx += len(cur)
+		}
 	}
 
 	ary := make([]*lazyNode, len(cur)-1)
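With SupportNegativeIndices at its default of true, the new branch above lets a remove address elements from the end of an array. A sketch with a made-up document:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"nums": [1, 2, 3]}`)

	// "-1" counts from the end of the array, so this removes the 3.
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "remove", "path": "/nums/-1"}]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	fmt.Println(string(out), err) // expected: {"nums":[1,2]} <nil>
}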
@@ -567,7 +612,12 @@ func (p Patch) copy(doc *container, op operation) error {
 		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
 	}
 
-	return con.set(key, val)
+	valCopy, err := deepCopy(val)
+	if err != nil {
+		return err
+	}
+
+	return con.add(key, valCopy)
 }
 
 // Equal indicates if 2 JSON documents have the same structural equality.
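The copy operation now stores a deep copy of the source node rather than placing the same node at both paths, presumably so that later operations on the destination cannot mutate the source through a shared pointer; it also uses add rather than set at the destination. A sketch of the behaviour this preserves, based on my reading of the change (made-up document):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"a": {"x": 1}}`)

	// Copy /a to /b, then modify only the copy.
	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "copy", "from": "/a", "path": "/b"},
		{"op": "add", "path": "/b/y", "value": 2}
	]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	// expected: /a is left untouched and only /b gains "y":
	// {"a":{"x":1},"b":{"x":1,"y":2}} <nil>
	fmt.Println(string(out), err)
}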