Merge branch 'master' into keymutex
Kubernetes-commit: e72b32558c8e9ed16690ef5a8e909c12fcc47f87
commit 1c887af65f
File diff suppressed because it is too large
@@ -85,7 +85,7 @@ func NewStorageCodec(opts StorageCodecConfig) (runtime.Codec, error) {
 	)
 	decoder := opts.StorageSerializer.DecoderToVersion(
 		recognizer.NewDecoder(decoders...),
-		runtime.NewMultiGroupVersioner(
+		runtime.NewCoercingMultiGroupVersioner(
 			opts.MemoryVersion,
 			schema.GroupKind{Group: opts.MemoryVersion.Group},
 			schema.GroupKind{Group: opts.StorageVersion.Group},

@@ -25,6 +25,23 @@ go get -u github.com/evanphx/json-patch
 * [Comparing JSON documents](#comparing-json-documents)
 * [Combine merge patches](#combine-merge-patches)
 
+
+# Configuration
+
+* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
+This defaults to `true` and enables the non-standard practice of allowing
+negative indices to mean indices starting at the end of an array. This
+functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
+false`.
+
+* There is a global configuration variable `jsonpatch.ArraySizeLimit`, which
+limits the length of any array the patched object can have. It defaults to 0,
+which means there is no limit.
+
+* There is a global configuration variable `jsonpatch.ArraySizeAdditionLimit`,
+which limits the increase of array length caused by each operation. It
+defaults to 0, which means there is no limit.
+
 ## Create and apply a merge patch
 Given both an original JSON document and a modified JSON document, you can create
 a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.

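The three package-level knobs documented in the README hunk above are plain variables, so callers assign them before applying any patches. A minimal sketch, assuming the library is imported from github.com/evanphx/json-patch under the usual jsonpatch name; the concrete limit values are illustrative only:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Disable the non-standard negative-index extension entirely.
	jsonpatch.SupportNegativeIndices = false

	// Cap the total array size a patched document may reach, and the growth a
	// single operation may cause; 0 (the default) means no limit.
	jsonpatch.ArraySizeLimit = 10000
	jsonpatch.ArraySizeAdditionLimit = 100

	fmt.Println("json-patch limits configured")
}
```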
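The "Create and apply a merge patch" section is cut off by the hunk boundary, and the README continues with its own example beyond it. As a rough sketch of the workflow that sentence introduces (diff two documents with CreateMergePatch, then apply the result with MergePatch), using made-up sample documents:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
	modified := []byte(`{"name": "Jane", "age": 24}`)

	// Build an RFC 7396 merge patch that turns original into modified.
	patch, err := jsonpatch.CreateMergePatch(original, modified)
	if err != nil {
		panic(err)
	}
	fmt.Printf("merge patch: %s\n", patch)

	// Applying that patch to the original reproduces the modified document.
	result, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		panic(err)
	}
	fmt.Printf("patched:     %s\n", result)
}
```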
@@ -14,6 +14,10 @@ const (
 	eAry
 )
 
+var SupportNegativeIndices bool = true
+var ArraySizeLimit int = 0
+var ArraySizeAdditionLimit int = 0
+
 type lazyNode struct {
 	raw *json.RawMessage
 	doc partialDoc

@@ -61,6 +65,19 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+func deepCopy(src *lazyNode) (*lazyNode, error) {
+	if src == nil {
+		return nil, nil
+	}
+	a, err := src.MarshalJSON()
+	if err != nil {
+		return nil, err
+	}
+	ra := make(json.RawMessage, len(a))
+	copy(ra, a)
+	return newLazyNode(&ra), nil
+}
+
 func (n *lazyNode) intoDoc() (*partialDoc, error) {
 	if n.which == eDoc {
 		return &n.doc, nil

@@ -354,10 +371,19 @@ func (d *partialArray) set(key string, val *lazyNode) error {
 	}
 
 	sz := len(*d)
+
+	if diff := idx + 1 - sz; ArraySizeAdditionLimit > 0 && diff > ArraySizeAdditionLimit {
+		return fmt.Errorf("Unable to increase the array size by %d, the limit is %d", diff, ArraySizeAdditionLimit)
+	}
+
 	if idx+1 > sz {
 		sz = idx + 1
 	}
 
+	if ArraySizeLimit > 0 && sz > ArraySizeLimit {
+		return fmt.Errorf("Unable to create array of size %d, limit is %d", sz, ArraySizeLimit)
+	}
+
 	ary := make([]*lazyNode, sz)
 
 	cur := *d

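The two guards added to set() above compose in a fixed order: first the per-operation growth is bounded, then the absolute size of the resulting array. A standalone sketch of that order with concrete numbers; checkArrayResize and the lower-case limit variables are hypothetical, written only for illustration and not part of the library:

```go
package main

import "fmt"

// Illustrative copies of the package-level limits; both are disabled at 0,
// matching the defaults introduced in the diff above.
var (
	arraySizeLimit         = 100
	arraySizeAdditionLimit = 10
)

// checkArrayResize mirrors the order of the guards added to set(): growth
// limit first, then total-size limit.
func checkArrayResize(currentLen, targetIdx int) error {
	sz := currentLen

	if diff := targetIdx + 1 - sz; arraySizeAdditionLimit > 0 && diff > arraySizeAdditionLimit {
		return fmt.Errorf("unable to increase the array size by %d, the limit is %d", diff, arraySizeAdditionLimit)
	}

	if targetIdx+1 > sz {
		sz = targetIdx + 1
	}

	if arraySizeLimit > 0 && sz > arraySizeLimit {
		return fmt.Errorf("unable to create array of size %d, limit is %d", sz, arraySizeLimit)
	}
	return nil
}

func main() {
	fmt.Println(checkArrayResize(5, 50)) // growth of 46 > 10: rejected
	fmt.Println(checkArrayResize(5, 7))  // within both limits: <nil>
}
```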
@@ -385,17 +411,29 @@ func (d *partialArray) add(key string, val *lazyNode) error {
 		return err
 	}
 
-	ary := make([]*lazyNode, len(*d)+1)
+	sz := len(*d) + 1
+	if ArraySizeLimit > 0 && sz > ArraySizeLimit {
+		return fmt.Errorf("Unable to create array of size %d, limit is %d", sz, ArraySizeLimit)
+	}
+
+	ary := make([]*lazyNode, sz)
 
 	cur := *d
 
-	if idx < -len(ary) || idx >= len(ary) {
+	if idx >= len(ary) {
 		return fmt.Errorf("Unable to access invalid index: %d", idx)
 	}
 
-	if idx < 0 {
-		idx += len(ary)
+	if SupportNegativeIndices {
+		if idx < -len(ary) {
+			return fmt.Errorf("Unable to access invalid index: %d", idx)
+		}
+
+		if idx < 0 {
+			idx += len(ary)
+		}
 	}
 
 	copy(ary[0:idx], cur[0:idx])
 	ary[idx] = val
 	copy(ary[idx+1:], cur[idx:])

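In the rewritten add() above, the upper bound is always enforced, while negative indices are only resolved, relative to the end of the array per the README text added in this commit, when SupportNegativeIndices is left at its default of true. A small usage sketch with a made-up document; the printed output is not asserted here:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"fruits": ["apple", "banana"]}`)

	// With the default jsonpatch.SupportNegativeIndices = true, index -1 is
	// taken relative to the end of the array; setting the variable to false
	// disables this non-standard behaviour.
	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "add", "path": "/fruits/-1", "value": "cherry"}
	]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	fmt.Printf("result: %s, err: %v\n", out, err)
}
```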
@@ -426,11 +464,18 @@ func (d *partialArray) remove(key string) error {
 
 	cur := *d
 
-	if idx < -len(cur) || idx >= len(cur) {
-		return fmt.Errorf("Unable to remove invalid index: %d", idx)
+	if idx >= len(cur) {
+		return fmt.Errorf("Unable to access invalid index: %d", idx)
 	}
-	if idx < 0 {
-		idx += len(cur)
+
+	if SupportNegativeIndices {
+		if idx < -len(cur) {
+			return fmt.Errorf("Unable to access invalid index: %d", idx)
+		}
+
+		if idx < 0 {
+			idx += len(cur)
+		}
 	}
 
 	ary := make([]*lazyNode, len(cur)-1)

@@ -567,7 +612,12 @@ func (p Patch) copy(doc *container, op operation) error {
 		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
 	}
 
-	return con.set(key, val)
+	valCopy, err := deepCopy(val)
+	if err != nil {
+		return err
+	}
+
+	return con.add(key, valCopy)
 }
 
 // Equal indicates if 2 JSON documents have the same structural equality.
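With the change above, a JSON Patch copy operation inserts a deep copy of the source value (via the new deepCopy helper) with con.add, instead of storing the original node with con.set, so the destination no longer shares one underlying node with the source. A rough usage sketch with made-up paths and document:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"spec": {"replicas": 2}, "backup": {}}`)

	// Copy the value at /spec into /backup/spec; after this change the copied
	// value is an independent node rather than an alias of the original.
	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "copy", "from": "/spec", "path": "/backup/spec"}
	]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("patched: %s\n", out)
}
```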