fix(deps): update module github.com/burntsushi/toml to v1.3.0

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Authored by renovate[bot] on 2023-05-30 16:13:20 +00:00, committed by GitHub
parent fa61bb2594
commit f48fa11131
9 changed files with 175 additions and 72 deletions

go.mod

@@ -3,7 +3,7 @@ module github.com/containers/podman/v4
 go 1.18

 require (
-    github.com/BurntSushi/toml v1.2.1
+    github.com/BurntSushi/toml v1.3.0
     github.com/Microsoft/go-winio v0.6.1
     github.com/blang/semver/v4 v4.0.0
     github.com/buger/goterm v1.0.4

go.sum

@@ -42,8 +42,9 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
 github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.0 h1:Ws8e5YmnrGEHzZEzg0YvK/7COGYtTC5PbaH9oSSbgfA=
+github.com/BurntSushi/toml v1.3.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=

vendor/github.com/BurntSushi/toml/decode.go

@@ -91,7 +91,7 @@ const (
 // UnmarshalText method. See the Unmarshaler example for a demonstration with
 // email addresses.
 //
-// ### Key mapping
+// # Key mapping
 //
 // TOML keys can map to either keys in a Go map or field names in a Go struct.
 // The special `toml` struct tag can be used to map TOML keys to struct fields
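
Not part of this commit, but for context: the change above only turns "Key mapping" into a proper godoc heading, the mapping behaviour itself is unchanged. A minimal sketch of that mapping, with an invented Config type and invented key names:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    // Config is illustrative only. The `toml` tag maps the TOML key
    // "listen_addr" onto ListenAddr; Workers has no tag and is matched
    // against the field name instead.
    type Config struct {
        ListenAddr string `toml:"listen_addr"`
        Workers    int
    }

    func main() {
        var cfg Config
        _, err := toml.Decode("listen_addr = \"0.0.0.0:8080\"\nWorkers = 4\n", &cfg)
        if err != nil {
            panic(err)
        }
        fmt.Println(cfg.ListenAddr, cfg.Workers)
    }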

vendor/github.com/BurntSushi/toml/deprecated.go

@@ -5,17 +5,25 @@ import (
     "io"
 )

+// TextMarshaler is an alias for encoding.TextMarshaler.
+//
 // Deprecated: use encoding.TextMarshaler
 type TextMarshaler encoding.TextMarshaler

+// TextUnmarshaler is an alias for encoding.TextUnmarshaler.
+//
 // Deprecated: use encoding.TextUnmarshaler
 type TextUnmarshaler encoding.TextUnmarshaler

+// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
+//
 // Deprecated: use MetaData.PrimitiveDecode.
 func PrimitiveDecode(primValue Primitive, v interface{}) error {
     md := MetaData{decoded: make(map[string]struct{})}
     return md.unify(primValue.undecoded, rvalue(v))
 }

+// DecodeReader is an alias for NewDecoder(r).Decode(v).
+//
 // Deprecated: use NewDecoder(reader).Decode(&value).
 func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
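
These added comments only restate the deprecations; the replacement they point at already exists. A minimal sketch of the recommended NewDecoder form (the struct and input below are invented for illustration):

    package main

    import (
        "fmt"
        "strings"

        "github.com/BurntSushi/toml"
    )

    func main() {
        var value struct {
            Name string `toml:"name"`
        }
        reader := strings.NewReader(`name = "podman"`)

        // Preferred over the deprecated toml.DecodeReader(reader, &value).
        meta, err := toml.NewDecoder(reader).Decode(&value)
        if err != nil {
            panic(err)
        }
        fmt.Println(value.Name, meta.Keys())
    }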

vendor/github.com/BurntSushi/toml/encode.go

@@ -136,6 +136,9 @@ func NewEncoder(w io.Writer) *Encoder {
 // document.
 func (enc *Encoder) Encode(v interface{}) error {
     rv := eindirect(reflect.ValueOf(v))
+    // XXX
     if err := enc.safeEncode(Key([]string{}), rv); err != nil {
         return err
     }
@@ -457,6 +460,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
             frv := eindirect(rv.Field(i))

+            if is32Bit {
+                // Copy so it works correct on 32bit archs; not clear why this
+                // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
+                //
+                // This also works fine on 64bit, but 32bit archs are somewhat
+                // rare and this is a wee bit faster.
+                copyStart := make([]int, len(start))
+                copy(copyStart, start)
+                start = copyStart
+            }
+
             // Treat anonymous struct fields with tag names as though they are
             // not anonymous, like encoding/json does.
             //
@@ -470,44 +483,37 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
             if typeIsTable(tomlTypeOfGo(frv)) {
                 fieldsSub = append(fieldsSub, append(start, f.Index...))
-            } else {
-                // Copy so it works correct on 32bit archs; not clear why this
-                // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
-                //
-                // This also works fine on 64bit, but 32bit archs are somewhat
-                // rare and this is a wee bit faster.
-                if is32Bit {
-                    copyStart := make([]int, len(start))
-                    copy(copyStart, start)
-                    fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
             } else {
                 fieldsDirect = append(fieldsDirect, append(start, f.Index...))
             }
         }
     }
-    }
     addFields(rt, rv, nil)

     writeFields := func(fields [][]int) {
         for _, fieldIndex := range fields {
             fieldType := rt.FieldByIndex(fieldIndex)
-            fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
-
-            if isNil(fieldVal) { /// Don't write anything for nil fields.
-                continue
-            }
+            fieldVal := rv.FieldByIndex(fieldIndex)

             opts := getOptions(fieldType.Tag)
             if opts.skip {
                 continue
             }
+            if opts.omitempty && isEmpty(fieldVal) {
+                continue
+            }
+
+            fieldVal = eindirect(fieldVal)
+
+            if isNil(fieldVal) { // Don't write anything for nil fields.
+                continue
+            }

             keyName := fieldType.Name
             if opts.name != "" {
                 keyName = opts.name
             }
-
-            if opts.omitempty && enc.isEmpty(fieldVal) {
-                continue
-            }
             if opts.omitzero && isZero(fieldVal) {
                 continue
             }
@@ -649,7 +655,7 @@ func isZero(rv reflect.Value) bool {
     return false
 }

-func (enc *Encoder) isEmpty(rv reflect.Value) bool {
+func isEmpty(rv reflect.Value) bool {
     switch rv.Kind() {
     case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
         return rv.Len() == 0
@@ -664,13 +670,15 @@ func (enc *Encoder) isEmpty(rv reflect.Value) bool {
         //    type b struct{ s []string }
         //    s := a{field: b{s: []string{"AAA"}}}
         for i := 0; i < rv.NumField(); i++ {
-            if !enc.isEmpty(rv.Field(i)) {
+            if !isEmpty(rv.Field(i)) {
                 return false
             }
         }
         return true
     case reflect.Bool:
         return !rv.Bool()
+    case reflect.Ptr:
+        return rv.IsNil()
     }
     return false
 }
@@ -693,8 +701,11 @@ func (enc *Encoder) newline() {
 //    v v v v vv
 //    key = {k = 1, k2 = 2}
 func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+    /// Marshaler used on top-level document; call eElement() to just call
+    /// Marshal{TOML,Text}.
     if len(key) == 0 {
-        encPanic(errNoKey)
+        enc.eElement(val)
+        return
     }
     enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
     enc.eElement(val)
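
Behavioural summary of the encoder changes above: omitempty is now checked with the package-level isEmpty before the value is dereferenced, a nil pointer counts as empty via the new reflect.Ptr case, and a Marshaler used as the top-level document no longer panics with errNoKey. A rough sketch of the omitempty side, with an invented Settings type:

    package main

    import (
        "os"

        "github.com/BurntSushi/toml"
    )

    // Settings is illustrative only.
    type Settings struct {
        Name    string   `toml:"name"`
        Comment string   `toml:"comment,omitempty"` // empty string: key omitted
        Limit   *int     `toml:"limit,omitempty"`   // nil pointer: key omitted (reflect.Ptr case)
        Labels  []string `toml:"labels,omitempty"`  // nil slice: key omitted
    }

    func main() {
        // Expected output is a single line: name = "example"
        if err := toml.NewEncoder(os.Stdout).Encode(Settings{Name: "example"}); err != nil {
            panic(err)
        }
    }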

vendor/github.com/BurntSushi/toml/error.go

@@ -84,7 +84,7 @@ func (pe ParseError) Error() string {
         pe.Position.Line, pe.LastKey, msg)
 }

-// ErrorWithUsage() returns the error with detailed location context.
+// ErrorWithPosition returns the error with detailed location context.
 //
 // See the documentation on [ParseError].
 func (pe ParseError) ErrorWithPosition() string {
@@ -124,7 +124,7 @@ func (pe ParseError) ErrorWithPosition() string {
     return b.String()
 }

-// ErrorWithUsage() returns the error with detailed location context and usage
+// ErrorWithUsage returns the error with detailed location context and usage
 // guidance.
 //
 // See the documentation on [ParseError].
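
Only the doc comments change here: the first one named the wrong method, and both carried stray parentheses after the name. For context, these methods are usually reached by unwrapping a ParseError from Decode's error; the broken document below is just an example:

    package main

    import (
        "errors"
        "fmt"

        "github.com/BurntSushi/toml"
    )

    func main() {
        var v map[string]interface{}
        _, err := toml.Decode("bad = ]", &v) // deliberately invalid TOML

        var perr toml.ParseError
        if errors.As(err, &perr) {
            // ErrorWithPosition adds line and column context;
            // ErrorWithUsage appends usage guidance when available.
            fmt.Println(perr.ErrorWithPosition())
        }
    }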

vendor/github.com/BurntSushi/toml/lex.go

@@ -618,6 +618,9 @@ func lexInlineTableValue(lx *lexer) stateFn {
     case isWhitespace(r):
         return lexSkip(lx, lexInlineTableValue)
     case isNL(r):
+        if tomlNext {
+            return lexSkip(lx, lexInlineTableValue)
+        }
         return lx.errorPrevLine(errLexInlineTableNL{})
     case r == '#':
         lx.push(lexInlineTableValue)
@@ -640,6 +643,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
     case isWhitespace(r):
         return lexSkip(lx, lexInlineTableValueEnd)
     case isNL(r):
+        if tomlNext {
+            return lexSkip(lx, lexInlineTableValueEnd)
+        }
         return lx.errorPrevLine(errLexInlineTableNL{})
     case r == '#':
         lx.push(lexInlineTableValueEnd)
@@ -648,6 +654,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
         lx.ignore()
         lx.skip(isWhitespace)
         if lx.peek() == '}' {
+            if tomlNext {
+                return lexInlineTableValueEnd
+            }
             return lx.errorf("trailing comma not allowed in inline tables")
         }
         return lexInlineTableValue
@@ -770,8 +779,8 @@ func lexRawString(lx *lexer) stateFn {
     }
 }

-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning ''' has already been consumed and
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a
+// string. It assumes that the beginning triple-' has already been consumed and
 // ignored.
 func lexMultilineRawString(lx *lexer) stateFn {
     r := lx.next()
@@ -828,6 +837,11 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
 func lexStringEscape(lx *lexer) stateFn {
     r := lx.next()
     switch r {
+    case 'e':
+        if !tomlNext {
+            return lx.error(errLexEscape{r})
+        }
+        fallthrough
     case 'b':
         fallthrough
     case 't':
@@ -846,6 +860,11 @@ func lexStringEscape(lx *lexer) stateFn {
         fallthrough
     case '\\':
         return lx.pop()
+    case 'x':
+        if !tomlNext {
+            return lx.error(errLexEscape{r})
+        }
+        return lexHexEscape
     case 'u':
         return lexShortUnicodeEscape
     case 'U':
@@ -854,6 +873,19 @@ func lexStringEscape(lx *lexer) stateFn {
     return lx.error(errLexEscape{r})
 }

+func lexHexEscape(lx *lexer) stateFn {
+    var r rune
+    for i := 0; i < 2; i++ {
+        r = lx.next()
+        if !isHexadecimal(r) {
+            return lx.errorf(
+                `expected two hexadecimal digits after '\x', but got %q instead`,
+                lx.current())
+        }
+    }
+    return lx.pop()
+}
+
 func lexShortUnicodeEscape(lx *lexer) stateFn {
     var r rune
     for i := 0; i < 4; i++ {
@@ -1225,7 +1257,23 @@ func isOctal(r rune) bool { return r >= '0' && r <= '7' }
 func isHexadecimal(r rune) bool {
     return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
 }

 func isBareKeyChar(r rune) bool {
+    if tomlNext {
+        return (r >= 'A' && r <= 'Z') ||
+            (r >= 'a' && r <= 'z') ||
+            (r >= '0' && r <= '9') ||
+            r == '_' || r == '-' ||
+            r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
+            (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
+            (r >= 0x037f && r <= 0x1fff) ||
+            (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
+            (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
+            (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
+            (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
+            (r >= 0x10000 && r <= 0xeffff)
+    }
+
     return (r >= 'A' && r <= 'Z') ||
         (r >= 'a' && r <= 'z') ||
         (r >= '0' && r <= '9') ||
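
Every new branch above is gated on tomlNext, the TOML 1.1 preview switch introduced in parse.go below (enabled by setting BURNTSUSHI_TOML_110 in the environment before the program starts). A small sketch of what the lexer then accepts; the document contents are invented:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
    )

    func main() {
        // \x escapes and trailing commas in inline tables are rejected by
        // TOML 1.0 but accepted in TOML 1.1 mode, e.g. when run as:
        //   BURNTSUSHI_TOML_110=1 go run .
        const doc = "name = \"\\x41\"\n" +
            "table = { a = 1, b = 2, }\n"

        var v struct {
            Name  string         `toml:"name"`
            Table map[string]int `toml:"table"`
        }
        if _, err := toml.Decode(doc, &v); err != nil {
            fmt.Println("rejected (TOML 1.1 mode not enabled?):", err)
            return
        }
        fmt.Printf("%q %v\n", v.Name, v.Table) // "A" map[a:1 b:2]
    }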

vendor/github.com/BurntSushi/toml/parse.go

@@ -2,6 +2,7 @@ package toml

 import (
     "fmt"
+    "os"
     "strconv"
     "strings"
     "time"
@@ -10,6 +11,11 @@ import (
     "github.com/BurntSushi/toml/internal"
 )

+var tomlNext = func() bool {
+    _, ok := os.LookupEnv("BURNTSUSHI_TOML_110")
+    return ok
+}()
+
 type parser struct {
     lx      *lexer
     context Key // Full key for the current hash in scope.
@@ -41,9 +47,12 @@ func parse(data string) (p *parser, err error) {
     }()

     // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
-    // which mangles stuff.
-    if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
+    // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add
+    // it anyway.
+    if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
         data = data[2:]
+    } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
+        data = data[3:]
     }

     // Examine first few bytes for NULL bytes; this probably means it's a UTF-16
@@ -236,7 +245,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
     case itemString:
         return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
     case itemMultilineString:
-        return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
+        return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it)
     case itemRawString:
         return it.val, p.typeOfPrimitive(it)
     case itemRawMultilineString:
@@ -331,11 +340,17 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
 var dtTypes = []struct {
     fmt  string
     zone *time.Location
+    next bool
 }{
-    {time.RFC3339Nano, time.Local},
-    {"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
-    {"2006-01-02", internal.LocalDate},
-    {"15:04:05.999999999", internal.LocalTime},
+    {time.RFC3339Nano, time.Local, false},
+    {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false},
+    {"2006-01-02", internal.LocalDate, false},
+    {"15:04:05.999999999", internal.LocalTime, false},
+
+    // tomlNext
+    {"2006-01-02T15:04Z07:00", time.Local, true},
+    {"2006-01-02T15:04", internal.LocalDatetime, true},
+    {"15:04", internal.LocalTime, true},
 }

 func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
@@ -346,6 +361,9 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
         err error
     )
     for _, dt := range dtTypes {
+        if dt.next && !tomlNext {
+            continue
+        }
         t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
         if err == nil {
             ok = true
@@ -384,6 +402,7 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
         //
         // Not entirely sure how to best store this; could use "key[0]",
         // "key[1]" notation, or maybe store it on the Array type?
+        _ = types
     }
     return array, tomlArray
 }
@@ -662,49 +681,54 @@ func stripFirstNewline(s string) string {
     return s
 }

-// Remove newlines inside triple-quoted strings if a line ends with "\".
+// stripEscapedNewlines removes whitespace after line-ending backslashes in
+// multiline strings.
+//
+// A line-ending backslash is an unescaped \ followed only by whitespace until
+// the next newline. After a line-ending backslash, all whitespace is removed
+// until the next non-whitespace character.
 func (p *parser) stripEscapedNewlines(s string) string {
-    split := strings.Split(s, "\n")
-    if len(split) < 1 {
-        return s
-    }
-
-    escNL := false // Keep track of the last non-blank line was escaped.
-    for i, line := range split {
-        line = strings.TrimRight(line, " \t\r")
-
-        if len(line) == 0 || line[len(line)-1] != '\\' {
-            split[i] = strings.TrimRight(split[i], "\r")
-            if !escNL && i != len(split)-1 {
-                split[i] += "\n"
-            }
-            continue
-        }
-
-        escBS := true
-        for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
-            escBS = !escBS
-        }
-        if escNL {
-            line = strings.TrimLeft(line, " \t\r")
-        }
-        escNL = !escBS
-
-        if escBS {
-            split[i] += "\n"
-            continue
-        }
-
-        if i == len(split)-1 {
-            p.panicf("invalid escape: '\\ '")
-        }
-
-        split[i] = line[:len(line)-1] // Remove \
-        if len(split)-1 > i {
-            split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
-        }
-    }
-    return strings.Join(split, "")
+    var b strings.Builder
+    var i int
+    for {
+        ix := strings.Index(s[i:], `\`)
+        if ix < 0 {
+            b.WriteString(s)
+            return b.String()
+        }
+        i += ix
+
+        if len(s) > i+1 && s[i+1] == '\\' {
+            // Escaped backslash.
+            i += 2
+            continue
+        }
+
+        // Scan until the next non-whitespace.
+        j := i + 1
+    whitespaceLoop:
+        for ; j < len(s); j++ {
+            switch s[j] {
+            case ' ', '\t', '\r', '\n':
+            default:
+                break whitespaceLoop
+            }
+        }
+
+        if j == i+1 {
+            // Not a whitespace escape.
+            i++
+            continue
+        }
+        if !strings.Contains(s[i:j], "\n") {
+            // This is not a line-ending backslash.
+            // (It's a bad escape sequence, but we can let
+            // replaceEscapes catch it.)
+            i++
+            continue
+        }
+
+        b.WriteString(s[:i])
+        s = s[j:]
+        i = 0
+    }
 }
@@ -743,12 +767,23 @@ func (p *parser) replaceEscapes(it item, str string) string {
         case 'r':
             replaced = append(replaced, rune(0x000D))
             r += 1
+        case 'e':
+            if tomlNext {
+                replaced = append(replaced, rune(0x001B))
+                r += 1
+            }
         case '"':
             replaced = append(replaced, rune(0x0022))
             r += 1
         case '\\':
             replaced = append(replaced, rune(0x005C))
             r += 1
+        case 'x':
+            if tomlNext {
+                escaped := p.asciiEscapeToUnicode(it, s[r+1:r+3])
+                replaced = append(replaced, escaped)
+                r += 3
+            }
         case 'u':
             // At this point, we know we have a Unicode escape of the form
             // `uXXXX` at [r, r+5). (Because the lexer guarantees this
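
The dtTypes additions and the dt.next guard above make the seconds component of times and datetimes optional under the same BURNTSUSHI_TOML_110 switch. A rough sketch, with an invented key name:

    package main

    import (
        "fmt"
        "time"

        "github.com/BurntSushi/toml"
    )

    func main() {
        // "2023-05-30T16:13" omits the seconds; only the new tomlNext entries
        // in dtTypes accept it, so this parses only in TOML 1.1 mode.
        var v struct {
            Start time.Time `toml:"start"`
        }
        if _, err := toml.Decode("start = 2023-05-30T16:13\n", &v); err != nil {
            fmt.Println("rejected (TOML 1.1 mode not enabled?):", err)
            return
        }
        fmt.Println(v.Start.Format("2006-01-02 15:04"))
    }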

vendor/modules.txt (vendored)

@@ -2,7 +2,7 @@
 ## explicit; go 1.16
 github.com/Azure/go-ansiterm
 github.com/Azure/go-ansiterm/winterm
-# github.com/BurntSushi/toml v1.2.1
+# github.com/BurntSushi/toml v1.3.0
 ## explicit; go 1.16
 github.com/BurntSushi/toml
 github.com/BurntSushi/toml/internal