mirror of https://github.com/etcd-io/dbtester.git

vendor: update

This commit is contained in:
parent ce2d57ad02
commit 65754b40c5
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -133,7 +133,7 @@ func (s *Stream) Query(q float64) float64 {
 	if l == 0 {
 		return 0
 	}
-	i := int(float64(l) * q)
+	i := int(math.Ceil(float64(l) * q))
 	if i > 0 {
 		i -= 1
 	}
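The math.Ceil change above shifts which ranked sample answers a quantile query. A rough sketch of the index arithmetic it replaces (assuming a 10-element sorted sample list; this is not the library's full Query path):

    l, q := 10, 0.95
    iOld := int(float64(l) * q)            // 9  -> index 8 after the shared "i -= 1"
    iNew := int(math.Ceil(float64(l) * q)) // 10 -> index 9 after the shared "i -= 1"

So for non-integral l*q the new code selects the next-higher ranked sample.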
(File diff suppressed because it is too large.)
@@ -97,7 +97,12 @@ func (c *Client) Close() error {
 func (c *Client) Ctx() context.Context { return c.ctx }

 // Endpoints lists the registered endpoints for the client.
-func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
+func (c *Client) Endpoints() (eps []string) {
+  // copy the slice; protect original endpoints from being changed
+  eps = make([]string, len(c.cfg.Endpoints))
+  copy(eps, c.cfg.Endpoints)
+  return
+}

 // SetEndpoints updates client's endpoints.
 func (c *Client) SetEndpoints(eps ...string) {
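The rewritten Endpoints accessor hands back a copy, so callers can no longer mutate the client's internal endpoint list by accident. A minimal sketch, assuming a connected *clientv3.Client named cli:

    eps := cli.Endpoints() // defensive copy of the configured endpoints
    eps[0] = "http://bogus:2379"
    // cli still dials its original endpoints; use cli.SetEndpoints(...) to change them.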
@@ -138,15 +138,24 @@ func BigIBytes(s *big.Int) string {
 // ParseBigBytes("42mib") -> 44040192, nil
 func ParseBigBytes(s string) (*big.Int, error) {
   lastDigit := 0
+  hasComma := false
   for _, r := range s {
-    if !(unicode.IsDigit(r) || r == '.') {
+    if !(unicode.IsDigit(r) || r == '.' || r == ',') {
       break
     }
+    if r == ',' {
+      hasComma = true
+    }
     lastDigit++
   }

+  num := s[:lastDigit]
+  if hasComma {
+    num = strings.Replace(num, ",", "", -1)
+  }
+
   val := &big.Rat{}
-  _, err := fmt.Sscanf(s[:lastDigit], "%f", val)
+  _, err := fmt.Sscanf(num, "%f", val)
   if err != nil {
     return nil, err
   }
@@ -109,14 +109,23 @@ func IBytes(s uint64) string {
 // ParseBytes("42mib") -> 44040192, nil
 func ParseBytes(s string) (uint64, error) {
   lastDigit := 0
+  hasComma := false
   for _, r := range s {
-    if !(unicode.IsDigit(r) || r == '.') {
+    if !(unicode.IsDigit(r) || r == '.' || r == ',') {
       break
     }
+    if r == ',' {
+      hasComma = true
+    }
     lastDigit++
   }

-  f, err := strconv.ParseFloat(s[:lastDigit], 64)
+  num := s[:lastDigit]
+  if hasComma {
+    num = strings.Replace(num, ",", "", -1)
+  }
+
+  f, err := strconv.ParseFloat(num, 64)
   if err != nil {
     return 0, err
   }
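With the comma handling above, humanize accepts digit-grouped input. A small usage sketch (the expected value assumes SI sizing for "MB"):

    n, err := humanize.ParseBytes("1,024 MB") // commas are stripped before strconv.ParseFloat
    if err == nil {
        fmt.Println(n) // expected: 1024000000
    }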
@@ -39,7 +39,7 @@ func Comma(v int64) string {
 // Commaf produces a string form of the given number in base 10 with
 // commas after every three orders of magnitude.
 //
-// e.g. Comma(834142.32) -> 834,142.32
+// e.g. Commaf(834142.32) -> 834,142.32
 func Commaf(v float64) string {
   buf := &bytes.Buffer{}
   if v < 0 {
@@ -0,0 +1,40 @@
+// +build go1.6
+
+package humanize
+
+import (
+  "bytes"
+  "math/big"
+  "strings"
+)
+
+// BigCommaf produces a string form of the given big.Float in base 10
+// with commas after every three orders of magnitude.
+func BigCommaf(v *big.Float) string {
+  buf := &bytes.Buffer{}
+  if v.Sign() < 0 {
+    buf.Write([]byte{'-'})
+    v.Abs(v)
+  }
+
+  comma := []byte{','}
+
+  parts := strings.Split(v.Text('f', -1), ".")
+  pos := 0
+  if len(parts[0])%3 != 0 {
+    pos += len(parts[0]) % 3
+    buf.WriteString(parts[0][:pos])
+    buf.Write(comma)
+  }
+  for ; pos < len(parts[0]); pos += 3 {
+    buf.WriteString(parts[0][pos : pos+3])
+    buf.Write(comma)
+  }
+  buf.Truncate(buf.Len() - 1)
+
+  if len(parts) > 1 {
+    buf.Write([]byte{'.'})
+    buf.WriteString(parts[1])
+  }
+  return buf.String()
+}
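A quick usage sketch of the new helper (the printed form assumes the default big.Float precision round-trips the literal):

    f := big.NewFloat(1234567.89)
    fmt.Println(humanize.BigCommaf(f)) // expected: 1,234,567.89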
@@ -41,7 +41,7 @@ func revfmap(in map[float64]string) map[string]float64 {
 var riParseRegex *regexp.Regexp

 func init() {
-  ri := `^([0-9.]+)\s?([`
+  ri := `^([\-0-9.]+)\s?([`
   for _, v := range siPrefixTable {
     ri += v
   }
@@ -61,18 +61,21 @@ func ComputeSI(input float64) (float64, string) {
   if input == 0 {
     return 0, ""
   }
-  exponent := math.Floor(logn(input, 10))
+  mag := math.Abs(input)
+  exponent := math.Floor(logn(mag, 10))
   exponent = math.Floor(exponent/3) * 3

-  value := input / math.Pow(10, exponent)
+  value := mag / math.Pow(10, exponent)

   // Handle special case where value is exactly 1000.0
   // Should return 1M instead of 1000k
   if value == 1000.0 {
     exponent += 3
-    value = input / math.Pow(10, exponent)
+    value = mag / math.Pow(10, exponent)
   }

+  value = math.Copysign(value, input)
+
   prefix := siPrefixTable[exponent]
   return value, prefix
 }
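Taking math.Abs before choosing the exponent and restoring the sign with math.Copysign lets negative inputs pick the same prefix as their magnitude. A sketch of the expected behaviour:

    v, prefix := humanize.ComputeSI(-2.2345e-12)
    fmt.Println(v, prefix) // expected: -2.2345 p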
@@ -9,9 +9,7 @@ import (

 // Seconds-based time units
 const (
-  Minute = 60
-  Hour   = 60 * Minute
-  Day    = 24 * Hour
+  Day   = 24 * time.Hour
   Week  = 7 * Day
   Month = 30 * Day
   Year  = 12 * Month
@@ -25,18 +23,35 @@ func Time(then time.Time) string {
   return RelTime(then, time.Now(), "ago", "from now")
 }

-var magnitudes = []struct {
-  d      int64
-  format string
-  divby  int64
-}{
-  {1, "now", 1},
-  {2, "1 second %s", 1},
-  {Minute, "%d seconds %s", 1},
-  {2 * Minute, "1 minute %s", 1},
-  {Hour, "%d minutes %s", Minute},
-  {2 * Hour, "1 hour %s", 1},
-  {Day, "%d hours %s", Hour},
+// A RelTimeMagnitude struct contains a relative time point at which
+// the relative format of time will switch to a new format string. A
+// slice of these in ascending order by their "D" field is passed to
+// CustomRelTime to format durations.
+//
+// The Format field is a string that may contain a "%s" which will be
+// replaced with the appropriate signed label (e.g. "ago" or "from
+// now") and a "%d" that will be replaced by the quantity.
+//
+// The DivBy field is the amount of time the time difference must be
+// divided by in order to display correctly.
+//
+// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
+// DivBy should be time.Minute so whatever the duration is will be
+// expressed in minutes.
+type RelTimeMagnitude struct {
+  D      time.Duration
+  Format string
+  DivBy  time.Duration
+}
+
+var defaultMagnitudes = []RelTimeMagnitude{
+  {time.Second, "now", time.Second},
+  {2 * time.Second, "1 second %s", 1},
+  {time.Minute, "%d seconds %s", time.Second},
+  {2 * time.Minute, "1 minute %s", 1},
+  {time.Hour, "%d minutes %s", time.Minute},
+  {2 * time.Hour, "1 hour %s", 1},
+  {Day, "%d hours %s", time.Hour},
   {2 * Day, "1 day %s", 1},
   {Week, "%d days %s", Day},
   {2 * Week, "1 week %s", 1},
@@ -57,35 +72,46 @@ var magnitudes = []struct {
 //
 // RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
 func RelTime(a, b time.Time, albl, blbl string) string {
-  lbl := albl
-  diff := b.Unix() - a.Unix()
+  return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
+}

-  after := a.After(b)
-  if after {
+// CustomRelTime formats a time into a relative string.
+//
+// It takes two times two labels and a table of relative time formats.
+// In addition to the generic time delta string (e.g. 5 minutes), the
+// labels are used applied so that the label corresponding to the
+// smaller time is applied.
+func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
+  lbl := albl
+  diff := b.Sub(a)
+
+  if a.After(b) {
     lbl = blbl
-    diff = a.Unix() - b.Unix()
+    diff = a.Sub(b)
   }

   n := sort.Search(len(magnitudes), func(i int) bool {
-    return magnitudes[i].d > diff
+    return magnitudes[i].D >= diff
   })

+  if n >= len(magnitudes) {
+    n = len(magnitudes) - 1
+  }
   mag := magnitudes[n]
   args := []interface{}{}
   escaped := false
-  for _, ch := range mag.format {
+  for _, ch := range mag.Format {
     if escaped {
       switch ch {
-      case '%':
       case 's':
         args = append(args, lbl)
       case 'd':
-        args = append(args, diff/mag.divby)
+        args = append(args, diff/mag.DivBy)
       }
       escaped = false
     } else {
       escaped = ch == '%'
     }
   }
-  return fmt.Sprintf(mag.format, args...)
+  return fmt.Sprintf(mag.Format, args...)
 }
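Exporting RelTimeMagnitude makes the formatting table pluggable. A minimal sketch of the new entry point with a hypothetical two-entry table:

    magnitudes := []humanize.RelTimeMagnitude{
        {D: time.Minute, Format: "%d seconds %s", DivBy: time.Second},
        {D: time.Hour, Format: "%d minutes %s", DivBy: time.Minute},
    }
    then := time.Now().Add(-90 * time.Second)
    fmt.Println(humanize.CustomRelTime(then, time.Now(), "ago", "from now", magnitudes))
    // expected along the lines of "1 minutes ago" with this table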
@@ -1,6 +1,7 @@
 package api

 import (
+  "bufio"
   "fmt"
 )
@@ -73,7 +74,8 @@ type AgentServiceCheck struct {
   HTTP   string `json:",omitempty"`
   TCP    string `json:",omitempty"`
   Status string `json:",omitempty"`
-  TLSSkipVerify string `json:",omitempty"`
+  Notes         string `json:",omitempty"`
+  TLSSkipVerify bool   `json:",omitempty"`

   // In Consul 0.7 and later, checks that are associated with a service
   // may also contain this optional DeregisterCriticalServiceAfter field,
@@ -115,6 +117,17 @@ func (a *Agent) Self() (map[string]map[string]interface{}, error) {
   return out, nil
 }

+// Reload triggers a configuration reload for the agent we are connected to.
+func (a *Agent) Reload() error {
+  r := a.c.newRequest("PUT", "/v1/agent/reload")
+  _, resp, err := requireOK(a.c.doRequest(r))
+  if err != nil {
+    return err
+  }
+  resp.Body.Close()
+  return nil
+}
+
 // NodeName is used to get the node name of the agent
 func (a *Agent) NodeName() (string, error) {
   if a.nodeName != "" {
@@ -346,6 +359,17 @@ func (a *Agent) Join(addr string, wan bool) error {
   return nil
 }

+// Leave is used to have the agent gracefully leave the cluster and shutdown
+func (a *Agent) Leave() error {
+  r := a.c.newRequest("PUT", "/v1/agent/leave")
+  _, resp, err := requireOK(a.c.doRequest(r))
+  if err != nil {
+    return err
+  }
+  resp.Body.Close()
+  return nil
+}
+
 // ForceLeave is used to have the agent eject a failed node
 func (a *Agent) ForceLeave(node string) error {
   r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
@@ -410,3 +434,38 @@ func (a *Agent) DisableNodeMaintenance() error {
   resp.Body.Close()
   return nil
 }
+
+// Monitor returns a channel which will receive streaming logs from the agent
+// Providing a non-nil stopCh can be used to close the connection and stop the
+// log stream
+func (a *Agent) Monitor(loglevel string, stopCh chan struct{}, q *QueryOptions) (chan string, error) {
+  r := a.c.newRequest("GET", "/v1/agent/monitor")
+  r.setQueryOptions(q)
+  if loglevel != "" {
+    r.params.Add("loglevel", loglevel)
+  }
+  _, resp, err := requireOK(a.c.doRequest(r))
+  if err != nil {
+    return nil, err
+  }
+
+  logCh := make(chan string, 64)
+  go func() {
+    defer resp.Body.Close()
+
+    scanner := bufio.NewScanner(resp.Body)
+    for {
+      select {
+      case <-stopCh:
+        close(logCh)
+        return
+      default:
+      }
+      if scanner.Scan() {
+        logCh <- scanner.Text()
+      }
+    }
+  }()
+
+  return logCh, nil
+}
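A sketch of consuming the new streaming endpoint, assuming client is an *api.Client; closing stopCh from another goroutine ends the stream and closes the returned channel:

    stopCh := make(chan struct{})
    logCh, err := client.Agent().Monitor("info", stopCh, nil)
    if err != nil {
        log.Fatal(err)
    }
    for line := range logCh { // ends once close(stopCh) is called elsewhere
        fmt.Println(line)
    }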
@@ -20,6 +20,28 @@ import (
   "github.com/hashicorp/go-cleanhttp"
 )

+const (
+  // HTTPAddrEnvName defines an environment variable name which sets
+  // the HTTP address if there is no -http-addr specified.
+  HTTPAddrEnvName = "CONSUL_HTTP_ADDR"
+
+  // HTTPTokenEnvName defines an environment variable name which sets
+  // the HTTP token.
+  HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
+
+  // HTTPAuthEnvName defines an environment variable name which sets
+  // the HTTP authentication header.
+  HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
+
+  // HTTPSSLEnvName defines an environment variable name which sets
+  // whether or not to use HTTPS.
+  HTTPSSLEnvName = "CONSUL_HTTP_SSL"
+
+  // HTTPSSLVerifyEnvName defines an environment variable name which sets
+  // whether or not to disable certificate checking.
+  HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY"
+)
+
 // QueryOptions are used to parameterize a query
 type QueryOptions struct {
   // Providing a datacenter overwrites the DC provided
@@ -181,15 +203,15 @@ func defaultConfig(transportFn func() *http.Transport) *Config {
     },
   }

-  if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" {
+  if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
     config.Address = addr
   }

-  if token := os.Getenv("CONSUL_HTTP_TOKEN"); token != "" {
+  if token := os.Getenv(HTTPTokenEnvName); token != "" {
     config.Token = token
   }

-  if auth := os.Getenv("CONSUL_HTTP_AUTH"); auth != "" {
+  if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
     var username, password string
     if strings.Contains(auth, ":") {
       split := strings.SplitN(auth, ":", 2)
@@ -205,10 +227,10 @@ func defaultConfig(transportFn func() *http.Transport) *Config {
     }
   }

-  if ssl := os.Getenv("CONSUL_HTTP_SSL"); ssl != "" {
+  if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
     enabled, err := strconv.ParseBool(ssl)
     if err != nil {
-      log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL: %s", err)
+      log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err)
     }

     if enabled {
@@ -216,10 +238,10 @@ func defaultConfig(transportFn func() *http.Transport) *Config {
     }
   }

-  if verify := os.Getenv("CONSUL_HTTP_SSL_VERIFY"); verify != "" {
+  if verify := os.Getenv(HTTPSSLVerifyEnvName); verify != "" {
     doVerify, err := strconv.ParseBool(verify)
     if err != nil {
-      log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL_VERIFY: %s", err)
+      log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
     }

     if !doVerify {
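The exported *EnvName constants let callers reference these variables without hard-coding the strings that defaultConfig now reads. A small sketch (address is illustrative):

    os.Setenv(api.HTTPAddrEnvName, "127.0.0.1:8500") // i.e. CONSUL_HTTP_ADDR
    client, err := api.NewClient(api.DefaultConfig())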
@@ -16,6 +16,8 @@ type CatalogService struct {
   ServiceTags              []string
   ServicePort              int
   ServiceEnableTagOverride bool
+  CreateIndex              uint64
+  ModifyIndex              uint64
 }

 type CatalogNode struct {
@@ -2,6 +2,7 @@ package api

 import (
   "fmt"
+  "strings"
 )

 const (
@@ -11,6 +12,15 @@ const (
   HealthPassing  = "passing"
   HealthWarning  = "warning"
   HealthCritical = "critical"
+  HealthMaint    = "maintenance"
+)
+
+const (
+  // NodeMaint is the special key set by a node in maintenance mode.
+  NodeMaint = "_node_maintenance"
+
+  // ServiceMaintPrefix is the prefix for a service in maintenance mode.
+  ServiceMaintPrefix = "_service_maintenance:"
 )

 // HealthCheck is used to represent a single check
@@ -25,11 +35,56 @@ type HealthCheck struct {
   ServiceName string
 }

+// HealthChecks is a collection of HealthCheck structs.
+type HealthChecks []*HealthCheck
+
+// AggregatedStatus returns the "best" status for the list of health checks.
+// Because a given entry may have many service and node-level health checks
+// attached, this function determines the best representative of the status as
+// as single string using the following heuristic:
+//
+//  maintenance > critical > warning > passing
+//
+func (c HealthChecks) AggregatedStatus() string {
+  var passing, warning, critical, maintenance bool
+  for _, check := range c {
+    id := string(check.CheckID)
+    if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
+      maintenance = true
+      continue
+    }
+
+    switch check.Status {
+    case HealthPassing:
+      passing = true
+    case HealthWarning:
+      warning = true
+    case HealthCritical:
+      critical = true
+    default:
+      return ""
+    }
+  }
+
+  switch {
+  case maintenance:
+    return HealthMaint
+  case critical:
+    return HealthCritical
+  case warning:
+    return HealthWarning
+  case passing:
+    return HealthPassing
+  default:
+    return HealthPassing
+  }
+}
+
 // ServiceEntry is used for the health service endpoint
 type ServiceEntry struct {
   Node    *Node
   Service *AgentService
-  Checks  []*HealthCheck
+  Checks  HealthChecks
 }

 // Health can be used to query the Health endpoints
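A brief sketch of the new aggregation helper on a service entry returned by the health endpoint (service name is illustrative):

    entries, _, err := client.Health().Service("web", "", false, nil)
    if err == nil && len(entries) > 0 {
        fmt.Println(entries[0].Checks.AggregatedStatus()) // "passing", "warning", "critical" or "maintenance"
    }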
@@ -43,7 +98,7 @@ func (c *Client) Health() *Health {
 }

 // Node is used to query for checks belonging to a given node
-func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
   r := h.c.newRequest("GET", "/v1/health/node/"+node)
   r.setQueryOptions(q)
   rtt, resp, err := requireOK(h.c.doRequest(r))
@@ -56,7 +111,7 @@ func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta,
   parseQueryMeta(resp, qm)
   qm.RequestTime = rtt

-  var out []*HealthCheck
+  var out HealthChecks
   if err := decodeBody(resp, &out); err != nil {
     return nil, nil, err
   }
@@ -64,7 +119,7 @@ func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta,
 }

 // Checks is used to return the checks associated with a service
-func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
   r := h.c.newRequest("GET", "/v1/health/checks/"+service)
   r.setQueryOptions(q)
   rtt, resp, err := requireOK(h.c.doRequest(r))
@@ -77,7 +132,7 @@ func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *Query
   parseQueryMeta(resp, qm)
   qm.RequestTime = rtt

-  var out []*HealthCheck
+  var out HealthChecks
   if err := decodeBody(resp, &out); err != nil {
     return nil, nil, err
   }
@@ -115,7 +170,7 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions)

 // State is used to retrieve all the checks in a given state.
 // The wildcard "any" state can also be used for all checks.
-func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) {
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
   switch state {
   case HealthAny:
   case HealthWarning:
@@ -136,7 +191,7 @@ func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMet
   parseQueryMeta(resp, qm)
   qm.RequestTime = rtt

-  var out []*HealthCheck
+  var out HealthChecks
   if err := decodeBody(resp, &out); err != nil {
     return nil, nil, err
   }
@@ -50,21 +50,21 @@ type KVOp string

 const (
   KVSet          KVOp = "set"
-  KVDelete            = "delete"
-  KVDeleteCAS          = "delete-cas"
-  KVDeleteTree         = "delete-tree"
-  KVCAS                = "cas"
-  KVLock               = "lock"
-  KVUnlock             = "unlock"
-  KVGet                = "get"
-  KVGetTree            = "get-tree"
-  KVCheckSession       = "check-session"
-  KVCheckIndex         = "check-index"
+  KVDelete       KVOp = "delete"
+  KVDeleteCAS    KVOp = "delete-cas"
+  KVDeleteTree   KVOp = "delete-tree"
+  KVCAS          KVOp = "cas"
+  KVLock         KVOp = "lock"
+  KVUnlock       KVOp = "unlock"
+  KVGet          KVOp = "get"
+  KVGetTree      KVOp = "get-tree"
+  KVCheckSession KVOp = "check-session"
+  KVCheckIndex   KVOp = "check-index"
 )

 // KVTxnOp defines a single operation inside a transaction.
 type KVTxnOp struct {
-  Verb  string
+  Verb  KVOp
   Key   string
   Value []byte
   Flags uint64
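With the verbs typed as KVOp, transaction operations are built from the typed constants. A sketch, assuming the KVTxnOps slice type and KV().Txn helper present in this vintage of the Consul client:

    op := &api.KVTxnOp{Verb: api.KVSet, Key: "app/config", Value: []byte("v1")}
    ok, _, _, err := client.KV().Txn(api.KVTxnOps{op}, nil) // ok reports whether the txn committed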
@@ -43,6 +43,26 @@ type RaftConfiguration struct {
   Index uint64
 }

+// keyringRequest is used for performing Keyring operations
+type keyringRequest struct {
+  Key string
+}
+
+// KeyringResponse is returned when listing the gossip encryption keys
+type KeyringResponse struct {
+  // Whether this response is for a WAN ring
+  WAN bool
+
+  // The datacenter name this request corresponds to
+  Datacenter string
+
+  // A map of the encryption keys to the number of nodes they're installed on
+  Keys map[string]int
+
+  // The total number of nodes in this ring
+  NumNodes int
+}
+
 // RaftGetConfiguration is used to query the current Raft peer set.
 func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
   r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
@@ -79,3 +99,65 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err
   resp.Body.Close()
   return nil
 }
+
+// KeyringInstall is used to install a new gossip encryption key into the cluster
+func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
+  r := op.c.newRequest("POST", "/v1/operator/keyring")
+  r.setWriteOptions(q)
+  r.obj = keyringRequest{
+    Key: key,
+  }
+  _, resp, err := requireOK(op.c.doRequest(r))
+  if err != nil {
+    return err
+  }
+  resp.Body.Close()
+  return nil
+}
+
+// KeyringList is used to list the gossip keys installed in the cluster
+func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
+  r := op.c.newRequest("GET", "/v1/operator/keyring")
+  r.setQueryOptions(q)
+  _, resp, err := requireOK(op.c.doRequest(r))
+  if err != nil {
+    return nil, err
+  }
+  defer resp.Body.Close()
+
+  var out []*KeyringResponse
+  if err := decodeBody(resp, &out); err != nil {
+    return nil, err
+  }
+  return out, nil
+}
+
+// KeyringRemove is used to remove a gossip encryption key from the cluster
+func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
+  r := op.c.newRequest("DELETE", "/v1/operator/keyring")
+  r.setWriteOptions(q)
+  r.obj = keyringRequest{
+    Key: key,
+  }
+  _, resp, err := requireOK(op.c.doRequest(r))
+  if err != nil {
+    return err
+  }
+  resp.Body.Close()
+  return nil
+}
+
+// KeyringUse is used to change the active gossip encryption key
+func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
+  r := op.c.newRequest("PUT", "/v1/operator/keyring")
+  r.setWriteOptions(q)
+  r.obj = keyringRequest{
+    Key: key,
+  }
+  _, resp, err := requireOK(op.c.doRequest(r))
+  if err != nil {
+    return err
+  }
+  resp.Body.Close()
+  return nil
+}
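A short sketch of driving the new keyring endpoints from the Operator client (the key value is a placeholder base64 string):

    op := client.Operator()
    if err := op.KeyringInstall("3lg9DxVfKNzI8O+IQ5Ek+Q==", nil); err != nil {
        log.Fatal(err)
    }
    rings, err := op.KeyringList(nil)
    if err == nil {
        for _, ring := range rings {
            fmt.Println(ring.Datacenter, ring.WAN, ring.NumNodes, len(ring.Keys))
        }
    }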
@@ -19,7 +19,7 @@ type Flattener interface {
   MoveTo(x, y float64)
   // LineTo Draw a line from the current position to the point (x, y)
   LineTo(x, y float64)
-  // LineJoin add the most recent starting point to close the path to create a polygon
+  // LineJoin use Round, Bevel or miter to join points
   LineJoin()
   // Close add the most recent starting point to close the path to create a polygon
   Close()
@@ -4,6 +4,7 @@
 package draw2dbase

 import (
+  "fmt"
   "image"
   "image/color"
@@ -132,6 +133,10 @@ func (gc *StackGraphicContext) BeginPath() {
   gc.Current.Path.Clear()
 }

+func (gc *StackGraphicContext) GetPath() draw2d.Path {
+  return *gc.Current.Path.Copy()
+}
+
 func (gc *StackGraphicContext) IsEmpty() bool {
   return gc.Current.Path.IsEmpty()
 }
@@ -191,3 +196,8 @@ func (gc *StackGraphicContext) Restore() {
     oldContext.Previous = nil
   }
 }
+
+func (gc *StackGraphicContext) GetFontName() string {
+  fontData := gc.Current.FontData
+  return fmt.Sprintf("%s:%d:%d:%d", fontData.Name, fontData.Family, fontData.Style, gc.Current.FontSize)
+}
@@ -0,0 +1,68 @@
+package draw2dbase
+
+import "github.com/llgcode/draw2d"
+
+var glyphCache map[string]map[rune]*Glyph
+
+func init() {
+  glyphCache = make(map[string]map[rune]*Glyph)
+}
+
+// FetchGlyph fetches a glyph from the cache, calling renderGlyph first if it doesn't already exist
+func FetchGlyph(gc draw2d.GraphicContext, fontName string, chr rune) *Glyph {
+  if glyphCache[fontName] == nil {
+    glyphCache[fontName] = make(map[rune]*Glyph, 60)
+  }
+  if glyphCache[fontName][chr] == nil {
+    glyphCache[fontName][chr] = renderGlyph(gc, fontName, chr)
+  }
+  return glyphCache[fontName][chr].Copy()
+}
+
+// renderGlyph renders a glyph then caches and returns it
+func renderGlyph(gc draw2d.GraphicContext, fontName string, chr rune) *Glyph {
+  gc.Save()
+  defer gc.Restore()
+  gc.BeginPath()
+  width := gc.CreateStringPath(string(chr), 0, 0)
+  path := gc.GetPath()
+  return &Glyph{
+    path:  &path,
+    Width: width,
+  }
+}
+
+// Glyph represents a rune which has been converted to a Path and width
+type Glyph struct {
+  // path represents a glyph, it is always at (0, 0)
+  path *draw2d.Path
+  // Width of the glyph
+  Width float64
+}
+
+func (g *Glyph) Copy() *Glyph {
+  return &Glyph{
+    path:  g.path.Copy(),
+    Width: g.Width,
+  }
+}
+
+// Fill copies a glyph from the cache, and fills it
+func (g *Glyph) Fill(gc draw2d.GraphicContext, x, y float64) float64 {
+  gc.Save()
+  gc.BeginPath()
+  gc.Translate(x, y)
+  gc.Fill(g.path)
+  gc.Restore()
+  return g.Width
+}
+
+// Stroke fetches a glyph from the cache, and strokes it
+func (g *Glyph) Stroke(gc draw2d.GraphicContext, x, y float64) float64 {
+  gc.Save()
+  gc.BeginPath()
+  gc.Translate(x, y)
+  gc.Stroke(g.path)
+  gc.Restore()
+  return g.Width
+}
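A sketch of how the cache is meant to be consumed from a drawing loop; gc is assumed to be a graphic context that already has a font configured and implements the GetFontName addition from this commit:

    x := 10.0
    for _, r := range "hello" {
        g := draw2dbase.FetchGlyph(gc, gc.GetFontName(), r)
        x += g.Fill(gc, x, 100) // Fill returns the glyph width, advancing the pen position
    }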
@@ -117,27 +117,57 @@ func (gc *GraphicContext) DrawImage(img image.Image) {
 }

 // FillString draws the text at point (0, 0)
-func (gc *GraphicContext) FillString(text string) (cursor float64) {
+func (gc *GraphicContext) FillString(text string) (width float64) {
   return gc.FillStringAt(text, 0, 0)
 }

 // FillStringAt draws the text at the specified point (x, y)
-func (gc *GraphicContext) FillStringAt(text string, x, y float64) (cursor float64) {
-  width := gc.CreateStringPath(text, x, y)
-  gc.Fill()
-  return width
+func (gc *GraphicContext) FillStringAt(text string, x, y float64) (width float64) {
+  f, err := gc.loadCurrentFont()
+  if err != nil {
+    log.Println(err)
+    return 0.0
+  }
+  startx := x
+  prev, hasPrev := truetype.Index(0), false
+  fontName := gc.GetFontName()
+  for _, r := range text {
+    index := f.Index(r)
+    if hasPrev {
+      x += fUnitsToFloat64(f.Kern(fixed.Int26_6(gc.Current.Scale), prev, index))
+    }
+    glyph := draw2dbase.FetchGlyph(gc, fontName, r)
+    x += glyph.Fill(gc, x, y)
+    prev, hasPrev = index, true
+  }
+  return x - startx
 }

 // StrokeString draws the contour of the text at point (0, 0)
-func (gc *GraphicContext) StrokeString(text string) (cursor float64) {
+func (gc *GraphicContext) StrokeString(text string) (width float64) {
   return gc.StrokeStringAt(text, 0, 0)
 }

 // StrokeStringAt draws the contour of the text at point (x, y)
-func (gc *GraphicContext) StrokeStringAt(text string, x, y float64) (cursor float64) {
-  width := gc.CreateStringPath(text, x, y)
-  gc.Stroke()
-  return width
+func (gc *GraphicContext) StrokeStringAt(text string, x, y float64) (width float64) {
+  f, err := gc.loadCurrentFont()
+  if err != nil {
+    log.Println(err)
+    return 0.0
+  }
+  startx := x
+  prev, hasPrev := truetype.Index(0), false
+  fontName := gc.GetFontName()
+  for _, r := range text {
+    index := f.Index(r)
+    if hasPrev {
+      x += fUnitsToFloat64(f.Kern(fixed.Int26_6(gc.Current.Scale), prev, index))
+    }
+    glyph := draw2dbase.FetchGlyph(gc, fontName, r)
+    x += glyph.Stroke(gc, x, y)
+    prev, hasPrev = index, true
+  }
+  return x - startx
 }

 func (gc *GraphicContext) loadCurrentFont() (*truetype.Font, error) {
@@ -14,6 +14,8 @@ type GraphicContext interface {
   PathBuilder
   // BeginPath creates a new path
   BeginPath()
+  // GetPath copies the current path, then returns it
+  GetPath() Path
   // GetMatrixTransform returns the current transformation matrix
   GetMatrixTransform() Matrix
   // SetMatrixTransform sets the current transformation matrix
@@ -48,6 +50,8 @@ type GraphicContext interface {
   SetFontData(fontData FontData)
   // GetFontData gets the current FontData
   GetFontData() FontData
+  // GetFontName gets the current FontData as a string
+  GetFontName() string
   // DrawImage draws the raster image in the current canvas
   DrawImage(image image.Image)
   // Save the context and push it to the context stack
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
@@ -1,7 +1,12 @@
 package runewidth

-var EastAsianWidth = IsEastAsian()
-var DefaultCondition = &Condition{EastAsianWidth}
+var (
+  // EastAsianWidth will be set true if the current locale is CJK
+  EastAsianWidth = IsEastAsian()
+
+  // DefaultCondition is a condition in current locale
+  DefaultCondition = &Condition{EastAsianWidth}
+)

 type interval struct {
   first rune
@@ -302,10 +307,12 @@ var ctypes = []intervalType{
   {0x100000, 0x10FFFE, ambiguous},
 }

+// Condition have flag EastAsianWidth whether the current locale is CJK or not.
 type Condition struct {
   EastAsianWidth bool
 }

+// NewCondition return new instance of Condition which is current locale.
 func NewCondition() *Condition {
   return &Condition{EastAsianWidth}
 }
@@ -344,6 +351,7 @@ func (c *Condition) RuneWidth(r rune) int {
   return 1
 }

+// StringWidth return width as you can see
 func (c *Condition) StringWidth(s string) (width int) {
   for _, r := range []rune(s) {
     width += c.RuneWidth(r)
@@ -351,6 +359,7 @@ func (c *Condition) StringWidth(s string) (width int) {
   return width
 }

+// Truncate return string truncated with w cells
 func (c *Condition) Truncate(s string, w int, tail string) string {
   if c.StringWidth(s) <= w {
     return s
@@ -370,6 +379,7 @@ func (c *Condition) Truncate(s string, w int, tail string) string {
   return string(r[0:i]) + tail
 }

+// Wrap return string wrapped with w cells
 func (c *Condition) Wrap(s string, w int) string {
   width := 0
   out := ""
@@ -392,6 +402,7 @@ func (c *Condition) Wrap(s string, w int) string {
   return out
 }

+// FillLeft return string filled in left by spaces in w cells
 func (c *Condition) FillLeft(s string, w int) string {
   width := c.StringWidth(s)
   count := w - width
@@ -405,6 +416,7 @@ func (c *Condition) FillLeft(s string, w int) string {
   return s
 }

+// FillRight return string filled in left by spaces in w cells
 func (c *Condition) FillRight(s string, w int) string {
   width := c.StringWidth(s)
   count := w - width
@@ -438,27 +450,32 @@ func IsAmbiguousWidth(r rune) bool {
   return ct(r) == ambiguous
 }

-// IsAmbiguousWidth returns whether is ambiguous width or not.
+// IsNeutralWidth returns whether is neutral width or not.
 func IsNeutralWidth(r rune) bool {
   return ct(r) == neutral
 }

+// StringWidth return width as you can see
 func StringWidth(s string) (width int) {
   return DefaultCondition.StringWidth(s)
 }

+// Truncate return string truncated with w cells
 func Truncate(s string, w int, tail string) string {
   return DefaultCondition.Truncate(s, w, tail)
 }

+// Wrap return string wrapped with w cells
 func Wrap(s string, w int) string {
   return DefaultCondition.Wrap(s, w)
 }

+// FillLeft return string filled in left by spaces in w cells
 func FillLeft(s string, w int) string {
   return DefaultCondition.FillLeft(s, w)
 }

+// FillRight return string filled in left by spaces in w cells
 func FillRight(s string, w int) string {
   return DefaultCondition.FillRight(s, w)
 }
@@ -10,20 +10,25 @@ import (

 var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)

-func IsEastAsian() bool {
-  locale := os.Getenv("LC_CTYPE")
-  if locale == "" {
-    locale = os.Getenv("LANG")
-  }
-
-  // ignore C locale
-  if locale == "POSIX" || locale == "C" {
-    return false
-  }
-  if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
-    return false
-  }
+var mblenTable = map[string]int{
+  "utf-8":   6,
+  "utf8":    6,
+  "jis":     8,
+  "eucjp":   3,
+  "euckr":   2,
+  "euccn":   2,
+  "sjis":    2,
+  "cp932":   2,
+  "cp51932": 2,
+  "cp936":   2,
+  "cp949":   2,
+  "cp950":   2,
+  "big5":    2,
+  "gbk":     2,
+  "gb2312":  2,
+}

+func isEastAsian(locale string) bool {
   charset := strings.ToLower(locale)
   r := reLoc.FindStringSubmatch(locale)
   if len(r) == 2 {
@@ -40,26 +45,11 @@ func IsEastAsian() bool {
       break
     }
   }
-  mbc_max := 1
-  switch charset {
-  case "utf-8", "utf8":
-    mbc_max = 6
-  case "jis":
-    mbc_max = 8
-  case "eucjp":
-    mbc_max = 3
-  case "euckr", "euccn":
-    mbc_max = 2
-  case "sjis", "cp932", "cp51932", "cp936", "cp949", "cp950":
-    mbc_max = 2
-  case "big5":
-    mbc_max = 2
-  case "gbk", "gb2312":
-    mbc_max = 2
+  max := 1
+  if m, ok := mblenTable[charset]; ok {
+    max = m
   }
-  if mbc_max > 1 && (charset[0] != 'u' ||
+  if max > 1 && (charset[0] != 'u' ||
     strings.HasPrefix(locale, "ja") ||
     strings.HasPrefix(locale, "ko") ||
     strings.HasPrefix(locale, "zh")) {
@@ -67,3 +57,21 @@ func IsEastAsian() bool {
   }
   return false
 }
+
+// IsEastAsian return true if the current locale is CJK
+func IsEastAsian() bool {
+  locale := os.Getenv("LC_CTYPE")
+  if locale == "" {
+    locale = os.Getenv("LANG")
+  }
+
+  // ignore C locale
+  if locale == "POSIX" || locale == "C" {
+    return false
+  }
+  if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
+    return false
+  }
+
+  return isEastAsian(locale)
+}
@@ -9,6 +9,7 @@ var (
   procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
 )

+// IsEastAsian return true if the current locale is CJK
 func IsEastAsian() bool {
   r1, _, _ := procGetConsoleOutputCP.Call()
   if r1 == 0 {
@@ -178,7 +178,7 @@
    APPENDIX: How to apply the Apache License to your work.

    To apply the Apache License to your work, attach the following
-   boilerplate notice, with the fields enclosed by brackets "[]"
+   boilerplate notice, with the fields enclosed by brackets "{}"
    replaced with your own identifying information. (Don't include
    the brackets!) The text should be enclosed in the appropriate
    comment syntax for the file format. We also recommend that a
@@ -186,7 +186,7 @@
    same "printed page" as the copyright notice for easier
    identification within third-party archives.

-   Copyright 2013 Matt T. Proud
+   Copyright {yyyy} {name of copyright owner}

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -0,0 +1 @@
+Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
@@ -38,7 +38,7 @@ var errInvalidVarint = errors.New("invalid varint32 encountered")
 func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
   // Per AbstractParser#parsePartialDelimitedFrom with
   // CodedInputStream#readRawVarint32.
-  headerBuf := make([]byte, binary.MaxVarintLen32)
+  var headerBuf [binary.MaxVarintLen32]byte
   var bytesRead, varIntBytes int
   var messageLength uint64
   for varIntBytes == 0 { // i.e. no varint has been decoded yet.
@@ -33,8 +33,8 @@ func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
     return 0, err
   }

-  buf := make([]byte, binary.MaxVarintLen32)
-  encodedLength := binary.PutUvarint(buf, uint64(len(buffer)))
+  var buf [binary.MaxVarintLen32]byte
+  encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))

   sync, err := w.Write(buf[:encodedLength])
   if err != nil {
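The stack-allocated varint buffers do not change the pbutil API; a round-trip still looks like this (msg is assumed to be a populated proto.Message, e.g. a *dto.MetricFamily):

    var buf bytes.Buffer
    if _, err := pbutil.WriteDelimited(&buf, msg); err != nil {
        log.Fatal(err)
    }
    out := &dto.MetricFamily{}
    if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
        log.Fatal(err)
    }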
@@ -46,10 +46,7 @@ func ResponseFormat(h http.Header) Format {
     return FmtUnknown
   }

-  const (
-    textType = "text/plain"
-    jsonType = "application/json"
-  )
+  const textType = "text/plain"

   switch mediatype {
   case ProtoType:
@@ -66,22 +63,6 @@ func ResponseFormat(h http.Header) Format {
       return FmtUnknown
     }
     return FmtText
-
-  case jsonType:
-    var prometheusAPIVersion string
-
-    if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
-      prometheusAPIVersion = params["version"]
-    } else {
-      prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
-    }
-
-    switch prometheusAPIVersion {
-    case "0.0.2", "":
-      return fmtJSON2
-    default:
-      return FmtUnknown
-    }
   }

   return FmtUnknown
@@ -93,8 +74,6 @@ func NewDecoder(r io.Reader, format Format) Decoder {
   switch format {
   case FmtProtoDelim:
     return &protoDecoder{r: r}
-  case fmtJSON2:
-    return newJSON2Decoder(r)
   }
   return &textDecoder{r: r}
 }
@@ -107,10 +86,32 @@ type protoDecoder struct {
 // Decode implements the Decoder interface.
 func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
   _, err := pbutil.ReadDelimited(d.r, v)
+  if err != nil {
     return err
+  }
+  if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+    return fmt.Errorf("invalid metric name %q", v.GetName())
+  }
+  for _, m := range v.GetMetric() {
+    if m == nil {
+      continue
+    }
+    for _, l := range m.GetLabel() {
+      if l == nil {
+        continue
+      }
+      if !model.LabelValue(l.GetValue()).IsValid() {
+        return fmt.Errorf("invalid label value %q", l.GetValue())
+      }
+      if !model.LabelName(l.GetName()).IsValid() {
+        return fmt.Errorf("invalid label name %q", l.GetName())
+      }
+    }
+  }
+  return nil
 }

-// textDecoder implements the Decoder interface for the text protcol.
+// textDecoder implements the Decoder interface for the text protocol.
 type textDecoder struct {
   r io.Reader
   p TextParser
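A sketch of reading a delimited protobuf metrics stream through the validating decoder (r is assumed to be an io.Reader carrying length-delimited MetricFamily messages):

    dec := expfmt.NewDecoder(r, expfmt.FmtProtoDelim)
    for {
        var mf dto.MetricFamily
        if err := dec.Decode(&mf); err != nil {
            break // io.EOF, or a validation error such as "invalid label name"
        }
        fmt.Println(mf.GetName())
    }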
@@ -18,9 +18,9 @@ import (
   "io"
   "net/http"

-  "bitbucket.org/ww/goautoneg"
   "github.com/golang/protobuf/proto"
   "github.com/matttproud/golang_protobuf_extensions/pbutil"
+  "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"

   dto "github.com/prometheus/client_model/go"
 )
@ -29,9 +29,6 @@ const (
|
||||||
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
|
||||||
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
FmtProtoText Format = ProtoFmt + ` encoding=text`
|
||||||
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
|
||||||
|
|
||||||
// fmtJSON2 is hidden as it is deprecated.
|
|
||||||
fmtJSON2 Format = `application/json; version=0.0.2`
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|
|
||||||
|
|
@ -20,8 +20,8 @@ import "bytes"
|
||||||
|
|
||||||
// Fuzz text metric parser with with github.com/dvyukov/go-fuzz:
|
// Fuzz text metric parser with with github.com/dvyukov/go-fuzz:
|
||||||
//
|
//
|
||||||
// go-fuzz-build github.com/prometheus/client_golang/text
|
// go-fuzz-build github.com/prometheus/common/expfmt
|
||||||
// go-fuzz -bin text-fuzz.zip -workdir fuzz
|
// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
|
||||||
//
|
//
|
||||||
// Further input samples should go in the folder fuzz/corpus.
|
// Further input samples should go in the folder fuzz/corpus.
|
||||||
func Fuzz(in []byte) int {
|
func Fuzz(in []byte) int {
|
||||||
|
|
|
||||||
|
|
@@ -1,162 +0,0 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package expfmt

import (
"encoding/json"
"fmt"
"io"
"sort"

"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"

"github.com/prometheus/common/model"
)

type json2Decoder struct {
dec *json.Decoder
fams []*dto.MetricFamily
}

func newJSON2Decoder(r io.Reader) Decoder {
return &json2Decoder{
dec: json.NewDecoder(r),
}
}

type histogram002 struct {
Labels model.LabelSet `json:"labels"`
Values map[string]float64 `json:"value"`
}

type counter002 struct {
Labels model.LabelSet `json:"labels"`
Value float64 `json:"value"`
}

func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair {
labels := base.Clone().Merge(ext)
delete(labels, model.MetricNameLabel)

names := make([]string, 0, len(labels))
for ln := range labels {
names = append(names, string(ln))
}
sort.Strings(names)

pairs := make([]*dto.LabelPair, 0, len(labels))

for _, ln := range names {
lv := labels[model.LabelName(ln)]

pairs = append(pairs, &dto.LabelPair{
Name: proto.String(ln),
Value: proto.String(string(lv)),
})
}

return pairs
}

func (d *json2Decoder) more() error {
var entities []struct {
BaseLabels model.LabelSet `json:"baseLabels"`
Docstring string `json:"docstring"`
Metric struct {
Type string `json:"type"`
Values json.RawMessage `json:"value"`
} `json:"metric"`
}

if err := d.dec.Decode(&entities); err != nil {
return err
}
for _, e := range entities {
f := &dto.MetricFamily{
Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
Help: proto.String(e.Docstring),
Type: dto.MetricType_UNTYPED.Enum(),
Metric: []*dto.Metric{},
}

d.fams = append(d.fams, f)

switch e.Metric.Type {
case "counter", "gauge":
var values []counter002

if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
}

for _, ctr := range values {
f.Metric = append(f.Metric, &dto.Metric{
Label: protoLabelSet(e.BaseLabels, ctr.Labels),
Untyped: &dto.Untyped{
Value: proto.Float64(ctr.Value),
},
})
}

case "histogram":
var values []histogram002

if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
}

for _, hist := range values {
quants := make([]string, 0, len(values))
for q := range hist.Values {
quants = append(quants, q)
}

sort.Strings(quants)

for _, q := range quants {
value := hist.Values[q]
// The correct label is "quantile" but to not break old expressions
// this remains "percentile"
hist.Labels["percentile"] = model.LabelValue(q)

f.Metric = append(f.Metric, &dto.Metric{
Label: protoLabelSet(e.BaseLabels, hist.Labels),
Untyped: &dto.Untyped{
Value: proto.Float64(value),
},
})
}
}

default:
return fmt.Errorf("unknown metric type %q", e.Metric.Type)
}
}
return nil
}

// Decode implements the Decoder interface.
func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
if len(d.fams) == 0 {
if err := d.more(); err != nil {
return err
}
}

*v = *d.fams[0]
d.fams = d.fams[1:]

return nil
}
@@ -14,7 +14,6 @@
package expfmt

import (
"bytes"
"fmt"
"io"
"math"

@@ -26,9 +25,12 @@ import (

// MetricFamilyToText converts a MetricFamily proto message into text format and
// writes the resulting lines to 'out'. It returns the number of bytes written
// and any error encountered. This function does not perform checks on the
// content of the metric and label names, i.e. invalid metric or label names
// and any error encountered. The output will have the same order as the input,
// no further sorting is performed. Furthermore, this function assumes the input
// is already sanitized and does not perform any sanity checks. If the input
// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
var written int

@@ -285,21 +287,17 @@ func labelPairsToText(
return written, nil
}

var (
escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
)

// escapeString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
func escapeString(v string, includeDoubleQuote bool) string {
result := bytes.NewBuffer(make([]byte, 0, len(v)))
for _, c := range v {
switch {
case c == '\\':
result.WriteString(`\\`)
case includeDoubleQuote && c == '"':
result.WriteString(`\"`)
case c == '\n':
result.WriteString(`\n`)
default:
result.WriteRune(c)
if includeDoubleQuote {
return escapeWithDoubleQuote.Replace(v)
}
}
return result.String()
return escape.Replace(v)
}
@@ -47,7 +47,7 @@ func (e ParseError) Error() string {
}

// TextParser is used to parse the simple and flat text-based exchange format. Its
// nil value is ready to use.
// zero value is ready to use.
type TextParser struct {
metricFamiliesByName map[string]*dto.MetricFamily
buf *bufio.Reader // Where the parsed input is read through.

@@ -108,6 +108,13 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
delete(p.metricFamiliesByName, k)
}
}
// If p.err is io.EOF now, we have run into a premature end of the input
// stream. Turn this error into something nicer and more
// meaningful. (io.EOF is often used as a signal for the legitimate end
// of an input stream.)
if p.err == io.EOF {
p.parseError("unexpected end of input stream")
}
return p.metricFamiliesByName, p.err
}
@@ -37,6 +37,7 @@ type Alert struct {
// The known time range for this alert. Both ends are optional.
StartsAt time.Time `json:"startsAt,omitempty"`
EndsAt time.Time `json:"endsAt,omitempty"`
GeneratorURL string `json:"generatorURL"`
}

// Name returns the name of the alert. It is equivalent to the "alertname" label.

@@ -60,10 +61,16 @@ func (a *Alert) String() string {

// Resolved returns true iff the activity interval ended in the past.
func (a *Alert) Resolved() bool {
return a.ResolvedAt(time.Now())
}

// ResolvedAt returns true off the activity interval ended before
// the given timestamp.
func (a *Alert) ResolvedAt(ts time.Time) bool {
if a.EndsAt.IsZero() {
return false
}
return !a.EndsAt.After(time.Now())
return !a.EndsAt.After(ts)
}

// Status returns the status of the alert.

@@ -74,6 +81,26 @@ func (a *Alert) Status() AlertStatus {
return AlertFiring
}

// Validate checks whether the alert data is inconsistent.
func (a *Alert) Validate() error {
if a.StartsAt.IsZero() {
return fmt.Errorf("start time missing")
}
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
return fmt.Errorf("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
return fmt.Errorf("invalid label set: %s", err)
}
if len(a.Labels) == 0 {
return fmt.Errorf("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
return fmt.Errorf("invalid annotations: %s", err)
}
return nil
}

// Alert is a list of alerts that can be sorted in chronological order.
type Alerts []*Alert
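A minimal usage sketch of the Alert helpers added above; it is not part of the vendored diff, and the label values are made up for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighErrorRate"}, // hypothetical alert
		StartsAt: time.Now(),
	}
	if err := a.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
		return
	}
	// EndsAt is zero, so the alert is still firing.
	fmt.Println("resolved:", a.Resolved())
}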
@@ -0,0 +1,42 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package model

// Inline and byte-free variant of hash/fnv's fnv64a.

const (
offset64 = 14695981039346656037
prime64 = 1099511628211
)

// hashNew initializies a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}

// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}

// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
h ^= uint64(b)
h *= prime64
return h
}
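The FNV-1a helpers above are unexported, so the sketch below (not part of the diff) simply replays the same fold by hand, with the constants copied from the new file, to show what a hashNew/hashAdd/hashAddByte sequence computes.

package main

import "fmt"

const (
	offset64 = 14695981039346656037
	prime64  = 1099511628211
)

func main() {
	// Equivalent to: sum := hashNew(); sum = hashAdd(sum, "job"); sum = hashAddByte(sum, 0xff)
	sum := uint64(offset64)
	for i := 0; i < len("job"); i++ {
		sum ^= uint64("job"[i])
		sum *= prime64
	}
	sum ^= uint64(0xff) // separator byte
	sum *= prime64
	fmt.Printf("%#x\n", sum)
}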
@@ -17,8 +17,8 @@ import (
"encoding/json"
"fmt"
"regexp"
"sort"
"strings"
"unicode/utf8"
)

const (

@@ -80,20 +80,37 @@ const (
QuantileLabel = "quantile"
)

// LabelNameRE is a regular expression matching valid label names.
// LabelNameRE is a regular expression matching valid label names. Note that the
// IsValid method of LabelName performs the same check but faster than a match
// with this regular expression.
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")

// A LabelName is a key for a LabelSet or Metric. It has a value associated
// therewith.
type LabelName string

// IsValid is true iff the label name matches the pattern of LabelNameRE. This
// method, however, does not use LabelNameRE for the check but a much faster
// hardcoded implementation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
}
for i, b := range ln {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
return false
}
}
return true
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
}
if !LabelNameRE.MatchString(s) {
if !LabelName(s).IsValid() {
return fmt.Errorf("%q is not a valid label name", s)
}
*ln = LabelName(s)

@@ -106,7 +123,7 @@ func (ln *LabelName) UnmarshalJSON(b []byte) error {
if err := json.Unmarshal(b, &s); err != nil {
return err
}
if !LabelNameRE.MatchString(s) {
if !LabelName(s).IsValid() {
return fmt.Errorf("%q is not a valid label name", s)
}
*ln = LabelName(s)

@@ -139,6 +156,11 @@ func (l LabelNames) String() string {
// A LabelValue is an associated value for a LabelName.
type LabelValue string

// IsValid returns true iff the string is a valid UTF8.
func (lv LabelValue) IsValid() bool {
return utf8.ValidString(string(lv))
}

// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
type LabelValues []LabelValue

@@ -147,7 +169,7 @@ func (l LabelValues) Len() int {
}

func (l LabelValues) Less(i, j int) bool {
return sort.StringsAreSorted([]string{string(l[i]), string(l[j])})
return string(l[i]) < string(l[j])
}

func (l LabelValues) Swap(i, j int) {
@@ -27,6 +27,21 @@ import (
// match.
type LabelSet map[LabelName]LabelValue

// Validate checks whether all names and values in the label set
// are valid.
func (ls LabelSet) Validate() error {
for ln, lv := range ls {
if !ln.IsValid() {
return fmt.Errorf("invalid name %q", ln)
}
if !lv.IsValid() {
return fmt.Errorf("invalid value %q", lv)
}
}
return nil
}

// Equal returns true iff both label sets have exactly the same key/value pairs.
func (ls LabelSet) Equal(o LabelSet) bool {
if len(ls) != len(o) {
return false

@@ -90,6 +105,7 @@ func (ls LabelSet) Before(o LabelSet) bool {
return false
}

// Clone returns a copy of the label set.
func (ls LabelSet) Clone() LabelSet {
lsn := make(LabelSet, len(ls))
for ln, lv := range ls {

@@ -144,7 +160,7 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
// LabelName as a string and does not call its UnmarshalJSON method.
// Thus, we have to replicate the behavior here.
for ln := range m {
if !LabelNameRE.MatchString(string(ln)) {
if !ln.IsValid() {
return fmt.Errorf("%q is not a valid label name", ln)
}
}
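A short sketch (not from the diff) of the new validation helpers on label names and label sets:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelName("instance").IsValid()) // true
	fmt.Println(model.LabelName("0bad").IsValid())     // false: may not start with a digit

	ls := model.LabelSet{"job": "node", "instance": "localhost:9100"}
	if err := ls.Validate(); err != nil {
		fmt.Println("invalid label set:", err)
	}
}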
@@ -15,11 +15,18 @@ package model

import (
"fmt"
"regexp"
"sort"
"strings"
)

var separator = []byte{0}
var (
separator = []byte{0}
// MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression.
MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
)

// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.

@@ -79,3 +86,18 @@ func (m Metric) Fingerprint() Fingerprint {
func (m Metric) FastFingerprint() Fingerprint {
return LabelSet(m).FastFingerprint()
}

// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
func IsValidMetricName(n LabelValue) bool {
if len(n) == 0 {
return false
}
for i, b := range n {
if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
return false
}
}
return true
}
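And a corresponding sketch (again, not part of the diff) for metric names, which additionally allow colons:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("job:request_rate:5m")) // true: colons allowed
	fmt.Println(model.IsValidMetricName("2xx_responses"))       // false: leading digit
}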
@@ -12,5 +12,5 @@
// limitations under the License.

// Package model contains common data structures that are shared across
// Prometheus componenets and libraries.
// Prometheus components and libraries.
package model
@@ -14,11 +14,7 @@
package model

import (
"bytes"
"hash"
"hash/fnv"
"sort"
"sync"
)

// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is

@@ -28,30 +24,9 @@ const SeparatorByte byte = 255

var (
// cache the signature of an empty label set.
emptyLabelSignature = fnv.New64a().Sum64()
emptyLabelSignature = hashNew()

hashAndBufPool sync.Pool
)

type hashAndBuf struct {
h hash.Hash64
b bytes.Buffer
}

func getHashAndBuf() *hashAndBuf {
hb := hashAndBufPool.Get()
if hb == nil {
return &hashAndBuf{h: fnv.New64a()}
}
return hb.(*hashAndBuf)
}

func putHashAndBuf(hb *hashAndBuf) {
hb.h.Reset()
hb.b.Reset()
hashAndBufPool.Put(hb)
}

// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
// given label set. (Collisions are possible but unlikely if the number of label
// sets the function is applied to is small.)

@@ -66,18 +41,14 @@ func LabelsToSignature(labels map[string]string) uint64 {
}
sort.Strings(labelNames)

hb := getHashAndBuf()
defer putHashAndBuf(hb)
sum := hashNew()

for _, labelName := range labelNames {
hb.b.WriteString(labelName)
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(labels[labelName])
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
sum = hashAdd(sum, labelName)
sum = hashAddByte(sum, SeparatorByte)
sum = hashAdd(sum, labels[labelName])
sum = hashAddByte(sum, SeparatorByte)
}
return hb.h.Sum64()
return sum
}

// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as

@@ -93,18 +64,14 @@ func labelSetToFingerprint(ls LabelSet) Fingerprint {
}
sort.Sort(labelNames)

hb := getHashAndBuf()
defer putHashAndBuf(hb)
sum := hashNew()

for _, labelName := range labelNames {
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(ls[labelName]))
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
sum = hashAdd(sum, string(labelName))
sum = hashAddByte(sum, SeparatorByte)
sum = hashAdd(sum, string(ls[labelName]))
sum = hashAddByte(sum, SeparatorByte)
}
return Fingerprint(hb.h.Sum64())
return Fingerprint(sum)
}

// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a

@@ -116,17 +83,12 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
}

var result uint64
hb := getHashAndBuf()
defer putHashAndBuf(hb)

for labelName, labelValue := range ls {
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(labelValue))
hb.h.Write(hb.b.Bytes())
result ^= hb.h.Sum64()
hb.h.Reset()
hb.b.Reset()
sum := hashNew()
sum = hashAdd(sum, string(labelName))
sum = hashAddByte(sum, SeparatorByte)
sum = hashAdd(sum, string(labelValue))
result ^= sum
}
return Fingerprint(result)
}

@@ -136,24 +98,20 @@ func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
// specified LabelNames into the signature calculation. The labels passed in
// will be sorted by this function.
func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
if len(m) == 0 || len(labels) == 0 {
if len(labels) == 0 {
return emptyLabelSignature
}

sort.Sort(LabelNames(labels))

hb := getHashAndBuf()
defer putHashAndBuf(hb)
sum := hashNew()

for _, label := range labels {
hb.b.WriteString(string(label))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(m[label]))
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
sum = hashAdd(sum, string(label))
sum = hashAddByte(sum, SeparatorByte)
sum = hashAdd(sum, string(m[label]))
sum = hashAddByte(sum, SeparatorByte)
}
return hb.h.Sum64()
return sum
}

// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as

@@ -175,16 +133,12 @@ func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
}
sort.Sort(labelNames)

hb := getHashAndBuf()
defer putHashAndBuf(hb)
sum := hashNew()

for _, labelName := range labelNames {
hb.b.WriteString(string(labelName))
hb.b.WriteByte(SeparatorByte)
hb.b.WriteString(string(m[labelName]))
hb.b.WriteByte(SeparatorByte)
hb.h.Write(hb.b.Bytes())
hb.b.Reset()
sum = hashAdd(sum, string(labelName))
sum = hashAddByte(sum, SeparatorByte)
sum = hashAdd(sum, string(m[labelName]))
sum = hashAddByte(sum, SeparatorByte)
}
return hb.h.Sum64()
return sum
}
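A usage sketch (not part of the diff) of the exported signature functions rewritten above; the label values are illustrative only:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	labels := map[string]string{"job": "node", "instance": "localhost:9100"}
	// Identical label sets always produce the same signature, independent of map order.
	fmt.Println(model.LabelsToSignature(labels))

	m := model.Metric{"__name__": "up", "job": "node", "instance": "localhost:9100"}
	// Only the listed label names contribute to the signature.
	fmt.Println(model.SignatureForLabels(m, "job", "instance"))
}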
@@ -44,6 +44,21 @@ func (m *Matcher) UnmarshalJSON(b []byte) error {
return nil
}

// Validate returns true iff all fields of the matcher have valid values.
func (m *Matcher) Validate() error {
if !m.Name.IsValid() {
return fmt.Errorf("invalid name %q", m.Name)
}
if m.IsRegex {
if _, err := regexp.Compile(m.Value); err != nil {
return fmt.Errorf("invalid regular expression %q", m.Value)
}
} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
return fmt.Errorf("invalid value %q", m.Value)
}
return nil
}

// Silence defines the representation of a silence definiton
// in the Prometheus eco-system.
type Silence struct {

@@ -58,3 +73,34 @@ type Silence struct {
CreatedBy string `json:"createdBy"`
Comment string `json:"comment,omitempty"`
}

// Validate returns true iff all fields of the silence have valid values.
func (s *Silence) Validate() error {
if len(s.Matchers) == 0 {
return fmt.Errorf("at least one matcher required")
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
return fmt.Errorf("invalid matcher: %s", err)
}
}
if s.StartsAt.IsZero() {
return fmt.Errorf("start time missing")
}
if s.EndsAt.IsZero() {
return fmt.Errorf("end time missing")
}
if s.EndsAt.Before(s.StartsAt) {
return fmt.Errorf("start time must be before end time")
}
if s.CreatedBy == "" {
return fmt.Errorf("creator information missing")
}
if s.Comment == "" {
return fmt.Errorf("comment missing")
}
if s.CreatedAt.IsZero() {
return fmt.Errorf("creation timestamp missing")
}
return nil
}
@@ -163,51 +163,70 @@ func (t *Time) UnmarshalJSON(b []byte) error {
// This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration

var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")

// StringToDuration parses a string into a time.Duration, assuming that a year
// a day always has 24h.
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
matches := durationRE.FindStringSubmatch(durationStr)
if len(matches) != 3 {
return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
}
durSeconds, _ := strconv.Atoi(matches[1])
dur := time.Duration(durSeconds) * time.Second
unit := matches[2]
switch unit {
var (
n, _ = strconv.Atoi(matches[1])
dur = time.Duration(n) * time.Millisecond
)
switch unit := matches[2]; unit {
case "y":
dur *= 1000 * 60 * 60 * 24 * 365
case "w":
dur *= 1000 * 60 * 60 * 24 * 7
case "d":
dur *= 60 * 60 * 24
dur *= 1000 * 60 * 60 * 24
case "h":
dur *= 60 * 60
dur *= 1000 * 60 * 60
case "m":
dur *= 60
dur *= 1000 * 60
case "s":
dur *= 1
dur *= 1000
case "ms":
// Value already correct
default:
return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
}
return Duration(dur), nil
}

var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")

func (d Duration) String() string {
seconds := int64(time.Duration(d) / time.Second)
var (
ms = int64(time.Duration(d) / time.Millisecond)
unit = "ms"
)
factors := map[string]int64{
"d": 60 * 60 * 24,
"h": 60 * 60,
"m": 60,
"s": 1,
"y": 1000 * 60 * 60 * 24 * 365,
"w": 1000 * 60 * 60 * 24 * 7,
"d": 1000 * 60 * 60 * 24,
"h": 1000 * 60 * 60,
"m": 1000 * 60,
"s": 1000,
"ms": 1,
}
unit := "s"
switch int64(0) {
case seconds % factors["d"]:
case ms % factors["y"]:
unit = "y"
case ms % factors["w"]:
unit = "w"
case ms % factors["d"]:
unit = "d"
case seconds % factors["h"]:
case ms % factors["h"]:
unit = "h"
case seconds % factors["m"]:
case ms % factors["m"]:
unit = "m"
case ms % factors["s"]:
unit = "s"
}
return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
return fmt.Sprintf("%v%v", ms/factors[unit], unit)
}

// MarshalYAML implements the yaml.Marshaler interface.
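A small sketch (not from the diff) of the extended duration handling: parsing now works in milliseconds and String picks the largest unit that divides the value evenly.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("90m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String()) // 90m: not a whole number of hours

	w, _ := model.ParseDuration("2w")
	fmt.Println(w.String()) // 2w: the new week unit round-trips
}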
@@ -16,11 +16,28 @@ package model
import (
"encoding/json"
"fmt"
"math"
"sort"
"strconv"
"strings"
)

var (
// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
// non-existing sample pair. It is a SamplePair with timestamp Earliest and
// value 0.0. Note that the natural zero value of SamplePair has a timestamp
// of 0, which is possible to appear in a real SamplePair and thus not
// suitable to signal a non-existing SamplePair.
ZeroSamplePair = SamplePair{Timestamp: Earliest}

// ZeroSample is the pseudo zero-value of Sample used to signal a
// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
// and metric nil. Note that the natural zero value of Sample has a timestamp
// of 0, which is possible to appear in a real Sample and thus not suitable
// to signal a non-existing Sample.
ZeroSample = Sample{Timestamp: Earliest}
)

// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64

@@ -43,8 +60,14 @@ func (v *SampleValue) UnmarshalJSON(b []byte) error {
return nil
}

// Equal returns true if the value of v and o is equal or if both are NaN. Note
// that v==o is false if both are NaN. If you want the conventional float
// behavior, use == to compare two SampleValues.
func (v SampleValue) Equal(o SampleValue) bool {
return v == o
if v == o {
return true
}
return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
}

func (v SampleValue) String() string {

@@ -77,9 +100,9 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
}

// Equal returns true if this SamplePair and o have equal Values and equal
// Timestamps.
// Timestamps. The sematics of Value equality is defined by SampleValue.Equal.
func (s *SamplePair) Equal(o *SamplePair) bool {
return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
}

func (s SamplePair) String() string {

@@ -93,7 +116,8 @@ type Sample struct {
Timestamp Time `json:"timestamp"`
}

// Equal compares first the metrics, then the timestamp, then the value.
// Equal compares first the metrics, then the timestamp, then the value. The
// sematics of value equality is defined by SampleValue.Equal.
func (s *Sample) Equal(o *Sample) bool {
if s == o {
return true

@@ -105,7 +129,7 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
if s.Value != o.Value {
if s.Value.Equal(o.Value) {
return false
}
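The NaN-aware equality added above differs from the plain float comparison; a quick sketch (not part of the diff):

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	nan := model.SampleValue(math.NaN())
	fmt.Println(nan == nan)     // false: IEEE 754 semantics
	fmt.Println(nan.Equal(nan)) // true: Equal treats two NaNs as equal
}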
@@ -27,14 +27,7 @@ func NewFS(mountPoint string) (FS, error) {
return FS(mountPoint), nil
}

func (fs FS) stat(p string) (os.FileInfo, error) {
return os.Stat(path.Join(string(fs), p))
}

func (fs FS) open(p string) (*os.File, error) {
return os.Open(path.Join(string(fs), p))
}

func (fs FS) readlink(p string) (string, error) {
return os.Readlink(path.Join(string(fs), p))
// Path returns the path of the given subsystem relative to the procfs root.
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
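A minimal sketch (not from the diff) of the new exported Path helper that replaces the unexported open/stat/readlink wrappers:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	// Path joins the given elements onto the filesystem mount point.
	fmt.Println(fs.Path("net", "ip_vs_stats")) // /proc/net/ip_vs_stats
}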
@@ -8,6 +8,7 @@ import (
"io"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
)

@@ -58,7 +59,7 @@ func NewIPVSStats() (IPVSStats, error) {

// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func (fs FS) NewIPVSStats() (IPVSStats, error) {
file, err := fs.open("net/ip_vs_stats")
file, err := os.Open(fs.Path("net/ip_vs_stats"))
if err != nil {
return IPVSStats{}, err
}

@@ -127,7 +128,7 @@ func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {

// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
file, err := fs.open("net/ip_vs")
file, err := os.Open(fs.Path("net/ip_vs"))
if err != nil {
return nil, err
}
@@ -3,7 +3,6 @@ package procfs
import (
"fmt"
"io/ioutil"
"path"
"regexp"
"strconv"
"strings"

@@ -32,36 +31,22 @@ type MDStat struct {

// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
mdStatusFilePath := path.Join(string(fs), "mdstat")
mdStatusFilePath := fs.Path("mdstat")
content, err := ioutil.ReadFile(mdStatusFilePath)
if err != nil {
return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
}

mdStatusFile := string(content)
lines := strings.Split(mdStatusFile, "\n")
var currentMD string

// Each md has at least the deviceline, statusline and one empty line afterwards
// so we will have probably something of the order len(lines)/3 devices
// so we use that for preallocation.
estimateMDs := len(lines) / 3
mdStates := make([]MDStat, 0, estimateMDs)
mdStates := []MDStat{}
lines := strings.Split(string(content), "\n")

for i, l := range lines {
if l == "" {
// Skip entirely empty lines.
continue
}

if l[0] == ' ' {
// Those lines are not the beginning of a md-section.
continue
}

if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
// We aren't interested in lines with general info.
continue
}

@@ -69,32 +54,30 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
if len(mainLine) < 3 {
return mdStates, fmt.Errorf("error parsing mdline: %s", l)
}
currentMD = mainLine[0] // name of md-device
activityState := mainLine[2] // activity status of said md-device
mdName := mainLine[0]
activityState := mainLine[2]

if len(lines) <= i+3 {
return mdStates, fmt.Errorf("error parsing %s: entry for %s has fewer lines than expected", mdStatusFilePath, currentMD)
return mdStates, fmt.Errorf(
"error parsing %s: too few lines for md device %s",
mdStatusFilePath,
mdName,
)
}

active, total, size, err := evalStatusline(lines[i+1]) // parse statusline, always present
active, total, size, err := evalStatusline(lines[i+1])
if err != nil {
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
}

//
// Now get the number of synced blocks.
//

// Get the line number of the syncing-line.
var j int
if strings.Contains(lines[i+2], "bitmap") { // then skip the bitmap line
// j is the line number of the syncing-line.
j := i + 2
if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
j = i + 3
} else {
j = i + 2
}

// If device is syncing at the moment, get the number of currently synced bytes,
// otherwise that number equals the size of the device.
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size
if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
syncedBlocks, err = evalBuildline(lines[j])

@@ -103,8 +86,14 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
}
}

mdStates = append(mdStates, MDStat{currentMD, activityState, active, total, size, syncedBlocks})
mdStates = append(mdStates, MDStat{
Name: mdName,
ActivityState: activityState,
DisksActive: active,
DisksTotal: total,
BlocksTotal: size,
BlocksSynced: syncedBlocks,
})
}

return mdStates, nil

@@ -112,47 +101,38 @@ func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {

func evalStatusline(statusline string) (active, total, size int64, err error) {
matches := statuslineRE.FindStringSubmatch(statusline)
// +1 to make it more obvious that the whole string containing the info is also returned as matches[0].
if len(matches) != 3+1 {
return 0, 0, 0, fmt.Errorf("unexpected number matches found in statusline: %s", statusline)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
}

size, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}

total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}

active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, fmt.Errorf("%s in statusline: %s", err, statusline)
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
}

return active, total, size, nil
}

// Gets the size that has already been synced out of the sync-line.
func evalBuildline(buildline string) (int64, error) {
func evalBuildline(buildline string) (syncedBlocks int64, err error) {
matches := buildlineRE.FindStringSubmatch(buildline)
// +1 to make it more obvious that the whole string containing the info is also returned as matches[0].
if len(matches) < 1+1 {
return 0, fmt.Errorf("too few matches found in buildline: %s", buildline)
if len(matches) != 2 {
return 0, fmt.Errorf("unexpected buildline: %s", buildline)
}

if len(matches) > 1+1 {
return 0, fmt.Errorf("too many matches found in buildline: %s", buildline)
}

syncedSize, err := strconv.ParseInt(matches[1], 10, 64)
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
}

return syncedSize, nil
return syncedBlocks, nil
}
@@ -4,7 +4,6 @@ import (
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
)

@@ -42,7 +41,7 @@ func NewProc(pid int) (Proc, error) {
return fs.NewProc(pid)
}

// AllProcs returns a list of all currently avaible processes under /proc.
// AllProcs returns a list of all currently available processes under /proc.
func AllProcs() (Procs, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {

@@ -53,7 +52,7 @@ func AllProcs() (Procs, error) {

// Self returns a process for the current process.
func (fs FS) Self() (Proc, error) {
p, err := fs.readlink("self")
p, err := os.Readlink(fs.Path("self"))
if err != nil {
return Proc{}, err
}

@@ -66,15 +65,15 @@ func (fs FS) Self() (Proc, error) {

// NewProc returns a process for the given pid.
func (fs FS) NewProc(pid int) (Proc, error) {
if _, err := fs.stat(strconv.Itoa(pid)); err != nil {
if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
return Proc{PID: pid, fs: fs}, nil
}

// AllProcs returns a list of all currently avaible processes.
// AllProcs returns a list of all currently available processes.
func (fs FS) AllProcs() (Procs, error) {
d, err := fs.open("")
d, err := os.Open(fs.Path())
if err != nil {
return Procs{}, err
}

@@ -99,7 +98,7 @@ func (fs FS) AllProcs() (Procs, error) {

// CmdLine returns the command line of a process.
func (p Proc) CmdLine() ([]string, error) {
f, err := p.open("cmdline")
f, err := os.Open(p.path("cmdline"))
if err != nil {
return nil, err
}

@@ -117,10 +116,25 @@ func (p Proc) CmdLine() ([]string, error) {
return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
}

// Comm returns the command name of a process.
func (p Proc) Comm() (string, error) {
f, err := os.Open(p.path("comm"))
if err != nil {
return "", err
}
defer f.Close()

data, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}

return strings.TrimSpace(string(data)), nil
}

// Executable returns the absolute path of the executable command of a process.
func (p Proc) Executable() (string, error) {
exe, err := p.readlink("exe")
exe, err := os.Readlink(p.path("exe"))

if os.IsNotExist(err) {
return "", nil
}

@@ -158,7 +172,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
targets := make([]string, len(names))

for i, name := range names {
target, err := p.readlink("fd/" + name)
target, err := os.Readlink(p.path("fd", name))
if err == nil {
targets[i] = target
}

@@ -179,7 +193,7 @@ func (p Proc) FileDescriptorsLen() (int, error) {
}

func (p Proc) fileDescriptors() ([]string, error) {
d, err := p.open("fd")
d, err := os.Open(p.path("fd"))
if err != nil {
return nil, err
}

@@ -193,10 +207,6 @@ func (p Proc) fileDescriptors() ([]string, error) {
return names, nil
}

func (p Proc) open(pa string) (*os.File, error) {
return p.fs.open(path.Join(strconv.Itoa(p.PID), pa))
}

func (p Proc) readlink(pa string) (string, error) {
return p.fs.readlink(path.Join(strconv.Itoa(p.PID), pa))
func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
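A sketch (not part of the diff) of the new Comm accessor, using the package-level NewProc shown in the hunks above:

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.NewProc(os.Getpid())
	if err != nil {
		panic(err)
	}
	comm, err := p.Comm()
	if err != nil {
		panic(err)
	}
	fmt.Println("command name:", comm) // contents of /proc/<pid>/comm, trimmed
}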
@@ -3,6 +3,7 @@ package procfs
 import (
 	"fmt"
 	"io/ioutil"
+	"os"
 )

 // ProcIO models the content of /proc/<pid>/io.

@@ -29,7 +30,7 @@ type ProcIO struct {
 func (p Proc) NewIO() (ProcIO, error) {
 	pio := ProcIO{}

-	f, err := p.open("io")
+	f, err := os.Open(p.path("io"))
 	if err != nil {
 		return pio, err
 	}
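NewIO above now goes through the same path helper to open /proc/<pid>/io. A short sketch, printing the whole struct since its field names are not part of this hunk:

p, err := procfs.Self() // assumed helper, as in the sketch above
if err != nil {
	panic(err)
}
procIO, err := p.NewIO()
if err != nil {
	panic(err)
}
fmt.Printf("%+v\n", procIO) // read/write counters parsed from /proc/<pid>/io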
@@ -3,28 +3,55 @@ package procfs
 import (
 	"bufio"
 	"fmt"
+	"os"
 	"regexp"
 	"strconv"
 )

 // ProcLimits represents the soft limits for each of the process's resource
-// limits.
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
 type ProcLimits struct {
+	// CPU time limit in seconds.
 	CPUTime int
+	// Maximum size of files that the process may create.
 	FileSize int
+	// Maximum size of the process's data segment (initialized data,
+	// uninitialized data, and heap).
 	DataSize int
+	// Maximum size of the process stack in bytes.
 	StackSize int
+	// Maximum size of a core file.
 	CoreFileSize int
+	// Limit of the process's resident set in pages.
 	ResidentSet int
+	// Maximum number of processes that can be created for the real user ID of
+	// the calling process.
 	Processes int
+	// Value one greater than the maximum file descriptor number that can be
+	// opened by this process.
 	OpenFiles int
+	// Maximum number of bytes of memory that may be locked into RAM.
 	LockedMemory int
+	// Maximum size of the process's virtual memory address space in bytes.
 	AddressSpace int
+	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
+	// this process may establish.
 	FileLocks int
+	// Limit of signals that may be queued for the real user ID of the calling
+	// process.
 	PendingSignals int
+	// Limit on the number of bytes that can be allocated for POSIX message
+	// queues for the real user ID of the calling process.
 	MsqqueueSize int
+	// Limit of the nice priority set using setpriority(2) or nice(2).
 	NicePriority int
+	// Limit of the real-time priority set using sched_setscheduler(2) or
+	// sched_setparam(2).
 	RealtimePriority int
+	// Limit (in microseconds) on the amount of CPU time that a process
+	// scheduled under a real-time scheduling policy may consume without making
+	// a blocking system call.
 	RealtimeTimeout int
 }

@@ -39,7 +66,7 @@ var (

 // NewLimits returns the current soft limits of the process.
 func (p Proc) NewLimits() (ProcLimits, error) {
-	f, err := p.open("limits")
+	f, err := os.Open(p.path("limits"))
 	if err != nil {
 		return ProcLimits{}, err
 	}

@@ -60,7 +87,7 @@ func (p Proc) NewLimits() (ProcLimits, error) {
 		case "Max cpu time":
 			l.CPUTime, err = parseInt(fields[1])
 		case "Max file size":
-			l.FileLocks, err = parseInt(fields[1])
+			l.FileSize, err = parseInt(fields[1])
 		case "Max data size":
 			l.DataSize, err = parseInt(fields[1])
 		case "Max stack size":

@@ -90,7 +117,6 @@ func (p Proc) NewLimits() (ProcLimits, error) {
 		case "Max realtime timeout":
 			l.RealtimeTimeout, err = parseInt(fields[1])
 		}
-
 		if err != nil {
 			return ProcLimits{}, err
 		}
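The new field comments mirror getrlimit(2); NewLimits itself still parses the rows of /proc/<pid>/limits into those fields. A small sketch using two of the fields documented above:

p, err := procfs.Self() // assumed helper, as in the sketch above
if err != nil {
	panic(err)
}
l, err := p.NewLimits()
if err != nil {
	panic(err)
}
// OpenFiles is one greater than the highest usable fd number;
// AddressSpace is the virtual memory cap in bytes.
fmt.Println("max open files:", l.OpenFiles, "address space:", l.AddressSpace)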
@@ -7,15 +7,15 @@ import (
 	"os"
 )

-// Originally, this USER_HZ value was dynamically retrieved via a sysconf call which
-// required cgo. However, that caused a lot of problems regarding
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
 // cross-compilation. Alternatives such as running a binary to determine the
-// value, or trying to derive it in some other way were all problematic.
-// After much research it was determined that USER_HZ is actually hardcoded to
-// 100 on all Go-supported platforms as of the time of this writing. This is
-// why we decided to hardcode it here as well. It is not impossible that there
-// could be systems with exceptions, but they should be very exotic edge cases,
-// and in that case, the worst outcome will be two misreported metrics.
+// value, or trying to derive it in some other way were all problematic. After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
 //
 // See also the following discussions:
 //

@@ -91,7 +91,7 @@ type ProcStat struct {

 // NewStat returns the current status information of the process.
 func (p Proc) NewStat() (ProcStat, error) {
-	f, err := p.open("stat")
+	f, err := os.Open(p.path("stat"))
 	if err != nil {
 		return ProcStat{}, err
 	}
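Because USER_HZ is treated as a constant 100 here, converting the tick counters read from /proc/<pid>/stat into seconds is plain arithmetic. A minimal sketch; the constant name is illustrative, not the package's:

const userHZ = 100 // hardcoded, per the comment above

// ticksToSeconds converts a clock-tick counter (e.g. utime or stime
// from /proc/<pid>/stat) into seconds.
func ticksToSeconds(ticks uint64) float64 {
	return float64(ticks) / userHZ
}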
@@ -3,6 +3,7 @@ package procfs
 import (
 	"bufio"
 	"fmt"
+	"os"
 	"strconv"
 	"strings"
 )

@@ -25,7 +26,7 @@ func NewStat() (Stat, error) {

 // NewStat returns an information about current kernel/system statistics.
 func (fs FS) NewStat() (Stat, error) {
-	f, err := fs.open("stat")
+	f, err := os.Open(fs.Path("stat"))
 	if err != nil {
 		return Stat{}, err
 	}
@@ -64,10 +64,11 @@ Rich Feature Set includes:
   - Never silently skip data when decoding.
     User decides whether to return an error or silently skip data when keys or indexes
     in the data stream do not map to fields in the struct.
+  - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
   - Encode/Decode from/to chan types (for iterative streaming support)
   - Drop-in replacement for encoding/json. `json:` key in struct tag supported.
   - Provides a RPC Server and Client Codec for net/rpc communication protocol.
-  - Handle unique idiosynchracies of codecs e.g.
+  - Handle unique idiosyncrasies of codecs e.g.
     - For messagepack, configure how ambiguities in handling raw bytes are resolved
     - For messagepack, provide rpc server/client codec to support
       msgpack-rpc protocol defined at:

@@ -171,6 +172,8 @@ package codec

// TODO:
//
+//   - optimization for codecgen:
+//     if len of entity is <= 3 words, then support a value receiver for encode.
//   - (En|De)coder should store an error when it occurs.
//     Until reset, subsequent calls return that error that was stored.
//     This means that free panics must go away.

@@ -178,16 +181,19 @@ package codec
//   - Decoding using a chan is good, but incurs concurrency costs.
//     This is because there's no fast way to use a channel without it
//     having to switch goroutines constantly.
-//     Callback pattern is still the best. Maybe cnsider supporting something like:
+//     Callback pattern is still the best. Maybe consider supporting something like:
//        type X struct {
//             Name string
//             Ys []Y
//             Ys chan <- Y
-//             Ys func(interface{}) -> call this interface for each entry in there.
+//             Ys func(Y) -> call this function for each entry
//        }
//   - Consider adding a isZeroer interface { isZero() bool }
//     It is used within isEmpty, for omitEmpty support.
//   - Consider making Handle used AS-IS within the encoding/decoding session.
//     This means that we don't cache Handle information within the (En|De)coder,
//     except we really need it at Reset(...)
-//   - Handle recursive types during encoding/decoding?
+//   - Consider adding math/big support
+//   - Consider reducing the size of the generated functions:
+//     Maybe use one loop, and put the conditionals in the loop.
+//     for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } }
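For context on the feature list above, a minimal round trip with the codec package. This sketch only relies on the long-standing public API (a handle plus NewEncoderBytes/NewDecoderBytes), not on anything introduced in this diff:

package main

import "github.com/ugorji/go/codec"

func main() {
	var h codec.CborHandle // any handle works: JsonHandle, MsgpackHandle, BincHandle, ...

	var buf []byte
	if err := codec.NewEncoderBytes(&buf, &h).Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}

	var out map[string]int
	if err := codec.NewDecoderBytes(buf, &h).Decode(&out); err != nil {
		panic(err)
	}
}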
@@ -348,6 +348,13 @@ func (d *bincDecDriver) readNextBd() {
 	d.bdRead = true
 }

+func (d *bincDecDriver) uncacheRead() {
+	if d.bdRead {
+		d.r.unreadn1()
+		d.bdRead = false
+	}
+}
+
 func (d *bincDecDriver) ContainerType() (vt valueType) {
 	if d.vd == bincVdSpecial && d.vs == bincSpNil {
 		return valueTypeNil

@@ -705,7 +712,7 @@ func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool)
 }

 func (d *bincDecDriver) DecodeString() (s string) {
-	// DecodeBytes does not accomodate symbols, whose impl stores string version in map.
+	// DecodeBytes does not accommodate symbols, whose impl stores string version in map.
 	// Use decStringAndBytes directly.
 	// return string(d.DecodeBytes(d.b[:], true, true))
 	_, s = d.decStringAndBytes(d.b[:], true, true)

@@ -908,10 +915,14 @@ func (h *BincHandle) newDecDriver(d *Decoder) decDriver {

 func (e *bincEncDriver) reset() {
 	e.w = e.e.w
+	e.s = 0
+	e.m = nil
 }

 func (d *bincDecDriver) reset() {
 	d.r = d.d.r
+	d.s = nil
+	d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
 }

 var _ decDriver = (*bincDecDriver)(nil)
@@ -188,6 +188,13 @@ func (d *cborDecDriver) readNextBd() {
 	d.bdRead = true
 }

+func (d *cborDecDriver) uncacheRead() {
+	if d.bdRead {
+		d.r.unreadn1()
+		d.bdRead = false
+	}
+}
+
 func (d *cborDecDriver) ContainerType() (vt valueType) {
 	if d.bd == cborBdNil {
 		return valueTypeNil

@@ -508,7 +515,7 @@ func (d *cborDecDriver) DecodeNaked() {
 		n.v = valueTypeExt
 		n.u = d.decUint()
 		n.l = nil
-		d.bdRead = false
+		// d.bdRead = false
 		// d.d.decode(&re.Value) // handled by decode itself.
 		// decodeFurther = true
 	default:

@@ -578,6 +585,7 @@ func (e *cborEncDriver) reset() {

 func (d *cborDecDriver) reset() {
 	d.r = d.d.r
+	d.bd, d.bdRead = 0, false
 }

 var _ decDriver = (*cborDecDriver)(nil)
@@ -91,10 +91,12 @@ type decDriver interface {
 	uncacheRead()
 }

-type decNoSeparator struct{}
+type decNoSeparator struct {
+}

 func (_ decNoSeparator) ReadEnd() {}
-func (_ decNoSeparator) uncacheRead() {}
+
+// func (_ decNoSeparator) uncacheRead() {}

 type DecodeOptions struct {
 	// MapType specifies type to use during schema-less decoding of a map in the stream.

@@ -161,6 +163,15 @@ type DecodeOptions struct {
 	// Note: Handles will be smart when using the intern functionality.
 	// So everything will not be interned.
 	InternString bool
+
+	// PreferArrayOverSlice controls whether to decode to an array or a slice.
+	//
+	// This only impacts decoding into a nil interface{}.
+	// Consequently, it has no effect on codecgen.
+	//
+	// *Note*: This only applies if using go1.5 and above,
+	// as it requires reflect.ArrayOf support which was absent before go1.5.
+	PreferArrayOverSlice bool
 }

 // ------------------------------------

@@ -433,6 +444,10 @@ func (f *decFnInfo) rawExt(rv reflect.Value) {
 	f.d.d.DecodeExt(rv.Addr().Interface(), 0, nil)
 }

+func (f *decFnInfo) raw(rv reflect.Value) {
+	rv.SetBytes(f.d.raw())
+}
+
 func (f *decFnInfo) ext(rv reflect.Value) {
 	f.d.d.DecodeExt(rv.Addr().Interface(), f.xfTag, f.xfFn)
 }

@@ -583,14 +598,16 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
 		if d.mtid == 0 || d.mtid == mapIntfIntfTypId {
 			l := len(n.ms)
 			n.ms = append(n.ms, nil)
-			d.decode(&n.ms[l])
-			rvn = reflect.ValueOf(&n.ms[l]).Elem()
+			var v2 interface{} = &n.ms[l]
+			d.decode(v2)
+			rvn = reflect.ValueOf(v2).Elem()
 			n.ms = n.ms[:l]
 		} else if d.mtid == mapStrIntfTypId { // for json performance
 			l := len(n.ns)
 			n.ns = append(n.ns, nil)
-			d.decode(&n.ns[l])
-			rvn = reflect.ValueOf(&n.ns[l]).Elem()
+			var v2 interface{} = &n.ns[l]
+			d.decode(v2)
+			rvn = reflect.ValueOf(v2).Elem()
 			n.ns = n.ns[:l]
 		} else {
 			rvn = reflect.New(d.h.MapType).Elem()

@@ -601,9 +618,13 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
 		if d.stid == 0 || d.stid == intfSliceTypId {
 			l := len(n.ss)
 			n.ss = append(n.ss, nil)
-			d.decode(&n.ss[l])
-			rvn = reflect.ValueOf(&n.ss[l]).Elem()
+			var v2 interface{} = &n.ss[l]
+			d.decode(v2)
 			n.ss = n.ss[:l]
+			rvn = reflect.ValueOf(v2).Elem()
+			if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice {
+				rvn = reflectArrayOf(rvn)
+			}
 		} else {
 			rvn = reflect.New(d.h.SliceType).Elem()
 			d.decodeValue(rvn, nil)

@@ -615,9 +636,9 @@ func (f *decFnInfo) kInterfaceNaked() (rvn reflect.Value) {
 			l := len(n.is)
 			n.is = append(n.is, nil)
 			v2 := &n.is[l]
-			n.is = n.is[:l]
 			d.decode(v2)
 			v = *v2
+			n.is = n.is[:l]
 		}
 		bfn := d.h.getExtForTag(tag)
 		if bfn == nil {

@@ -1166,7 +1187,7 @@ type decRtidFn struct {
 // primitives are being decoded.
 //
 // maps and arrays are not handled by this mechanism.
-// However, RawExt is, and we accomodate for extensions that decode
+// However, RawExt is, and we accommodate for extensions that decode
 // RawExt from DecodeNaked, but need to decode the value subsequently.
 // kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat.
 //

@@ -1453,8 +1474,8 @@ func (d *Decoder) swallow() {
 			l := len(n.is)
 			n.is = append(n.is, nil)
 			v2 := &n.is[l]
-			n.is = n.is[:l]
 			d.decode(v2)
+			n.is = n.is[:l]
 		}
 	}
 }

@@ -1504,6 +1525,8 @@ func (d *Decoder) decode(iv interface{}) {
 		*v = 0
 	case *[]uint8:
 		*v = nil
+	case *Raw:
+		*v = nil
 	case reflect.Value:
 		if v.Kind() != reflect.Ptr || v.IsNil() {
 			d.errNotValidPtrValue(v)

@@ -1543,7 +1566,6 @@ func (d *Decoder) decode(iv interface{}) {
 		d.decodeValueNotNil(v.Elem(), nil)

 	case *string:
-
 		*v = d.d.DecodeString()
 	case *bool:
 		*v = d.d.DecodeBool()

@@ -1574,6 +1596,9 @@ func (d *Decoder) decode(iv interface{}) {
 	case *[]uint8:
 		*v = d.d.DecodeBytes(*v, false, false)

+	case *Raw:
+		*v = d.raw()
+
 	case *interface{}:
 		d.decodeValueNotNil(reflect.ValueOf(iv).Elem(), nil)

@@ -1695,6 +1720,8 @@ func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool
 		fn.f = (*decFnInfo).selferUnmarshal
 	} else if rtid == rawExtTypId {
 		fn.f = (*decFnInfo).rawExt
+	} else if rtid == rawTypId {
+		fn.f = (*decFnInfo).raw
 	} else if d.d.IsBuiltinType(rtid) {
 		fn.f = (*decFnInfo).builtin
 	} else if xfFn := d.h.getExt(rtid); xfFn != nil {

@@ -1793,12 +1820,13 @@ func (d *Decoder) getDecFn(rt reflect.Type, checkFastpath, checkCodecSelfer bool
 }

 func (d *Decoder) structFieldNotFound(index int, rvkencname string) {
+	// NOTE: rvkencname may be a stringView, so don't pass it to another function.
 	if d.h.ErrorIfNoField {
 		if index >= 0 {
 			d.errorf("no matching struct field found when decoding stream array at index %v", index)
 			return
 		} else if rvkencname != "" {
-			d.errorf("no matching struct field found when decoding stream map with key %s", rvkencname)
+			d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
 			return
 		}
 	}

@@ -1862,6 +1890,7 @@ func (d *Decoder) intern(s string) {
 	}
 }

+// nextValueBytes returns the next value in the stream as a set of bytes.
 func (d *Decoder) nextValueBytes() []byte {
 	d.d.uncacheRead()
 	d.r.track()

@@ -1869,6 +1898,15 @@ func (d *Decoder) nextValueBytes() []byte {
 	return d.r.stopTrack()
 }

+func (d *Decoder) raw() []byte {
+	// ensure that this is not a view into the bytes
+	// i.e. make new copy always.
+	bs := d.nextValueBytes()
+	bs2 := make([]byte, len(bs))
+	copy(bs2, bs)
+	return bs2
+}
+
 // --------------------------------------------------

 // decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
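Two decode-side additions above are user visible: the *Raw case and the PreferArrayOverSlice option. A hedged sketch of the option, which only affects schema-less decoding into a nil interface{} and only on go1.5+ where reflect.ArrayOf exists; encodedList stands in for any encoded array:

var h codec.CborHandle
h.PreferArrayOverSlice = true // option added in this diff

var v interface{}
if err := codec.NewDecoderBytes(encodedList, &h).Decode(&v); err != nil {
	panic(err)
}
// On go1.5 and above, v now holds an [N]interface{} array value
// instead of the usual []interface{} slice.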
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build go1.5
+
+package codec
+
+import "reflect"
+
+const reflectArrayOfSupported = true
+
+func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
+	rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem()
+	reflect.Copy(rvn2, rvn)
+	return
+}
@@ -0,0 +1,14 @@
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// +build !go1.5
+
+package codec
+
+import "reflect"
+
+const reflectArrayOfSupported = false
+
+func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
+	panic("reflect.ArrayOf unsupported")
+}
@@ -110,6 +110,28 @@ type EncodeOptions struct {
 	//
 	Canonical bool

+	// CheckCircularRef controls whether we check for circular references
+	// and error fast during an encode.
+	//
+	// If enabled, an error is received if a pointer to a struct
+	// references itself either directly or through one of its fields (iteratively).
+	//
+	// This is opt-in, as there may be a performance hit to checking circular references.
+	CheckCircularRef bool
+
+	// RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
+	// when checking if a value is empty.
+	//
+	// Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
+	RecursiveEmptyCheck bool
+
+	// Raw controls whether we encode Raw values.
+	// This is a "dangerous" option and must be explicitly set.
+	// If set, we blindly encode Raw values as-is, without checking
+	// if they are a correct representation of a value in that format.
+	// If unset, we error out.
+	Raw bool
+
 	// AsSymbols defines what should be encoded as symbols.
 	//
 	// Encoding as symbols can reduce the encoded size significantly.

@@ -132,13 +154,16 @@ type simpleIoEncWriterWriter struct {
 	w  io.Writer
 	bw io.ByteWriter
 	sw ioEncStringWriter
+	bs [1]byte
 }

 func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) {
 	if o.bw != nil {
 		return o.bw.WriteByte(c)
 	}
-	_, err = o.w.Write([]byte{c})
+	// _, err = o.w.Write([]byte{c})
+	o.bs[0] = c
+	_, err = o.w.Write(o.bs[:])
 	return
 }

@@ -210,50 +235,71 @@ type bytesEncWriter struct {
 }

 func (z *bytesEncWriter) writeb(s []byte) {
-	if len(s) > 0 {
-		c := z.grow(len(s))
-		copy(z.b[c:], s)
+	if len(s) == 0 {
+		return
 	}
+	oc, a := z.growNoAlloc(len(s))
+	if a {
+		z.growAlloc(len(s), oc)
+	}
+	copy(z.b[oc:], s)
 }

 func (z *bytesEncWriter) writestr(s string) {
-	if len(s) > 0 {
-		c := z.grow(len(s))
-		copy(z.b[c:], s)
+	if len(s) == 0 {
+		return
 	}
+	oc, a := z.growNoAlloc(len(s))
+	if a {
+		z.growAlloc(len(s), oc)
+	}
+	copy(z.b[oc:], s)
 }

 func (z *bytesEncWriter) writen1(b1 byte) {
-	c := z.grow(1)
-	z.b[c] = b1
+	oc, a := z.growNoAlloc(1)
+	if a {
+		z.growAlloc(1, oc)
+	}
+	z.b[oc] = b1
 }

 func (z *bytesEncWriter) writen2(b1 byte, b2 byte) {
-	c := z.grow(2)
-	z.b[c] = b1
-	z.b[c+1] = b2
+	oc, a := z.growNoAlloc(2)
+	if a {
+		z.growAlloc(2, oc)
+	}
+	z.b[oc+1] = b2
+	z.b[oc] = b1
 }

 func (z *bytesEncWriter) atEndOfEncode() {
 	*(z.out) = z.b[:z.c]
 }

-func (z *bytesEncWriter) grow(n int) (oldcursor int) {
+// have a growNoalloc(n int), which can be inlined.
+// if allocation is needed, then call growAlloc(n int)
+
+func (z *bytesEncWriter) growNoAlloc(n int) (oldcursor int, allocNeeded bool) {
 	oldcursor = z.c
-	z.c = oldcursor + n
+	z.c = z.c + n
 	if z.c > len(z.b) {
 		if z.c > cap(z.b) {
+			allocNeeded = true
+		} else {
+			z.b = z.b[:cap(z.b)]
+		}
+	}
+	return
+}
+
+func (z *bytesEncWriter) growAlloc(n int, oldcursor int) {
 	// appendslice logic (if cap < 1024, *2, else *1.25): more expensive. many copy calls.
 	// bytes.Buffer model (2*cap + n): much better
 	// bs := make([]byte, 2*cap(z.b)+n)
 	bs := make([]byte, growCap(cap(z.b), 1, n))
 	copy(bs, z.b[:oldcursor])
 	z.b = bs
-		} else {
-			z.b = z.b[:cap(z.b)]
-		}
-	}
-	return
 }

 // ---------------------------------------------

@@ -270,6 +316,10 @@ func (f *encFnInfo) builtin(rv reflect.Value) {
 	f.e.e.EncodeBuiltin(f.ti.rtid, rv.Interface())
 }

+func (f *encFnInfo) raw(rv reflect.Value) {
+	f.e.raw(rv.Interface().(Raw))
+}
+
 func (f *encFnInfo) rawExt(rv reflect.Value) {
 	// rev := rv.Interface().(RawExt)
 	// f.e.e.EncodeRawExt(&rev, f.e)

@@ -296,7 +346,7 @@ func (f *encFnInfo) getValueForMarshalInterface(rv reflect.Value, indir int8) (v
 		v = rv.Interface()
 	} else if indir == -1 {
 		// If a non-pointer was passed to Encode(), then that value is not addressable.
-		// Take addr if addresable, else copy value to an addressable value.
+		// Take addr if addressable, else copy value to an addressable value.
 		if rv.CanAddr() {
 			v = rv.Addr().Interface()
 		} else {

@@ -464,7 +514,7 @@ func (f *encFnInfo) kSlice(rv reflect.Value) {
 		for j := 0; j < l; j++ {
 			if cr != nil {
 				if ti.mbs {
-					if l%2 == 0 {
+					if j%2 == 0 {
 						cr.sendContainerState(containerMapKey)
 					} else {
 						cr.sendContainerState(containerMapValue)

@@ -503,7 +553,7 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {
 	newlen := len(fti.sfi)

 	// Use sync.Pool to reduce allocating slices unnecessarily.
-	// The cost of the occasional locking is less than the cost of new allocation.
+	// The cost of sync.Pool is less than the cost of new allocation.
 	pool, poolv, fkvs := encStructPoolGet(newlen)

 	// if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)

@@ -512,25 +562,20 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {
 	}
 	newlen = 0
 	var kv stringRv
+	recur := e.h.RecursiveEmptyCheck
 	for _, si := range tisfi {
 		kv.r = si.field(rv, false)
-		// if si.i != -1 {
-		// 	rvals[newlen] = rv.Field(int(si.i))
-		// } else {
-		// 	rvals[newlen] = rv.FieldByIndex(si.is)
-		// }
 		if toMap {
-			if si.omitEmpty && isEmptyValue(kv.r) {
+			if si.omitEmpty && isEmptyValue(kv.r, recur, recur) {
 				continue
 			}
 			kv.v = si.encName
 		} else {
 			// use the zero value.
 			// if a reference or struct, set to nil (so you do not output too much)
-			if si.omitEmpty && isEmptyValue(kv.r) {
+			if si.omitEmpty && isEmptyValue(kv.r, recur, recur) {
 				switch kv.r.Kind() {
-				case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array,
-					reflect.Map, reflect.Slice:
+				case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice:
 					kv.r = reflect.Value{} //encode as nil
 				}
 			}

@@ -541,7 +586,7 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {

 	// debugf(">>>> kStruct: newlen: %v", newlen)
 	// sep := !e.be
-	ee := e.e //don't dereference everytime
+	ee := e.e //don't dereference every time

 	if toMap {
 		ee.EncodeMapStart(newlen)

@@ -596,13 +641,15 @@ func (f *encFnInfo) kStruct(rv reflect.Value) {
 // 	f.e.encodeValue(rv.Elem())
 // }

-func (f *encFnInfo) kInterface(rv reflect.Value) {
-	if rv.IsNil() {
-		f.e.e.EncodeNil()
-		return
-	}
-	f.e.encodeValue(rv.Elem(), nil)
-}
+// func (f *encFnInfo) kInterface(rv reflect.Value) {
+// 	println("kInterface called")
+// 	debug.PrintStack()
+// 	if rv.IsNil() {
+// 		f.e.e.EncodeNil()
+// 		return
+// 	}
+// 	f.e.encodeValue(rv.Elem(), nil)
+// }

 func (f *encFnInfo) kMap(rv reflect.Value) {
 	ee := f.e.e

@@ -877,6 +924,7 @@ type Encoder struct {
 	// as the handler MAY need to do some coordination.
 	w  encWriter
 	s  []encRtidFn
+	ci set
 	be bool // is binary encoding
 	js bool // is json handle

@@ -925,7 +973,7 @@ func newEncoder(h Handle) *Encoder {

 // Reset the Encoder with a new output stream.
 //
-// This accomodates using the state of the Encoder,
+// This accommodates using the state of the Encoder,
 // where it has "cached" information about sub-engines.
 func (e *Encoder) Reset(w io.Writer) {
 	ww, ok := w.(ioEncWriterWriter)

@@ -1032,20 +1080,6 @@ func (e *Encoder) MustEncode(v interface{}) {
 	e.w.atEndOfEncode()
 }

-// comment out these (Must)Write methods. They were only put there to support cbor.
-// However, users already have access to the streams, and can write directly.
-//
-// // Write allows users write to the Encoder stream directly.
-// func (e *Encoder) Write(bs []byte) (err error) {
-// 	defer panicToErr(&err)
-// 	e.w.writeb(bs)
-// 	return
-// }
-// // MustWrite is like write, but panics if unable to Write.
-// func (e *Encoder) MustWrite(bs []byte) {
-// 	e.w.writeb(bs)
-// }
-
 func (e *Encoder) encode(iv interface{}) {
 	// if ics, ok := iv.(Selfer); ok {
 	// 	ics.CodecEncodeSelf(e)

@@ -1057,7 +1091,8 @@ func (e *Encoder) encode(iv interface{}) {
 		e.e.EncodeNil()
 	case Selfer:
 		v.CodecEncodeSelf(e)
+	case Raw:
+		e.raw(v)
 	case reflect.Value:
 		e.encodeValue(v, nil)

@@ -1133,20 +1168,23 @@ func (e *Encoder) encode(iv interface{}) {
 	}
 }

-func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) {
-	if rv, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed {
-		rt := rv.Type()
-		rtid := reflect.ValueOf(rt).Pointer()
-		fn := e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer)
-		fn.f(&fn.i, rv)
-	}
-}
-
-func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, proceed bool) {
+func (e *Encoder) preEncodeValue(rv reflect.Value) (rv2 reflect.Value, sptr uintptr, proceed bool) {
 	// use a goto statement instead of a recursive function for ptr/interface.
 TOP:
 	switch rv.Kind() {
-	case reflect.Ptr, reflect.Interface:
+	case reflect.Ptr:
+		if rv.IsNil() {
+			e.e.EncodeNil()
+			return
+		}
+		rv = rv.Elem()
+		if e.h.CheckCircularRef && rv.Kind() == reflect.Struct {
+			// TODO: Movable pointers will be an issue here. Future problem.
+			sptr = rv.UnsafeAddr()
+			break TOP
+		}
+		goto TOP
+	case reflect.Interface:
 		if rv.IsNil() {
 			e.e.EncodeNil()
 			return

@@ -1163,18 +1201,40 @@ TOP:
 		return
 	}

-	return rv, true
+	proceed = true
+	rv2 = rv
+	return
+}
+
+func (e *Encoder) doEncodeValue(rv reflect.Value, fn *encFn, sptr uintptr,
+	checkFastpath, checkCodecSelfer bool) {
+	if sptr != 0 {
+		if (&e.ci).add(sptr) {
+			e.errorf("circular reference found: # %d", sptr)
+		}
+	}
+	if fn == nil {
+		rt := rv.Type()
+		rtid := reflect.ValueOf(rt).Pointer()
+		// fn = e.getEncFn(rtid, rt, true, true)
+		fn = e.getEncFn(rtid, rt, checkFastpath, checkCodecSelfer)
+	}
+	fn.f(&fn.i, rv)
+	if sptr != 0 {
+		(&e.ci).remove(sptr)
+	}
+}
+
+func (e *Encoder) encodeI(iv interface{}, checkFastpath, checkCodecSelfer bool) {
+	if rv, sptr, proceed := e.preEncodeValue(reflect.ValueOf(iv)); proceed {
+		e.doEncodeValue(rv, nil, sptr, checkFastpath, checkCodecSelfer)
+	}
 }

 func (e *Encoder) encodeValue(rv reflect.Value, fn *encFn) {
 	// if a valid fn is passed, it MUST BE for the dereferenced type of rv
-	if rv, proceed := e.preEncodeValue(rv); proceed {
-		if fn == nil {
-			rt := rv.Type()
-			rtid := reflect.ValueOf(rt).Pointer()
-			fn = e.getEncFn(rtid, rt, true, true)
-		}
-		fn.f(&fn.i, rv)
+	if rv, sptr, proceed := e.preEncodeValue(rv); proceed {
+		e.doEncodeValue(rv, fn, sptr, true, true)
 	}
 }

@@ -1217,6 +1277,8 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo

 	if checkCodecSelfer && ti.cs {
 		fn.f = (*encFnInfo).selferMarshal
+	} else if rtid == rawTypId {
+		fn.f = (*encFnInfo).raw
 	} else if rtid == rawExtTypId {
 		fn.f = (*encFnInfo).rawExt
 	} else if e.e.IsBuiltinType(rtid) {

@@ -1234,7 +1296,7 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo
 	} else {
 		rk := rt.Kind()
 		if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
-			if rt.PkgPath() == "" {
+			if rt.PkgPath() == "" { // un-named slice or map
 				if idx := fastpathAV.index(rtid); idx != -1 {
 					fn.f = fastpathAV[idx].encfn
 				}

@@ -1284,10 +1346,11 @@ func (e *Encoder) getEncFn(rtid uintptr, rt reflect.Type, checkFastpath, checkCo
 			fn.f = (*encFnInfo).kSlice
 		case reflect.Struct:
 			fn.f = (*encFnInfo).kStruct
+		// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
 		// case reflect.Ptr:
 		// 	fn.f = (*encFnInfo).kPtr
-		case reflect.Interface:
-			fn.f = (*encFnInfo).kInterface
+		// case reflect.Interface:
+		// 	fn.f = (*encFnInfo).kInterface
 		case reflect.Map:
 			fn.f = (*encFnInfo).kMap
 		default:

@@ -1320,6 +1383,18 @@ func (e *Encoder) asis(v []byte) {
 	}
 }

+func (e *Encoder) raw(vv Raw) {
+	v := []byte(vv)
+	if !e.h.Raw {
+		e.errorf("Raw values cannot be encoded: %v", v)
+	}
+	if e.as == nil {
+		e.w.writeb(v)
+	} else {
+		e.as.EncodeAsis(v)
+	}
+}
+
 func (e *Encoder) errorf(format string, params ...interface{}) {
 	err := fmt.Errorf(format, params...)
 	panic(err)

@@ -1353,25 +1428,6 @@ func encStructPoolGet(newlen int) (p *sync.Pool, v interface{}, s []stringRv) {
 	// 	panic(errors.New("encStructPoolLen must be equal to 4")) // defensive, in case it is changed
 	// }
 	// idxpool := newlen / 8
-
-	// if pool == nil {
-	// 	fkvs = make([]stringRv, newlen)
-	// } else {
-	// 	poolv = pool.Get()
-	// 	switch vv := poolv.(type) {
-	// 	case *[8]stringRv:
-	// 		fkvs = vv[:newlen]
-	// 	case *[16]stringRv:
-	// 		fkvs = vv[:newlen]
-	// 	case *[32]stringRv:
-	// 		fkvs = vv[:newlen]
-	// 	case *[64]stringRv:
-	// 		fkvs = vv[:newlen]
-	// 	case *[128]stringRv:
-	// 		fkvs = vv[:newlen]
-	// 	}
-	// }
-
 	if newlen <= 8 {
 		p = &encStructPool[0]
 		v = p.Get()
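The split of encodeValue into preEncodeValue/doEncodeValue is what carries the new CheckCircularRef option: the struct's address goes into the encoder's ci set before its fields are encoded and is removed afterwards, so revisiting the same address fails fast. A sketch of the opt-in behaviour, assuming the option is reachable through the handle like the other EncodeOptions fields:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type node struct {
	Next *node
}

func main() {
	var h codec.CborHandle
	h.CheckCircularRef = true // opt-in, as the option's comment above notes

	n := &node{}
	n.Next = n // direct self-reference

	var buf []byte
	err := codec.NewEncoderBytes(&buf, &h).Encode(n)
	fmt.Println(err) // expected: an error reporting the circular reference, not a stack overflow
}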
@@ -23,7 +23,7 @@ package codec
 // Currently support
 //    - slice of all builtin types,
 //    - map of all builtin types to string or interface value
-//    - symetrical maps of all builtin types (e.g. str-str, uint8-uint8)
+//    - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
 // This should provide adequate "typical" implementations.
 //
 // Note that fast track decode functions must handle values for which an address cannot be obtained.

@@ -38,6 +38,8 @@ import (
 	"sort"
 )

+const fastpathEnabled = true
+
 const fastpathCheckNilFalse = false // for reflect
 const fastpathCheckNilTrue = true   // for type switch

@@ -81,9 +83,6 @@ var fastpathAV fastpathA

 // due to possible initialization loop error, make fastpath in an init()
 func init() {
-	if !fastpathEnabled {
-		return
-	}
 	i := 0
 	fn := func(v interface{}, fe func(*encFnInfo, reflect.Value), fd func(*decFnInfo, reflect.Value)) (f fastpathE) {
 		xrt := reflect.TypeOf(v)

@@ -373,9 +372,6 @@ func init() {

 // -- -- fast path type switch
 func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
-	if !fastpathEnabled {
-		return false
-	}
 	switch v := iv.(type) {

 	case []interface{}:

@@ -1741,9 +1737,6 @@ func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool {
 }

 func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
-	if !fastpathEnabled {
-		return false
-	}
 	switch v := iv.(type) {

 	case []interface{}:

@@ -1829,9 +1822,6 @@ func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool {
 }

 func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
-	if !fastpathEnabled {
-		return false
-	}
 	switch v := iv.(type) {

 	case map[interface{}]interface{}:

@@ -3124,7 +3114,11 @@ func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool {
 // -- -- fast path functions

 func (f *encFnInfo) fastpathEncSliceIntfR(rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e)
+	} else {
 		fastpathTV.EncSliceIntfV(rv.Interface().([]interface{}), fastpathCheckNilFalse, f.e)
+	}
 }
 func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) {
 	ee := e.e

@@ -3145,8 +3139,39 @@ func (_ fastpathT) EncSliceIntfV(v []interface{}, checkNil bool, e *Encoder) {
 	}
 }

+func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, checkNil bool, e *Encoder) {
+	ee := e.e
+	cr := e.cr
+	if checkNil && v == nil {
+		ee.EncodeNil()
+		return
+	}
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.EncodeMapStart(len(v) / 2)
+	for j, v2 := range v {
+		if cr != nil {
+			if j%2 == 0 {
+				cr.sendContainerState(containerMapKey)
+			} else {
+				cr.sendContainerState(containerMapValue)
+			}
+		}
+		e.encode(v2)
+	}
+	if cr != nil {
+		cr.sendContainerState(containerMapEnd)
+	}
+}
+
 func (f *encFnInfo) fastpathEncSliceStringR(rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e)
+	} else {
 		fastpathTV.EncSliceStringV(rv.Interface().([]string), fastpathCheckNilFalse, f.e)
+	}
 }
 func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) {
 	ee := e.e

@@ -3167,8 +3192,39 @@ func (_ fastpathT) EncSliceStringV(v []string, checkNil bool, e *Encoder) {
 	}
 }

+func (_ fastpathT) EncAsMapSliceStringV(v []string, checkNil bool, e *Encoder) {
+	ee := e.e
+	cr := e.cr
+	if checkNil && v == nil {
+		ee.EncodeNil()
+		return
+	}
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.EncodeMapStart(len(v) / 2)
+	for j, v2 := range v {
+		if cr != nil {
+			if j%2 == 0 {
+				cr.sendContainerState(containerMapKey)
+			} else {
+				cr.sendContainerState(containerMapValue)
+			}
+		}
+		ee.EncodeString(c_UTF8, v2)
+	}
+	if cr != nil {
+		cr.sendContainerState(containerMapEnd)
+	}
+}
+
 func (f *encFnInfo) fastpathEncSliceFloat32R(rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e)
+	} else {
 		fastpathTV.EncSliceFloat32V(rv.Interface().([]float32), fastpathCheckNilFalse, f.e)
+	}
 }
 func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) {
 	ee := e.e

@@ -3189,8 +3245,39 @@ func (_ fastpathT) EncSliceFloat32V(v []float32, checkNil bool, e *Encoder) {
 	}
 }

+func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, checkNil bool, e *Encoder) {
+	ee := e.e
+	cr := e.cr
+	if checkNil && v == nil {
+		ee.EncodeNil()
+		return
+	}
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.EncodeMapStart(len(v) / 2)
+	for j, v2 := range v {
+		if cr != nil {
+			if j%2 == 0 {
+				cr.sendContainerState(containerMapKey)
+			} else {
+				cr.sendContainerState(containerMapValue)
+			}
+		}
+		ee.EncodeFloat32(v2)
+	}
+	if cr != nil {
+		cr.sendContainerState(containerMapEnd)
+	}
+}
+
 func (f *encFnInfo) fastpathEncSliceFloat64R(rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e)
+	} else {
 		fastpathTV.EncSliceFloat64V(rv.Interface().([]float64), fastpathCheckNilFalse, f.e)
+	}
 }
 func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) {
 	ee := e.e

@@ -3211,8 +3298,39 @@ func (_ fastpathT) EncSliceFloat64V(v []float64, checkNil bool, e *Encoder) {
 	}
 }

+func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, checkNil bool, e *Encoder) {
+	ee := e.e
+	cr := e.cr
+	if checkNil && v == nil {
+		ee.EncodeNil()
+		return
+	}
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.EncodeMapStart(len(v) / 2)
+	for j, v2 := range v {
+		if cr != nil {
+			if j%2 == 0 {
+				cr.sendContainerState(containerMapKey)
+			} else {
+				cr.sendContainerState(containerMapValue)
+			}
+		}
+		ee.EncodeFloat64(v2)
+	}
+	if cr != nil {
+		cr.sendContainerState(containerMapEnd)
+	}
+}
+
 func (f *encFnInfo) fastpathEncSliceUintR(rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e)
+	} else {
 		fastpathTV.EncSliceUintV(rv.Interface().([]uint), fastpathCheckNilFalse, f.e)
+	}
 }
 func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) {
 	ee := e.e

@@ -3233,8 +3351,39 @@ func (_ fastpathT) EncSliceUintV(v []uint, checkNil bool, e *Encoder) {
 	}
 }

+func (_ fastpathT) EncAsMapSliceUintV(v []uint, checkNil bool, e *Encoder) {
+	ee := e.e
+	cr := e.cr
+	if checkNil && v == nil {
+		ee.EncodeNil()
+		return
+	}
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.EncodeMapStart(len(v) / 2)
+	for j, v2 := range v {
+		if cr != nil {
+			if j%2 == 0 {
+				cr.sendContainerState(containerMapKey)
+			} else {
+				cr.sendContainerState(containerMapValue)
+			}
+		}
+		ee.EncodeUint(uint64(v2))
+	}
+	if cr != nil {
+		cr.sendContainerState(containerMapEnd)
+	}
+}
+
 func (f *encFnInfo) fastpathEncSliceUint16R(rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e)
+	} else {
 		fastpathTV.EncSliceUint16V(rv.Interface().([]uint16), fastpathCheckNilFalse, f.e)
+	}
 }
 func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) {
 	ee := e.e

@@ -3255,8 +3404,39 @@ func (_ fastpathT) EncSliceUint16V(v []uint16, checkNil bool, e *Encoder) {
 	}
 }

+func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, checkNil bool, e *Encoder) {
+	ee := e.e
+	cr := e.cr
+	if checkNil && v == nil {
+		ee.EncodeNil()
+		return
+	}
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.EncodeMapStart(len(v) / 2)
+	for j, v2 := range v {
+		if cr != nil {
+			if j%2 == 0 {
+				cr.sendContainerState(containerMapKey)
+			} else {
+				cr.sendContainerState(containerMapValue)
+			}
+		}
+		ee.EncodeUint(uint64(v2))
+	}
+	if cr != nil {
+		cr.sendContainerState(containerMapEnd)
+	}
+}
+
 func (f *encFnInfo) fastpathEncSliceUint32R(rv reflect.Value) {
+	if f.ti.mbs {
+		fastpathTV.EncAsMapSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e)
+	} else {
 		fastpathTV.EncSliceUint32V(rv.Interface().([]uint32), fastpathCheckNilFalse, f.e)
+	}
 }
 func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) {
 	ee := e.e

@@ -3277,8 +3457,39 @@ func (_ fastpathT) EncSliceUint32V(v []uint32, checkNil bool, e *Encoder) {
 	}
 }

+func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, checkNil bool, e *Encoder) {
+	ee := e.e
+	cr := e.cr
+	if checkNil && v == nil {
+		ee.EncodeNil()
+		return
+	}
+	if len(v)%2 == 1 {
+		e.errorf("mapBySlice requires even slice length, but got %v", len(v))
+		return
+	}
+	ee.EncodeMapStart(len(v) / 2)
+	for j, v2 := range v {
+		if cr != nil {
+			if j%2 == 0 {
+				cr.sendContainerState(containerMapKey)
+			} else {
+				cr.sendContainerState(containerMapValue)
+			}
+		}
+		ee.EncodeUint(uint64(v2))
+	}
+	if cr != nil {
+		cr.sendContainerState(containerMapEnd)
+	}
+}
+
 func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) {
|
func (f *encFnInfo) fastpathEncSliceUint64R(rv reflect.Value) {
|
||||||
|
if f.ti.mbs {
|
||||||
|
fastpathTV.EncAsMapSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e)
|
||||||
|
} else {
|
||||||
fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e)
|
fastpathTV.EncSliceUint64V(rv.Interface().([]uint64), fastpathCheckNilFalse, f.e)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
|
func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
|
||||||
ee := e.e
|
ee := e.e
|
||||||
|
|
@ -3299,8 +3510,39 @@ func (_ fastpathT) EncSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, checkNil bool, e *Encoder) {
|
||||||
|
ee := e.e
|
||||||
|
cr := e.cr
|
||||||
|
if checkNil && v == nil {
|
||||||
|
ee.EncodeNil()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(v)%2 == 1 {
|
||||||
|
e.errorf("mapBySlice requires even slice length, but got %v", len(v))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ee.EncodeMapStart(len(v) / 2)
|
||||||
|
for j, v2 := range v {
|
||||||
|
if cr != nil {
|
||||||
|
if j%2 == 0 {
|
||||||
|
cr.sendContainerState(containerMapKey)
|
||||||
|
} else {
|
||||||
|
cr.sendContainerState(containerMapValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ee.EncodeUint(uint64(v2))
|
||||||
|
}
|
||||||
|
if cr != nil {
|
||||||
|
cr.sendContainerState(containerMapEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) {
|
func (f *encFnInfo) fastpathEncSliceUintptrR(rv reflect.Value) {
|
||||||
|
if f.ti.mbs {
|
||||||
|
fastpathTV.EncAsMapSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e)
|
||||||
|
} else {
|
||||||
fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e)
|
fastpathTV.EncSliceUintptrV(rv.Interface().([]uintptr), fastpathCheckNilFalse, f.e)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
|
func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
|
||||||
ee := e.e
|
ee := e.e
|
||||||
|
|
@ -3321,8 +3563,39 @@ func (_ fastpathT) EncSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, checkNil bool, e *Encoder) {
|
||||||
|
ee := e.e
|
||||||
|
cr := e.cr
|
||||||
|
if checkNil && v == nil {
|
||||||
|
ee.EncodeNil()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(v)%2 == 1 {
|
||||||
|
e.errorf("mapBySlice requires even slice length, but got %v", len(v))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ee.EncodeMapStart(len(v) / 2)
|
||||||
|
for j, v2 := range v {
|
||||||
|
if cr != nil {
|
||||||
|
if j%2 == 0 {
|
||||||
|
cr.sendContainerState(containerMapKey)
|
||||||
|
} else {
|
||||||
|
cr.sendContainerState(containerMapValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
e.encode(v2)
|
||||||
|
}
|
||||||
|
if cr != nil {
|
||||||
|
cr.sendContainerState(containerMapEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) {
|
func (f *encFnInfo) fastpathEncSliceIntR(rv reflect.Value) {
|
||||||
|
if f.ti.mbs {
|
||||||
|
fastpathTV.EncAsMapSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e)
|
||||||
|
} else {
|
||||||
fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e)
|
fastpathTV.EncSliceIntV(rv.Interface().([]int), fastpathCheckNilFalse, f.e)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) {
|
func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) {
|
||||||
ee := e.e
|
ee := e.e
|
||||||
|
|
@ -3343,8 +3616,39 @@ func (_ fastpathT) EncSliceIntV(v []int, checkNil bool, e *Encoder) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (_ fastpathT) EncAsMapSliceIntV(v []int, checkNil bool, e *Encoder) {
|
||||||
|
ee := e.e
|
||||||
|
cr := e.cr
|
||||||
|
if checkNil && v == nil {
|
||||||
|
ee.EncodeNil()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(v)%2 == 1 {
|
||||||
|
e.errorf("mapBySlice requires even slice length, but got %v", len(v))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ee.EncodeMapStart(len(v) / 2)
|
||||||
|
for j, v2 := range v {
|
||||||
|
if cr != nil {
|
||||||
|
if j%2 == 0 {
|
||||||
|
cr.sendContainerState(containerMapKey)
|
||||||
|
} else {
|
||||||
|
cr.sendContainerState(containerMapValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ee.EncodeInt(int64(v2))
|
||||||
|
}
|
||||||
|
if cr != nil {
|
||||||
|
cr.sendContainerState(containerMapEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) {
|
func (f *encFnInfo) fastpathEncSliceInt8R(rv reflect.Value) {
|
||||||
|
if f.ti.mbs {
|
||||||
|
fastpathTV.EncAsMapSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e)
|
||||||
|
} else {
|
||||||
fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e)
|
fastpathTV.EncSliceInt8V(rv.Interface().([]int8), fastpathCheckNilFalse, f.e)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) {
|
func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) {
|
||||||
ee := e.e
|
ee := e.e
|
||||||
|
|
@ -3365,8 +3669,39 @@ func (_ fastpathT) EncSliceInt8V(v []int8, checkNil bool, e *Encoder) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (_ fastpathT) EncAsMapSliceInt8V(v []int8, checkNil bool, e *Encoder) {
|
||||||
|
ee := e.e
|
||||||
|
cr := e.cr
|
||||||
|
if checkNil && v == nil {
|
||||||
|
ee.EncodeNil()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(v)%2 == 1 {
|
||||||
|
e.errorf("mapBySlice requires even slice length, but got %v", len(v))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ee.EncodeMapStart(len(v) / 2)
|
||||||
|
for j, v2 := range v {
|
||||||
|
if cr != nil {
|
||||||
|
if j%2 == 0 {
|
||||||
|
cr.sendContainerState(containerMapKey)
|
||||||
|
} else {
|
||||||
|
cr.sendContainerState(containerMapValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ee.EncodeInt(int64(v2))
|
||||||
|
}
|
||||||
|
if cr != nil {
|
||||||
|
cr.sendContainerState(containerMapEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) {
|
func (f *encFnInfo) fastpathEncSliceInt16R(rv reflect.Value) {
|
||||||
|
if f.ti.mbs {
|
||||||
|
fastpathTV.EncAsMapSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e)
|
||||||
|
} else {
|
||||||
fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e)
|
fastpathTV.EncSliceInt16V(rv.Interface().([]int16), fastpathCheckNilFalse, f.e)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) {
|
func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) {
|
||||||
ee := e.e
|
ee := e.e
|
||||||
|
|
@ -3387,8 +3722,39 @@ func (_ fastpathT) EncSliceInt16V(v []int16, checkNil bool, e *Encoder) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (_ fastpathT) EncAsMapSliceInt16V(v []int16, checkNil bool, e *Encoder) {
|
||||||
|
ee := e.e
|
||||||
|
cr := e.cr
|
||||||
|
if checkNil && v == nil {
|
||||||
|
ee.EncodeNil()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(v)%2 == 1 {
|
||||||
|
e.errorf("mapBySlice requires even slice length, but got %v", len(v))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ee.EncodeMapStart(len(v) / 2)
|
||||||
|
for j, v2 := range v {
|
||||||
|
if cr != nil {
|
||||||
|
if j%2 == 0 {
|
||||||
|
cr.sendContainerState(containerMapKey)
|
||||||
|
} else {
|
||||||
|
cr.sendContainerState(containerMapValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ee.EncodeInt(int64(v2))
|
||||||
|
}
|
||||||
|
if cr != nil {
|
||||||
|
cr.sendContainerState(containerMapEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) {
|
func (f *encFnInfo) fastpathEncSliceInt32R(rv reflect.Value) {
|
||||||
|
if f.ti.mbs {
|
||||||
|
fastpathTV.EncAsMapSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e)
|
||||||
|
} else {
|
||||||
fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e)
|
fastpathTV.EncSliceInt32V(rv.Interface().([]int32), fastpathCheckNilFalse, f.e)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) {
|
func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) {
|
||||||
ee := e.e
|
ee := e.e
|
||||||
|
|
@ -3409,8 +3775,39 @@ func (_ fastpathT) EncSliceInt32V(v []int32, checkNil bool, e *Encoder) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (_ fastpathT) EncAsMapSliceInt32V(v []int32, checkNil bool, e *Encoder) {
|
||||||
|
ee := e.e
|
||||||
|
cr := e.cr
|
||||||
|
if checkNil && v == nil {
|
||||||
|
ee.EncodeNil()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(v)%2 == 1 {
|
||||||
|
e.errorf("mapBySlice requires even slice length, but got %v", len(v))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ee.EncodeMapStart(len(v) / 2)
|
||||||
|
for j, v2 := range v {
|
||||||
|
if cr != nil {
|
||||||
|
if j%2 == 0 {
|
||||||
|
cr.sendContainerState(containerMapKey)
|
||||||
|
} else {
|
||||||
|
cr.sendContainerState(containerMapValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ee.EncodeInt(int64(v2))
|
||||||
|
}
|
||||||
|
if cr != nil {
|
||||||
|
cr.sendContainerState(containerMapEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) {
|
func (f *encFnInfo) fastpathEncSliceInt64R(rv reflect.Value) {
|
||||||
|
if f.ti.mbs {
|
||||||
|
fastpathTV.EncAsMapSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e)
|
||||||
|
} else {
|
||||||
fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e)
|
fastpathTV.EncSliceInt64V(rv.Interface().([]int64), fastpathCheckNilFalse, f.e)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) {
|
func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) {
|
||||||
ee := e.e
|
ee := e.e
|
||||||
|
|
@ -3431,8 +3828,39 @@ func (_ fastpathT) EncSliceInt64V(v []int64, checkNil bool, e *Encoder) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (_ fastpathT) EncAsMapSliceInt64V(v []int64, checkNil bool, e *Encoder) {
|
||||||
|
ee := e.e
|
||||||
|
cr := e.cr
|
||||||
|
if checkNil && v == nil {
|
||||||
|
ee.EncodeNil()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(v)%2 == 1 {
|
||||||
|
e.errorf("mapBySlice requires even slice length, but got %v", len(v))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ee.EncodeMapStart(len(v) / 2)
|
||||||
|
for j, v2 := range v {
|
||||||
|
if cr != nil {
|
||||||
|
if j%2 == 0 {
|
||||||
|
cr.sendContainerState(containerMapKey)
|
||||||
|
} else {
|
||||||
|
cr.sendContainerState(containerMapValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ee.EncodeInt(int64(v2))
|
||||||
|
}
|
||||||
|
if cr != nil {
|
||||||
|
cr.sendContainerState(containerMapEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) {
|
func (f *encFnInfo) fastpathEncSliceBoolR(rv reflect.Value) {
|
||||||
|
if f.ti.mbs {
|
||||||
|
fastpathTV.EncAsMapSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e)
|
||||||
|
} else {
|
||||||
fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e)
|
fastpathTV.EncSliceBoolV(rv.Interface().([]bool), fastpathCheckNilFalse, f.e)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) {
|
func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) {
|
||||||
ee := e.e
|
ee := e.e
|
||||||
|
|
@ -3453,6 +3881,33 @@ func (_ fastpathT) EncSliceBoolV(v []bool, checkNil bool, e *Encoder) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (_ fastpathT) EncAsMapSliceBoolV(v []bool, checkNil bool, e *Encoder) {
|
||||||
|
ee := e.e
|
||||||
|
cr := e.cr
|
||||||
|
if checkNil && v == nil {
|
||||||
|
ee.EncodeNil()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(v)%2 == 1 {
|
||||||
|
e.errorf("mapBySlice requires even slice length, but got %v", len(v))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ee.EncodeMapStart(len(v) / 2)
|
||||||
|
for j, v2 := range v {
|
||||||
|
if cr != nil {
|
||||||
|
if j%2 == 0 {
|
||||||
|
cr.sendContainerState(containerMapKey)
|
||||||
|
} else {
|
||||||
|
cr.sendContainerState(containerMapValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ee.EncodeBool(v2)
|
||||||
|
}
|
||||||
|
if cr != nil {
|
||||||
|
cr.sendContainerState(containerMapEnd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) {
|
func (f *encFnInfo) fastpathEncMapIntfIntfR(rv reflect.Value) {
|
||||||
fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e)
|
fastpathTV.EncMapIntfIntfV(rv.Interface().(map[interface{}]interface{}), fastpathCheckNilFalse, f.e)
|
||||||
}
|
}
|
||||||
|
|
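The EncAsMapSlice*V functions added above back the codec's mapBySlice feature: a slice holding alternating keys and values is written out as a map of len(v)/2 pairs, which is why odd lengths are rejected. A minimal sketch of how that path is normally reached through the public API, assuming the exported codec.MapBySlice interface and JSON handle from github.com/ugorji/go/codec (illustrative only, not part of this commit):

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// KVPairs implements codec.MapBySlice, so its alternating elements are
// encoded as key/value pairs of a map rather than as an array.
type KVPairs []interface{}

func (KVPairs) MapBySlice() {}

func main() {
	var out []byte
	enc := codec.NewEncoderBytes(&out, new(codec.JsonHandle))
	if err := enc.Encode(KVPairs{"a", 1, "b", 2}); err != nil {
		// an odd-length slice would fail here with the
		// "mapBySlice requires even slice length" error seen above
		panic(err)
	}
	fmt.Println(string(out)) // expected to print something like {"a":1,"b":2}
}
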
@@ -15489,9 +15944,6 @@ func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, checkNil bool, e *Encoder) {

// -- -- fast path type switch
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool {
if !fastpathEnabled {
return false
}
switch v := iv.(type) {

case []interface{}:

@@ -17712,7 +18164,7 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool,
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -17771,7 +18223,7 @@ func (_ fastpathT) DecSliceIntfV(v []interface{}, checkNil bool, canChange bool,
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]interface{}, 1, 4)

@@ -17846,7 +18298,7 @@ func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -17905,7 +18357,7 @@ func (_ fastpathT) DecSliceStringV(v []string, checkNil bool, canChange bool, d
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]string, 1, 4)

@@ -17979,7 +18431,7 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool,
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -18038,7 +18490,7 @@ func (_ fastpathT) DecSliceFloat32V(v []float32, checkNil bool, canChange bool,
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]float32, 1, 4)

@@ -18112,7 +18564,7 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool,
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -18171,7 +18623,7 @@ func (_ fastpathT) DecSliceFloat64V(v []float64, checkNil bool, canChange bool,
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]float64, 1, 4)

@@ -18245,7 +18697,7 @@ func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Dec
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -18304,7 +18756,7 @@ func (_ fastpathT) DecSliceUintV(v []uint, checkNil bool, canChange bool, d *Dec
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]uint, 1, 4)

@@ -18378,7 +18830,7 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -18437,7 +18889,7 @@ func (_ fastpathT) DecSliceUint16V(v []uint16, checkNil bool, canChange bool, d
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]uint16, 1, 4)

@@ -18511,7 +18963,7 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -18570,7 +19022,7 @@ func (_ fastpathT) DecSliceUint32V(v []uint32, checkNil bool, canChange bool, d
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]uint32, 1, 4)

@@ -18644,7 +19096,7 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -18703,7 +19155,7 @@ func (_ fastpathT) DecSliceUint64V(v []uint64, checkNil bool, canChange bool, d
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]uint64, 1, 4)

@@ -18777,7 +19229,7 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool,
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -18836,7 +19288,7 @@ func (_ fastpathT) DecSliceUintptrV(v []uintptr, checkNil bool, canChange bool,
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]uintptr, 1, 4)

@@ -18910,7 +19362,7 @@ func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decod
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -18969,7 +19421,7 @@ func (_ fastpathT) DecSliceIntV(v []int, checkNil bool, canChange bool, d *Decod
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]int, 1, 4)

@@ -19043,7 +19495,7 @@ func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Dec
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -19102,7 +19554,7 @@ func (_ fastpathT) DecSliceInt8V(v []int8, checkNil bool, canChange bool, d *Dec
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]int8, 1, 4)

@@ -19176,7 +19628,7 @@ func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *D
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -19235,7 +19687,7 @@ func (_ fastpathT) DecSliceInt16V(v []int16, checkNil bool, canChange bool, d *D
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]int16, 1, 4)

@@ -19309,7 +19761,7 @@ func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *D
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -19368,7 +19820,7 @@ func (_ fastpathT) DecSliceInt32V(v []int32, checkNil bool, canChange bool, d *D
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]int32, 1, 4)

@@ -19442,7 +19894,7 @@ func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *D
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -19501,7 +19953,7 @@ func (_ fastpathT) DecSliceInt64V(v []int64, checkNil bool, canChange bool, d *D
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]int64, 1, 4)

@@ -19575,7 +20027,7 @@ func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Dec
changed = true
}
slh.End()
return
return v, changed
}

if containerLenS > 0 {

@@ -19634,7 +20086,7 @@ func (_ fastpathT) DecSliceBoolV(v []bool, checkNil bool, canChange bool, d *Dec
changed = true
}
slh.End()
return
return v, changed
}
if cap(v) == 0 {
v = make([]bool, 1, 4)

@@ -4,6 +4,8 @@ package codec

import "reflect"

const fastpathEnabled = false

// The generated fast-path code is very large, and adds a few seconds to the build time.
// This causes test execution, execution of small tools which use codec, etc
// to take a long time.

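The stub above pins fastpathEnabled to an untyped constant false, which pairs with the removal of the runtime "if !fastpathEnabled" guard earlier in this diff: once the flag is a compile-time constant, guarded fast-path branches are discarded by the compiler in the no-fastpath build. A small self-contained illustration of that pattern (toy names, not the library's code):

package main

import "fmt"

const fastpathEnabled = false // as in the stub file: a compile-time switch

func encode(v interface{}) string {
	if fastpathEnabled {
		return "fast path" // dead code in this build; the compiler drops it
	}
	return fmt.Sprintf("reflection path: %v", v)
}

func main() {
	fmt.Println(encode([]int{1, 2, 3}))
}
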
@@ -1,4 +1,4 @@
// //+build ignore
// // +build ignore

// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

@@ -17,7 +17,7 @@ import (

// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continously and without notice.
// library users. They WILL change continuously and without notice.
//
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).

@@ -83,6 +83,11 @@ func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
f.e.marshal(bs, fnerr, false, c_RAW)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) {
f.e.raw(iv)
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
if _, ok := f.e.hh.(*BincHandle); ok {

@@ -191,6 +196,11 @@ func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
}
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte {
return f.d.raw()
}

// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
if _, ok := f.d.hh.(*BincHandle); ok {

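EncRaw and DecRaw above are thin codecgen-facing shims over the encoder and decoder raw hooks. For context, a hedged sketch of what the underlying Raw type is used for in the public API (pre-encoded bytes spliced into the stream verbatim); the Handle.Raw opt-in shown here exists in current releases of github.com/ugorji/go/codec and may not be needed in every version:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	jh := new(codec.JsonHandle)
	jh.Raw = true // some versions require opting in before Raw values may be encoded

	// pre is already valid JSON; the encoder writes it through unchanged.
	pre := codec.Raw(`{"already":"encoded"}`)

	var out []byte
	enc := codec.NewEncoderBytes(&out, jh)
	if err := enc.Encode(pre); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // expected: {"already":"encoded"}
}
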
@@ -68,8 +68,9 @@ z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})

const genDecListTmpl = `
{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
var {{var "c"}} bool {{/* // changed */}}
_ = {{var "c"}}{{end}}
if {{var "l"}} == 0 {
{{if isSlice }}if {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}

@@ -95,6 +96,8 @@ if {{var "l"}} == 0 {
}
{{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
var {{var "rt"}} bool {{/* truncated */}}
_, _ = {{var "rl"}}, {{var "rt"}}
{{var "rr"}} = {{var "l"}} // len({{var "v"}})
if {{var "l"}} > cap({{var "v"}}) {
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
{{ else }}{{if not .Immutable }}

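The template edits above add throwaway assignments such as "_ = ..." for locals that a given expansion may never read. Presumably the motivation is Go's refusal to compile unused variables in generated code; a tiny standalone example of the idiom:

package main

import "fmt"

func main() {
	// A generated expansion may declare helpers it never ends up using.
	// Go rejects unused locals, so the template now emits a blank-identifier
	// assignment that is harmless whether or not the variable is read later.
	var changed bool
	var truncated bool
	_, _ = changed, truncated

	fmt.Println("compiles either way")
}
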
@@ -12,7 +12,6 @@ import (
"io"
"io/ioutil"
"math/rand"
"os"
"reflect"
"regexp"
"sort"

@@ -21,11 +20,14 @@ import (
"sync"
"text/template"
"time"
"unicode"
"unicode/utf8"
)

// ---------------------------------------------------
// codecgen supports the full cycle of reflection-based codec:
// - RawExt
// - Raw
// - Builtins
// - Extensions
// - (Binary|Text|JSON)(Unm|M)arshal

@@ -76,7 +78,7 @@ import (
// codecgen will panic if the file was generated with an old version of the library in use.
//
// Note:
// It was a concious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
// This way, there isn't a function call overhead just to see that we should not enter a block of code.

// GenVersion is the current version of codecgen.

@@ -124,6 +126,7 @@ var (
genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice")
genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
genCheckVendor bool
)

// genRunner holds some state used during a Gen run.

@@ -162,6 +165,16 @@ type genRunner struct {
//
// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.*
func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) {
// trim out all types which already implement Selfer
typ2 := make([]reflect.Type, 0, len(typ))
for _, t := range typ {
if reflect.PtrTo(t).Implements(selferTyp) || t.Implements(selferTyp) {
continue
}
typ2 = append(typ2, t)
}
typ = typ2

if len(typ) == 0 {
return
}

@@ -199,7 +212,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
x.genRefPkgs(t)
}
if buildTags != "" {
x.line("//+build " + buildTags)
x.line("// +build " + buildTags)
x.line("")
}
x.line(`

@@ -266,6 +279,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
x.line("type " + x.hn + " struct{}")
x.line("")

x.varsfxreset()
x.line("func init() {")
x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion)
x.line("_, file, _, _ := runtime.Caller(0)")

@@ -309,6 +323,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
for _, t := range x.ts {
rtid := reflect.ValueOf(t).Pointer()
// generate enc functions for all these slice/map types.
x.varsfxreset()
x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx)
x.genRequiredMethodVars(true)
switch t.Kind() {

@@ -323,6 +338,7 @@ func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeIn
x.line("")

// generate dec functions for all these slice/map types.
x.varsfxreset()
x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx)
x.genRequiredMethodVars(false)
switch t.Kind() {

@@ -377,7 +393,7 @@ func (x *genRunner) genRefPkgs(t reflect.Type) {
x.imn[tpkg] = tpkg
} else {
x.imc++
x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + tpkg[idx+1:]
x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false)
}
}
}

@@ -408,6 +424,10 @@ func (x *genRunner) varsfx() string {
return strconv.FormatUint(x.c, 10)
}

func (x *genRunner) varsfxreset() {
x.c = 0
}

func (x *genRunner) out(s string) {
if _, err := io.WriteString(x.w, s); err != nil {
panic(err)

@@ -494,6 +514,7 @@ func (x *genRunner) selfer(encode bool) {
// always make decode use a pointer receiver,
// and structs always use a ptr receiver (encode|decode)
isptr := !encode || t.Kind() == reflect.Struct
x.varsfxreset()
fnSigPfx := "func (x "
if isptr {
fnSigPfx += "*"

@@ -566,9 +587,28 @@ func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) {
} else {
x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname)
}
if _, ok := x.tm[t]; !ok {
x.registerXtraT(t)
}

func (x *genRunner) registerXtraT(t reflect.Type) {
// recursively register the types
if _, ok := x.tm[t]; ok {
return
}
var tkey reflect.Type
switch t.Kind() {
case reflect.Chan, reflect.Slice, reflect.Array:
case reflect.Map:
tkey = t.Key()
default:
return
}
x.tm[t] = struct{}{}
x.ts = append(x.ts, t)
// check if this refers to any xtra types eg. a slice of array: add the array
x.registerXtraT(t.Elem())
if tkey != nil {
x.registerXtraT(tkey)
}
}
}

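The new filtering at the top of Gen drops any requested type whose value or pointer already satisfies Selfer, so hand-written CodecEncodeSelf/CodecDecodeSelf methods are never shadowed by generated ones. A hedged sketch of such a type against the public codec API (the type and field names here are made up for illustration):

package main

import "github.com/ugorji/go/codec"

// handRolled already implements codec.Selfer, so the trimmed Gen above
// would skip it instead of generating a second encoder/decoder for it.
type handRolled struct{ N int }

func (h *handRolled) CodecEncodeSelf(e *codec.Encoder) { e.MustEncode(h.N) }
func (h *handRolled) CodecDecodeSelf(d *codec.Decoder) { d.MustDecode(&h.N) }

var _ codec.Selfer = (*handRolled)(nil) // compile-time interface check

func main() {}
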
@@ -608,23 +648,34 @@ func (x *genRunner) encVar(varname string, t reflect.Type) {

}

// enc will encode a variable (varname) of type T,
// enc will encode a variable (varname) of type t,
// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type *T (to prevent copying)
// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying)
func (x *genRunner) enc(varname string, t reflect.Type) {
// varName here must be to a pointer to a struct/array, or to a value directly.
rtid := reflect.ValueOf(t).Pointer()
// We call CodecEncodeSelf if one of the following are honored:
// - the type already implements Selfer, call that
// - the type has a Selfer implementation just created, use that
// - the type is in the list of the ones we will generate for, but it is not currently being generated

mi := x.varsfx()
tptr := reflect.PtrTo(t)
tk := t.Kind()
if x.checkForSelfer(t, varname) {
if t.Implements(selferTyp) || (tptr.Implements(selferTyp) && (tk == reflect.Array || tk == reflect.Struct)) {
if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
if tptr.Implements(selferTyp) || t.Implements(selferTyp) {
x.line(varname + ".CodecEncodeSelf(e)")
return
}
} else { // varname is of type T
if t.Implements(selferTyp) {
x.line(varname + ".CodecEncodeSelf(e)")
return
} else if tptr.Implements(selferTyp) {
x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname)
x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
return
}
}

if _, ok := x.te[rtid]; ok {
x.line(varname + ".CodecEncodeSelf(e)")

@@ -651,14 +702,17 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
}

// check if
// - type is RawExt
// - type is RawExt, Raw
// - the type implements (Text|JSON|Binary)(Unm|M)arshal
mi := x.varsfx()
x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi)
x.linef("_ = %sm%s", genTempVarPfx, mi)
x.line("if false {") //start if block
defer func() { x.line("}") }() //end if block

if t == rawTyp {
x.linef("} else { z.EncRaw(%v)", varname)
return
}
if t == rawExtTyp {
x.linef("} else { r.EncodeRawExt(%v, e)", varname)
return

@@ -676,6 +730,7 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
// first check if extensions are configued, before doing the interface conversion
x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname)
}
if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
}

@@ -684,7 +739,22 @@ func (x *genRunner) enc(varname string, t reflect.Type) {
} else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
}
} else { // varname is of type T
if t.Implements(binaryMarshalerTyp) {
x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
} else if tptr.Implements(binaryMarshalerTyp) {
x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname)
}
if t.Implements(jsonMarshalerTyp) {
x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
} else if tptr.Implements(jsonMarshalerTyp) {
x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname)
} else if t.Implements(textMarshalerTyp) {
x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
} else if tptr.Implements(textMarshalerTyp) {
x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname)
}
}
x.line("} else {")

switch t.Kind() {

@@ -922,6 +992,14 @@ func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
}

func (x *genRunner) encListFallback(varname string, t reflect.Type) {
if t.AssignableTo(uint8SliceTyp) {
x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, []byte(%s))", x.xs, varname)
return
}
if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
x.linef("r.EncodeStringBytes(codecSelferC_RAW%s, ([%v]byte(%s))[:])", x.xs, t.Len(), varname)
return
}
i := x.varsfx()
g := genTempVarPfx
x.line("r.EncodeArrayStart(len(" + varname + "))")

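The branch added to encListFallback above makes the generator emit byte slices and byte arrays as one raw byte-string instead of an element-by-element array. A hedged sketch of the intended effect, shown through the reflection-based encoder of github.com/ugorji/go/codec rather than through generated code:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

// Blob is assignable to []byte, the case the new fast exit targets.
type Blob []byte

func main() {
	var out []byte
	enc := codec.NewEncoderBytes(&out, new(codec.CborHandle))
	if err := enc.Encode(Blob("hi")); err != nil {
		panic(err)
	}
	// Expect a CBOR byte string (42 68 69), not an array of two small ints.
	fmt.Printf("% x\n", out)
}
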
@@ -1020,6 +1098,8 @@ func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
}
}

// dec will decode a variable (varname) of type ptrTo(t).
// t is always a basetype (i.e. not of kind reflect.Ptr).
func (x *genRunner) dec(varname string, t reflect.Type) {
// assumptions:
// - the varname is to a pointer already. No need to take address of it

@@ -1056,7 +1136,7 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
}

// check if
// - type is RawExt
// - type is Raw, RawExt
// - the type implements (Text|JSON|Binary)(Unm|M)arshal
mi := x.varsfx()
x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi)

@@ -1064,6 +1144,10 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
x.line("if false {") //start if block
defer func() { x.line("}") }() //end if block

if t == rawTyp {
x.linef("} else { *%v = z.DecRaw()", varname)
return
}
if t == rawExtTyp {
x.linef("} else { r.DecodeExt(%v, 0, nil)", varname)
return

@@ -1189,59 +1273,49 @@ func (x *genRunner) dec(varname string, t reflect.Type) {
}

func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) {
// We have to use the actual type name when doing a direct assignment.
// This should only be used for exact primitives (ie un-named types).
// We don't have the luxury of casting the pointer to the underlying type.
// Named types may be implementations of Selfer, Unmarshaler, etc.
//
// They should be handled by dec(...)
// Consequently, in the situation of a
// type Message int32
// var x Message
// var i int32 = 32
// x = i // this will bomb
// x = Message(i) // this will work
// *((*int32)(&x)) = i // this will work
//
// Consequently, we replace:
// case reflect.Uint32: x.line(varname + " = uint32(r.DecodeUint(32))")
// with:
// case reflect.Uint32: x.line(varname + " = " + genTypeNamePrim(t, x.tc) + "(r.DecodeUint(32))")

xfn := func(t reflect.Type) string {
if t.Name() != "" {
return x.genTypeNamePrim(t)
tryAsPtr = true
return
}
}

switch t.Kind() {
case reflect.Int:
x.linef("%s = %s(r.DecodeInt(codecSelferBitsize%s))", varname, xfn(t), x.xs)
x.linef("%s = r.DecodeInt(codecSelferBitsize%s)", varname, x.xs)
case reflect.Int8:
x.linef("%s = %s(r.DecodeInt(8))", varname, xfn(t))
x.linef("%s = r.DecodeInt(8)", varname)
case reflect.Int16:
x.linef("%s = %s(r.DecodeInt(16))", varname, xfn(t))
x.linef("%s = r.DecodeInt(16)", varname)
case reflect.Int32:
x.linef("%s = %s(r.DecodeInt(32))", varname, xfn(t))
x.linef("%s = r.DecodeInt(32)", varname)
case reflect.Int64:
x.linef("%s = %s(r.DecodeInt(64))", varname, xfn(t))
x.linef("%s = r.DecodeInt(64)", varname)

case reflect.Uint:
x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs)
x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs)
case reflect.Uint8:
x.linef("%s = %s(r.DecodeUint(8))", varname, xfn(t))
x.linef("%s = r.DecodeUint(8)", varname)
case reflect.Uint16:
x.linef("%s = %s(r.DecodeUint(16))", varname, xfn(t))
x.linef("%s = r.DecodeUint(16)", varname)
case reflect.Uint32:
x.linef("%s = %s(r.DecodeUint(32))", varname, xfn(t))
x.linef("%s = r.DecodeUint(32)", varname)
case reflect.Uint64:
x.linef("%s = %s(r.DecodeUint(64))", varname, xfn(t))
x.linef("%s = r.DecodeUint(64)", varname)
case reflect.Uintptr:
x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs)
x.linef("%s = r.DecodeUint(codecSelferBitsize%s)", varname, x.xs)

case reflect.Float32:
x.linef("%s = %s(r.DecodeFloat(true))", varname, xfn(t))
x.linef("%s = r.DecodeFloat(true)", varname)
case reflect.Float64:
x.linef("%s = %s(r.DecodeFloat(false))", varname, xfn(t))
x.linef("%s = r.DecodeFloat(false)", varname)

case reflect.Bool:
x.linef("%s = %s(r.DecodeBool())", varname, xfn(t))
x.linef("%s = r.DecodeBool()", varname)
case reflect.String:
x.linef("%s = %s(r.DecodeString())", varname, xfn(t))
x.linef("%s = r.DecodeString()", varname)
default:
tryAsPtr = true
}

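The rewritten decTryAssignPrimitive now bails out (tryAsPtr = true) for any named type instead of emitting a cast, leaving named types to dec(...). The removed comment's point, restated as plain runnable Go:

package main

import "fmt"

type Message int32

func main() {
	var x Message
	var i int32 = 32
	// x = i             // compile error: cannot use i (type int32) as type Message
	x = Message(i)       // explicit conversion works
	*((*int32)(&x)) = i  // writing through a converted pointer also works
	fmt.Println(x)
}
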
@@ -1249,6 +1323,14 @@ func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAs
}

func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
if t.AssignableTo(uint8SliceTyp) {
x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false, false)")
return
}
if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 {
x.linef("r.DecodeBytes( ((*[%s]byte)(%s))[:], false, true)", t.Len(), varname)
return
}
type tstruc struct {
TempVar string
Rand string

@@ -1364,7 +1446,7 @@ func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintpt
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
//we must accomodate anonymous fields, where the embedded field is a nil pointer in the value.
//we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname

@@ -1452,7 +1534,7 @@ func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
//we must accomodate anonymous fields, where the embedded field is a nil pointer in the value.
//we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname

@@ -1569,8 +1651,6 @@ func (x *genV) MethodNamePfx(prefix string, prim bool) string {

}

var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"

// genImportPath returns import path of a non-predeclared named typed, or an empty string otherwise.
//
// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,

@@ -1592,6 +1672,26 @@ func genImportPath(t reflect.Type) (s string) {
return
}

// A go identifier is (letter|_)[letter|number|_]*
func genGoIdentifier(s string, checkFirstChar bool) string {
b := make([]byte, 0, len(s))
t := make([]byte, 4)
var n int
for i, r := range s {
if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
b = append(b, '_')
}
// r must be unicode_letter, unicode_digit or _
if unicode.IsLetter(r) || unicode.IsDigit(r) {
n = utf8.EncodeRune(t, r)
b = append(b, t[:n]...)
} else {
b = append(b, '_')
}
}
return string(b)
}

func genNonPtr(t reflect.Type) reflect.Type {
for t.Kind() == reflect.Ptr {
t = t.Elem()

@ -1601,7 +1701,7 @@ func genNonPtr(t reflect.Type) reflect.Type {
|
||||||
|
|
||||||
func genTitleCaseName(s string) string {
|
func genTitleCaseName(s string) string {
|
||||||
switch s {
|
switch s {
|
||||||
case "interface{}":
|
case "interface{}", "interface {}":
|
||||||
return "Intf"
|
return "Intf"
|
||||||
default:
|
default:
|
||||||
return strings.ToUpper(s[0:1]) + s[1:]
|
return strings.ToUpper(s[0:1]) + s[1:]
|
||||||
|
|
@ -1704,7 +1804,7 @@ func (x genInternal) FastpathLen() (l int) {
|
||||||
|
|
||||||
func genInternalZeroValue(s string) string {
|
func genInternalZeroValue(s string) string {
|
||||||
switch s {
|
switch s {
|
||||||
case "interface{}":
|
case "interface{}", "interface {}":
|
||||||
return "nil"
|
return "nil"
|
||||||
case "bool":
|
case "bool":
|
||||||
return "false"
|
return "false"
|
||||||
|
|
@ -1856,7 +1956,7 @@ func genInternalInit() {
|
||||||
}
|
}
|
||||||
var gt genInternal
|
var gt genInternal
|
||||||
|
|
||||||
// For each slice or map type, there must be a (symetrical) Encode and Decode fast-path function
|
// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
|
||||||
for _, s := range types {
|
for _, s := range types {
|
||||||
gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
|
gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
|
||||||
if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.
|
if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,12 @@
|
||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.5,!go1.6
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,12 @@
|
||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.6
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,10 @@
|
||||||
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.7
|
||||||
|
|
||||||
|
package codec
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
genCheckVendor = true
|
||||||
|
}
|
||||||
|
|
@ -38,10 +38,6 @@ package codec
|
||||||
// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
|
// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
|
||||||
// it has to be binary, and we do not even try to read separators.
|
// it has to be binary, and we do not even try to read separators.
|
||||||
//
|
//
|
||||||
// The only codec that may suffer (slightly) is cbor, and only when decoding indefinite-length.
|
|
||||||
// It may suffer because we treat it like a text-based codec, and read separators.
|
|
||||||
// However, this read is a no-op and the cost is insignificant.
|
|
||||||
//
|
|
||||||
// Philosophy
|
// Philosophy
|
||||||
// ------------
|
// ------------
|
||||||
// On decode, this codec will update containers appropriately:
|
// On decode, this codec will update containers appropriately:
|
||||||
|
|
@ -137,17 +133,6 @@ const (
|
||||||
// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
|
// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
|
||||||
recoverPanicToErr = true
|
recoverPanicToErr = true
|
||||||
|
|
||||||
// Fast path functions try to create a fast path encode or decode implementation
|
|
||||||
// for common maps and slices, by by-passing reflection altogether.
|
|
||||||
fastpathEnabled = true
|
|
||||||
|
|
||||||
// if checkStructForEmptyValue, check structs fields to see if an empty value.
|
|
||||||
// This could be an expensive call, so possibly disable it.
|
|
||||||
checkStructForEmptyValue = false
|
|
||||||
|
|
||||||
// if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue
|
|
||||||
derefForIsEmptyValue = false
|
|
||||||
|
|
||||||
// if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
|
// if resetSliceElemToZeroValue, then on decoding a slice, reset the element to a zero value first.
|
||||||
// Only concern is that, if the slice already contained some garbage, we will decode into that garbage.
|
// Only concern is that, if the slice already contained some garbage, we will decode into that garbage.
|
||||||
// The chances of this are slim, so leave this "optimization".
|
// The chances of this are slim, so leave this "optimization".
|
||||||
|
|
@ -155,8 +140,10 @@ const (
|
||||||
resetSliceElemToZeroValue bool = false
|
resetSliceElemToZeroValue bool = false
|
||||||
)
|
)
|
||||||
|
|
||||||
var oneByteArr = [1]byte{0}
|
var (
|
||||||
var zeroByteSlice = oneByteArr[:0:0]
|
oneByteArr = [1]byte{0}
|
||||||
|
zeroByteSlice = oneByteArr[:0:0]
|
||||||
|
)
|
||||||
|
|
||||||
type charEncoding uint8
|
type charEncoding uint8
|
||||||
|
|
||||||
|
|
@ -215,6 +202,41 @@ const (
|
||||||
containerArrayEnd
|
containerArrayEnd
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
|
||||||
|
type sfiIdx struct {
|
||||||
|
name string
|
||||||
|
index int
|
||||||
|
}
|
||||||
|
|
||||||
|
// do not recurse if a containing type refers to an embedded type
|
||||||
|
// which refers back to its containing type (via a pointer).
|
||||||
|
// The second time this back-reference happens, break out,
|
||||||
|
// so as not to cause an infinite loop.
|
||||||
|
const rgetMaxRecursion = 2
|
||||||
|
|
||||||
|
// Anecdotally, we believe most types have <= 12 fields.
|
||||||
|
// Java's PMD rules set TooManyFields threshold to 15.
|
||||||
|
const rgetPoolTArrayLen = 12
|
||||||
|
|
||||||
|
type rgetT struct {
|
||||||
|
fNames []string
|
||||||
|
encNames []string
|
||||||
|
etypes []uintptr
|
||||||
|
sfis []*structFieldInfo
|
||||||
|
}
|
||||||
|
|
||||||
|
type rgetPoolT struct {
|
||||||
|
fNames [rgetPoolTArrayLen]string
|
||||||
|
encNames [rgetPoolTArrayLen]string
|
||||||
|
etypes [rgetPoolTArrayLen]uintptr
|
||||||
|
sfis [rgetPoolTArrayLen]*structFieldInfo
|
||||||
|
sfiidx [rgetPoolTArrayLen]sfiIdx
|
||||||
|
}
|
||||||
|
|
||||||
|
var rgetPool = sync.Pool{
|
||||||
|
New: func() interface{} { return new(rgetPoolT) },
|
||||||
|
}
|
||||||
|
|
||||||
type containerStateRecv interface {
|
type containerStateRecv interface {
|
||||||
sendContainerState(containerState)
|
sendContainerState(containerState)
|
||||||
}
|
}
|
||||||
|
|
@ -240,6 +262,7 @@ var (
|
||||||
stringTyp = reflect.TypeOf("")
|
stringTyp = reflect.TypeOf("")
|
||||||
timeTyp = reflect.TypeOf(time.Time{})
|
timeTyp = reflect.TypeOf(time.Time{})
|
||||||
rawExtTyp = reflect.TypeOf(RawExt{})
|
rawExtTyp = reflect.TypeOf(RawExt{})
|
||||||
|
rawTyp = reflect.TypeOf(Raw{})
|
||||||
uint8SliceTyp = reflect.TypeOf([]uint8(nil))
|
uint8SliceTyp = reflect.TypeOf([]uint8(nil))
|
||||||
|
|
||||||
mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
|
mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
|
||||||
|
|
@ -257,6 +280,7 @@ var (
|
||||||
|
|
||||||
uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
|
uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
|
||||||
rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
|
rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
|
||||||
|
rawTypId = reflect.ValueOf(rawTyp).Pointer()
|
||||||
intfTypId = reflect.ValueOf(intfTyp).Pointer()
|
intfTypId = reflect.ValueOf(intfTyp).Pointer()
|
||||||
timeTypId = reflect.ValueOf(timeTyp).Pointer()
|
timeTypId = reflect.ValueOf(timeTyp).Pointer()
|
||||||
stringTypId = reflect.ValueOf(stringTyp).Pointer()
|
stringTypId = reflect.ValueOf(stringTyp).Pointer()
|
||||||
|
|
@ -337,6 +361,11 @@ type Handle interface {
|
||||||
isBinary() bool
|
isBinary() bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Raw represents raw formatted bytes.
|
||||||
|
// We "blindly" store it during encode and store the raw bytes during decode.
|
||||||
|
// Note: it is dangerous during encode, so we may gate the behaviour behind an Encode flag which must be explicitly set.
|
||||||
|
type Raw []byte
|
||||||
|
|
||||||
// RawExt represents raw unprocessed extension data.
|
// RawExt represents raw unprocessed extension data.
|
||||||
// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag.
|
// Some codecs will decode extension data as a *RawExt if there is no registered extension for the tag.
|
||||||
//
|
//
|
||||||
|
|
@ -347,7 +376,7 @@ type RawExt struct {
|
||||||
// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types
|
// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types
|
||||||
Data []byte
|
Data []byte
|
||||||
// Value represents the extension, if Data is nil.
|
// Value represents the extension, if Data is nil.
|
||||||
// Value is used by codecs (e.g. cbor) which use the format to do custom serialization of the types.
|
// Value is used by codecs (e.g. cbor, json) which use the format to do custom serialization of the types.
|
||||||
Value interface{}
|
Value interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -525,7 +554,7 @@ func (o *extHandle) AddExt(
|
||||||
func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
|
func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
|
||||||
// o is a pointer, because we may need to initialize it
|
// o is a pointer, because we may need to initialize it
|
||||||
if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
|
if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
|
||||||
err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T",
|
err = fmt.Errorf("codec.Handle.AddExt: Takes named type, not a pointer or interface: %T",
|
||||||
reflect.Zero(rt).Interface())
|
reflect.Zero(rt).Interface())
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -569,6 +598,7 @@ func (o extHandle) getExtForTag(tag uint64) *extTypeTagFn {
|
||||||
|
|
||||||
type structFieldInfo struct {
|
type structFieldInfo struct {
|
||||||
encName string // encode name
|
encName string // encode name
|
||||||
|
fieldName string // field name
|
||||||
|
|
||||||
// only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.
|
// only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.
|
||||||
|
|
||||||
|
|
@ -714,6 +744,7 @@ type typeInfo struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ti *typeInfo) indexForEncName(name string) int {
|
func (ti *typeInfo) indexForEncName(name string) int {
|
||||||
|
// NOTE: name may be a stringView, so don't pass it to another function.
|
||||||
//tisfi := ti.sfi
|
//tisfi := ti.sfi
|
||||||
const binarySearchThreshold = 16
|
const binarySearchThreshold = 16
|
||||||
if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
|
if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
|
||||||
|
|
@ -828,19 +859,19 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if rt.Kind() == reflect.Struct {
|
if rt.Kind() == reflect.Struct {
|
||||||
var siInfo *structFieldInfo
|
var omitEmpty bool
|
||||||
if f, ok := rt.FieldByName(structInfoFieldName); ok {
|
if f, ok := rt.FieldByName(structInfoFieldName); ok {
|
||||||
siInfo = parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
|
siInfo := parseStructFieldInfo(structInfoFieldName, x.structTag(f.Tag))
|
||||||
ti.toArray = siInfo.toArray
|
ti.toArray = siInfo.toArray
|
||||||
|
omitEmpty = siInfo.omitEmpty
|
||||||
}
|
}
|
||||||
sfip := make([]*structFieldInfo, 0, rt.NumField())
|
pi := rgetPool.Get()
|
||||||
x.rget(rt, nil, make(map[string]bool, 16), &sfip, siInfo)
|
pv := pi.(*rgetPoolT)
|
||||||
|
pv.etypes[0] = ti.baseId
|
||||||
ti.sfip = make([]*structFieldInfo, len(sfip))
|
vv := rgetT{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
|
||||||
ti.sfi = make([]*structFieldInfo, len(sfip))
|
x.rget(rt, rtid, omitEmpty, nil, &vv)
|
||||||
copy(ti.sfip, sfip)
|
ti.sfip, ti.sfi = rgetResolveSFI(vv.sfis, pv.sfiidx[:0])
|
||||||
sort.Sort(sfiSortedByEncName(sfip))
|
rgetPool.Put(pi)
|
||||||
copy(ti.sfi, sfip)
|
|
||||||
}
|
}
|
||||||
// sfi = sfip
|
// sfi = sfip
|
||||||
|
|
||||||
|
|
@ -853,17 +884,30 @@ func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool,
|
func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
|
||||||
sfi *[]*structFieldInfo, siInfo *structFieldInfo,
|
indexstack []int, pv *rgetT,
|
||||||
) {
|
) {
|
||||||
for j := 0; j < rt.NumField(); j++ {
|
// Read up fields and store how to access the value.
|
||||||
|
//
|
||||||
|
// It uses go's rules for message selectors,
|
||||||
|
// which say that the field with the shallowest depth is selected.
|
||||||
|
//
|
||||||
|
// Note: we consciously use slices, not a map, to simulate a set.
|
||||||
|
// Typically, types have < 16 fields,
|
||||||
|
// and iteration using equals is faster than maps there
|
||||||
|
|
||||||
|
LOOP:
|
||||||
|
for j, jlen := 0, rt.NumField(); j < jlen; j++ {
|
||||||
f := rt.Field(j)
|
f := rt.Field(j)
|
||||||
fkind := f.Type.Kind()
|
fkind := f.Type.Kind()
|
||||||
// skip if a func type, or is unexported, or structTag value == "-"
|
// skip if a func type, or is unexported, or structTag value == "-"
|
||||||
if fkind == reflect.Func {
|
switch fkind {
|
||||||
continue
|
case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
|
||||||
|
continue LOOP
|
||||||
}
|
}
|
||||||
// if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
|
|
||||||
|
// if r1, _ := utf8.DecodeRuneInString(f.Name);
|
||||||
|
// r1 == utf8.RuneError || !unicode.IsUpper(r1) {
|
||||||
if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded
|
if f.PkgPath != "" && !f.Anonymous { // unexported, not embedded
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
@ -872,7 +916,8 @@ func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[st
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
var si *structFieldInfo
|
var si *structFieldInfo
|
||||||
// if anonymous and no struct tag (or it's blank), and a struct (or pointer to struct), inline it.
|
// if anonymous and no struct tag (or it's blank),
|
||||||
|
// and a struct (or pointer to struct), inline it.
|
||||||
if f.Anonymous && fkind != reflect.Interface {
|
if f.Anonymous && fkind != reflect.Interface {
|
||||||
doInline := stag == ""
|
doInline := stag == ""
|
||||||
if !doInline {
|
if !doInline {
|
||||||
|
|
@ -886,11 +931,31 @@ func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[st
|
||||||
ft = ft.Elem()
|
ft = ft.Elem()
|
||||||
}
|
}
|
||||||
if ft.Kind() == reflect.Struct {
|
if ft.Kind() == reflect.Struct {
|
||||||
indexstack2 := make([]int, len(indexstack)+1, len(indexstack)+4)
|
// if etypes contains this, don't call rget again (as fields are already seen here)
|
||||||
|
ftid := reflect.ValueOf(ft).Pointer()
|
||||||
|
// We cannot recurse forever, but we need to track other field depths.
|
||||||
|
// So - we break if we see a type twice (not the first time).
|
||||||
|
// This should be sufficient to handle an embedded type that refers to its
|
||||||
|
// owning type, which then refers to its embedded type.
|
||||||
|
processIt := true
|
||||||
|
numk := 0
|
||||||
|
for _, k := range pv.etypes {
|
||||||
|
if k == ftid {
|
||||||
|
numk++
|
||||||
|
if numk == rgetMaxRecursion {
|
||||||
|
processIt = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if processIt {
|
||||||
|
pv.etypes = append(pv.etypes, ftid)
|
||||||
|
indexstack2 := make([]int, len(indexstack)+1)
|
||||||
copy(indexstack2, indexstack)
|
copy(indexstack2, indexstack)
|
||||||
indexstack2[len(indexstack)] = j
|
indexstack2[len(indexstack)] = j
|
||||||
// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
|
// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
|
||||||
x.rget(ft, indexstack2, fnameToHastag, sfi, siInfo)
|
x.rget(ft, ftid, omitEmpty, indexstack2, pv)
|
||||||
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -901,36 +966,86 @@ func (x *TypeInfos) rget(rt reflect.Type, indexstack []int, fnameToHastag map[st
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// do not let fields with same name in embedded structs override field at higher level.
|
|
||||||
// this must be done after anonymous check, to allow anonymous field
|
|
||||||
// still include their child fields
|
|
||||||
if _, ok := fnameToHastag[f.Name]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if f.Name == "" {
|
if f.Name == "" {
|
||||||
panic(noFieldNameToStructFieldInfoErr)
|
panic(noFieldNameToStructFieldInfoErr)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pv.fNames = append(pv.fNames, f.Name)
|
||||||
|
|
||||||
if si == nil {
|
if si == nil {
|
||||||
si = parseStructFieldInfo(f.Name, stag)
|
si = parseStructFieldInfo(f.Name, stag)
|
||||||
} else if si.encName == "" {
|
} else if si.encName == "" {
|
||||||
si.encName = f.Name
|
si.encName = f.Name
|
||||||
}
|
}
|
||||||
|
si.fieldName = f.Name
|
||||||
|
|
||||||
|
pv.encNames = append(pv.encNames, si.encName)
|
||||||
|
|
||||||
// si.ikind = int(f.Type.Kind())
|
// si.ikind = int(f.Type.Kind())
|
||||||
if len(indexstack) == 0 {
|
if len(indexstack) == 0 {
|
||||||
si.i = int16(j)
|
si.i = int16(j)
|
||||||
} else {
|
} else {
|
||||||
si.i = -1
|
si.i = -1
|
||||||
si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
|
si.is = make([]int, len(indexstack)+1)
|
||||||
|
copy(si.is, indexstack)
|
||||||
|
si.is[len(indexstack)] = j
|
||||||
|
// si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
|
||||||
}
|
}
|
||||||
|
|
||||||
if siInfo != nil {
|
if omitEmpty {
|
||||||
if siInfo.omitEmpty {
|
|
||||||
si.omitEmpty = true
|
si.omitEmpty = true
|
||||||
}
|
}
|
||||||
|
pv.sfis = append(pv.sfis, si)
|
||||||
}
|
}
|
||||||
*sfi = append(*sfi, si)
|
}
|
||||||
fnameToHastag[f.Name] = stag != ""
|
|
||||||
|
// resolves the struct field info obtained from a call to rget.
|
||||||
|
// Returns a trimmed, unsorted and sorted []*structFieldInfo.
|
||||||
|
func rgetResolveSFI(x []*structFieldInfo, pv []sfiIdx) (y, z []*structFieldInfo) {
|
||||||
|
var n int
|
||||||
|
for i, v := range x {
|
||||||
|
xn := v.encName //TODO: fieldName or encName? use encName for now.
|
||||||
|
var found bool
|
||||||
|
for j, k := range pv {
|
||||||
|
if k.name == xn {
|
||||||
|
// one of them must be reset to nil, and the index updated appropriately to the other one
|
||||||
|
if len(v.is) == len(x[k.index].is) {
|
||||||
|
} else if len(v.is) < len(x[k.index].is) {
|
||||||
|
pv[j].index = i
|
||||||
|
if x[k.index] != nil {
|
||||||
|
x[k.index] = nil
|
||||||
|
n++
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
if x[i] != nil {
|
||||||
|
x[i] = nil
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !found {
|
||||||
|
pv = append(pv, sfiIdx{xn, i})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove all the nils
|
||||||
|
y = make([]*structFieldInfo, len(x)-n)
|
||||||
|
n = 0
|
||||||
|
for _, v := range x {
|
||||||
|
if v == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
y[n] = v
|
||||||
|
n++
|
||||||
|
}
|
||||||
|
|
||||||
|
z = make([]*structFieldInfo, len(y))
|
||||||
|
copy(z, y)
|
||||||
|
sort.Sort(sfiSortedByEncName(z))
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
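The rget comments above lean on Go's own selector rules: when the same field name appears at several embedding depths, the shallowest one wins, which is what rgetResolveSFI approximates by comparing len(si.is). A minimal standalone sketch of that language rule, separate from the codec package:

package main

import "fmt"

type Inner struct {
	Name string // promoted field, depth 1
}

type Outer struct {
	Inner
	Name string // depth 0: shadows Inner.Name in plain selectors
}

func main() {
	o := Outer{Inner: Inner{Name: "inner"}, Name: "outer"}
	fmt.Println(o.Name)       // "outer": the shallowest field is selected
	fmt.Println(o.Inner.Name) // "inner": still reachable via the full path
}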
func panicToErr(err *error) {
|
func panicToErr(err *error) {
|
||||||
|
|
@ -1127,3 +1242,73 @@ type bytesISlice []bytesI
|
||||||
func (p bytesISlice) Len() int { return len(p) }
|
func (p bytesISlice) Len() int { return len(p) }
|
||||||
func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
|
func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
|
||||||
func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||||
|
|
||||||
|
// -----------------
|
||||||
|
|
||||||
|
type set []uintptr
|
||||||
|
|
||||||
|
func (s *set) add(v uintptr) (exists bool) {
|
||||||
|
// e.ci is always nil, or len >= 1
|
||||||
|
// defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Add: %v, exists: %v\n", v, exists) }()
|
||||||
|
x := *s
|
||||||
|
if x == nil {
|
||||||
|
x = make([]uintptr, 1, 8)
|
||||||
|
x[0] = v
|
||||||
|
*s = x
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// typically, length will be 1. make this perform.
|
||||||
|
if len(x) == 1 {
|
||||||
|
if j := x[0]; j == 0 {
|
||||||
|
x[0] = v
|
||||||
|
} else if j == v {
|
||||||
|
exists = true
|
||||||
|
} else {
|
||||||
|
x = append(x, v)
|
||||||
|
*s = x
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// check if it exists
|
||||||
|
for _, j := range x {
|
||||||
|
if j == v {
|
||||||
|
exists = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// try to replace a "deleted" slot
|
||||||
|
for i, j := range x {
|
||||||
|
if j == 0 {
|
||||||
|
x[i] = v
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// if unable to replace deleted slot, just append it.
|
||||||
|
x = append(x, v)
|
||||||
|
*s = x
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *set) remove(v uintptr) (exists bool) {
|
||||||
|
// defer func() { fmt.Printf("$$$$$$$$$$$ cirRef Rm: %v, exists: %v\n", v, exists) }()
|
||||||
|
x := *s
|
||||||
|
if len(x) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(x) == 1 {
|
||||||
|
if x[0] == v {
|
||||||
|
x[0] = 0
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for i, j := range x {
|
||||||
|
if j == v {
|
||||||
|
exists = true
|
||||||
|
x[i] = 0 // set it to 0, as way to delete it.
|
||||||
|
// copy(x[i:], x[i+1:])
|
||||||
|
// x = x[:len(x)-1]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
|
||||||
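The new set helper above is a slice of uintptr used to track circular references during encode/decode: membership is a linear scan, and removal writes 0 into the slot instead of shifting elements, since the set rarely holds more than a couple of entries. A simplified, self-contained sketch of the same idea (the names here are illustrative, not the package's API):

package main

import "fmt"

// uintptrSet is a slice-backed set where a stored 0 marks a deleted slot.
// For a handful of entries, linear scans over a slice beat a map.
type uintptrSet []uintptr

func (s *uintptrSet) add(v uintptr) (exists bool) {
	for _, j := range *s {
		if j == v {
			return true
		}
	}
	for i, j := range *s { // reuse a deleted slot if one exists
		if j == 0 {
			(*s)[i] = v
			return false
		}
	}
	*s = append(*s, v)
	return false
}

func (s *uintptrSet) remove(v uintptr) (exists bool) {
	for i, j := range *s {
		if j == v {
			(*s)[i] = 0 // tombstone; avoids shifting the slice
			return true
		}
	}
	return false
}

func main() {
	var s uintptrSet
	fmt.Println(s.add(0x10))    // false: newly added
	fmt.Println(s.add(0x10))    // true: already present
	fmt.Println(s.remove(0x10)) // true: removed, slot becomes a tombstone
	fmt.Println(s.add(0x20))    // false: the tombstone slot is reused
}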
|
|
@ -70,8 +70,8 @@ func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func isEmptyValue(v reflect.Value) bool {
|
func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
|
||||||
return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue)
|
return hIsEmptyValue(v, deref, checkStruct)
|
||||||
}
|
}
|
||||||
|
|
||||||
func pruneSignExt(v []byte, pos bool) (n int) {
|
func pruneSignExt(v []byte, pos bool) (n int) {
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
//+build !unsafe
|
// +build !unsafe
|
||||||
|
|
||||||
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
//+build unsafe
|
// +build unsafe
|
||||||
|
|
||||||
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||||
|
|
@ -16,7 +16,7 @@ type unsafeString struct {
|
||||||
Len int
|
Len int
|
||||||
}
|
}
|
||||||
|
|
||||||
type unsafeBytes struct {
|
type unsafeSlice struct {
|
||||||
Data uintptr
|
Data uintptr
|
||||||
Len int
|
Len int
|
||||||
Cap int
|
Cap int
|
||||||
|
|
@ -29,8 +29,10 @@ func stringView(v []byte) string {
|
||||||
if len(v) == 0 {
|
if len(v) == 0 {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
x := unsafeString{uintptr(unsafe.Pointer(&v[0])), len(v)}
|
|
||||||
return *(*string)(unsafe.Pointer(&x))
|
bx := (*unsafeSlice)(unsafe.Pointer(&v))
|
||||||
|
sx := unsafeString{bx.Data, bx.Len}
|
||||||
|
return *(*string)(unsafe.Pointer(&sx))
|
||||||
}
|
}
|
||||||
|
|
||||||
// bytesView returns a view of the string as a []byte.
|
// bytesView returns a view of the string as a []byte.
|
||||||
|
|
@ -40,6 +42,8 @@ func bytesView(v string) []byte {
|
||||||
if len(v) == 0 {
|
if len(v) == 0 {
|
||||||
return zeroByteSlice
|
return zeroByteSlice
|
||||||
}
|
}
|
||||||
x := unsafeBytes{uintptr(unsafe.Pointer(&v)), len(v), len(v)}
|
|
||||||
return *(*[]byte)(unsafe.Pointer(&x))
|
sx := (*unsafeString)(unsafe.Pointer(&v))
|
||||||
|
bx := unsafeSlice{sx.Data, sx.Len, sx.Len}
|
||||||
|
return *(*[]byte)(unsafe.Pointer(&bx))
|
||||||
}
|
}
|
||||||
|
|
|
||||||
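The helper_unsafe.go change above reinterprets the slice and string headers directly instead of rebuilding them from a raw element pointer. The point of stringView/bytesView is a zero-copy view: the result aliases the original memory, so the caller must not mutate it while the view is live. A hedged standalone sketch of the same trick outside the package (the header structs and names are local to this example):

package main

import (
	"fmt"
	"unsafe"
)

type sliceHeader struct { // mirrors the package's unsafeSlice
	Data uintptr
	Len  int
	Cap  int
}

type stringHeader struct { // mirrors the package's unsafeString
	Data uintptr
	Len  int
}

// stringView returns a string that shares v's backing array (no copy).
func stringView(v []byte) string {
	if len(v) == 0 {
		return ""
	}
	bx := (*sliceHeader)(unsafe.Pointer(&v))
	sx := stringHeader{bx.Data, bx.Len}
	return *(*string)(unsafe.Pointer(&sx))
}

func main() {
	b := []byte("hello")
	s := stringView(b)
	fmt.Println(s) // hello
	b[0] = 'H'
	fmt.Println(s) // Hello: the string aliases the byte slice
}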
|
|
@ -43,18 +43,23 @@ import (
|
||||||
|
|
||||||
//--------------------------------
|
//--------------------------------
|
||||||
|
|
||||||
var jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'}
|
var (
|
||||||
|
jsonLiterals = [...]byte{'t', 'r', 'u', 'e', 'f', 'a', 'l', 's', 'e', 'n', 'u', 'l', 'l'}
|
||||||
|
|
||||||
var jsonFloat64Pow10 = [...]float64{
|
jsonFloat64Pow10 = [...]float64{
|
||||||
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
|
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
|
||||||
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
|
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
|
||||||
1e20, 1e21, 1e22,
|
1e20, 1e21, 1e22,
|
||||||
}
|
}
|
||||||
|
|
||||||
var jsonUint64Pow10 = [...]uint64{
|
jsonUint64Pow10 = [...]uint64{
|
||||||
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
|
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
|
||||||
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
|
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// jsonTabs and jsonSpaces are used as caches for indents
|
||||||
|
jsonTabs, jsonSpaces string
|
||||||
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// jsonUnreadAfterDecNum controls whether we unread after decoding a number.
|
// jsonUnreadAfterDecNum controls whether we unread after decoding a number.
|
||||||
|
|
@ -85,8 +90,23 @@ const (
|
||||||
jsonNumUintMaxVal = 1<<uint64(64) - 1
|
jsonNumUintMaxVal = 1<<uint64(64) - 1
|
||||||
|
|
||||||
// jsonNumDigitsUint64Largest = 19
|
// jsonNumDigitsUint64Largest = 19
|
||||||
|
|
||||||
|
jsonSpacesOrTabsLen = 128
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
var bs [jsonSpacesOrTabsLen]byte
|
||||||
|
for i := 0; i < jsonSpacesOrTabsLen; i++ {
|
||||||
|
bs[i] = ' '
|
||||||
|
}
|
||||||
|
jsonSpaces = string(bs[:])
|
||||||
|
|
||||||
|
for i := 0; i < jsonSpacesOrTabsLen; i++ {
|
||||||
|
bs[i] = '\t'
|
||||||
|
}
|
||||||
|
jsonTabs = string(bs[:])
|
||||||
|
}
|
||||||
|
|
||||||
type jsonEncDriver struct {
|
type jsonEncDriver struct {
|
||||||
e *Encoder
|
e *Encoder
|
||||||
w encWriter
|
w encWriter
|
||||||
|
|
@ -94,30 +114,76 @@ type jsonEncDriver struct {
|
||||||
b [64]byte // scratch
|
b [64]byte // scratch
|
||||||
bs []byte // scratch
|
bs []byte // scratch
|
||||||
se setExtWrapper
|
se setExtWrapper
|
||||||
|
ds string // indent string
|
||||||
|
dl uint16 // indent level
|
||||||
|
dt bool // indent using tabs
|
||||||
|
d bool // indent
|
||||||
c containerState
|
c containerState
|
||||||
noBuiltInTypes
|
noBuiltInTypes
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// indent is done as below:
|
||||||
|
// - newline and indent are added before each mapKey or arrayElem
|
||||||
|
// - newline and indent are added before each ending,
|
||||||
|
// except when there was no entry (so we can have {} or [])
|
||||||
|
|
||||||
func (e *jsonEncDriver) sendContainerState(c containerState) {
|
func (e *jsonEncDriver) sendContainerState(c containerState) {
|
||||||
// determine whether to output separators
|
// determine whether to output separators
|
||||||
if c == containerMapKey {
|
if c == containerMapKey {
|
||||||
if e.c != containerMapStart {
|
if e.c != containerMapStart {
|
||||||
e.w.writen1(',')
|
e.w.writen1(',')
|
||||||
}
|
}
|
||||||
|
if e.d {
|
||||||
|
e.writeIndent()
|
||||||
|
}
|
||||||
} else if c == containerMapValue {
|
} else if c == containerMapValue {
|
||||||
|
if e.d {
|
||||||
|
e.w.writen2(':', ' ')
|
||||||
|
} else {
|
||||||
e.w.writen1(':')
|
e.w.writen1(':')
|
||||||
|
}
|
||||||
} else if c == containerMapEnd {
|
} else if c == containerMapEnd {
|
||||||
|
if e.d {
|
||||||
|
e.dl--
|
||||||
|
if e.c != containerMapStart {
|
||||||
|
e.writeIndent()
|
||||||
|
}
|
||||||
|
}
|
||||||
e.w.writen1('}')
|
e.w.writen1('}')
|
||||||
} else if c == containerArrayElem {
|
} else if c == containerArrayElem {
|
||||||
if e.c != containerArrayStart {
|
if e.c != containerArrayStart {
|
||||||
e.w.writen1(',')
|
e.w.writen1(',')
|
||||||
}
|
}
|
||||||
|
if e.d {
|
||||||
|
e.writeIndent()
|
||||||
|
}
|
||||||
} else if c == containerArrayEnd {
|
} else if c == containerArrayEnd {
|
||||||
|
if e.d {
|
||||||
|
e.dl--
|
||||||
|
if e.c != containerArrayStart {
|
||||||
|
e.writeIndent()
|
||||||
|
}
|
||||||
|
}
|
||||||
e.w.writen1(']')
|
e.w.writen1(']')
|
||||||
}
|
}
|
||||||
e.c = c
|
e.c = c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (e *jsonEncDriver) writeIndent() {
|
||||||
|
e.w.writen1('\n')
|
||||||
|
if x := len(e.ds) * int(e.dl); x <= jsonSpacesOrTabsLen {
|
||||||
|
if e.dt {
|
||||||
|
e.w.writestr(jsonTabs[:x])
|
||||||
|
} else {
|
||||||
|
e.w.writestr(jsonSpaces[:x])
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for i := uint16(0); i < e.dl; i++ {
|
||||||
|
e.w.writestr(e.ds)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (e *jsonEncDriver) EncodeNil() {
|
func (e *jsonEncDriver) EncodeNil() {
|
||||||
e.w.writeb(jsonLiterals[9:13]) // null
|
e.w.writeb(jsonLiterals[9:13]) // null
|
||||||
}
|
}
|
||||||
|
|
@ -131,19 +197,39 @@ func (e *jsonEncDriver) EncodeBool(b bool) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *jsonEncDriver) EncodeFloat32(f float32) {
|
func (e *jsonEncDriver) EncodeFloat32(f float32) {
|
||||||
e.w.writeb(strconv.AppendFloat(e.b[:0], float64(f), 'E', -1, 32))
|
e.encodeFloat(float64(f), 32)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *jsonEncDriver) EncodeFloat64(f float64) {
|
func (e *jsonEncDriver) EncodeFloat64(f float64) {
|
||||||
// e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
|
// e.w.writestr(strconv.FormatFloat(f, 'E', -1, 64))
|
||||||
e.w.writeb(strconv.AppendFloat(e.b[:0], f, 'E', -1, 64))
|
e.encodeFloat(f, 64)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *jsonEncDriver) encodeFloat(f float64, numbits int) {
|
||||||
|
x := strconv.AppendFloat(e.b[:0], f, 'G', -1, numbits)
|
||||||
|
e.w.writeb(x)
|
||||||
|
if bytes.IndexByte(x, 'E') == -1 && bytes.IndexByte(x, '.') == -1 {
|
||||||
|
e.w.writen2('.', '0')
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
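encodeFloat above switches from the 'E' verb to 'G' and then appends ".0" whenever the result contains neither an exponent nor a decimal point, so a whole-number float still reads back as a float rather than an integer. A small sketch of that formatting rule using only the standard library:

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// appendJSONFloat formats f the way jsonEncDriver.encodeFloat does: 'G' format,
// then force a ".0" suffix if the text no longer looks like a float.
func appendJSONFloat(dst []byte, f float64, bits int) []byte {
	x := strconv.AppendFloat(dst, f, 'G', -1, bits)
	if bytes.IndexByte(x, 'E') == -1 && bytes.IndexByte(x, '.') == -1 {
		x = append(x, '.', '0')
	}
	return x
}

func main() {
	fmt.Println(string(appendJSONFloat(nil, 2, 64)))    // 2.0
	fmt.Println(string(appendJSONFloat(nil, 2.5, 64)))  // 2.5
	fmt.Println(string(appendJSONFloat(nil, 1e21, 64))) // 1E+21
}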
func (e *jsonEncDriver) EncodeInt(v int64) {
|
func (e *jsonEncDriver) EncodeInt(v int64) {
|
||||||
|
if x := e.h.IntegerAsString; x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) {
|
||||||
|
e.w.writen1('"')
|
||||||
|
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
|
||||||
|
e.w.writen1('"')
|
||||||
|
return
|
||||||
|
}
|
||||||
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
|
e.w.writeb(strconv.AppendInt(e.b[:0], v, 10))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *jsonEncDriver) EncodeUint(v uint64) {
|
func (e *jsonEncDriver) EncodeUint(v uint64) {
|
||||||
|
if x := e.h.IntegerAsString; x == 'A' || x == 'L' && v > 1<<53 {
|
||||||
|
e.w.writen1('"')
|
||||||
|
e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
|
||||||
|
e.w.writen1('"')
|
||||||
|
return
|
||||||
|
}
|
||||||
e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
|
e.w.writeb(strconv.AppendUint(e.b[:0], v, 10))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -165,11 +251,17 @@ func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *jsonEncDriver) EncodeArrayStart(length int) {
|
func (e *jsonEncDriver) EncodeArrayStart(length int) {
|
||||||
|
if e.d {
|
||||||
|
e.dl++
|
||||||
|
}
|
||||||
e.w.writen1('[')
|
e.w.writen1('[')
|
||||||
e.c = containerArrayStart
|
e.c = containerArrayStart
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *jsonEncDriver) EncodeMapStart(length int) {
|
func (e *jsonEncDriver) EncodeMapStart(length int) {
|
||||||
|
if e.d {
|
||||||
|
e.dl++
|
||||||
|
}
|
||||||
e.w.writen1('{')
|
e.w.writen1('{')
|
||||||
e.c = containerMapStart
|
e.c = containerMapStart
|
||||||
}
|
}
|
||||||
|
|
@ -564,6 +656,11 @@ func (d *jsonDecDriver) decNum(storeBytes bool) {
|
||||||
d.tok = b
|
d.tok = b
|
||||||
}
|
}
|
||||||
b := d.tok
|
b := d.tok
|
||||||
|
var str bool
|
||||||
|
if b == '"' {
|
||||||
|
str = true
|
||||||
|
b = d.r.readn1()
|
||||||
|
}
|
||||||
if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
|
if !(b == '+' || b == '-' || b == '.' || (b >= '0' && b <= '9')) {
|
||||||
d.d.errorf("json: decNum: got first char '%c'", b)
|
d.d.errorf("json: decNum: got first char '%c'", b)
|
||||||
return
|
return
|
||||||
|
|
@ -578,6 +675,10 @@ func (d *jsonDecDriver) decNum(storeBytes bool) {
|
||||||
n.reset()
|
n.reset()
|
||||||
d.bs = d.bs[:0]
|
d.bs = d.bs[:0]
|
||||||
|
|
||||||
|
if str && storeBytes {
|
||||||
|
d.bs = append(d.bs, '"')
|
||||||
|
}
|
||||||
|
|
||||||
// The format of a number is as below:
|
// The format of a number is as below:
|
||||||
// parsing: sign? digit* dot? digit* e? sign? digit*
|
// parsing: sign? digit* dot? digit* e? sign? digit*
|
||||||
// states: 0 1* 2 3* 4 5* 6 7
|
// states: 0 1* 2 3* 4 5* 6 7
|
||||||
|
|
@ -668,6 +769,14 @@ LOOP:
|
||||||
default:
|
default:
|
||||||
break LOOP
|
break LOOP
|
||||||
}
|
}
|
||||||
|
case '"':
|
||||||
|
if str {
|
||||||
|
if storeBytes {
|
||||||
|
d.bs = append(d.bs, '"')
|
||||||
|
}
|
||||||
|
b, eof = r.readn1eof()
|
||||||
|
}
|
||||||
|
break LOOP
|
||||||
default:
|
default:
|
||||||
break LOOP
|
break LOOP
|
||||||
}
|
}
|
||||||
|
|
@ -822,6 +931,11 @@ func (d *jsonDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut [
|
||||||
if isstring {
|
if isstring {
|
||||||
return d.bs
|
return d.bs
|
||||||
}
|
}
|
||||||
|
// if appendStringAsBytes returned a zero-len slice, then treat as nil.
|
||||||
|
// This should only happen for null, and "".
|
||||||
|
if len(d.bs) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
bs0 := d.bs
|
bs0 := d.bs
|
||||||
slen := base64.StdEncoding.DecodedLen(len(bs0))
|
slen := base64.StdEncoding.DecodedLen(len(bs0))
|
||||||
if slen <= cap(bs) {
|
if slen <= cap(bs) {
|
||||||
|
|
@ -859,6 +973,14 @@ func (d *jsonDecDriver) appendStringAsBytes() {
|
||||||
}
|
}
|
||||||
d.tok = b
|
d.tok = b
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// handle null as a string
|
||||||
|
if d.tok == 'n' {
|
||||||
|
d.readStrIdx(10, 13) // ull
|
||||||
|
d.bs = d.bs[:0]
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
if d.tok != '"' {
|
if d.tok != '"' {
|
||||||
d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
|
d.d.errorf("json: expect char '%c' but got char '%c'", '"', d.tok)
|
||||||
}
|
}
|
||||||
|
|
@ -1033,6 +1155,24 @@ type JsonHandle struct {
|
||||||
// RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
|
// RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
|
||||||
// If not configured, raw bytes are encoded to/from base64 text.
|
// If not configured, raw bytes are encoded to/from base64 text.
|
||||||
RawBytesExt InterfaceExt
|
RawBytesExt InterfaceExt
|
||||||
|
|
||||||
|
// Indent indicates how a value is encoded.
|
||||||
|
// - If positive, indent by that number of spaces.
|
||||||
|
// - If negative, indent by that number of tabs.
|
||||||
|
Indent int8
|
||||||
|
|
||||||
|
// IntegerAsString controls how integers (signed and unsigned) are encoded.
|
||||||
|
//
|
||||||
|
// Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
|
||||||
|
// Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
|
||||||
|
// This can be mitigated by configuring how to encode integers.
|
||||||
|
//
|
||||||
|
// IntegerAsString interprets the following values:
|
||||||
|
// - if 'L', then encode integers > 2^53 as a json string.
|
||||||
|
// - if 'A', then encode all integers as a json string
|
||||||
|
// containing the exact integer representation as a decimal.
|
||||||
|
// - else encode all integers as a json number (default)
|
||||||
|
IntegerAsString uint8
|
||||||
}
|
}
|
||||||
|
|
||||||
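The two new JsonHandle fields above combine naturally: Indent controls pretty-printing (spaces if positive, tabs if negative) and IntegerAsString guards against precision loss for integers beyond 2^53. A usage sketch against the upstream github.com/ugorji/go/codec API; the import path and exact output are illustrative, and the vendored copy in this repository lives under a different path:

package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle
	h.Indent = 2            // indent with two spaces; a negative value means tabs
	h.IntegerAsString = 'A' // encode every integer as a quoted decimal string

	var out []byte
	enc := codec.NewEncoderBytes(&out, &h)
	if err := enc.Encode(map[string]uint64{"big": 1 << 60}); err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Expected shape (hedged):
	// {
	//   "big": "1152921504606846976"
	// }
}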
func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
|
func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
|
||||||
|
|
@ -1040,26 +1180,48 @@ func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceE
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *JsonHandle) newEncDriver(e *Encoder) encDriver {
|
func (h *JsonHandle) newEncDriver(e *Encoder) encDriver {
|
||||||
hd := jsonEncDriver{e: e, w: e.w, h: h}
|
hd := jsonEncDriver{e: e, h: h}
|
||||||
hd.bs = hd.b[:0]
|
hd.bs = hd.b[:0]
|
||||||
hd.se.i = h.RawBytesExt
|
|
||||||
|
hd.reset()
|
||||||
|
|
||||||
return &hd
|
return &hd
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
|
func (h *JsonHandle) newDecDriver(d *Decoder) decDriver {
|
||||||
// d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
|
// d := jsonDecDriver{r: r.(*bytesDecReader), h: h}
|
||||||
hd := jsonDecDriver{d: d, r: d.r, h: h}
|
hd := jsonDecDriver{d: d, h: h}
|
||||||
hd.bs = hd.b[:0]
|
hd.bs = hd.b[:0]
|
||||||
hd.se.i = h.RawBytesExt
|
hd.reset()
|
||||||
return &hd
|
return &hd
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *jsonEncDriver) reset() {
|
func (e *jsonEncDriver) reset() {
|
||||||
e.w = e.e.w
|
e.w = e.e.w
|
||||||
|
e.se.i = e.h.RawBytesExt
|
||||||
|
if e.bs != nil {
|
||||||
|
e.bs = e.bs[:0]
|
||||||
|
}
|
||||||
|
e.d, e.dt, e.dl, e.ds = false, false, 0, ""
|
||||||
|
e.c = 0
|
||||||
|
if e.h.Indent > 0 {
|
||||||
|
e.d = true
|
||||||
|
e.ds = jsonSpaces[:e.h.Indent]
|
||||||
|
} else if e.h.Indent < 0 {
|
||||||
|
e.d = true
|
||||||
|
e.dt = true
|
||||||
|
e.ds = jsonTabs[:-(e.h.Indent)]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *jsonDecDriver) reset() {
|
func (d *jsonDecDriver) reset() {
|
||||||
d.r = d.d.r
|
d.r = d.d.r
|
||||||
|
d.se.i = d.h.RawBytesExt
|
||||||
|
if d.bs != nil {
|
||||||
|
d.bs = d.bs[:0]
|
||||||
|
}
|
||||||
|
d.c, d.tok = 0, 0
|
||||||
|
d.n.reset()
|
||||||
}
|
}
|
||||||
|
|
||||||
var jsonEncodeTerminate = []byte{' '}
|
var jsonEncodeTerminate = []byte{' '}
|
||||||
|
|
|
||||||
|
|
@ -374,7 +374,7 @@ func (d *msgpackDecDriver) DecodeNaked() {
|
||||||
}
|
}
|
||||||
if n.v == valueTypeUint && d.h.SignedInteger {
|
if n.v == valueTypeUint && d.h.SignedInteger {
|
||||||
n.v = valueTypeInt
|
n.v = valueTypeInt
|
||||||
n.i = int64(n.v)
|
n.i = int64(n.u)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
@ -561,6 +561,13 @@ func (d *msgpackDecDriver) readNextBd() {
|
||||||
d.bdRead = true
|
d.bdRead = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *msgpackDecDriver) uncacheRead() {
|
||||||
|
if d.bdRead {
|
||||||
|
d.r.unreadn1()
|
||||||
|
d.bdRead = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (d *msgpackDecDriver) ContainerType() (vt valueType) {
|
func (d *msgpackDecDriver) ContainerType() (vt valueType) {
|
||||||
bd := d.bd
|
bd := d.bd
|
||||||
if bd == mpNil {
|
if bd == mpNil {
|
||||||
|
|
@ -729,6 +736,7 @@ func (e *msgpackEncDriver) reset() {
|
||||||
|
|
||||||
func (d *msgpackDecDriver) reset() {
|
func (d *msgpackDecDriver) reset() {
|
||||||
d.r = d.d.r
|
d.r = d.d.r
|
||||||
|
d.bd, d.bdRead = 0, false
|
||||||
}
|
}
|
||||||
|
|
||||||
//--------------------------------------------------
|
//--------------------------------------------------
|
||||||
|
|
|
||||||
|
|
@ -25,7 +25,7 @@ type Rpc interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
|
// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
|
||||||
// used by the rpc connection. It accomodates use-cases where the connection
|
// used by the rpc connection. It accommodates use-cases where the connection
|
||||||
// should be used by rpc and non-rpc functions, e.g. streaming a file after
|
// should be used by rpc and non-rpc functions, e.g. streaming a file after
|
||||||
// sending an rpc response.
|
// sending an rpc response.
|
||||||
type RpcCodecBuffered interface {
|
type RpcCodecBuffered interface {
|
||||||
|
|
|
||||||
|
|
@ -166,6 +166,13 @@ func (d *simpleDecDriver) readNextBd() {
|
||||||
d.bdRead = true
|
d.bdRead = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *simpleDecDriver) uncacheRead() {
|
||||||
|
if d.bdRead {
|
||||||
|
d.r.unreadn1()
|
||||||
|
d.bdRead = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (d *simpleDecDriver) ContainerType() (vt valueType) {
|
func (d *simpleDecDriver) ContainerType() (vt valueType) {
|
||||||
if d.bd == simpleVdNil {
|
if d.bd == simpleVdNil {
|
||||||
return valueTypeNil
|
return valueTypeNil
|
||||||
|
|
@ -474,7 +481,7 @@ func (d *simpleDecDriver) DecodeNaked() {
|
||||||
// SimpleHandle is a Handle for a very simple encoding format.
|
// SimpleHandle is a Handle for a very simple encoding format.
|
||||||
//
|
//
|
||||||
// simple is a simplistic codec similar to binc, but not as compact.
|
// simple is a simplistic codec similar to binc, but not as compact.
|
||||||
// - Encoding of a value is always preceeded by the descriptor byte (bd)
|
// - Encoding of a value is always preceded by the descriptor byte (bd)
|
||||||
// - True, false, nil are encoded fully in 1 byte (the descriptor)
|
// - True, false, nil are encoded fully in 1 byte (the descriptor)
|
||||||
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
|
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
|
||||||
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
|
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
|
||||||
|
|
@ -512,6 +519,7 @@ func (e *simpleEncDriver) reset() {
|
||||||
|
|
||||||
func (d *simpleDecDriver) reset() {
|
func (d *simpleDecDriver) reset() {
|
||||||
d.r = d.d.r
|
d.r = d.d.r
|
||||||
|
d.bd, d.bdRead = 0, false
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ decDriver = (*simpleDecDriver)(nil)
|
var _ decDriver = (*simpleDecDriver)(nil)
|
||||||
|
|
|
||||||
|
|
@ -5,11 +5,22 @@ package codec
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"reflect"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
||||||
|
timeExtEncFn = func(rv reflect.Value) (bs []byte, err error) {
|
||||||
|
defer panicToErr(&err)
|
||||||
|
bs = timeExt{}.WriteExt(rv.Interface())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
timeExtDecFn = func(rv reflect.Value, bs []byte) (err error) {
|
||||||
|
defer panicToErr(&err)
|
||||||
|
timeExt{}.ReadExt(rv.Interface(), bs)
|
||||||
|
return
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
type timeExt struct{}
|
type timeExt struct{}
|
||||||
|
|
|
||||||
|
|
@ -56,6 +56,11 @@ func (x Int26_6) Round() int { return int((x + 0x20) >> 6) }
|
||||||
// Its return type is int, not Int26_6.
|
// Its return type is int, not Int26_6.
|
||||||
func (x Int26_6) Ceil() int { return int((x + 0x3f) >> 6) }
|
func (x Int26_6) Ceil() int { return int((x + 0x3f) >> 6) }
|
||||||
|
|
||||||
|
// Mul returns x*y in 26.6 fixed-point arithmetic.
|
||||||
|
func (x Int26_6) Mul(y Int26_6) Int26_6 {
|
||||||
|
return Int26_6((int64(x)*int64(y) + 1<<5) >> 6)
|
||||||
|
}
|
||||||
|
|
||||||
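The Mul added above rounds to nearest by adding half a unit (1<<5) before shifting right by 6. A quick worked example, assuming the vendored golang.org/x/image/math/fixed package is importable: 1.5 is 96 in 26.6 form and 2.5 is 160, so Mul computes (96*160 + 32) >> 6 = 240, i.e. 3.75.

package main

import (
	"fmt"

	"golang.org/x/image/math/fixed"
)

func main() {
	x := fixed.Int26_6(96)  // 1.5 * 64
	y := fixed.Int26_6(160) // 2.5 * 64
	p := x.Mul(y)
	fmt.Println(int32(p), float64(p)/64) // 240 3.75
}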
// Int52_12 is a signed 52.12 fixed-point number.
|
// Int52_12 is a signed 52.12 fixed-point number.
|
||||||
//
|
//
|
||||||
// The integer part ranges from -2251799813685248 to 2251799813685247,
|
// The integer part ranges from -2251799813685248 to 2251799813685247,
|
||||||
|
|
@ -95,6 +100,39 @@ func (x Int52_12) Round() int { return int((x + 0x800) >> 12) }
|
||||||
// Its return type is int, not Int52_12.
|
// Its return type is int, not Int52_12.
|
||||||
func (x Int52_12) Ceil() int { return int((x + 0xfff) >> 12) }
|
func (x Int52_12) Ceil() int { return int((x + 0xfff) >> 12) }
|
||||||
|
|
||||||
|
// Mul returns x*y in 52.12 fixed-point arithmetic.
|
||||||
|
func (x Int52_12) Mul(y Int52_12) Int52_12 {
|
||||||
|
const M, N = 52, 12
|
||||||
|
lo, hi := muli64(int64(x), int64(y))
|
||||||
|
ret := Int52_12(hi<<M | lo>>N)
|
||||||
|
ret += Int52_12((lo >> (N - 1)) & 1) // Round to nearest, instead of rounding down.
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// muli64 multiplies two int64 values, returning the 128-bit signed integer
|
||||||
|
// result as two uint64 values.
|
||||||
|
//
|
||||||
|
// This implementation is similar to $GOROOT/src/runtime/softfloat64.go's mullu
|
||||||
|
// function, which is in turn adapted from Hacker's Delight.
|
||||||
|
func muli64(u, v int64) (lo, hi uint64) {
|
||||||
|
const (
|
||||||
|
s = 32
|
||||||
|
mask = 1<<s - 1
|
||||||
|
)
|
||||||
|
|
||||||
|
u1 := uint64(u >> s)
|
||||||
|
u0 := uint64(u & mask)
|
||||||
|
v1 := uint64(v >> s)
|
||||||
|
v0 := uint64(v & mask)
|
||||||
|
|
||||||
|
w0 := u0 * v0
|
||||||
|
t := u1*v0 + w0>>s
|
||||||
|
w1 := t & mask
|
||||||
|
w2 := uint64(int64(t) >> s)
|
||||||
|
w1 += u0 * v1
|
||||||
|
return uint64(u) * uint64(v), u1*v1 + w2 + uint64(int64(w1)>>s)
|
||||||
|
}
|
||||||
|
|
||||||
// P returns the integer values x and y as a Point26_6.
|
// P returns the integer values x and y as a Point26_6.
|
||||||
//
|
//
|
||||||
// For example, passing the integer values (2, -3) yields Point26_6{128, -192}.
|
// For example, passing the integer values (2, -3) yields Point26_6{128, -192}.
|
||||||
|
|
|
||||||
|
|
@ -118,8 +118,9 @@ func (d *decoder) ifdUint(p []byte) (u []uint, err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseIFD decides whether the IFD entry in p is "interesting" and
|
// parseIFD decides whether the IFD entry in p is "interesting" and
|
||||||
// stows away the data in the decoder.
|
// stows away the data in the decoder. It returns the tag number of the
|
||||||
func (d *decoder) parseIFD(p []byte) error {
|
// entry and an error, if any.
|
||||||
|
func (d *decoder) parseIFD(p []byte) (int, error) {
|
||||||
tag := d.byteOrder.Uint16(p[0:2])
|
tag := d.byteOrder.Uint16(p[0:2])
|
||||||
switch tag {
|
switch tag {
|
||||||
case tBitsPerSample,
|
case tBitsPerSample,
|
||||||
|
|
@ -138,17 +139,17 @@ func (d *decoder) parseIFD(p []byte) error {
|
||||||
tImageWidth:
|
tImageWidth:
|
||||||
val, err := d.ifdUint(p)
|
val, err := d.ifdUint(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return 0, err
|
||||||
}
|
}
|
||||||
d.features[int(tag)] = val
|
d.features[int(tag)] = val
|
||||||
case tColorMap:
|
case tColorMap:
|
||||||
val, err := d.ifdUint(p)
|
val, err := d.ifdUint(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return 0, err
|
||||||
}
|
}
|
||||||
numcolors := len(val) / 3
|
numcolors := len(val) / 3
|
||||||
if len(val)%3 != 0 || numcolors <= 0 || numcolors > 256 {
|
if len(val)%3 != 0 || numcolors <= 0 || numcolors > 256 {
|
||||||
return FormatError("bad ColorMap length")
|
return 0, FormatError("bad ColorMap length")
|
||||||
}
|
}
|
||||||
d.palette = make([]color.Color, numcolors)
|
d.palette = make([]color.Color, numcolors)
|
||||||
for i := 0; i < numcolors; i++ {
|
for i := 0; i < numcolors; i++ {
|
||||||
|
|
@ -166,15 +167,15 @@ func (d *decoder) parseIFD(p []byte) error {
|
||||||
// must terminate the import process gracefully.
|
// must terminate the import process gracefully.
|
||||||
val, err := d.ifdUint(p)
|
val, err := d.ifdUint(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return 0, err
|
||||||
}
|
}
|
||||||
for _, v := range val {
|
for _, v := range val {
|
||||||
if v != 1 {
|
if v != 1 {
|
||||||
return UnsupportedError("sample format")
|
return 0, UnsupportedError("sample format")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return int(tag), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// readBits reads n bits from the internal buffer starting at the current offset.
|
// readBits reads n bits from the internal buffer starting at the current offset.
|
||||||
|
|
@ -428,10 +429,16 @@ func newDecoder(r io.Reader) (*decoder, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
prevTag := -1
|
||||||
for i := 0; i < len(p); i += ifdLen {
|
for i := 0; i < len(p); i += ifdLen {
|
||||||
if err := d.parseIFD(p[i : i+ifdLen]); err != nil {
|
tag, err := d.parseIFD(p[i : i+ifdLen])
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
if tag <= prevTag {
|
||||||
|
return nil, FormatError("tags are not sorted in ascending order")
|
||||||
|
}
|
||||||
|
prevTag = tag
|
||||||
}
|
}
|
||||||
|
|
||||||
d.config.Width = int(d.firstVal(tImageWidth))
|
d.config.Width = int(d.firstVal(tImageWidth))
|
||||||
|
|
|
||||||
|
|
@ -28,7 +28,8 @@ import (
|
||||||
// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
|
// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
|
||||||
// for details on how to do your own health checking.
|
// for details on how to do your own health checking.
|
||||||
//
|
//
|
||||||
// Main is not yet supported on App Engine Standard.
|
// On App Engine Standard it ensures the server has started and is prepared to
|
||||||
|
// receive requests.
|
||||||
//
|
//
|
||||||
// Main never returns.
|
// Main never returns.
|
||||||
//
|
//
|
||||||
|
|
|
||||||
|
|
@ -33,6 +33,7 @@ import (
|
||||||
|
|
||||||
const (
|
const (
|
||||||
apiPath = "/rpc_http"
|
apiPath = "/rpc_http"
|
||||||
|
defaultTicketSuffix = "/default.20150612t184001.0"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|
@ -60,6 +61,9 @@ var (
|
||||||
Dial: limitDial,
|
Dial: limitDial,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
defaultTicketOnce sync.Once
|
||||||
|
defaultTicket string
|
||||||
)
|
)
|
||||||
|
|
||||||
func apiURL() *url.URL {
|
func apiURL() *url.URL {
|
||||||
|
|
@ -266,6 +270,24 @@ func WithContext(parent netcontext.Context, req *http.Request) netcontext.Contex
|
||||||
return withContext(parent, c)
|
return withContext(parent, c)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DefaultTicket returns a ticket used for background context or dev_appserver.
|
||||||
|
func DefaultTicket() string {
|
||||||
|
defaultTicketOnce.Do(func() {
|
||||||
|
if IsDevAppServer() {
|
||||||
|
defaultTicket = "testapp" + defaultTicketSuffix
|
||||||
|
return
|
||||||
|
}
|
||||||
|
appID := partitionlessAppID()
|
||||||
|
escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
|
||||||
|
majVersion := VersionID(nil)
|
||||||
|
if i := strings.Index(majVersion, "."); i > 0 {
|
||||||
|
majVersion = majVersion[:i]
|
||||||
|
}
|
||||||
|
defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
|
||||||
|
})
|
||||||
|
return defaultTicket
|
||||||
|
}
|
||||||
|
|
||||||
func BackgroundContext() netcontext.Context {
|
func BackgroundContext() netcontext.Context {
|
||||||
ctxs.Lock()
|
ctxs.Lock()
|
||||||
defer ctxs.Unlock()
|
defer ctxs.Unlock()
|
||||||
|
|
@ -275,13 +297,7 @@ func BackgroundContext() netcontext.Context {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compute background security ticket.
|
// Compute background security ticket.
|
||||||
appID := partitionlessAppID()
|
ticket := DefaultTicket()
|
||||||
escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
|
|
||||||
majVersion := VersionID(nil)
|
|
||||||
if i := strings.Index(majVersion, "."); i > 0 {
|
|
||||||
majVersion = majVersion[:i]
|
|
||||||
}
|
|
||||||
ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
|
|
||||||
|
|
||||||
ctxs.bg = &context{
|
ctxs.bg = &context{
|
||||||
req: &http.Request{
|
req: &http.Request{
|
||||||
|
|
@ -475,6 +491,16 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
|
||||||
}
|
}
|
||||||
|
|
||||||
ticket := c.req.Header.Get(ticketHeader)
|
ticket := c.req.Header.Get(ticketHeader)
|
||||||
|
// Use a test ticket under test environment.
|
||||||
|
if ticket == "" {
|
||||||
|
if appid := ctx.Value(&appIDOverrideKey); appid != nil {
|
||||||
|
ticket = appid.(string) + defaultTicketSuffix
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
|
||||||
|
if ticket == "" {
|
||||||
|
ticket = DefaultTicket()
|
||||||
|
}
|
||||||
req := &remotepb.Request{
|
req := &remotepb.Request{
|
||||||
ServiceName: &service,
|
ServiceName: &service,
|
||||||
Method: &method,
|
Method: &method,
|
||||||
|
|
@ -550,6 +576,9 @@ var logLevelName = map[int64]string{
|
||||||
}
|
}
|
||||||
|
|
||||||
func logf(c *context, level int64, format string, args ...interface{}) {
|
func logf(c *context, level int64, format string, args ...interface{}) {
|
||||||
|
if c == nil {
|
||||||
|
panic("not an App Engine context")
|
||||||
|
}
|
||||||
s := fmt.Sprintf(format, args...)
|
s := fmt.Sprintf(format, args...)
|
||||||
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
|
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
|
||||||
c.addLogLine(&logpb.UserAppLogLine{
|
c.addLogLine(&logpb.UserAppLogLine{
|
||||||
|
|
|
||||||
|
|
@ -5,6 +5,8 @@
|
||||||
package internal
|
package internal
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
netcontext "golang.org/x/net/context"
|
netcontext "golang.org/x/net/context"
|
||||||
)
|
)
|
||||||
|
|
@ -84,3 +86,31 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{
|
||||||
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
|
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
|
||||||
return withNamespace(ctx, namespace)
|
return withNamespace(ctx, namespace)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetTestEnv sets the env variables for testing background ticket in Flex.
|
||||||
|
func SetTestEnv() func() {
|
||||||
|
var environ = []struct {
|
||||||
|
key, value string
|
||||||
|
}{
|
||||||
|
{"GAE_LONG_APP_ID", "my-app-id"},
|
||||||
|
{"GAE_MINOR_VERSION", "067924799508853122"},
|
||||||
|
{"GAE_MODULE_INSTANCE", "0"},
|
||||||
|
{"GAE_MODULE_NAME", "default"},
|
||||||
|
{"GAE_MODULE_VERSION", "20150612t184001"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range environ {
|
||||||
|
old := os.Getenv(v.key)
|
||||||
|
os.Setenv(v.key, v.value)
|
||||||
|
v.value = old
|
||||||
|
}
|
||||||
|
return func() { // Restore old environment after the test completes.
|
||||||
|
for _, v := range environ {
|
||||||
|
if v.value == "" {
|
||||||
|
os.Unsetenv(v.key)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
os.Setenv(v.key, v.value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
||||||