vendor: update, sync with etcd master

Gyu-Ho Lee 2017-02-15 10:51:19 -08:00
parent 705a53d413
commit ba6a0c332d
17 changed files with 1631 additions and 44 deletions

8
glide.lock generated

@@ -1,5 +1,5 @@
hash: 6bd04d9644a95f1caa2c1df864c296078eadff2a349eb724fbc0486aa5a2a72f
-updated: 2017-02-13T13:49:21.228470942-08:00
+updated: 2017-02-15T10:49:57.073799273-08:00
imports:
- name: bitbucket.org/zombiezen/gopdf
version: 1c63dc69751bc45441c2ce1f56b631c55294b4d5
@@ -145,7 +145,9 @@ imports:
- internal/bitbucket.org/ww/goautoneg
- model
- name: github.com/prometheus/procfs
-version: 1878d9fbb537119d24b21ca07effd591627cd160
+version: a59ae619c824360be3641ceb5a456ff13dd58be6
subpackages:
- xfs
- name: github.com/samuel/go-zookeeper
version: 1d7be4effb13d2d908342d349d71a284a7542693
subpackages:
@@ -159,7 +161,7 @@ imports:
subpackages:
- codec
- name: golang.org/x/image
-version: df2aa51d4407e30b309afc8c4d6fdca045d97fb0
+version: 306b8294319cca33073246c5f75e5142f54f4cca
subpackages:
- draw
- font


@@ -0,0 +1,75 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package netutil
import (
"fmt"
"os/exec"
)
// DropPort drops all tcp packets that are received from the given port and sent to the given port.
func DropPort(port int) error {
cmdStr := fmt.Sprintf("sudo iptables -A OUTPUT -p tcp --destination-port %d -j DROP", port)
if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
return err
}
cmdStr = fmt.Sprintf("sudo iptables -A INPUT -p tcp --destination-port %d -j DROP", port)
_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
return err
}
// RecoverPort stops dropping tcp packets at given port.
func RecoverPort(port int) error {
cmdStr := fmt.Sprintf("sudo iptables -D OUTPUT -p tcp --destination-port %d -j DROP", port)
if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
return err
}
cmdStr = fmt.Sprintf("sudo iptables -D INPUT -p tcp --destination-port %d -j DROP", port)
_, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
return err
}
// SetLatency adds latency in millisecond scale with random variations.
func SetLatency(ms, rv int) error {
ifce, err := GetDefaultInterface()
if err != nil {
return err
}
if rv > ms {
rv = 1
}
cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
if err != nil {
// the rule has already been added. Overwrite it.
cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
_, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
if err != nil {
return err
}
}
return nil
}
// RemoveLatency resets latency configurations.
func RemoveLatency() error {
ifce, err := GetDefaultInterface()
if err != nil {
return err
}
_, err = exec.Command("/bin/sh", "-c", fmt.Sprintf("sudo tc qdisc del dev %s root netem", ifce)).Output()
return err
}
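A minimal usage sketch for these fault-injection helpers follows (it is not part of the diff); it assumes a Linux host where the current user can run sudo iptables and sudo tc non-interactively, and port 2380 is only a placeholder:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/pkg/netutil"
)

func main() {
	// Drop all TCP traffic to port 2380, then restore it when done.
	if err := netutil.DropPort(2380); err != nil {
		log.Fatalf("DropPort: %v (requires sudo/iptables)", err)
	}
	defer netutil.RecoverPort(2380)

	// Add roughly 200ms of delay with 50ms of variation on the default interface.
	if err := netutil.SetLatency(200, 50); err != nil {
		log.Fatalf("SetLatency: %v (requires sudo/tc)", err)
	}
	defer netutil.RemoveLatency()

	time.Sleep(time.Second) // exercise the network under the injected faults here
}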


@@ -0,0 +1,25 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux
package netutil
func DropPort(port int) error { return nil }
func RecoverPort(port int) error { return nil }
func SetLatency(ms, rv int) error { return nil }
func RemoveLatency() error { return nil }

143
vendor/github.com/coreos/etcd/pkg/netutil/netutil.go generated vendored Normal file

@@ -0,0 +1,143 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package netutil implements network-related utility functions.
package netutil
import (
"net"
"net/url"
"reflect"
"sort"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/pkg/capnslog"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil")
// indirection for testing
resolveTCPAddr = net.ResolveTCPAddr
)
const retryInterval = time.Second
// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
// resolveTCPAddrs returns a new set of url.URLs, in which all DNS hostnames
// are resolved.
func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) {
newurls := make([][]url.URL, 0)
for _, us := range urls {
nus := make([]url.URL, len(us))
for i, u := range us {
nu, err := url.Parse(u.String())
if err != nil {
return nil, err
}
nus[i] = *nu
}
for i, u := range nus {
h, err := resolveURL(ctx, u)
if err != nil {
return nil, err
}
if h != "" {
nus[i].Host = h
}
}
newurls = append(newurls, nus)
}
return newurls, nil
}
func resolveURL(ctx context.Context, u url.URL) (string, error) {
for ctx.Err() == nil {
host, _, err := net.SplitHostPort(u.Host)
if err != nil {
plog.Errorf("could not parse url %s during tcp resolving", u.Host)
return "", err
}
if host == "localhost" || net.ParseIP(host) != nil {
return "", nil
}
tcpAddr, err := resolveTCPAddr("tcp", u.Host)
if err == nil {
plog.Infof("resolving %s to %s", u.Host, tcpAddr.String())
return tcpAddr.String(), nil
}
plog.Warningf("failed resolving host %s (%v); retrying in %v", u.Host, err, retryInterval)
select {
case <-ctx.Done():
plog.Errorf("could not resolve host %s", u.Host)
return "", err
case <-time.After(retryInterval):
}
}
return "", ctx.Err()
}
// urlsEqual checks equality of url.URLs between two slices.
// The check passes even if one URL is given as a hostname and the other as an IP address.
func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) bool {
if len(a) != len(b) {
return false
}
urls, err := resolveTCPAddrs(ctx, [][]url.URL{a, b})
if err != nil {
return false
}
a, b = urls[0], urls[1]
sort.Sort(types.URLs(a))
sort.Sort(types.URLs(b))
for i := range a {
if !reflect.DeepEqual(a[i], b[i]) {
return false
}
}
return true
}
func URLStringsEqual(ctx context.Context, a []string, b []string) bool {
if len(a) != len(b) {
return false
}
urlsA := make([]url.URL, 0)
for _, str := range a {
u, err := url.Parse(str)
if err != nil {
return false
}
urlsA = append(urlsA, *u)
}
urlsB := make([]url.URL, 0)
for _, str := range b {
u, err := url.Parse(str)
if err != nil {
return false
}
urlsB = append(urlsB, *u)
}
return urlsEqual(ctx, urlsA, urlsB)
}
func IsNetworkTimeoutError(err error) bool {
nerr, ok := err.(net.Error)
return ok && nerr.Timeout()
}
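A hedged sketch of how URLStringsEqual might be called (hypothetical caller, not in this commit; the hostname and IP below are placeholders):

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/netutil"
	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Equal only if infra0.example.com resolves to 10.0.1.10; resolution
	// retries every second until the context expires.
	equal := netutil.URLStringsEqual(ctx,
		[]string{"http://infra0.example.com:2380"},
		[]string{"http://10.0.1.10:2380"})
	fmt.Println("URLs equal:", equal)
}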

33
vendor/github.com/coreos/etcd/pkg/netutil/routes.go generated vendored Normal file

@@ -0,0 +1,33 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux !386,!amd64
package netutil
import (
"fmt"
"runtime"
)
// GetDefaultHost fetches a resolvable name that corresponds
// to the machine's default routable interface.
func GetDefaultHost() (string, error) {
return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
}
// GetDefaultInterface fetches the device name of default routable interface.
func GetDefaultInterface() (string, error) {
return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
}


@@ -0,0 +1,178 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build 386 amd64
// TODO support native endian but without using "unsafe"
package netutil
import (
"bytes"
"encoding/binary"
"fmt"
"net"
"syscall"
)
var errNoDefaultRoute = fmt.Errorf("could not find default route")
func GetDefaultHost() (string, error) {
rmsg, rerr := getDefaultRoute()
if rerr != nil {
return "", rerr
}
host, oif, err := parsePREFSRC(rmsg)
if err != nil {
return "", err
}
if host != "" {
return host, nil
}
// prefsrc not detected, fall back to getting address from iface
ifmsg, ierr := getIface(oif)
if ierr != nil {
return "", ierr
}
attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
if aerr != nil {
return "", aerr
}
for _, attr := range attrs {
if attr.Attr.Type == syscall.RTA_SRC {
return net.IP(attr.Value).String(), nil
}
}
return "", errNoDefaultRoute
}
func getDefaultRoute() (*syscall.NetlinkMessage, error) {
dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
if err != nil {
return nil, err
}
msgs, msgErr := syscall.ParseNetlinkMessage(dat)
if msgErr != nil {
return nil, msgErr
}
rtmsg := syscall.RtMsg{}
for _, m := range msgs {
if m.Header.Type != syscall.RTM_NEWROUTE {
continue
}
buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg])
if rerr := binary.Read(buf, binary.LittleEndian, &rtmsg); rerr != nil {
continue
}
if rtmsg.Dst_len == 0 {
// zero-length Dst_len implies default route
return &m, nil
}
}
return nil, errNoDefaultRoute
}
func getIface(idx uint32) (*syscall.NetlinkMessage, error) {
dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, syscall.AF_UNSPEC)
if err != nil {
return nil, err
}
msgs, msgErr := syscall.ParseNetlinkMessage(dat)
if msgErr != nil {
return nil, msgErr
}
ifaddrmsg := syscall.IfAddrmsg{}
for _, m := range msgs {
if m.Header.Type != syscall.RTM_NEWADDR {
continue
}
buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg])
if rerr := binary.Read(buf, binary.LittleEndian, &ifaddrmsg); rerr != nil {
continue
}
if ifaddrmsg.Index == idx {
return &m, nil
}
}
return nil, errNoDefaultRoute
}
var errNoDefaultInterface = fmt.Errorf("could not find default interface")
func GetDefaultInterface() (string, error) {
rmsg, rerr := getDefaultRoute()
if rerr != nil {
return "", rerr
}
_, oif, err := parsePREFSRC(rmsg)
if err != nil {
return "", err
}
ifmsg, ierr := getIface(oif)
if ierr != nil {
return "", ierr
}
attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
if aerr != nil {
return "", aerr
}
for _, attr := range attrs {
if attr.Attr.Type == syscall.IFLA_IFNAME {
return string(attr.Value[:len(attr.Value)-1]), nil
}
}
return "", errNoDefaultInterface
}
// parsePREFSRC returns preferred source address and output interface index (RTA_OIF).
func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) {
var attrs []syscall.NetlinkRouteAttr
attrs, err = syscall.ParseNetlinkRouteAttr(m)
if err != nil {
return "", 0, err
}
for _, attr := range attrs {
if attr.Attr.Type == syscall.RTA_PREFSRC {
host = net.IP(attr.Value).String()
}
if attr.Attr.Type == syscall.RTA_OIF {
oif = binary.LittleEndian.Uint32(attr.Value)
}
if host != "" && oif != uint32(0) {
break
}
}
if oif == 0 {
err = errNoDefaultRoute
}
return
}
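A short sketch using the route helpers (hypothetical caller, not part of the diff; compiles only for the linux/386 and linux/amd64 targets selected by the build tags above):

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/pkg/netutil"
)

func main() {
	host, err := netutil.GetDefaultHost() // e.g. the preferred source address of the default route
	if err != nil {
		log.Fatal(err)
	}
	ifce, err := netutil.GetDefaultInterface() // e.g. "eth0"
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("default host %s via interface %s\n", host, ifce)
}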

16
vendor/github.com/coreos/etcd/pkg/report/doc.go generated vendored Normal file

@@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package report generates human-readable benchmark reports.
package report

284
vendor/github.com/coreos/etcd/pkg/report/report.go generated vendored Normal file

@@ -0,0 +1,284 @@
// Copyright 2014 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// the file is borrowed from github.com/rakyll/boom/boomer/print.go
package report
import (
"fmt"
"math"
"sort"
"strings"
"time"
)
const (
barChar = "∎"
)
// Result describes the timings for an operation.
type Result struct {
Start time.Time
End time.Time
Err error
}
func (res *Result) Duration() time.Duration { return res.End.Sub(res.Start) }
type report struct {
results chan Result
precision string
avgTotal float64
fastest float64
slowest float64
average float64
stddev float64
rps float64
total time.Duration
errorDist map[string]int
lats []float64
sps *secondPoints
}
// Stats exposes results raw data.
type Stats struct {
AvgTotal float64
Fastest float64
Slowest float64
Average float64
Stddev float64
RPS float64
Total time.Duration
ErrorDist map[string]int
Lats []float64
TimeSeries TimeSeries
}
// Report processes a result stream until it is closed, then produces a
// string with information about the consumed result data.
type Report interface {
Results() chan<- Result
// Run returns results in print-friendly format.
Run() <-chan string
// Stats returns results in raw data.
Stats() <-chan Stats
}
func NewReport(precision string) Report {
return &report{
results: make(chan Result, 16),
precision: precision,
errorDist: make(map[string]int),
}
}
func NewReportSample(precision string) Report {
r := NewReport(precision).(*report)
r.sps = newSecondPoints()
return r
}
func (r *report) Results() chan<- Result { return r.results }
func (r *report) Run() <-chan string {
donec := make(chan string, 1)
go func() {
defer close(donec)
r.processResults()
donec <- r.String()
}()
return donec
}
func (r *report) Stats() <-chan Stats {
donec := make(chan Stats, 1)
go func() {
defer close(donec)
r.processResults()
donec <- Stats{
AvgTotal: r.avgTotal,
Fastest: r.fastest,
Slowest: r.slowest,
Average: r.average,
Stddev: r.stddev,
RPS: r.rps,
Total: r.total,
ErrorDist: copyMap(r.errorDist),
Lats: copyFloats(r.lats),
TimeSeries: r.sps.getTimeSeries(),
}
}()
return donec
}
func copyMap(m map[string]int) (c map[string]int) {
c = make(map[string]int, len(m))
for k, v := range m {
c[k] = v
}
return
}
func copyFloats(s []float64) (c []float64) {
c = make([]float64, len(s))
copy(c, s)
return
}
func (r *report) String() (s string) {
if len(r.lats) > 0 {
s += fmt.Sprintf("\nSummary:\n")
s += fmt.Sprintf(" Total:\t%s.\n", r.sec2str(r.total.Seconds()))
s += fmt.Sprintf(" Slowest:\t%s.\n", r.sec2str(r.slowest))
s += fmt.Sprintf(" Fastest:\t%s.\n", r.sec2str(r.fastest))
s += fmt.Sprintf(" Average:\t%s.\n", r.sec2str(r.average))
s += fmt.Sprintf(" Stddev:\t%s.\n", r.sec2str(r.stddev))
s += fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.rps)
s += r.histogram()
s += r.sprintLatencies()
if r.sps != nil {
s += fmt.Sprintf("%v\n", r.sps.getTimeSeries())
}
}
if len(r.errorDist) > 0 {
s += r.errors()
}
return s
}
func (r *report) sec2str(sec float64) string { return fmt.Sprintf(r.precision+" secs", sec) }
type reportRate struct{ *report }
func NewReportRate(precision string) Report {
return &reportRate{NewReport(precision).(*report)}
}
func (r *reportRate) String() string {
return fmt.Sprintf(" Requests/sec:\t"+r.precision+"\n", r.rps)
}
func (r *report) processResult(res *Result) {
if res.Err != nil {
r.errorDist[res.Err.Error()]++
return
}
dur := res.Duration()
r.lats = append(r.lats, dur.Seconds())
r.avgTotal += dur.Seconds()
if r.sps != nil {
r.sps.Add(res.Start, dur)
}
}
func (r *report) processResults() {
st := time.Now()
for res := range r.results {
r.processResult(&res)
}
r.total = time.Since(st)
r.rps = float64(len(r.lats)) / r.total.Seconds()
r.average = r.avgTotal / float64(len(r.lats))
for i := range r.lats {
dev := r.lats[i] - r.average
r.stddev += dev * dev
}
r.stddev = math.Sqrt(r.stddev / float64(len(r.lats)))
sort.Float64s(r.lats)
if len(r.lats) > 0 {
r.fastest = r.lats[0]
r.slowest = r.lats[len(r.lats)-1]
}
}
var pctls = []float64{10, 25, 50, 75, 90, 95, 99, 99.9}
// Percentiles returns percentile distribution of float64 slice.
func Percentiles(nums []float64) (pcs []float64, data []float64) {
return pctls, percentiles(nums)
}
func percentiles(nums []float64) (data []float64) {
data = make([]float64, len(pctls))
j := 0
n := len(nums)
for i := 0; i < n && j < len(pctls); i++ {
current := float64(i) * 100.0 / float64(n)
if current >= pctls[j] {
data[j] = nums[i]
j++
}
}
return
}
func (r *report) sprintLatencies() string {
data := percentiles(r.lats)
s := fmt.Sprintf("\nLatency distribution:\n")
for i := 0; i < len(pctls); i++ {
if data[i] > 0 {
s += fmt.Sprintf(" %v%% in %s.\n", pctls[i], r.sec2str(data[i]))
}
}
return s
}
func (r *report) histogram() string {
bc := 10
buckets := make([]float64, bc+1)
counts := make([]int, bc+1)
bs := (r.slowest - r.fastest) / float64(bc)
for i := 0; i < bc; i++ {
buckets[i] = r.fastest + bs*float64(i)
}
buckets[bc] = r.slowest
var bi int
var max int
for i := 0; i < len(r.lats); {
if r.lats[i] <= buckets[bi] {
i++
counts[bi]++
if max < counts[bi] {
max = counts[bi]
}
} else if bi < len(buckets)-1 {
bi++
}
}
s := fmt.Sprintf("\nResponse time histogram:\n")
for i := 0; i < len(buckets); i++ {
// Normalize bar lengths.
var barLen int
if max > 0 {
barLen = counts[i] * 40 / max
}
s += fmt.Sprintf(" "+r.precision+" [%v]\t|%v\n", buckets[i], counts[i], strings.Repeat(barChar, barLen))
}
return s
}
func (r *report) errors() string {
s := fmt.Sprintf("\nError distribution:\n")
for err, num := range r.errorDist {
s += fmt.Sprintf(" [%d]\t%s\n", num, err)
}
return s
}
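A hedged sketch of driving the Report interface (hypothetical benchmark loop, not in this commit; the sleep stands in for the measured operation):

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/report"
)

func main() {
	r := report.NewReport("%4.4f") // precision is a fmt verb applied to seconds
	donec := r.Run()

	for i := 0; i < 100; i++ {
		start := time.Now()
		time.Sleep(2 * time.Millisecond) // stand-in for the operation being timed
		r.Results() <- report.Result{Start: start, End: time.Now()}
	}
	close(r.Results())

	fmt.Println(<-donec) // summary, histogram, and latency distribution
}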

134
vendor/github.com/coreos/etcd/pkg/report/timeseries.go generated vendored Normal file

@@ -0,0 +1,134 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package report
import (
"bytes"
"encoding/csv"
"fmt"
"log"
"math"
"sort"
"sync"
"time"
)
type DataPoint struct {
Timestamp int64
AvgLatency time.Duration
ThroughPut int64
}
type TimeSeries []DataPoint
func (t TimeSeries) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
func (t TimeSeries) Len() int { return len(t) }
func (t TimeSeries) Less(i, j int) bool { return t[i].Timestamp < t[j].Timestamp }
type secondPoint struct {
totalLatency time.Duration
count int64
}
type secondPoints struct {
mu sync.Mutex
tm map[int64]secondPoint
}
func newSecondPoints() *secondPoints {
return &secondPoints{tm: make(map[int64]secondPoint)}
}
func (sp *secondPoints) Add(ts time.Time, lat time.Duration) {
sp.mu.Lock()
defer sp.mu.Unlock()
tk := ts.Unix()
if v, ok := sp.tm[tk]; !ok {
sp.tm[tk] = secondPoint{totalLatency: lat, count: 1}
} else {
v.totalLatency += lat
v.count += 1
sp.tm[tk] = v
}
}
func (sp *secondPoints) getTimeSeries() TimeSeries {
sp.mu.Lock()
defer sp.mu.Unlock()
var (
minTs int64 = math.MaxInt64
maxTs int64 = -1
)
for k := range sp.tm {
if minTs > k {
minTs = k
}
if maxTs < k {
maxTs = k
}
}
for ti := minTs; ti < maxTs; ti++ {
if _, ok := sp.tm[ti]; !ok { // fill-in empties
sp.tm[ti] = secondPoint{totalLatency: 0, count: 0}
}
}
var (
tslice = make(TimeSeries, len(sp.tm))
i int
)
for k, v := range sp.tm {
var lat time.Duration
if v.count > 0 {
lat = time.Duration(v.totalLatency) / time.Duration(v.count)
}
tslice[i] = DataPoint{
Timestamp: k,
AvgLatency: lat,
ThroughPut: v.count,
}
i++
}
sort.Sort(tslice)
return tslice
}
func (ts TimeSeries) String() string {
buf := new(bytes.Buffer)
wr := csv.NewWriter(buf)
if err := wr.Write([]string{"UNIX-TS", "AVG-LATENCY-MS", "AVG-THROUGHPUT"}); err != nil {
log.Fatal(err)
}
rows := [][]string{}
for i := range ts {
row := []string{
fmt.Sprintf("%d", ts[i].Timestamp),
fmt.Sprintf("%s", ts[i].AvgLatency),
fmt.Sprintf("%d", ts[i].ThroughPut),
}
rows = append(rows, row)
}
if err := wr.WriteAll(rows); err != nil {
log.Fatal(err)
}
wr.Flush()
if err := wr.Error(); err != nil {
log.Fatal(err)
}
return fmt.Sprintf("\nSample in one second (unix latency throughput):\n%s", buf.String())
}
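For the per-second sampling above, a sketch using NewReportSample and Stats (hypothetical caller, not part of the diff):

package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/report"
)

func main() {
	r := report.NewReportSample("%4.4f") // records per-second latency/throughput points
	statsc := r.Stats()

	for i := 0; i < 50; i++ {
		start := time.Now()
		time.Sleep(5 * time.Millisecond) // stand-in for the measured operation
		r.Results() <- report.Result{Start: start, End: time.Now()}
	}
	close(r.Results())

	stats := <-statsc
	fmt.Println(stats.TimeSeries) // "UNIX-TS, AVG-LATENCY-MS, AVG-THROUGHPUT" rows in CSV form
}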

95
vendor/github.com/prometheus/procfs/buddyinfo.go generated vendored Normal file

@@ -0,0 +1,95 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// A BuddyInfo is the details parsed from /proc/buddyinfo.
// The data is comprised of an array of free fragments of each size.
// The sizes are 2^n*PAGE_SIZE, where n is the array index.
type BuddyInfo struct {
Node string
Zone string
Sizes []float64
}
// NewBuddyInfo reads the buddyinfo statistics.
func NewBuddyInfo() ([]BuddyInfo, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewBuddyInfo()
}
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.Path("buddyinfo"))
if err != nil {
return nil, err
}
defer file.Close()
return parseBuddyInfo(file)
}
func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
var (
buddyInfo = []BuddyInfo{}
scanner = bufio.NewScanner(r)
bucketCount = -1
)
for scanner.Scan() {
var err error
line := scanner.Text()
parts := strings.Fields(string(line))
if len(parts) < 4 {
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
}
node := strings.TrimRight(parts[1], ",")
zone := strings.TrimRight(parts[3], ",")
arraySize := len(parts[4:])
if bucketCount == -1 {
bucketCount = arraySize
} else {
if bucketCount != arraySize {
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
}
}
sizes := make([]float64, arraySize)
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
}
}
buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
}
return buddyInfo, scanner.Err()
}
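A small sketch reading /proc/buddyinfo through this API (hypothetical caller, not part of the diff):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	infos, err := procfs.NewBuddyInfo() // parses /proc/buddyinfo on the host
	if err != nil {
		log.Fatal(err)
	}
	for _, bi := range infos {
		// Sizes[n] is the count of free fragments of size 2^n pages.
		fmt.Printf("node %s zone %-8s %v\n", bi.Node, bi.Zone, bi.Sizes)
	}
}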


@@ -4,6 +4,8 @@ import (
"fmt"
"os"
"path"
"github.com/prometheus/procfs/xfs"
)
// FS represents the pseudo-filesystem proc, which provides an interface to
@@ -31,3 +33,14 @@ func NewFS(mountPoint string) (FS, error) {
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
// XFSStats retrieves XFS filesystem runtime statistics.
func (fs FS) XFSStats() (*xfs.Stats, error) {
f, err := os.Open(fs.Path("fs/xfs/stat"))
if err != nil {
return nil, err
}
defer f.Close()
return xfs.ParseStats(f)
}
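A hedged sketch of the new XFSStats accessor (hypothetical caller, assumes the host actually exposes /proc/fs/xfs/stat):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // usually "/proc"
	if err != nil {
		log.Fatal(err)
	}
	stats, err := fs.XFSStats()
	if err != nil {
		log.Fatal(err) // e.g. no XFS filesystems mounted
	}
	fmt.Println("extents allocated:", stats.ExtentAllocation.ExtentsAllocated)
	fmt.Println("bytes written:", stats.ExtendedPrecision.WriteBytes)
}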


@@ -123,7 +123,7 @@ type NFSEventsStats struct {
VFSFlush uint64
// Number of times fsync() has been called on directories and files.
VFSFsync uint64
-// Number of times locking has been attemped on a file.
+// Number of times locking has been attempted on a file.
VFSLock uint64
// Number of times files have been closed and released.
VFSFileRelease uint64
@@ -356,7 +356,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
// When encountering "per-operation statistics", we must break this
-// loop and parse them seperately to ensure we can terminate parsing
+// loop and parse them separately to ensure we can terminate parsing
// before reaching another device entry; hence why this 'if' statement
// is not just another switch case
if ss[0] == fieldPerOpStats {

361
vendor/github.com/prometheus/procfs/xfs/parse.go generated vendored Normal file

@@ -0,0 +1,361 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xfs
import (
"bufio"
"fmt"
"io"
"log"
"strconv"
"strings"
)
// ParseStats parses a Stats from an input io.Reader, using the format
// found in /proc/fs/xfs/stat.
func ParseStats(r io.Reader) (*Stats, error) {
const (
// Fields parsed into stats structures.
fieldExtentAlloc = "extent_alloc"
fieldAbt = "abt"
fieldBlkMap = "blk_map"
fieldBmbt = "bmbt"
fieldDir = "dir"
fieldTrans = "trans"
fieldIg = "ig"
fieldLog = "log"
fieldRw = "rw"
fieldAttr = "attr"
fieldIcluster = "icluster"
fieldVnodes = "vnodes"
fieldBuf = "buf"
fieldXpc = "xpc"
// Unimplemented at this time due to lack of documentation.
fieldPushAil = "push_ail"
fieldXstrat = "xstrat"
fieldAbtb2 = "abtb2"
fieldAbtc2 = "abtc2"
fieldBmbt2 = "bmbt2"
fieldIbt2 = "ibt2"
fieldFibt2 = "fibt2"
fieldQm = "qm"
fieldDebug = "debug"
)
var xfss Stats
s := bufio.NewScanner(r)
for s.Scan() {
// Expect at least a string label and a single integer value, ex:
// - abt 0
// - rw 1 2
ss := strings.Fields(string(s.Bytes()))
if len(ss) < 2 {
continue
}
label := ss[0]
// Extended precision counters are uint64 values.
if label == fieldXpc {
us, err := parseUint64s(ss[1:])
if err != nil {
return nil, err
}
xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
if err != nil {
return nil, err
}
continue
}
// All other counters are uint32 values.
us, err := parseUint32s(ss[1:])
if err != nil {
return nil, err
}
switch label {
case fieldExtentAlloc:
xfss.ExtentAllocation, err = extentAllocationStats(us)
case fieldAbt:
xfss.AllocationBTree, err = btreeStats(us)
case fieldBlkMap:
xfss.BlockMapping, err = blockMappingStats(us)
case fieldBmbt:
xfss.BlockMapBTree, err = btreeStats(us)
case fieldDir:
xfss.DirectoryOperation, err = directoryOperationStats(us)
case fieldTrans:
xfss.Transaction, err = transactionStats(us)
case fieldIg:
xfss.InodeOperation, err = inodeOperationStats(us)
case fieldLog:
xfss.LogOperation, err = logOperationStats(us)
case fieldRw:
xfss.ReadWrite, err = readWriteStats(us)
case fieldAttr:
xfss.AttributeOperation, err = attributeOperationStats(us)
case fieldIcluster:
xfss.InodeClustering, err = inodeClusteringStats(us)
case fieldVnodes:
xfss.Vnode, err = vnodeStats(us)
case fieldBuf:
xfss.Buffer, err = bufferStats(us)
}
if err != nil {
return nil, err
}
}
return &xfss, s.Err()
}
// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
if l := len(us); l != 4 {
return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
}
return ExtentAllocationStats{
ExtentsAllocated: us[0],
BlocksAllocated: us[1],
ExtentsFreed: us[2],
BlocksFreed: us[3],
}, nil
}
// btreeStats builds a BTreeStats from a slice of uint32s.
func btreeStats(us []uint32) (BTreeStats, error) {
if l := len(us); l != 4 {
return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
}
return BTreeStats{
Lookups: us[0],
Compares: us[1],
RecordsInserted: us[2],
RecordsDeleted: us[3],
}, nil
}
// BlockMappingStats builds a BlockMappingStats from a slice of uint32s.
func blockMappingStats(us []uint32) (BlockMappingStats, error) {
if l := len(us); l != 7 {
return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
}
return BlockMappingStats{
Reads: us[0],
Writes: us[1],
Unmaps: us[2],
ExtentListInsertions: us[3],
ExtentListDeletions: us[4],
ExtentListLookups: us[5],
ExtentListCompares: us[6],
}, nil
}
// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
if l := len(us); l != 4 {
return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
}
return DirectoryOperationStats{
Lookups: us[0],
Creates: us[1],
Removes: us[2],
Getdents: us[3],
}, nil
}
// TransactionStats builds a TransactionStats from a slice of uint32s.
func transactionStats(us []uint32) (TransactionStats, error) {
if l := len(us); l != 3 {
return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
}
return TransactionStats{
Sync: us[0],
Async: us[1],
Empty: us[2],
}, nil
}
// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
if l := len(us); l != 7 {
return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
}
return InodeOperationStats{
Attempts: us[0],
Found: us[1],
Recycle: us[2],
Missed: us[3],
Duplicate: us[4],
Reclaims: us[5],
AttributeChange: us[6],
}, nil
}
// LogOperationStats builds a LogOperationStats from a slice of uint32s.
func logOperationStats(us []uint32) (LogOperationStats, error) {
if l := len(us); l != 5 {
return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
}
return LogOperationStats{
Writes: us[0],
Blocks: us[1],
NoInternalBuffers: us[2],
Force: us[3],
ForceSleep: us[4],
}, nil
}
// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
func readWriteStats(us []uint32) (ReadWriteStats, error) {
if l := len(us); l != 2 {
return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
}
return ReadWriteStats{
Read: us[0],
Write: us[1],
}, nil
}
// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
if l := len(us); l != 4 {
return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
}
return AttributeOperationStats{
Get: us[0],
Set: us[1],
Remove: us[2],
List: us[3],
}, nil
}
// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
if l := len(us); l != 3 {
return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
}
return InodeClusteringStats{
Iflush: us[0],
Flush: us[1],
FlushInode: us[2],
}, nil
}
// VnodeStats builds a VnodeStats from a slice of uint32s.
func vnodeStats(us []uint32) (VnodeStats, error) {
// The attribute "Free" appears to not be available on older XFS
// stats versions. Therefore, 7 or 8 elements may appear in
// this slice.
l := len(us)
log.Println(l)
if l != 7 && l != 8 {
return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
}
s := VnodeStats{
Active: us[0],
Allocate: us[1],
Get: us[2],
Hold: us[3],
Release: us[4],
Reclaim: us[5],
Remove: us[6],
}
// Skip adding free, unless it is present. The zero value will
// be used in place of an actual count.
if l == 7 {
return s, nil
}
s.Free = us[7]
return s, nil
}
// BufferStats builds a BufferStats from a slice of uint32s.
func bufferStats(us []uint32) (BufferStats, error) {
if l := len(us); l != 9 {
return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
}
return BufferStats{
Get: us[0],
Create: us[1],
GetLocked: us[2],
GetLockedWaited: us[3],
BusyLocked: us[4],
MissLocked: us[5],
PageRetries: us[6],
PageFound: us[7],
GetRead: us[8],
}, nil
}
// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.
func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
if l := len(us); l != 3 {
return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
}
return ExtendedPrecisionStats{
FlushBytes: us[0],
WriteBytes: us[1],
ReadBytes: us[2],
}, nil
}
// parseUint32s parses a slice of strings into a slice of uint32s.
func parseUint32s(ss []string) ([]uint32, error) {
us := make([]uint32, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 32)
if err != nil {
return nil, err
}
us = append(us, uint32(u))
}
return us, nil
}
// parseUint64s parses a slice of strings into a slice of uint64s.
func parseUint64s(ss []string) ([]uint64, error) {
us := make([]uint64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
us = append(us, u)
}
return us, nil
}
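ParseStats can also be exercised against an in-memory sample; the sketch below (not part of the diff) uses made-up counter values in the /proc/fs/xfs/stat line format:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/procfs/xfs"
)

func main() {
	// Each line is a label followed by space-separated counters.
	sample := "extent_alloc 1 2 3 4\n" +
		"rw 10 20\n" +
		"xpc 100 200 300\n"

	stats, err := xfs.ParseStats(strings.NewReader(sample))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stats.ReadWrite.Read)               // 10
	fmt.Println(stats.ExtendedPrecision.FlushBytes) // 100
}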

158
vendor/github.com/prometheus/procfs/xfs/xfs.go generated vendored Normal file

@@ -0,0 +1,158 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package xfs provides access to statistics exposed by the XFS filesystem.
package xfs
// Stats contains XFS filesystem runtime statistics, parsed from
// /proc/fs/xfs/stat.
//
// The names and meanings of each statistic were taken from
// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
ExtentAllocation ExtentAllocationStats
AllocationBTree BTreeStats
BlockMapping BlockMappingStats
BlockMapBTree BTreeStats
DirectoryOperation DirectoryOperationStats
Transaction TransactionStats
InodeOperation InodeOperationStats
LogOperation LogOperationStats
ReadWrite ReadWriteStats
AttributeOperation AttributeOperationStats
InodeClustering InodeClusteringStats
Vnode VnodeStats
Buffer BufferStats
ExtendedPrecision ExtendedPrecisionStats
}
// ExtentAllocationStats contains statistics regarding XFS extent allocations.
type ExtentAllocationStats struct {
ExtentsAllocated uint32
BlocksAllocated uint32
ExtentsFreed uint32
BlocksFreed uint32
}
// BTreeStats contains statistics regarding an XFS internal B-tree.
type BTreeStats struct {
Lookups uint32
Compares uint32
RecordsInserted uint32
RecordsDeleted uint32
}
// BlockMappingStats contains statistics regarding XFS block maps.
type BlockMappingStats struct {
Reads uint32
Writes uint32
Unmaps uint32
ExtentListInsertions uint32
ExtentListDeletions uint32
ExtentListLookups uint32
ExtentListCompares uint32
}
// DirectoryOperationStats contains statistics regarding XFS directory entries.
type DirectoryOperationStats struct {
Lookups uint32
Creates uint32
Removes uint32
Getdents uint32
}
// TransactionStats contains statistics regarding XFS metadata transactions.
type TransactionStats struct {
Sync uint32
Async uint32
Empty uint32
}
// InodeOperationStats contains statistics regarding XFS inode operations.
type InodeOperationStats struct {
Attempts uint32
Found uint32
Recycle uint32
Missed uint32
Duplicate uint32
Reclaims uint32
AttributeChange uint32
}
// LogOperationStats contains statistics regarding the XFS log buffer.
type LogOperationStats struct {
Writes uint32
Blocks uint32
NoInternalBuffers uint32
Force uint32
ForceSleep uint32
}
// ReadWriteStats contains statistics regarding the number of read and write
// system calls for XFS filesystems.
type ReadWriteStats struct {
Read uint32
Write uint32
}
// AttributeOperationStats contains statistics regarding manipulation of
// XFS extended file attributes.
type AttributeOperationStats struct {
Get uint32
Set uint32
Remove uint32
List uint32
}
// InodeClusteringStats contains statistics regarding XFS inode clustering
// operations.
type InodeClusteringStats struct {
Iflush uint32
Flush uint32
FlushInode uint32
}
// VnodeStats contains statistics regarding XFS vnode operations.
type VnodeStats struct {
Active uint32
Allocate uint32
Get uint32
Hold uint32
Release uint32
Reclaim uint32
Remove uint32
Free uint32
}
// BufferStats contains statistics regarding XFS read/write I/O buffers.
type BufferStats struct {
Get uint32
Create uint32
GetLocked uint32
GetLockedWaited uint32
BusyLocked uint32
MissLocked uint32
PageRetries uint32
PageFound uint32
GetRead uint32
}
// ExtendedPrecisionStats contains high precision counters used to track the
// total number of bytes read, written, or flushed, during XFS operations.
type ExtendedPrecisionStats struct {
FlushBytes uint64
WriteBytes uint64
ReadBytes uint64
}


@@ -11,12 +11,12 @@
// package in the standard library.
package draw
-// This file just contains the API exported by the image/draw package in the
-// standard library. Other files in this package provide additional features.
+// This file, and the go1_*.go files, just contains the API exported by the
+// image/draw package in the standard library. Other files in this package
+// provide additional features.
import (
"image"
"image/color"
"image/draw"
)
@@ -32,13 +32,6 @@ func DrawMask(dst Image, r image.Rectangle, src image.Image, sp image.Point, mas
draw.DrawMask(dst, r, src, sp, mask, mp, draw.Op(op))
}
// Drawer contains the Draw method.
type Drawer interface {
// Draw aligns r.Min in dst with sp in src and then replaces the
// rectangle r in dst with the result of drawing src on dst.
Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point)
}
// FloydSteinberg is a Drawer that is the Src Op with Floyd-Steinberg error
// diffusion.
var FloydSteinberg Drawer = floydSteinberg{}
@@ -48,32 +41,3 @@ type floydSteinberg struct{}
func (floydSteinberg) Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) {
draw.FloydSteinberg.Draw(dst, r, src, sp)
}
// Image is an image.Image with a Set method to change a single pixel.
type Image interface {
image.Image
Set(x, y int, c color.Color)
}
// Op is a Porter-Duff compositing operator.
type Op int
const (
// Over specifies ``(src in mask) over dst''.
Over Op = Op(draw.Over)
// Src specifies ``src in mask''.
Src Op = Op(draw.Src)
)
// Draw implements the Drawer interface by calling the Draw function with
// this Op.
func (op Op) Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) {
(draw.Op(op)).Draw(dst, r, src, sp)
}
// Quantizer produces a palette for an image.
type Quantizer interface {
// Quantize appends up to cap(p) - len(p) colors to p and returns the
// updated palette suitable for converting m to a paletted image.
Quantize(p color.Palette, m image.Image) color.Palette
}

49
vendor/golang.org/x/image/draw/go1_8.go generated vendored Normal file

@@ -0,0 +1,49 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.9
package draw
import (
"image"
"image/color"
"image/draw"
)
// Drawer contains the Draw method.
type Drawer interface {
// Draw aligns r.Min in dst with sp in src and then replaces the
// rectangle r in dst with the result of drawing src on dst.
Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point)
}
// Image is an image.Image with a Set method to change a single pixel.
type Image interface {
image.Image
Set(x, y int, c color.Color)
}
// Op is a Porter-Duff compositing operator.
type Op int
const (
// Over specifies ``(src in mask) over dst''.
Over Op = Op(draw.Over)
// Src specifies ``src in mask''.
Src Op = Op(draw.Src)
)
// Draw implements the Drawer interface by calling the Draw function with
// this Op.
func (op Op) Draw(dst Image, r image.Rectangle, src image.Image, sp image.Point) {
(draw.Op(op)).Draw(dst, r, src, sp)
}
// Quantizer produces a palette for an image.
type Quantizer interface {
// Quantize appends up to cap(p) - len(p) colors to p and returns the
// updated palette suitable for converting m to a paletted image.
Quantize(p color.Palette, m image.Image) color.Palette
}

57
vendor/golang.org/x/image/draw/go1_9.go generated vendored Normal file

@@ -0,0 +1,57 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package draw
import (
"image/draw"
)
// We use type aliases (new in Go 1.9) for the exported names from the standard
// library's image/draw package. This is not merely syntactic sugar for
//
// type Drawer draw.Drawer
//
// as aliasing means that the types in this package, such as draw.Image and
// draw.Op, are identical to the corresponding draw.Image and draw.Op types in
// the standard library. In comparison, prior to Go 1.9, the code in go1_8.go
// defines new types that mimic the old but are different types.
//
// The package documentation, in draw.go, explicitly gives the intent of this
// package:
//
// This package is a superset of and a drop-in replacement for the
// image/draw package in the standard library.
//
// Drop-in replacement means that I can replace all of my "image/draw" imports
// with "golang.org/x/image/draw", to access additional features in this
// package, and no further changes are required. That's mostly true, but not
// completely true unless we use type aliases.
//
// Without type aliases, users might need to import both "image/draw" and
// "golang.org/x/image/draw" in order to convert from two conceptually
// equivalent but different (from the compiler's point of view) types, such as
// from one draw.Op type to another draw.Op type, to satisfy some other
// interface or function signature.
// Drawer contains the Draw method.
type Drawer = draw.Drawer
// Image is an image.Image with a Set method to change a single pixel.
type Image = draw.Image
// Op is a Porter-Duff compositing operator.
type Op = draw.Op
const (
// Over specifies ``(src in mask) over dst''.
Over Op = draw.Over
// Src specifies ``src in mask''.
Src Op = draw.Src
)
// Quantizer produces a palette for an image.
type Quantizer = draw.Quantizer
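A brief sketch of what the aliases buy (hypothetical caller, not part of the diff; compiles only on Go 1.9 or newer, where this file is selected by the build tag): a value from this package can be passed straight to the standard library's image/draw API with no conversion.

package main

import (
	"image"
	"image/color"
	stddraw "image/draw"

	xdraw "golang.org/x/image/draw"
)

func main() {
	dst := image.NewRGBA(image.Rect(0, 0, 16, 16))
	src := image.NewUniform(color.White)

	// Because xdraw.Op is an alias for stddraw.Op, xdraw.Src is already
	// a stddraw.Op; no conversion between the two packages is needed.
	var op stddraw.Op = xdraw.Src
	stddraw.Draw(dst, dst.Bounds(), src, image.Point{}, op)
}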