vendor: re-vendor with glide

Gyu-Ho Lee 2016-08-23 02:26:02 -07:00
parent a589a295cd
commit caea364f8b
579 changed files with 229173 additions and 0 deletions

193
glide.lock generated Normal file

@@ -0,0 +1,193 @@
hash: 6ac0ebb0f8853fa18c2be5b1b550eb3f96b4ea1deb3a0d4ff5390af768e16a22
updated: 2016-08-23T02:25:54.515417454-07:00
imports:
- name: bitbucket.org/zombiezen/gopdf
version: 1c63dc69751bc45441c2ce1f56b631c55294b4d5
subpackages:
- pdf
- name: cloud.google.com/go
version: 242ed68074e53cbb6f78a90c28d9a48bfa2d1093
subpackages:
- compute/metadata
- internal
- name: github.com/ajstarks/svgo
version: 672fe547df4e49efc6db67a74391368bcb149b37
- name: github.com/cheggaaa/pb
version: 6e9d17711bb763b26b68b3931d47f24c1323abab
- name: github.com/cloudfoundry-incubator/candiedyaml
version: 99c3df83b51532e3615f851d8c2dbb638f5313bf
- name: github.com/coreos/etcd
version: 959f860a4053f66a2602a5795a9ebc04eed20f0a
subpackages:
- auth/authpb
- client
- clientv3
- etcdserver/api/v3rpc/rpctypes
- etcdserver/etcdserverpb
- mvcc/mvccpb
- pkg/pathutil
- pkg/tlsutil
- pkg/types
- name: github.com/coreos/go-systemd
version: 5c49e4850c879a0ddc061e8f4adcf307de8a8bc2
subpackages:
- journal
- name: github.com/coreos/pkg
version: 3ac0863d7acf3bc44daf49afef8919af12f704ef
subpackages:
- capnslog
- name: github.com/dustin/go-humanize
version: 2fcb5204cdc65b4bec9fd0a87606bb0d0e3c54e8
- name: github.com/ghodss/yaml
version: aa0c862057666179de291b67d9f093d12b5a8473
- name: github.com/gogo/protobuf
version: 330d6892d1d39fcd6cff0b4240821bdaf30c3a3c
subpackages:
- gogoproto
- proto
- protoc-gen-gogo/descriptor
- name: github.com/golang/freetype
version: 38b4c392adc5eed94207994c4848fff99f4ac234
subpackages:
- raster
- truetype
- name: github.com/golang/protobuf
version: f592bd283e9ef86337a432eb50e592278c3d534d
subpackages:
- jsonpb
- proto
- name: github.com/gonum/floats
version: e7dfbda930c8a40d6d1ba9107762aae3e6e47591
- name: github.com/gonum/internal
version: 495c8ab446bf60264be32be6973302faac6a5695
subpackages:
- asm
- name: github.com/gonum/plot
version: c1e68bf98c6ed50599f84747f929f684d49807b9
subpackages:
- palette
- plotter
- plotutil
- vg
- vg/draw
- vg/fonts
- vg/vgeps
- vg/vgimg
- vg/vgpdf
- vg/vgsvg
- name: github.com/grpc-ecosystem/grpc-gateway
version: 5e0e028ba0a015710eaebf6e47af18812c9f2767
subpackages:
- runtime
- runtime/internal
- utilities
- name: github.com/gyuho/dataframe
version: 573cd728a011e5473510a6a1df0f39023c305e04
- name: github.com/gyuho/psn
version: 415d7423cced5ca6bf6dbf81c5cbe37605f1dfaa
subpackages:
- process
- name: github.com/hashicorp/consul
version: 36dc9201f2e006d4b5db1f0446b17357811297bf
subpackages:
- api
- name: github.com/hashicorp/go-cleanhttp
version: ad28ea4487f05916463e2423a55166280e8254b5
- name: github.com/hashicorp/serf
version: 9432bc08aa8d486e497e27f84878ebbe8c1eab66
subpackages:
- coordinate
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/llgcode/draw2d
version: 13548be874707c45a83f9c2c282d76d671c7acf8
subpackages:
- draw2dbase
- draw2dimg
- name: github.com/mattn/go-runewidth
version: d6bea18f789704b5f83375793155289da36a3c7f
- name: github.com/olekukonko/tablewriter
version: daf2955e742cf123959884fdff4685aa79b63135
- name: github.com/samuel/go-zookeeper
version: e64db453f3512cade908163702045e0f31137843
subpackages:
- zk
- name: github.com/spf13/cobra
version: 1f4717172e07e12150feb061b2ab847c3054f53f
- name: github.com/spf13/pflag
version: 103ce5cd2042f2fe629c1957abb64ab3e7f50235
- name: github.com/ugorji/go
version: 4a1cb5252a6951f715a85d0e4be334c2a2dbf2a2
subpackages:
- codec
- name: golang.org/x/image
version: 9f8d0d45877da31f672995d10677ae6a586e31a5
subpackages:
- draw
- font
- math/f64
- math/fixed
- tiff
- tiff/lzw
- name: golang.org/x/net
version: 7394c112eae4dba7e96bfcfe738e6373d61772b4
subpackages:
- context
- context/ctxhttp
- http2
- http2/hpack
- internal/timeseries
- lex/httplex
- trace
- name: golang.org/x/oauth2
version: 3b966c7f301c0c71c53d94dc632a62df0a682cd7
subpackages:
- google
- internal
- jws
- jwt
- name: google.golang.org/api
version: 7b2847ee16541b59d2a5004bb75794ebdc2246c4
subpackages:
- gensupport
- googleapi
- googleapi/internal/uritemplates
- internal
- option
- storage/v1
- transport
- name: google.golang.org/appengine
version: 4f7eeb5305a4ba1966344836ba4af9996b7b4e05
subpackages:
- internal
- internal/app_identity
- internal/base
- internal/datastore
- internal/log
- internal/modules
- internal/remote_api
- internal/socket
- internal/urlfetch
- socket
- urlfetch
- name: google.golang.org/cloud
version: 12aa462581208c155e498dc13e14cabe6da24dc3
subpackages:
- internal
- internal/transport
- storage
- name: google.golang.org/grpc
version: 0032a855ba5c8a3c8e0d71c2deef354b70af1584
subpackages:
- codes
- credentials
- credentials/oauth
- grpclog
- internal
- metadata
- naming
- peer
- transport
- name: gopkg.in/yaml.v2
version: e4d366fc3c7938e2958e662b4258c7a89e1f0e3e
testImports: []

45
glide.yaml Normal file

@@ -0,0 +1,45 @@
package: github.com/coreos/dbtester
import:
- package: github.com/cheggaaa/pb
- package: github.com/coreos/etcd
subpackages:
- client
- clientv3
- package: github.com/coreos/pkg
subpackages:
- capnslog
- package: github.com/dustin/go-humanize
- package: github.com/gogo/protobuf
subpackages:
- gogoproto
- package: github.com/golang/protobuf
subpackages:
- proto
- package: github.com/gonum/plot
subpackages:
- plotter
- plotutil
- vg
- package: github.com/gyuho/dataframe
- package: github.com/gyuho/psn
subpackages:
- process
- package: github.com/hashicorp/consul
subpackages:
- api
- package: github.com/samuel/go-zookeeper
subpackages:
- zk
- package: github.com/spf13/cobra
- package: golang.org/x/net
subpackages:
- context
- package: golang.org/x/oauth2
subpackages:
- google
- jwt
- package: google.golang.org/cloud
subpackages:
- storage
- package: google.golang.org/grpc
- package: gopkg.in/yaml.v2
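
For context: glide.yaml above declares only the direct imports (and the subpackages actually used), while glide.lock pins the full transitive dependency set to the exact revisions listed at the top of this commit. Re-vendoring with glide typically amounts to running glide update to re-resolve glide.yaml and regenerate glide.lock, then glide install to populate vendor/ from the pinned revisions; the exact invocation used for this commit is not recorded here.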

23
vendor/bitbucket.org/zombiezen/gopdf/LICENSE generated vendored Normal file

@@ -0,0 +1,23 @@
Copyright (c) 2011, The gopdf Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

248
vendor/bitbucket.org/zombiezen/gopdf/pdf/canvas.go generated vendored Normal file

@@ -0,0 +1,248 @@
// Copyright (C) 2011, Ross Light
package pdf
import (
"bytes"
"fmt"
"image"
"io"
"math"
)
// writeCommand writes a PDF graphics command.
func writeCommand(w io.Writer, op string, args ...interface{}) error {
for _, arg := range args {
// TODO: Use the same buffer for all arguments
if m, err := marshal(nil, arg); err == nil {
if _, err := w.Write(append(m, ' ')); err != nil {
return err
}
} else {
return err
}
}
if _, err := io.WriteString(w, op); err != nil {
return err
}
if _, err := w.Write([]byte{'\n'}); err != nil {
return err
}
return nil
}
// Canvas is a two-dimensional drawing region on a single page. You can obtain
// a canvas once you have created a document.
type Canvas struct {
doc *Document
page *pageDict
ref Reference
contents *stream
imageCounter uint
}
// Document returns the document the canvas is attached to.
func (canvas *Canvas) Document() *Document {
return canvas.doc
}
// Close flushes the page's stream to the document. This must be called once
// drawing has completed or else the document will be inconsistent.
func (canvas *Canvas) Close() error {
return canvas.contents.Close()
}
// Size returns the page's media box (the size of the physical medium).
func (canvas *Canvas) Size() (width, height Unit) {
mbox := canvas.page.MediaBox
return mbox.Dx(), mbox.Dy()
}
// SetSize changes the page's media box (the size of the physical medium).
func (canvas *Canvas) SetSize(width, height Unit) {
canvas.page.MediaBox = Rectangle{Point{0, 0}, Point{width, height}}
}
// CropBox returns the page's crop box.
func (canvas *Canvas) CropBox() Rectangle {
return canvas.page.CropBox
}
// SetCropBox changes the page's crop box.
func (canvas *Canvas) SetCropBox(crop Rectangle) {
canvas.page.CropBox = crop
}
// FillStroke fills then strokes the given path. This operation has the same
// effect as performing a fill then a stroke, but does not repeat the path in
// the file.
func (canvas *Canvas) FillStroke(p *Path) {
io.Copy(canvas.contents, &p.buf)
writeCommand(canvas.contents, "B")
}
// Fill paints the area enclosed by the given path using the current fill color.
func (canvas *Canvas) Fill(p *Path) {
io.Copy(canvas.contents, &p.buf)
writeCommand(canvas.contents, "f")
}
// Stroke paints a line along the given path using the current stroke color.
func (canvas *Canvas) Stroke(p *Path) {
io.Copy(canvas.contents, &p.buf)
writeCommand(canvas.contents, "S")
}
// SetLineWidth changes the stroke width to the given value.
func (canvas *Canvas) SetLineWidth(w Unit) {
writeCommand(canvas.contents, "w", w)
}
// SetLineDash changes the line dash pattern in the current graphics state.
// Examples:
//
// c.SetLineDash(0, []Unit{}) // solid line
// c.SetLineDash(0, []Unit{3}) // 3 units on, 3 units off...
// c.SetLineDash(0, []Unit{2, 1}) // 2 units on, 1 unit off...
// c.SetLineDash(1, []Unit{2}) // 1 unit on, 2 units off, 2 units on...
func (canvas *Canvas) SetLineDash(phase Unit, dash []Unit) {
writeCommand(canvas.contents, "d", dash, phase)
}
// SetColor changes the current fill color to the given RGB triple (in device
// RGB space).
func (canvas *Canvas) SetColor(r, g, b float32) {
writeCommand(canvas.contents, "rg", r, g, b)
}
// SetStrokeColor changes the current stroke color to the given RGB triple (in
// device RGB space).
func (canvas *Canvas) SetStrokeColor(r, g, b float32) {
writeCommand(canvas.contents, "RG", r, g, b)
}
// Push saves a copy of the current graphics state. The state can later be
// restored using Pop.
func (canvas *Canvas) Push() {
writeCommand(canvas.contents, "q")
}
// Pop restores the most recently saved graphics state by popping it from the
// stack.
func (canvas *Canvas) Pop() {
writeCommand(canvas.contents, "Q")
}
// Translate moves the canvas's coordinate system by the given offset.
func (canvas *Canvas) Translate(x, y Unit) {
writeCommand(canvas.contents, "cm", 1, 0, 0, 1, x, y)
}
// Rotate rotates the canvas's coordinate system by a given angle (in radians).
func (canvas *Canvas) Rotate(theta float32) {
s, c := math.Sin(float64(theta)), math.Cos(float64(theta))
writeCommand(canvas.contents, "cm", c, s, -s, c, 0, 0)
}
// Scale multiplies the canvas's coordinate system by the given scalars.
func (canvas *Canvas) Scale(x, y float32) {
writeCommand(canvas.contents, "cm", x, 0, 0, y, 0, 0)
}
// Transform concatenates a 3x3 matrix with the current transformation matrix.
// The arguments map to values in the matrix as shown below:
//
// / a b 0 \
// | c d 0 |
// \ e f 1 /
//
// For more information, see Section 8.3.4 of ISO 32000-1.
func (canvas *Canvas) Transform(a, b, c, d, e, f float32) {
writeCommand(canvas.contents, "cm", a, b, c, d, e, f)
}
// DrawText paints a text object onto the canvas.
func (canvas *Canvas) DrawText(text *Text) {
for fontName := range text.fonts {
if _, ok := canvas.page.Resources.Font[fontName]; !ok {
canvas.page.Resources.Font[fontName] = canvas.doc.standardFont(fontName)
}
}
writeCommand(canvas.contents, "BT")
io.Copy(canvas.contents, &text.buf)
writeCommand(canvas.contents, "ET")
}
// DrawImage paints a raster image at the given location and scaled to the
// given dimensions. If you want to render the same image multiple times in
// the same document, use DrawImageReference.
func (canvas *Canvas) DrawImage(img image.Image, rect Rectangle) {
canvas.DrawImageReference(canvas.doc.AddImage(img), rect)
}
// DrawImageReference paints the raster image referenced in the document at the
// given location and scaled to the given dimensions.
func (canvas *Canvas) DrawImageReference(ref Reference, rect Rectangle) {
name := canvas.nextImageName()
canvas.page.Resources.XObject[name] = ref
canvas.Push()
canvas.Transform(float32(rect.Dx()), 0, 0, float32(rect.Dy()), float32(rect.Min.X), float32(rect.Min.Y))
writeCommand(canvas.contents, "Do", name)
canvas.Pop()
}
// DrawLine paints a straight line from pt1 to pt2 using the current stroke
// color and line width.
func (canvas *Canvas) DrawLine(pt1, pt2 Point) {
var path Path
path.Move(pt1)
path.Line(pt2)
canvas.Stroke(&path)
}
const anonymousImageFormat = "__image%d__"
func (canvas *Canvas) nextImageName() name {
var n name
for {
n = name(fmt.Sprintf(anonymousImageFormat, canvas.imageCounter))
canvas.imageCounter++
if _, ok := canvas.page.Resources.XObject[n]; !ok {
break
}
}
return n
}
// Path is a shape that can be painted on a canvas. The zero value is an empty
// path.
type Path struct {
buf bytes.Buffer
}
// Move begins a new subpath by moving the current point to the given location.
func (path *Path) Move(pt Point) {
writeCommand(&path.buf, "m", pt.X, pt.Y)
}
// Line appends a line segment from the current point to the given location.
func (path *Path) Line(pt Point) {
writeCommand(&path.buf, "l", pt.X, pt.Y)
}
// Curve appends a cubic Bezier curve to the path.
func (path *Path) Curve(pt1, pt2, pt3 Point) {
writeCommand(&path.buf, "c", pt1.X, pt1.Y, pt2.X, pt2.Y, pt3.X, pt3.Y)
}
// Rectangle appends a complete rectangle to the path.
func (path *Path) Rectangle(rect Rectangle) {
writeCommand(&path.buf, "re", rect.Min.X, rect.Min.Y, rect.Dx(), rect.Dy())
}
// Close appends a line segment from the current point to the starting point of
// the subpath.
func (path *Path) Close() {
writeCommand(&path.buf, "h")
}
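
As a quick orientation to the API above, here is a minimal illustrative sketch (not part of this commit) that exercises the graphics-state and path calls defined in canvas.go, using only identifiers from the vendored package:

package main

import (
	"os"

	"bitbucket.org/zombiezen/gopdf/pdf"
)

func main() {
	doc := pdf.New()
	canvas := doc.NewPage(pdf.USLetterWidth, pdf.USLetterHeight)

	// Save the graphics state, move the origin in from the page corner,
	// and stroke a dashed rectangle with a 2 pt line.
	canvas.Push()
	canvas.Translate(pdf.Inch, pdf.Inch)
	canvas.SetLineWidth(2)
	canvas.SetLineDash(0, []pdf.Unit{3})

	var box pdf.Path
	box.Rectangle(pdf.Rectangle{Min: pdf.Point{0, 0}, Max: pdf.Point{4 * pdf.Inch, 2 * pdf.Inch}})
	canvas.Stroke(&box)
	canvas.Pop()

	canvas.Close()
	if err := doc.Encode(os.Stdout); err != nil {
		os.Exit(1)
	}
}

Push and Pop bracket the translation and dash pattern so they do not leak into anything drawn later on the same page.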

40
vendor/bitbucket.org/zombiezen/gopdf/pdf/doc.go generated vendored Normal file

@@ -0,0 +1,40 @@
// Copyright (C) 2011, Ross Light
/*
Package pdf implements a Portable Document Format writer, as defined in ISO 32000-1.
An example of basic usage:
package main
import (
"bitbucket.org/zombiezen/gopdf/pdf"
"fmt"
"os"
)
func main() {
doc := pdf.New()
canvas := doc.NewPage(pdf.USLetterWidth, pdf.USLetterHeight)
canvas.Translate(100, 100)
path := new(pdf.Path)
path.Move(pdf.Point{0, 0})
path.Line(pdf.Point{100, 0})
canvas.Stroke(path)
text := new(pdf.Text)
text.SetFont(pdf.Helvetica, 14)
text.Text("Hello, World!")
canvas.DrawText(text)
canvas.Close()
err := doc.Encode(os.Stdout)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
*/
package pdf

149
vendor/bitbucket.org/zombiezen/gopdf/pdf/encode.go generated vendored Normal file

@@ -0,0 +1,149 @@
// Copyright (C) 2011, Ross Light
package pdf
import (
"fmt"
"io"
)
// encoder writes the PDF file format structure.
type encoder struct {
objects []interface{}
root Reference
}
type trailer struct {
Size int
Root Reference
}
// add appends an object to the file. The object is marshalled only when an
// encoding is requested.
func (enc *encoder) add(v interface{}) Reference {
enc.objects = append(enc.objects, v)
return Reference{uint(len(enc.objects)), 0}
}
const (
header = "%PDF-1.7" + newline + "%\x93\x8c\x8b\x9e" + newline
newline = "\r\n"
)
// Cross reference strings
const (
crossReferenceSectionHeader = "xref" + newline
crossReferenceSubsectionFormat = "%d %d" + newline
crossReferenceFormat = "%010d %05d n" + newline
crossReferenceFreeFormat = "%010d %05d f" + newline
)
const trailerHeader = "trailer" + newline
const startxrefFormat = "startxref" + newline + "%d" + newline
const eofString = "%%EOF" + newline
// encode writes an entire PDF document by marshalling the added objects.
func (enc *encoder) encode(wr io.Writer) error {
w := &offsetWriter{Writer: wr}
if err := enc.writeHeader(w); err != nil {
return err
}
objectOffsets, err := enc.writeBody(w)
if err != nil {
return err
}
tableOffset := w.offset
if err := enc.writeXrefTable(w, objectOffsets); err != nil {
return err
}
if err := enc.writeTrailer(w); err != nil {
return err
}
if err := enc.writeStartxref(w, tableOffset); err != nil {
return err
}
if err := enc.writeEOF(w); err != nil {
return err
}
return nil
}
func (enc *encoder) writeHeader(w *offsetWriter) error {
_, err := io.WriteString(w, header)
return err
}
func (enc *encoder) writeBody(w *offsetWriter) ([]int64, error) {
objectOffsets := make([]int64, len(enc.objects))
for i, obj := range enc.objects {
// TODO: Use same buffer for writing across objects
objectOffsets[i] = w.offset
data, err := marshal(nil, indirectObject{Reference{uint(i + 1), 0}, obj})
if err != nil {
return nil, err
}
if _, err = w.Write(append(data, newline...)); err != nil {
return nil, err
}
}
return objectOffsets, nil
}
func (enc *encoder) writeXrefTable(w *offsetWriter, objectOffsets []int64) error {
if _, err := io.WriteString(w, crossReferenceSectionHeader); err != nil {
return err
}
if _, err := fmt.Fprintf(w, crossReferenceSubsectionFormat, 0, len(enc.objects)+1); err != nil {
return err
}
if _, err := fmt.Fprintf(w, crossReferenceFreeFormat, 0, 65535); err != nil {
return err
}
for _, offset := range objectOffsets {
if _, err := fmt.Fprintf(w, crossReferenceFormat, offset, 0); err != nil {
return err
}
}
return nil
}
func (enc *encoder) writeTrailer(w *offsetWriter) error {
var err error
dict := trailer{
Size: len(enc.objects) + 1,
Root: enc.root,
}
data := make([]byte, 0, len(trailerHeader)+len(newline))
data = append(data, trailerHeader...)
if data, err = marshal(data, dict); err != nil {
return err
}
data = append(data, newline...)
_, err = w.Write(data)
return err
}
func (enc *encoder) writeStartxref(w *offsetWriter, tableOffset int64) error {
_, err := fmt.Fprintf(w, startxrefFormat, tableOffset)
return err
}
func (enc *encoder) writeEOF(w *offsetWriter) error {
_, err := io.WriteString(w, eofString)
return err
}
// offsetWriter tracks how many bytes have been written to it.
type offsetWriter struct {
io.Writer
offset int64
}
func (w *offsetWriter) Write(p []byte) (n int, err error) {
n, err = w.Writer.Write(p)
w.offset += int64(n)
return
}
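
Taken together, encode emits the classic single-xref PDF skeleton: the %PDF-1.7 header, each added object serialized as an indirect "N 0 obj ... endobj" body, a cross-reference table built from the byte offsets recorded by offsetWriter, a trailer dictionary carrying Size and Root, a startxref pointer back to the table, and the closing %%EOF marker.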

135
vendor/bitbucket.org/zombiezen/gopdf/pdf/image.go generated vendored Normal file

@@ -0,0 +1,135 @@
// Copyright (C) 2011, Ross Light
package pdf
import (
"image"
"image/color"
"io"
)
const (
deviceRGBColorSpace name = "DeviceRGB"
)
type imageStream struct {
*stream
Width int
Height int
BitsPerComponent int
ColorSpace name
}
type imageStreamInfo struct {
Type name
Subtype name
Length int
Filter name `pdf:",omitempty"`
Width int
Height int
BitsPerComponent int
ColorSpace name
}
func newImageStream(filter name, w, h int) *imageStream {
return &imageStream{
stream: newStream(filter),
Width: w,
Height: h,
BitsPerComponent: 8,
ColorSpace: deviceRGBColorSpace,
}
}
func (st *imageStream) marshalPDF(dst []byte) ([]byte, error) {
return marshalStream(dst, imageStreamInfo{
Type: xobjectType,
Subtype: imageSubtype,
Length: st.Len(),
Filter: st.filter,
Width: st.Width,
Height: st.Height,
BitsPerComponent: st.BitsPerComponent,
ColorSpace: st.ColorSpace,
}, st.Bytes())
}
// encodeImageStream writes RGB data from an image in PDF format.
func encodeImageStream(w io.Writer, img image.Image) error {
bd := img.Bounds()
row := make([]byte, bd.Dx()*3)
for y := bd.Min.Y; y < bd.Max.Y; y++ {
i := 0
for x := bd.Min.X; x < bd.Max.X; x++ {
r, g, b, a := img.At(x, y).RGBA()
if a != 0 {
row[i+0] = uint8((r * 65535 / a) >> 8)
row[i+1] = uint8((g * 65535 / a) >> 8)
row[i+2] = uint8((b * 65535 / a) >> 8)
} else {
row[i+0] = 0
row[i+1] = 0
row[i+2] = 0
}
i += 3
}
if _, err := w.Write(row); err != nil {
return err
}
}
return nil
}
func encodeRGBAStream(w io.Writer, img *image.RGBA) error {
buf := make([]byte, 3*img.Rect.Dx()*img.Rect.Dy())
var a uint16
for i, j := 0, 0; i < len(img.Pix); i, j = i+4, j+3 {
a = uint16(img.Pix[i+3])
if a != 0 {
buf[j+0] = byte(uint16(img.Pix[i+0]) * 0xff / a)
buf[j+1] = byte(uint16(img.Pix[i+1]) * 0xff / a)
buf[j+2] = byte(uint16(img.Pix[i+2]) * 0xff / a)
}
}
_, err := w.Write(buf)
return err
}
func encodeNRGBAStream(w io.Writer, img *image.NRGBA) error {
buf := make([]byte, 3*img.Rect.Dx()*img.Rect.Dy())
for i, j := 0, 0; i < len(img.Pix); i, j = i+4, j+3 {
buf[j+0] = img.Pix[i+0]
buf[j+1] = img.Pix[i+1]
buf[j+2] = img.Pix[i+2]
}
_, err := w.Write(buf)
return err
}
func encodeYCbCrStream(w io.Writer, img *image.YCbCr) error {
var yy, cb, cr uint8
var i, j int
dx, dy := img.Rect.Dx(), img.Rect.Dy()
buf := make([]byte, 3*dx*dy)
bi := 0
for y := 0; y < dy; y++ {
for x := 0; x < dx; x++ {
i, j = x, y
switch img.SubsampleRatio {
case image.YCbCrSubsampleRatio420:
j /= 2
fallthrough
case image.YCbCrSubsampleRatio422:
i /= 2
}
yy = img.Y[y*img.YStride+x]
cb = img.Cb[j*img.CStride+i]
cr = img.Cr[j*img.CStride+i]
buf[bi+0], buf[bi+1], buf[bi+2] = color.YCbCrToRGB(yy, cb, cr)
bi += 3
}
}
_, err := w.Write(buf)
return err
}
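
Note that all of the encoders above emit plain 8-bit DeviceRGB samples with no alpha channel (the image dictionary declares none), so encodeImageStream and encodeRGBAStream un-premultiply the alpha before truncating to 8 bits, and fully transparent pixels simply come out black.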

202
vendor/bitbucket.org/zombiezen/gopdf/pdf/marshal.go generated vendored Normal file

@@ -0,0 +1,202 @@
// Copyright (C) 2011, Ross Light
package pdf
import (
"errors"
"reflect"
"strconv"
"strings"
)
// A marshaler can produce a PDF object.
type marshaler interface {
marshalPDF(dst []byte) ([]byte, error)
}
// marshal returns the PDF encoding of v.
//
// If the value implements the marshaler interface, then its marshalPDF method
// is called. ints, strings, and floats will be marshalled according to the PDF
// standard.
func marshal(dst []byte, v interface{}) ([]byte, error) {
state := marshalState{dst}
if err := state.marshalValue(reflect.ValueOf(v)); err != nil {
return nil, err
}
return state.data, nil
}
type marshalState struct {
data []byte
}
const marshalFloatPrec = 5
func (state *marshalState) writeString(s string) {
state.data = append(state.data, s...)
}
func (state *marshalState) marshalValue(v reflect.Value) error {
if !v.IsValid() {
state.writeString("null")
return nil
}
if m, ok := v.Interface().(marshaler); ok {
slice, err := m.marshalPDF(state.data)
if err != nil {
return err
}
state.data = slice
return nil
}
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
state.writeString(strconv.FormatInt(v.Int(), 10))
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
state.writeString(strconv.FormatUint(v.Uint(), 10))
return nil
case reflect.Float32, reflect.Float64:
state.writeString(strconv.FormatFloat(v.Float(), 'f', marshalFloatPrec, 64))
return nil
case reflect.String:
state.writeString(quote(v.String()))
return nil
case reflect.Ptr, reflect.Interface:
return state.marshalValue(v.Elem())
case reflect.Array, reflect.Slice:
return state.marshalSlice(v)
case reflect.Map:
return state.marshalDictionary(v)
case reflect.Struct:
return state.marshalStruct(v)
}
return errors.New("pdf: unsupported type: " + v.Type().String())
}
// quote escapes a string and returns a PDF string literal.
func quote(s string) string {
r := strings.NewReplacer(
"\r", `\r`,
"\t", `\t`,
"\b", `\b`,
"\f", `\f`,
"(", `\(`,
")", `\)`,
`\`, `\\`,
)
return "(" + r.Replace(s) + ")"
}
func (state *marshalState) marshalSlice(v reflect.Value) error {
state.writeString("[ ")
for i := 0; i < v.Len(); i++ {
if err := state.marshalValue(v.Index(i)); err != nil {
return err
}
state.writeString(" ")
}
state.writeString("]")
return nil
}
func (state *marshalState) marshalDictionary(v reflect.Value) error {
if v.Type().Key() != reflect.TypeOf(name("")) {
return errors.New("pdf: cannot marshal dictionary with key type: " + v.Type().Key().String())
}
state.writeString("<< ")
for _, k := range v.MapKeys() {
state.marshalKeyValue(k.Interface().(name), v.MapIndex(k))
}
state.writeString(">>")
return nil
}
func (state *marshalState) marshalStruct(v reflect.Value) error {
state.writeString("<< ")
t := v.Type()
n := v.NumField()
for i := 0; i < n; i++ {
f := t.Field(i)
if f.PkgPath != "" {
continue
}
tag, omitEmpty := f.Name, false
if tv := f.Tag.Get("pdf"); tv != "" {
if tv == "-" {
continue
}
name, options := parseTag(tv)
if name != "" {
tag = name
}
omitEmpty = options.Contains("omitempty")
}
fieldValue := v.Field(i)
if omitEmpty && isEmptyValue(fieldValue) {
continue
}
state.marshalKeyValue(name(tag), fieldValue)
}
state.writeString(">>")
return nil
}
func (state *marshalState) marshalKeyValue(k name, v reflect.Value) error {
slice, err := k.marshalPDF(state.data)
if err != nil {
return err
}
state.data = slice
state.writeString(" ")
if err := state.marshalValue(v); err != nil {
return err
}
state.writeString(" ")
return nil
}
type tagOptions []string
func parseTag(tag string) (name string, options tagOptions) {
result := strings.Split(tag, ",")
return result[0], tagOptions(result[1:])
}
func (options tagOptions) Contains(opt string) bool {
for _, o := range options {
if opt == o {
return true
}
}
return false
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
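
To make the struct handling above concrete, here is a hypothetical type, not part of the vendored package, written as if it were declared inside package pdf (marshal and name are unexported), together with the output one would expect:

// Hypothetical example type for illustration only.
type outlineInfo struct {
	Title string
	Count int
	Note  string `pdf:",omitempty"` // dropped while empty
	local int                       // unexported, skipped by marshalStruct
}

// marshal(nil, outlineInfo{Title: "hello", Count: 2}) produces roughly:
//   << /Title (hello) /Count 2 >>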

1389
vendor/bitbucket.org/zombiezen/gopdf/pdf/metrics.go generated vendored Normal file

File diff suppressed because it is too large

55
vendor/bitbucket.org/zombiezen/gopdf/pdf/objects.go generated vendored Normal file

@@ -0,0 +1,55 @@
// Copyright (C) 2011, Ross Light
package pdf
import (
"fmt"
"strconv"
)
// name is a PDF name object, which is used as an identifier.
type name string
func (n name) String() string {
return string(n)
}
func (n name) marshalPDF(dst []byte) ([]byte, error) {
// TODO: escape characters
dst = append(dst, '/')
return append(dst, []byte(n)...), nil
}
type indirectObject struct {
Reference
Object interface{}
}
const (
objectBegin = " obj\r\n"
objectEnd = "\r\nendobj"
)
func (obj indirectObject) marshalPDF(dst []byte) ([]byte, error) {
var err error
mn, mg := strconv.FormatUint(uint64(obj.Number), 10), strconv.FormatUint(uint64(obj.Generation), 10)
dst = append(dst, mn...)
dst = append(dst, ' ')
dst = append(dst, mg...)
dst = append(dst, objectBegin...)
if dst, err = marshal(dst, obj.Object); err != nil {
return nil, err
}
dst = append(dst, objectEnd...)
return dst, nil
}
// Reference holds a PDF indirect reference.
type Reference struct {
Number uint
Generation uint
}
func (ref Reference) marshalPDF(dst []byte) ([]byte, error) {
return append(dst, fmt.Sprintf("%d %d R", ref.Number, ref.Generation)...), nil
}
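
For reference, Reference{Number: 3, Generation: 0} marshals to the indirect reference "3 0 R", while the corresponding indirectObject wraps its payload as "3 0 obj ... endobj"; writeBody in encode.go records the byte offset of each such object so the cross-reference table can point back at it.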

230
vendor/bitbucket.org/zombiezen/gopdf/pdf/pdf.go generated vendored Normal file

@@ -0,0 +1,230 @@
// Copyright (C) 2011, Ross Light
package pdf
import (
"image"
"io"
"strconv"
)
// Unit is a device-independent dimensional type. On a new canvas, this
// represents 1/72 of an inch.
type Unit float32
func (unit Unit) String() string {
return strconv.FormatFloat(float64(unit), 'f', marshalFloatPrec, 32)
}
// Common unit scales
const (
Pt Unit = 1
Inch Unit = 72
Cm Unit = 28.35
)
// Common page sizes
const (
USLetterWidth Unit = 8.5 * Inch
USLetterHeight Unit = 11.0 * Inch
A4Width Unit = 21.0 * Cm
A4Height Unit = 29.7 * Cm
)
// Document provides a high-level drawing interface for the PDF format.
type Document struct {
encoder
catalog *catalog
pages []indirectObject
fonts map[name]Reference
}
// New creates a new document with no pages.
func New() *Document {
doc := new(Document)
doc.catalog = &catalog{
Type: catalogType,
}
doc.root = doc.add(doc.catalog)
doc.fonts = make(map[name]Reference, 14)
return doc
}
// NewPage creates a new canvas with the given dimensions.
func (doc *Document) NewPage(width, height Unit) *Canvas {
page := &pageDict{
Type: pageType,
MediaBox: Rectangle{Point{0, 0}, Point{width, height}},
CropBox: Rectangle{Point{0, 0}, Point{width, height}},
Resources: resources{
ProcSet: []name{pdfProcSet, textProcSet, imageCProcSet},
Font: make(map[name]interface{}),
XObject: make(map[name]interface{}),
},
}
pageRef := doc.add(page)
doc.pages = append(doc.pages, indirectObject{pageRef, page})
stream := newStream(streamFlateDecode)
page.Contents = doc.add(stream)
return &Canvas{
doc: doc,
page: page,
ref: pageRef,
contents: stream,
}
}
// standardFont returns a reference to a standard font dictionary. If there is
// no font dictionary for the font in the document yet, it is added
// automatically.
func (doc *Document) standardFont(fontName name) Reference {
if ref, ok := doc.fonts[fontName]; ok {
return ref
}
// TODO: check name is standard?
ref := doc.add(standardFontDict{
Type: fontType,
Subtype: fontType1Subtype,
BaseFont: fontName,
})
doc.fonts[fontName] = ref
return ref
}
// AddImage encodes an image into the document's stream and returns its PDF
// file reference. This reference can be used to draw the image multiple times
// without storing the image multiple times.
func (doc *Document) AddImage(img image.Image) Reference {
bd := img.Bounds()
st := newImageStream(streamFlateDecode, bd.Dx(), bd.Dy())
defer st.Close()
switch i := img.(type) {
case *image.RGBA:
encodeRGBAStream(st, i)
case *image.NRGBA:
encodeNRGBAStream(st, i)
case *image.YCbCr:
encodeYCbCrStream(st, i)
default:
encodeImageStream(st, i)
}
return doc.add(st)
}
// Encode writes the document to a writer in the PDF format.
func (doc *Document) Encode(w io.Writer) error {
pageRoot := &pageRootNode{
Type: pageNodeType,
Count: len(doc.pages),
}
doc.catalog.Pages = doc.add(pageRoot)
for _, p := range doc.pages {
page := p.Object.(*pageDict)
page.Parent = doc.catalog.Pages
pageRoot.Kids = append(pageRoot.Kids, p.Reference)
}
return doc.encoder.encode(w)
}
// PDF object types
const (
catalogType name = "Catalog"
pageNodeType name = "Pages"
pageType name = "Page"
fontType name = "Font"
xobjectType name = "XObject"
)
// PDF object subtypes
const (
imageSubtype name = "Image"
fontType1Subtype name = "Type1"
)
type catalog struct {
Type name
Pages Reference
}
type pageRootNode struct {
Type name
Kids []Reference
Count int
}
type pageNode struct {
Type name
Parent Reference
Kids []Reference
Count int
}
type pageDict struct {
Type name
Parent Reference
Resources resources
MediaBox Rectangle
CropBox Rectangle
Contents Reference
}
// Point is a 2D point.
type Point struct {
X, Y Unit
}
// A Rectangle defines a rectangle with two points.
type Rectangle struct {
Min, Max Point
}
// Dx returns the rectangle's width.
func (r Rectangle) Dx() Unit {
return r.Max.X - r.Min.X
}
// Dy returns the rectangle's height.
func (r Rectangle) Dy() Unit {
return r.Max.Y - r.Min.Y
}
func (r Rectangle) marshalPDF(dst []byte) ([]byte, error) {
dst = append(dst, '[', ' ')
dst, _ = marshal(dst, r.Min.X)
dst = append(dst, ' ')
dst, _ = marshal(dst, r.Min.Y)
dst = append(dst, ' ')
dst, _ = marshal(dst, r.Max.X)
dst = append(dst, ' ')
dst, _ = marshal(dst, r.Max.Y)
dst = append(dst, ' ', ']')
return dst, nil
}
type resources struct {
ProcSet []name
Font map[name]interface{}
XObject map[name]interface{}
}
// Predefined procedure sets
const (
pdfProcSet name = "PDF"
textProcSet name = "Text"
imageBProcSet name = "ImageB"
imageCProcSet name = "ImageC"
imageIProcSet name = "ImageI"
)
type standardFontDict struct {
Type name
Subtype name
BaseFont name
}
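
The AddImage / DrawImageReference pairing described above lets one image be stored once and painted many times. A minimal sketch (not part of this commit; the tiny in-memory image is made up for illustration):

package main

import (
	"image"
	"image/color"
	"os"

	"bitbucket.org/zombiezen/gopdf/pdf"
)

func main() {
	// A 2x2 placeholder image instead of decoding a real file.
	img := image.NewRGBA(image.Rect(0, 0, 2, 2))
	img.Set(0, 0, color.RGBA{R: 255, A: 255})

	doc := pdf.New()
	canvas := doc.NewPage(pdf.A4Width, pdf.A4Height)

	// Encode the pixel data into the document once...
	ref := doc.AddImage(img)
	// ...then reference it from two places on the page.
	canvas.DrawImageReference(ref, pdf.Rectangle{Min: pdf.Point{0, 0}, Max: pdf.Point{pdf.Inch, pdf.Inch}})
	canvas.DrawImageReference(ref, pdf.Rectangle{Min: pdf.Point{0, 2 * pdf.Inch}, Max: pdf.Point{pdf.Inch, 3 * pdf.Inch}})

	canvas.Close()
	if err := doc.Encode(os.Stdout); err != nil {
		os.Exit(1)
	}
}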

91
vendor/bitbucket.org/zombiezen/gopdf/pdf/stream.go generated vendored Normal file

@@ -0,0 +1,91 @@
package pdf
import (
"bytes"
"compress/lzw"
"compress/zlib"
"io"
)
const (
streamNoFilter name = ""
streamLZWDecode name = "LZWDecode"
streamFlateDecode name = "FlateDecode"
)
// stream is a blob of data stored in a PDF file.
type stream struct {
bytes.Buffer
writer io.Writer
filter name
}
type streamInfo struct {
Length int
Filter name `pdf:",omitempty"`
}
func newStream(filter name) *stream {
st := new(stream)
st.filter = filter
switch filter {
case streamLZWDecode:
st.writer = lzw.NewWriter(&st.Buffer, lzw.MSB, 8)
case streamFlateDecode:
st.writer = zlib.NewWriter(&st.Buffer)
default:
// TODO: warn about bad filter names?
st.writer = &st.Buffer
}
return st
}
func (st *stream) ReadFrom(r io.Reader) (n int64, err error) {
return io.Copy(st.writer, r)
}
func (st *stream) Write(p []byte) (n int, err error) {
return st.writer.Write(p)
}
func (st *stream) WriteByte(c byte) error {
_, err := st.writer.Write([]byte{c})
return err
}
func (st *stream) WriteString(s string) (n int, err error) {
return io.WriteString(st.writer, s)
}
func (st *stream) Close() error {
if wc, ok := st.writer.(io.WriteCloser); ok {
return wc.Close()
}
return nil
}
func (st *stream) marshalPDF(dst []byte) ([]byte, error) {
return marshalStream(dst, streamInfo{
Length: st.Len(),
Filter: st.filter,
}, st.Bytes())
}
const (
streamBegin = " stream\r\n"
streamEnd = "\r\nendstream"
)
// marshalStream encodes a generic stream. The resulting data encodes the
// given object and a sequence of bytes. This function does not enforce any
// rules about the object being encoded.
func marshalStream(dst []byte, obj interface{}, data []byte) ([]byte, error) {
var err error
if dst, err = marshal(dst, obj); err != nil {
return nil, err
}
dst = append(dst, streamBegin...)
dst = append(dst, data...)
dst = append(dst, streamEnd...)
return dst, nil
}
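
In other words, a stream created with newStream(streamFlateDecode) compresses everything written to it through zlib, and marshalPDF then emits a stream dictionary whose Filter entry tells a PDF reader to apply FlateDecode when the content (for example, a page's drawing commands) is read back.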

138
vendor/bitbucket.org/zombiezen/gopdf/pdf/text.go generated vendored Normal file

@@ -0,0 +1,138 @@
// Copyright (C) 2011, Ross Light
package pdf
import (
"bytes"
)
// Text is a PDF text object. The zero value is an empty text object.
type Text struct {
buf bytes.Buffer
fonts map[name]bool
x, y Unit
currFont name
currSize Unit
currLeading Unit
}
// Text adds a string to the text object.
func (text *Text) Text(s string) {
writeCommand(&text.buf, "Tj", s)
if widths := getFontWidths(text.currFont); widths != nil {
text.x += computeStringWidth(s, widths, text.currSize)
}
}
const defaultLeadingScalar = 1.2
// SetFont changes the current font to a standard font. This also changes the
// leading to 1.2 times the font size.
func (text *Text) SetFont(fontName string, size Unit) {
if text.fonts == nil {
text.fonts = make(map[name]bool)
}
text.fonts[name(fontName)] = true
text.currFont, text.currSize = name(fontName), size
writeCommand(&text.buf, "Tf", name(fontName), size)
text.SetLeading(size * defaultLeadingScalar)
}
// SetLeading changes the amount of space between lines.
func (text *Text) SetLeading(leading Unit) {
writeCommand(&text.buf, "TL", leading)
text.currLeading = leading
}
// NextLine advances the current text position to the next line, based on the
// current leading.
func (text *Text) NextLine() {
writeCommand(&text.buf, "T*")
text.x = 0
text.y -= text.currLeading
}
// NextLineOffset moves the current text position to an offset relative to the
// beginning of the line.
func (text *Text) NextLineOffset(tx, ty Unit) {
writeCommand(&text.buf, "Td", tx, ty)
text.x = tx
text.y += ty
}
// X returns the current x position of the text cursor.
func (text *Text) X() Unit {
return text.x
}
// Y returns the current y position of the text cursor.
func (text *Text) Y() Unit {
return text.y
}
// Standard 14 fonts
const (
Courier = "Courier"
CourierBold = "Courier-Bold"
CourierOblique = "Courier-Oblique"
CourierBoldOblique = "Courier-BoldOblique"
Helvetica = "Helvetica"
HelveticaBold = "Helvetica-Bold"
HelveticaOblique = "Helvetica-Oblique"
HelveticaBoldOblique = "Helvetica-BoldOblique"
Symbol = "Symbol"
Times = "Times-Roman"
TimesBold = "Times-Bold"
TimesItalic = "Times-Italic"
TimesBoldItalic = "Times-BoldItalic"
ZapfDingbats = "ZapfDingbats"
)
func getFontWidths(fontName name) []uint16 {
switch fontName {
case Courier:
return courierWidths
case CourierBold:
return courierBoldWidths
case CourierOblique:
return courierObliqueWidths
case CourierBoldOblique:
return courierBoldObliqueWidths
case Helvetica:
return helveticaWidths
case HelveticaBold:
return helveticaBoldWidths
case HelveticaOblique:
return helveticaObliqueWidths
case HelveticaBoldOblique:
return helveticaBoldObliqueWidths
case Symbol:
return symbolWidths
case Times:
return timesRomanWidths
case TimesBold:
return timesBoldWidths
case TimesItalic:
return timesItalicWidths
case TimesBoldItalic:
return timesBoldItalicWidths
case ZapfDingbats:
return zapfDingbatsWidths
}
return nil
}
func computeStringWidth(s string, widths []uint16, fontSize Unit) Unit {
width := Unit(0)
for _, r := range s {
if int(r) < len(widths) {
width += Unit(widths[r])
}
}
return width * fontSize / 1000
}
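
The cursor tracking above relies on the per-glyph width tables in metrics.go (suppressed earlier in this listing), which store widths in thousandths of the em size; computeStringWidth scales them by the font size. A small illustrative sketch (not part of this commit):

package main

import (
	"fmt"

	"bitbucket.org/zombiezen/gopdf/pdf"
)

func main() {
	// Lay out two lines of 12 pt Helvetica and read the cursor back: X
	// advances by the summed glyph widths, Y drops by the leading on NextLine.
	text := new(pdf.Text)
	text.SetFont(pdf.Helvetica, 12)
	text.Text("first line")
	text.NextLine()
	text.Text("second line")
	fmt.Printf("cursor at (%v, %v)\n", text.X(), text.Y())
}

On a Canvas, DrawText then wraps the buffered commands in BT/ET and registers the standard font in the page's resources.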

202
vendor/cloud.google.com/go/LICENSE generated vendored Normal file

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

18
vendor/cloud.google.com/go/cloud.go generated vendored Normal file

@@ -0,0 +1,18 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cloud is the root of the packages used to access Google Cloud
// Services. See https://godoc.org/cloud.google.com/go for a full list
// of sub-packages.
package cloud // import "cloud.google.com/go"

438
vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored Normal file

@@ -0,0 +1,438 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
"cloud.google.com/go/internal"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
metaClient = &http.Client{
Transport: &internal.Transport{
Base: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
},
}
subscribeClient = &http.Client{
Transport: &internal.Transport{
Base: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
},
}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
val, _, err := getETag(metaClient, suffix)
return val, err
}
// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
res, err := client.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
func getTrimmed(suffix string) (s string, err error) {
s, err = Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *cachedValue) get() (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = getTrimmed(c.k)
} else {
v, err = Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/GoogleCloudPlatform/gcloud-golang/issues/194
go func() {
res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// metaClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := getETag(subscribeClient, suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
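// Example (sketch, not part of the upstream source): watching an instance
// attribute for changes; the attribute name "my-config" is illustrative.
//
//	err := Subscribe("instance/attributes/my-config", func(v string, ok bool) error {
//		if !ok {
//			log.Println("attribute deleted")
//			return nil
//		}
//		log.Println("new value:", v)
//		return nil
//	})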
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
return getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceName returns the current VM's instance name string.
func InstanceName() (string, error) {
host, err := Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
zone, err := getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
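// Example (sketch, not part of the upstream source): distinguishing a missing
// attribute from other failures; the key "startup-script" is illustrative.
//
//	v, err := InstanceAttributeValue("startup-script")
//	if _, missing := err.(NotDefinedError); missing {
//		// The attribute is not set on this instance.
//	} else if err != nil {
//		// Transport or metadata-server error.
//	} else {
//		log.Println(v) // v may be "" if the attribute was defined as empty.
//	}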
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}

64
vendor/cloud.google.com/go/internal/cloud.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal provides support for the cloud packages.
//
// Users should not import this package directly.
package internal
import (
"fmt"
"net/http"
)
const userAgent = "gcloud-golang/0.1"
// Transport is an http.RoundTripper that appends Google Cloud client's
// user-agent to the original request's user-agent header.
type Transport struct {
// TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
// Do User-Agent some other way.
// Base is the actual http.RoundTripper
// requests will use. It must not be nil.
Base http.RoundTripper
}
// RoundTrip appends a user-agent to the existing user-agent
// header and delegates the request to the base http.RoundTripper.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
req = cloneRequest(req)
ua := req.Header.Get("User-Agent")
if ua == "" {
ua = userAgent
} else {
ua = fmt.Sprintf("%s %s", ua, userAgent)
}
req.Header.Set("User-Agent", ua)
return t.Base.RoundTrip(req)
}
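// Example (sketch, not part of the upstream source): wrapping the default
// transport so outgoing requests carry the gcloud-golang user-agent suffix.
//
//	client := &http.Client{
//		Transport: &Transport{Base: http.DefaultTransport},
//	}
//	resp, err := client.Get("https://www.googleapis.com/")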
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}

3
vendor/github.com/ajstarks/svgo/LICENSE generated vendored Normal file
View File

@ -0,0 +1,3 @@
The contents of this repository are Licensed under
the Creative Commons Attribution 3.0 license as described in
http://creativecommons.org/licenses/by/3.0/us/

126
vendor/github.com/ajstarks/svgo/doc.go generated vendored Normal file
View File

@ -0,0 +1,126 @@
/*
Package svg generates SVG as defined by the Scalable Vector Graphics 1.1 Specification (<http://www.w3.org/TR/SVG11/>).
Output goes to the specified io.Writer.
Supported SVG elements and functions
Shapes, lines, text
circle, ellipse, polygon, polyline, rect (including roundrects), line, text
Paths
general, arc, cubic and quadratic bezier paths,
Image and Gradients
image, linearGradient, radialGradient,
Transforms
translate, rotate, scale, skewX, skewY
Filter Effects
filter, feBlend, feColorMatrix, feComponentTransfer, feComposite, feConvolveMatrix, feDiffuseLighting,
feDisplacementMap, feDistantLight, feFlood, feGaussianBlur, feImage, feMerge, feMorphology, feOffset, fePointLight,
feSpecularLighting, feSpotLight, feTile, feTurbulence
Metadata elements
desc, defs, g (style, transform, id), mask, marker, pattern, title, (a)ddress, link, script, use
Usage: (assuming GOPATH is set)
go get github.com/ajstarks/svgo
go install github.com/ajstarks/svgo/...
You can use godoc to browse the documentation from the command line:
$ godoc github.com/ajstarks/svgo
A minimal program, generating SVG to standard output:
package main
import (
"github.com/ajstarks/svgo"
"os"
)
func main() {
width := 500
height := 500
canvas := svg.New(os.Stdout)
canvas.Start(width, height)
canvas.Circle(width/2, height/2, 100)
canvas.Text(width/2, height/2, "Hello, SVG", "text-anchor:middle;font-size:30px;fill:white")
canvas.End()
}
Drawing in a web server: (http://localhost:2003/circle)
package main
import (
"log"
"github.com/ajstarks/svgo"
"net/http"
)
func main() {
http.Handle("/circle", http.HandlerFunc(circle))
err := http.ListenAndServe(":2003", nil)
if err != nil {
log.Fatal("ListenAndServe:", err)
}
}
func circle(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "image/svg+xml")
s := svg.New(w)
s.Start(500, 500)
s.Circle(250, 250, 125, "fill:none;stroke:black")
s.End()
}
Functions and types
Many functions use x, y to specify an object's location, and w, h to specify the object's width and height.
Where applicable, a final optional argument specifies the style to be applied to the object.
The style strings follow the SVG standard; name:value pairs delimited by semicolons, or a
series of name="value" pairs. For example: `"fill:none; opacity:0.3"` or `fill="none" opacity="0.3"` (see: <http://www.w3.org/TR/SVG11/styling.html>)
The SVG type:
type SVG struct {
Writer io.Writer
}
Most operations are methods on this type, specifying the destination io.Writer.
The Offcolor type:
type Offcolor struct {
Offset uint8
Color string
Opacity float64
}
is used to specify the offset, color, and opacity of stop colors in linear and radial gradients
The Filterspec type:
type Filterspec struct {
In string
In2 string
Result string
}
is used to specify inputs and results for filter effects
*/
package svg

978
vendor/github.com/ajstarks/svgo/svg.go generated vendored Normal file
View File

@ -0,0 +1,978 @@
// Package svg provides an API for generating Scalable Vector Graphics (SVG)
package svg
// package main
//
// import (
// "github.com/ajstarks/svgo"
// "os"
// )
//
// var (
// width = 500
// height = 500
// canvas = svg.New(os.Stdout)
// )
//
// func main() {
// canvas.Start(width, height)
// canvas.Circle(width/2, height/2, 100)
// canvas.Text(width/2, height/2, "Hello, SVG",
// "text-anchor:middle;font-size:30px;fill:white")
// canvas.End()
// }
//
import (
"encoding/xml"
"fmt"
"io"
"strings"
)
// SVG defines the location of the generated SVG
type SVG struct {
Writer io.Writer
}
// Offcolor defines the offset and color for gradients
type Offcolor struct {
Offset uint8
Color string
Opacity float64
}
// Filterspec defines the specification of SVG filters
type Filterspec struct {
In, In2, Result string
}
const (
svgtop = `<?xml version="1.0"?>
<!-- Generated by SVGo -->
<svg`
svginitfmt = `%s width="%d%s" height="%d%s"`
svgns = `
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">`
vbfmt = `viewBox="%d %d %d %d"`
emptyclose = "/>\n"
)
// New is the SVG constructor, specifying the io.Writer where the generated SVG is written.
func New(w io.Writer) *SVG { return &SVG{w} }
func (svg *SVG) print(a ...interface{}) (n int, errno error) {
return fmt.Fprint(svg.Writer, a...)
}
func (svg *SVG) println(a ...interface{}) (n int, errno error) {
return fmt.Fprintln(svg.Writer, a...)
}
func (svg *SVG) printf(format string, a ...interface{}) (n int, errno error) {
return fmt.Fprintf(svg.Writer, format, a...)
}
func (svg *SVG) genattr(ns []string) {
for _, v := range ns {
svg.printf("\n %s", v)
}
svg.println(svgns)
}
// Structure, Metadata, Scripting, Transformation, and Links
// Start begins the SVG document with the width w and height h.
// Other attributes may be optionally added, for example viewbox or additional namespaces
// Standard Reference: http://www.w3.org/TR/SVG11/struct.html#SVGElement
func (svg *SVG) Start(w int, h int, ns ...string) {
svg.printf(svginitfmt, svgtop, w, "", h, "")
svg.genattr(ns)
}
// Startunit begins the SVG document, with width and height in the specified units
// Other attributes may be optionally added, for example viewbox or additional namespaces
func (svg *SVG) Startunit(w int, h int, unit string, ns ...string) {
svg.printf(svginitfmt, svgtop, w, unit, h, unit)
svg.genattr(ns)
}
// Startpercent begins the SVG document, with width and height as percentages
// Other attributes may be optionally added, for example viewbox or additional namespaces
func (svg *SVG) Startpercent(w int, h int, ns ...string) {
svg.printf(svginitfmt, svgtop, w, "%", h, "%")
svg.genattr(ns)
}
// Startview begins the SVG document, with the specified width, height, and viewbox
// Other attributes may be optionally added, for example viewbox or additional namespaces
func (svg *SVG) Startview(w, h, minx, miny, vw, vh int) {
svg.Start(w, h, fmt.Sprintf(vbfmt, minx, miny, vw, vh))
}
// StartviewUnit begins the SVG document, with width and height in the specified units, and the specified viewbox
func (svg *SVG) StartviewUnit(w, h int, unit string, minx, miny, vw, vh int) {
svg.Startunit(w, h, unit, fmt.Sprintf(vbfmt, minx, miny, vw, vh))
}
// Startraw begins the SVG document, passing arbitrary attributes
func (svg *SVG) Startraw(ns ...string) {
svg.printf(svgtop)
svg.genattr(ns)
}
// End the SVG document
func (svg *SVG) End() { svg.println("</svg>") }
// Script defines a script with a specified type, (for example "application/javascript").
// if the first variadic argument is a link, use only the link reference.
// Otherwise, treat those arguments as the text of the script (marked up as CDATA).
// if no data is specified, just close the script element
func (svg *SVG) Script(scriptype string, data ...string) {
svg.printf(`<script type="%s"`, scriptype)
switch {
case len(data) == 1 && islink(data[0]):
svg.printf(" %s/>\n", href(data[0]))
case len(data) > 0:
svg.printf(">\n<![CDATA[\n")
for _, v := range data {
svg.println(v)
}
svg.printf("]]>\n</script>\n")
default:
svg.println(`/>`)
}
}
// Gstyle begins a group, with the specified style.
// Standard Reference: http://www.w3.org/TR/SVG11/struct.html#GElement
func (svg *SVG) Gstyle(s string) { svg.println(group("style", s)) }
// Gtransform begins a group, with the specified transform
// Standard Reference: http://www.w3.org/TR/SVG11/coords.html#TransformAttribute
func (svg *SVG) Gtransform(s string) { svg.println(group("transform", s)) }
// Translate begins coordinate translation, end with Gend()
// Standard Reference: http://www.w3.org/TR/SVG11/coords.html#TransformAttribute
func (svg *SVG) Translate(x, y int) { svg.Gtransform(translate(x, y)) }
// Scale scales the coordinate system by n, end with Gend()
// Standard Reference: http://www.w3.org/TR/SVG11/coords.html#TransformAttribute
func (svg *SVG) Scale(n float64) { svg.Gtransform(scale(n)) }
// ScaleXY scales the coordinate system by dx and dy, end with Gend()
// Standard Reference: http://www.w3.org/TR/SVG11/coords.html#TransformAttribute
func (svg *SVG) ScaleXY(dx, dy float64) { svg.Gtransform(scaleXY(dx, dy)) }
// SkewX skews the x coordinate system by angle a, end with Gend()
// Standard Reference: http://www.w3.org/TR/SVG11/coords.html#TransformAttribute
func (svg *SVG) SkewX(a float64) { svg.Gtransform(skewX(a)) }
// SkewY skews the y coordinate system by angle a, end with Gend()
// Standard Reference: http://www.w3.org/TR/SVG11/coords.html#TransformAttribute
func (svg *SVG) SkewY(a float64) { svg.Gtransform(skewY(a)) }
// SkewXY skews x and y coordinates by ax, ay respectively, end with Gend()
// Standard Reference: http://www.w3.org/TR/SVG11/coords.html#TransformAttribute
func (svg *SVG) SkewXY(ax, ay float64) { svg.Gtransform(skewX(ax) + " " + skewY(ay)) }
// Rotate rotates the coordinate system by r degrees, end with Gend()
// Standard Reference: http://www.w3.org/TR/SVG11/coords.html#TransformAttribute
func (svg *SVG) Rotate(r float64) { svg.Gtransform(rotate(r)) }
// TranslateRotate translates the coordinate system to (x,y), then rotates to r degrees, end with Gend()
func (svg *SVG) TranslateRotate(x, y int, r float64) {
svg.Gtransform(translate(x, y) + " " + rotate(r))
}
// RotateTranslate rotates the coordinate system r degrees, then translates to (x,y), end with Gend()
func (svg *SVG) RotateTranslate(x, y int, r float64) {
svg.Gtransform(rotate(r) + " " + translate(x, y))
}
// Group begins a group with arbitrary attributes
func (svg *SVG) Group(s ...string) { svg.printf("<g %s\n", endstyle(s, `>`)) }
// Gid begins a group, with the specified id
func (svg *SVG) Gid(s string) {
svg.print(`<g id="`)
xml.Escape(svg.Writer, []byte(s))
svg.println(`">`)
}
// Gend ends a group (must be paired with Gstyle, Gtransform, or Gid).
func (svg *SVG) Gend() { svg.println(`</g>`) }
// ClipPath defines a clip path
func (svg *SVG) ClipPath(s ...string) { svg.printf(`<clipPath %s`, endstyle(s, `>`)) }
// ClipEnd ends a ClipPath
func (svg *SVG) ClipEnd() {
svg.println(`</clipPath>`)
}
// Def begins a definition block.
// Standard Reference: http://www.w3.org/TR/SVG11/struct.html#DefsElement
func (svg *SVG) Def() { svg.println(`<defs>`) }
// DefEnd ends a definition block.
func (svg *SVG) DefEnd() { svg.println(`</defs>`) }
// Marker defines a marker
// Standard reference: http://www.w3.org/TR/SVG11/painting.html#MarkerElement
func (svg *SVG) Marker(id string, x, y, width, height int, s ...string) {
svg.printf(`<marker id="%s" refX="%d" refY="%d" markerWidth="%d" markerHeight="%d" %s`,
id, x, y, width, height, endstyle(s, ">\n"))
}
// MarkerEnd ends a marker
func (svg *SVG) MarkerEnd() { svg.println(`</marker>`) }
// Pattern defines a pattern with the specified dimensions.
// The putype can be either "user" or "obj", which sets the patternUnits
// attribute to be either userSpaceOnUse or objectBoundingBox
// Standard reference: http://www.w3.org/TR/SVG11/pservers.html#Patterns
func (svg *SVG) Pattern(id string, x, y, width, height int, putype string, s ...string) {
puattr := "userSpaceOnUse"
if putype != "user" {
puattr = "objectBoundingBox"
}
svg.printf(`<pattern id="%s" x="%d" y="%d" width="%d" height="%d" patternUnits="%s" %s`,
id, x, y, width, height, puattr, endstyle(s, ">\n"))
}
// PatternEnd ends a pattern
func (svg *SVG) PatternEnd() { svg.println(`</pattern>`) }
// Desc specifies the text of the description tag.
// Standard Reference: http://www.w3.org/TR/SVG11/struct.html#DescElement
func (svg *SVG) Desc(s string) { svg.tt("desc", s) }
// Title specifies the text of the title tag.
// Standard Reference: http://www.w3.org/TR/SVG11/struct.html#TitleElement
func (svg *SVG) Title(s string) { svg.tt("title", s) }
// Link begins a link named "name", with the specified title.
// Standard Reference: http://www.w3.org/TR/SVG11/linking.html#Links
func (svg *SVG) Link(href string, title string) {
svg.printf("<a xlink:href=\"%s\" xlink:title=\"", href)
xml.Escape(svg.Writer, []byte(title))
svg.println("\">")
}
// LinkEnd ends a link.
func (svg *SVG) LinkEnd() { svg.println(`</a>`) }
// Use places the object referenced at link at the location x, y, with optional style.
// Standard Reference: http://www.w3.org/TR/SVG11/struct.html#UseElement
func (svg *SVG) Use(x int, y int, link string, s ...string) {
svg.printf(`<use %s %s %s`, loc(x, y), href(link), endstyle(s, emptyclose))
}
// Mask creates a mask with a specified id, dimension, and optional style.
func (svg *SVG) Mask(id string, x int, y int, w int, h int, s ...string) {
svg.printf(`<mask id="%s" x="%d" y="%d" width="%d" height="%d" %s`, id, x, y, w, h, endstyle(s, `>`))
}
// MaskEnd ends a Mask.
func (svg *SVG) MaskEnd() { svg.println(`</mask>`) }
// Shapes
// Circle centered at x,y, with radius r, with optional style.
// Standard Reference: http://www.w3.org/TR/SVG11/shapes.html#CircleElement
func (svg *SVG) Circle(x int, y int, r int, s ...string) {
svg.printf(`<circle cx="%d" cy="%d" r="%d" %s`, x, y, r, endstyle(s, emptyclose))
}
// Ellipse centered at x,y, with radii w and h, with optional style.
// Standard Reference: http://www.w3.org/TR/SVG11/shapes.html#EllipseElement
func (svg *SVG) Ellipse(x int, y int, w int, h int, s ...string) {
svg.printf(`<ellipse cx="%d" cy="%d" rx="%d" ry="%d" %s`,
x, y, w, h, endstyle(s, emptyclose))
}
// Polygon draws a series of line segments using an array of x, y coordinates, with optional style.
// Standard Reference: http://www.w3.org/TR/SVG11/shapes.html#PolygonElement
func (svg *SVG) Polygon(x []int, y []int, s ...string) {
svg.poly(x, y, "polygon", s...)
}
// Rect draws a rectangle with upper left-hand corner at x,y, with width w, and height h, with optional style
// Standard Reference: http://www.w3.org/TR/SVG11/shapes.html#RectElement
func (svg *SVG) Rect(x int, y int, w int, h int, s ...string) {
svg.printf(`<rect %s %s`, dim(x, y, w, h), endstyle(s, emptyclose))
}
// CenterRect draws a rectangle with its center at x,y, with width w, and height h, with optional style
func (svg *SVG) CenterRect(x int, y int, w int, h int, s ...string) {
svg.Rect(x-(w/2), y-(h/2), w, h, s...)
}
// Roundrect draws a rounded rectangle with the upper left-hand corner at x,y,
// with width w, and height h. The radii for the rounded portion
// are specified by rx (width), and ry (height).
// Style is optional.
// Standard Reference: http://www.w3.org/TR/SVG11/shapes.html#RectElement
func (svg *SVG) Roundrect(x int, y int, w int, h int, rx int, ry int, s ...string) {
svg.printf(`<rect %s rx="%d" ry="%d" %s`, dim(x, y, w, h), rx, ry, endstyle(s, emptyclose))
}
// Square draws a square with upper left corner at x,y with sides of length l, with optional style.
func (svg *SVG) Square(x int, y int, l int, s ...string) {
svg.Rect(x, y, l, l, s...)
}
// Paths
// Path draws an arbitrary path; the caller is responsible for structuring the path data
func (svg *SVG) Path(d string, s ...string) {
svg.printf(`<path d="%s" %s`, d, endstyle(s, emptyclose))
}
// Arc draws an elliptical arc, with optional style, beginning at sx,sy and ending at ex,ey.
// The width and height of the arc are specified by ax, ay; the x-axis rotation is r.
// If sweep is true, the arc is drawn in a "positive-angle" (clockwise) direction; if false, counterclockwise.
// If large is true, the arc sweep angle is greater than or equal to 180 degrees; otherwise it is less than 180 degrees.
// http://www.w3.org/TR/SVG11/paths.html#PathDataEllipticalArcCommands
func (svg *SVG) Arc(sx int, sy int, ax int, ay int, r int, large bool, sweep bool, ex int, ey int, s ...string) {
svg.printf(`%s A%s %d %s %s %s" %s`,
ptag(sx, sy), coord(ax, ay), r, onezero(large), onezero(sweep), coord(ex, ey), endstyle(s, emptyclose))
}
// Bezier draws a cubic bezier curve, with optional style, beginning at sx,sy, ending at ex,ey
// with control points at cx,cy and px,py.
// Standard Reference: http://www.w3.org/TR/SVG11/paths.html#PathDataCubicBezierCommands
func (svg *SVG) Bezier(sx int, sy int, cx int, cy int, px int, py int, ex int, ey int, s ...string) {
svg.printf(`%s C%s %s %s" %s`,
ptag(sx, sy), coord(cx, cy), coord(px, py), coord(ex, ey), endstyle(s, emptyclose))
}
// Qbez draws a quadratic bezier curve, with optional style,
// beginning at sx,sy and ending at ex,ey, with the control point at cx,cy
// Standard Reference: http://www.w3.org/TR/SVG11/paths.html#PathDataQuadraticBezierCommands
func (svg *SVG) Qbez(sx int, sy int, cx int, cy int, ex int, ey int, s ...string) {
svg.printf(`%s Q%s %s" %s`,
ptag(sx, sy), coord(cx, cy), coord(ex, ey), endstyle(s, emptyclose))
}
// Qbezier draws a Quadratic Bezier curve, with optional style, beginning at sx,sy and ending at tx,ty,
// with control points at cx,cy and ex,ey.
// Standard Reference: http://www.w3.org/TR/SVG11/paths.html#PathDataQuadraticBezierCommands
func (svg *SVG) Qbezier(sx int, sy int, cx int, cy int, ex int, ey int, tx int, ty int, s ...string) {
svg.printf(`%s Q%s %s T%s" %s`,
ptag(sx, sy), coord(cx, cy), coord(ex, ey), coord(tx, ty), endstyle(s, emptyclose))
}
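// Example (sketch, not part of the upstream source): drawing an arc and a
// cubic bezier with stroke-only styles; canvas is assumed to come from New.
//
//	canvas.Arc(10, 10, 30, 30, 0, false, true, 100, 100, "fill:none;stroke:black")
//	canvas.Bezier(10, 10, 50, 0, 80, 100, 120, 10, "fill:none;stroke:red")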
// Lines
// Line draws a straight line between two points, with optional style.
// Standard Reference: http://www.w3.org/TR/SVG11/shapes.html#LineElement
func (svg *SVG) Line(x1 int, y1 int, x2 int, y2 int, s ...string) {
svg.printf(`<line x1="%d" y1="%d" x2="%d" y2="%d" %s`, x1, y1, x2, y2, endstyle(s, emptyclose))
}
// Polyline draws connected lines between coordinates, with optional style.
// Standard Reference: http://www.w3.org/TR/SVG11/shapes.html#PolylineElement
func (svg *SVG) Polyline(x []int, y []int, s ...string) {
svg.poly(x, y, "polyline", s...)
}
// Image places at x,y (upper left hand corner), the image with
// width w, and height h, referenced at link, with optional style.
// Standard Reference: http://www.w3.org/TR/SVG11/struct.html#ImageElement
func (svg *SVG) Image(x int, y int, w int, h int, link string, s ...string) {
svg.printf(`<image %s %s %s`, dim(x, y, w, h), href(link), endstyle(s, emptyclose))
}
// Text places the specified text, t at x,y according to the style specified in s
// Standard Reference: http://www.w3.org/TR/SVG11/text.html#TextElement
func (svg *SVG) Text(x int, y int, t string, s ...string) {
svg.printf(`<text %s %s`, loc(x, y), endstyle(s, ">"))
xml.Escape(svg.Writer, []byte(t))
svg.println(`</text>`)
}
// Textpath places optionally styled text along a previously defined path
// Standard Reference: http://www.w3.org/TR/SVG11/text.html#TextPathElement
func (svg *SVG) Textpath(t string, pathid string, s ...string) {
svg.printf("<text %s<textPath xlink:href=\"%s\">", endstyle(s, ">"), pathid)
xml.Escape(svg.Writer, []byte(t))
svg.println(`</textPath></text>`)
}
// Textlines places a series of lines of text starting at x,y, at the specified size, fill, and alignment.
// Each line is spaced according to the spacing argument
func (svg *SVG) Textlines(x, y int, s []string, size, spacing int, fill, align string) {
svg.Gstyle(fmt.Sprintf("font-size:%dpx;fill:%s;text-anchor:%s", size, fill, align))
for _, t := range s {
svg.Text(x, y, t)
y += spacing
}
svg.Gend()
}
// Colors
// RGB specifies a fill color in terms of a (r)ed, (g)reen, (b)lue triple.
// Standard reference: http://www.w3.org/TR/css3-color/
func (svg *SVG) RGB(r int, g int, b int) string {
return fmt.Sprintf(`fill:rgb(%d,%d,%d)`, r, g, b)
}
// RGBA specifies a fill color in terms of a (r)ed, (g)reen, (b)lue triple and opacity.
func (svg *SVG) RGBA(r int, g int, b int, a float64) string {
return fmt.Sprintf(`fill-opacity:%.2f; %s`, a, svg.RGB(r, g, b))
}
// Gradients
// LinearGradient constructs a linear color gradient identified by id,
// along the vector defined by (x1,y1) and (x2,y2).
// The stop color sequence is defined in sc. Coordinates are expressed as percentages.
func (svg *SVG) LinearGradient(id string, x1, y1, x2, y2 uint8, sc []Offcolor) {
svg.printf("<linearGradient id=\"%s\" x1=\"%d%%\" y1=\"%d%%\" x2=\"%d%%\" y2=\"%d%%\">\n",
id, pct(x1), pct(y1), pct(x2), pct(y2))
svg.stopcolor(sc)
svg.println("</linearGradient>")
}
// RadialGradient constructs a radial color gradient identified by id,
// centered at (cx,cy), with a radius of r.
// (fx, fy) define the location of the focal point of the light source.
// The stop color sequence is defined in sc.
// Coordinates are expressed as percentages.
func (svg *SVG) RadialGradient(id string, cx, cy, r, fx, fy uint8, sc []Offcolor) {
svg.printf("<radialGradient id=\"%s\" cx=\"%d%%\" cy=\"%d%%\" r=\"%d%%\" fx=\"%d%%\" fy=\"%d%%\">\n",
id, pct(cx), pct(cy), pct(r), pct(fx), pct(fy))
svg.stopcolor(sc)
svg.println("</radialGradient>")
}
// stopcolor is a utility function used by the gradient functions
// to define a sequence of offsets (expressed as percentages) and colors
func (svg *SVG) stopcolor(oc []Offcolor) {
for _, v := range oc {
svg.printf("<stop offset=\"%d%%\" stop-color=\"%s\" stop-opacity=\"%.2f\"/>\n",
pct(v.Offset), v.Color, v.Opacity)
}
}
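// Example (sketch, not part of the upstream source): a two-stop horizontal
// gradient referenced by a rectangle fill; the id "grad1" is illustrative and
// canvas is assumed to come from New.
//
//	canvas.Def()
//	canvas.LinearGradient("grad1", 0, 0, 100, 0, []Offcolor{
//		{0, "white", 1.0},
//		{100, "blue", 1.0},
//	})
//	canvas.DefEnd()
//	canvas.Rect(10, 10, 200, 100, `fill="url(#grad1)"`)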
// Filter Effects:
// Most functions have common attributes (in, in2, result) defined in type Filterspec
// used as a common first argument.
// Filter begins a filter set
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#FilterElement
func (svg *SVG) Filter(id string, s ...string) {
svg.printf(`<filter id="%s" %s`, id, endstyle(s, ">\n"))
}
// Fend ends a filter set
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#FilterElement
func (svg *SVG) Fend() {
svg.println(`</filter>`)
}
// FeBlend specifies a Blend filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feBlendElement
func (svg *SVG) FeBlend(fs Filterspec, mode string, s ...string) {
switch mode {
case "normal", "multiply", "screen", "darken", "lighten":
break
default:
mode = "normal"
}
svg.printf(`<feBlend %s mode="%s" %s`,
fsattr(fs), mode, endstyle(s, emptyclose))
}
// FeColorMatrix specifies a color matrix filter primitive, with matrix values
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feColorMatrixElement
func (svg *SVG) FeColorMatrix(fs Filterspec, values [20]float64, s ...string) {
svg.printf(`<feColorMatrix %s type="matrix" values="`, fsattr(fs))
for _, v := range values {
svg.printf(`%g `, v)
}
svg.printf(`" %s`, endstyle(s, emptyclose))
}
// FeColorMatrixHue specifies a color matrix filter primitive, with hue rotation values
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feColorMatrixElement
func (svg *SVG) FeColorMatrixHue(fs Filterspec, value float64, s ...string) {
if value < -360 || value > 360 {
value = 0
}
svg.printf(`<feColorMatrix %s type="hueRotate" values="%g" %s`,
fsattr(fs), value, endstyle(s, emptyclose))
}
// FeColorMatrixSaturate specifies a color matrix filter primitive, with saturation values
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feColorMatrixElement
func (svg *SVG) FeColorMatrixSaturate(fs Filterspec, value float64, s ...string) {
if value < 0 || value > 1 {
value = 1
}
svg.printf(`<feColorMatrix %s type="saturate" values="%g" %s`,
fsattr(fs), value, endstyle(s, emptyclose))
}
// FeColorMatrixLuminence specifies a color matrix filter primitive, with luminence values
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feColorMatrixElement
func (svg *SVG) FeColorMatrixLuminence(fs Filterspec, s ...string) {
svg.printf(`<feColorMatrix %s type="luminenceToAlpha" %s`,
fsattr(fs), endstyle(s, emptyclose))
}
// FeComponentTransfer begins a feComponent filter element
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feComponentTransferElement
func (svg *SVG) FeComponentTransfer() {
svg.println(`<feComponentTransfer>`)
}
// FeCompEnd ends a feComponent filter element
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feComponentTransferElement
func (svg *SVG) FeCompEnd() {
svg.println(`</feComponentTransfer>`)
}
// FeComposite specifies a feComposite filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feCompositeElement
func (svg *SVG) FeComposite(fs Filterspec, operator string, k1, k2, k3, k4 int, s ...string) {
switch operator {
case "over", "in", "out", "atop", "xor", "arithmetic":
break
default:
operator = "over"
}
svg.printf(`<feComposite %s operator="%s" k1="%d" k2="%d" k3="%d" k4="%d" %s`,
fsattr(fs), operator, k1, k2, k3, k4, endstyle(s, emptyclose))
}
// FeConvolveMatrix specifies a feConvolveMatrix filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feConvolveMatrixElement
func (svg *SVG) FeConvolveMatrix(fs Filterspec, matrix [9]int, s ...string) {
svg.printf(`<feConvolveMatrix %s kernelMatrix="%d %d %d %d %d %d %d %d %d" %s`,
fsattr(fs),
matrix[0], matrix[1], matrix[2],
matrix[3], matrix[4], matrix[5],
matrix[6], matrix[7], matrix[8], endstyle(s, emptyclose))
}
// FeDiffuseLighting specifies a diffuse lighting filter primitive,
// a container for light source elements, end with DiffuseEnd()
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feDiffuseLightingElement
func (svg *SVG) FeDiffuseLighting(fs Filterspec, scale, constant float64, s ...string) {
svg.printf(`<feDiffuseLighting %s surfaceScale="%g" diffuseConstant="%g" %s`,
fsattr(fs), scale, constant, endstyle(s, `>`))
}
// FeDiffEnd ends a diffuse lighting filter primitive container
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feDiffuseLightingElement
func (svg *SVG) FeDiffEnd() {
svg.println(`</feDiffuseLighting>`)
}
// FeDisplacementMap specifies a feDisplacementMap filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feDisplacementMapElement
func (svg *SVG) FeDisplacementMap(fs Filterspec, scale float64, xchannel, ychannel string, s ...string) {
svg.printf(`<feDisplacementMap %s scale="%g" xChannelSelector="%s" yChannelSelector="%s" %s`,
fsattr(fs), scale, imgchannel(xchannel), ychannel, endstyle(s, emptyclose))
}
// FeDistantLight specifies a feDistantLight filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feDistantLightElement
func (svg *SVG) FeDistantLight(fs Filterspec, azimuth, elevation float64, s ...string) {
svg.printf(`<feDistantLight %s azimuth="%g" elevation="%g" %s`,
fsattr(fs), azimuth, elevation, endstyle(s, emptyclose))
}
// FeFlood specifies a flood filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feFloodElement
func (svg *SVG) FeFlood(fs Filterspec, color string, opacity float64, s ...string) {
svg.printf(`<feFlood %s flood-fill-color="%s" flood-fill-opacity="%g" %s`,
fsattr(fs), color, opacity, endstyle(s, emptyclose))
}
// FeFunc{Linear|Gamma|Table|Discrete} specify various types of feFunc{R|G|B|A} filter primitives
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feComponentTransferElement
// FeFuncLinear specifies a linear style function for the feFunc{R|G|B|A} filter element
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feComponentTransferElement
func (svg *SVG) FeFuncLinear(channel string, slope, intercept float64) {
svg.printf(`<feFunc%s type="linear" slope="%g" intercept="%g"%s`,
imgchannel(channel), slope, intercept, emptyclose)
}
// FeFuncGamma specifies the curve values for gamma correction for the feFunc{R|G|B|A} filter element
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feComponentTransferElement
func (svg *SVG) FeFuncGamma(channel string, amplitude, exponent, offset float64) {
svg.printf(`<feFunc%s type="gamma" amplitude="%g" exponent="%g" offset="%g"%s`,
imgchannel(channel), amplitude, exponent, offset, emptyclose)
}
// FeFuncTable specifies the table of values for the feFunc{R|G|B|A} filter element
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feComponentTransferElement
func (svg *SVG) FeFuncTable(channel string, tv []float64) {
svg.printf(`<feFunc%s type="table"`, imgchannel(channel))
svg.tablevalues(`tableValues`, tv)
}
// FeFuncDiscrete specifies the discrete values for the feFunc{R|G|B|A} filter element
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feComponentTransferElement
func (svg *SVG) FeFuncDiscrete(channel string, tv []float64) {
svg.printf(`<feFunc%s type="discrete"`, imgchannel(channel))
svg.tablevalues(`tableValues`, tv)
}
// FeGaussianBlur specifies a Gaussian Blur filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feGaussianBlurElement
func (svg *SVG) FeGaussianBlur(fs Filterspec, stdx, stdy float64, s ...string) {
if stdx < 0 {
stdx = 0
}
if stdy < 0 {
stdy = 0
}
svg.printf(`<feGaussianBlur %s stdDeviation="%g %g" %s`,
fsattr(fs), stdx, stdy, endstyle(s, emptyclose))
}
// FeImage specifies a feImage filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feImageElement
func (svg *SVG) FeImage(href string, result string, s ...string) {
svg.printf(`<feImage xlink:href="%s" result="%s" %s`,
href, result, endstyle(s, emptyclose))
}
// FeMerge specifies a feMerge filter primitive, containing feMergeNode elements
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feMergeElement
func (svg *SVG) FeMerge(nodes []string, s ...string) {
svg.println(`<feMerge>`)
for _, n := range nodes {
svg.printf("<feMergeNode in=\"%s\"/>\n", n)
}
svg.println(`</feMerge>`)
}
// FeMorphology specifies a feMorphology filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feMorphologyElement
func (svg *SVG) FeMorphology(fs Filterspec, operator string, xradius, yradius float64, s ...string) {
switch operator {
case "erode", "dilate":
break
default:
operator = "erode"
}
svg.printf(`<feMorphology %s operator="%s" radius="%g %g" %s`,
fsattr(fs), operator, xradius, yradius, endstyle(s, emptyclose))
}
// FeOffset specifies the feOffset filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feOffsetElement
func (svg *SVG) FeOffset(fs Filterspec, dx, dy int, s ...string) {
svg.printf(`<feOffset %s dx="%d" dy="%d" %s`,
fsattr(fs), dx, dy, endstyle(s, emptyclose))
}
// FePointLight specifies a fePointLight filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#fePointLightElement
func (svg *SVG) FePointLight(x, y, z float64, s ...string) {
svg.printf(`<fePointLight x="%g" y="%g" z="%g" %s`,
x, y, z, endstyle(s, emptyclose))
}
// FeSpecularLighting specifies a specular lighting filter primitive,
// a container for light source elements, end with SpecularEnd()
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feSpecularLightingElement
func (svg *SVG) FeSpecularLighting(fs Filterspec, scale, constant float64, exponent int, color string, s ...string) {
svg.printf(`<feSpecularLighting %s surfaceScale="%g" specularConstant="%g" specularExponent="%d" lighting-color="%s" %s`,
fsattr(fs), scale, constant, exponent, color, endstyle(s, ">\n"))
}
// FeSpecEnd ends a specular lighting filter primitive container
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feSpecularLightingElement
func (svg *SVG) FeSpecEnd() {
svg.println(`</feSpecularLighting>`)
}
// FeSpotLight specifies a feSpotLight filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feSpotLightElement
func (svg *SVG) FeSpotLight(fs Filterspec, x, y, z, px, py, pz float64, s ...string) {
svg.printf(`<feSpotLight %s x="%g" y="%g" z="%g" pointsAtX="%g" pointsAtY="%g" pointsAtZ="%g" %s`,
fsattr(fs), x, y, z, px, py, pz, endstyle(s, emptyclose))
}
// FeTile specifies the tile utility filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feTileElement
func (svg *SVG) FeTile(fs Filterspec, in string, s ...string) {
svg.printf(`<feTile %s %s`, fsattr(fs), endstyle(s, emptyclose))
}
// FeTurbulence specifies a turbulence filter primitive
// Standard reference: http://www.w3.org/TR/SVG11/filters.html#feTurbulenceElement
func (svg *SVG) FeTurbulence(fs Filterspec, ftype string, bfx, bfy float64, octaves int, seed int64, stitch bool, s ...string) {
if bfx < 0 || bfx > 1 {
bfx = 0
}
if bfy < 0 || bfy > 1 {
bfy = 0
}
switch ftype[0:1] {
case "f", "F":
ftype = "fractalNoise"
case "t", "T":
ftype = "turbulence"
default:
ftype = "turbulence"
}
var ss string
if stitch {
ss = "stitch"
} else {
ss = "noStitch"
}
svg.printf(`<feTurbulence %s type="%s" baseFrequency="%.2f %.2f" numOctaves="%d" seed="%d" stitchTiles="%s" %s`,
fsattr(fs), ftype, bfx, bfy, octaves, seed, ss, endstyle(s, emptyclose))
}
// Filter Effects convenience functions, modeled after CSS versions
// Blur emulates the CSS blur filter
func (svg *SVG) Blur(p float64) {
svg.FeGaussianBlur(Filterspec{}, p, p)
}
// Brightness emulates the CSS brightness filter
func (svg *SVG) Brightness(p float64) {
svg.FeComponentTransfer()
svg.FeFuncLinear("R", p, 0)
svg.FeFuncLinear("G", p, 0)
svg.FeFuncLinear("B", p, 0)
svg.FeCompEnd()
}
// Contrast emulates the CSS contrast filter
//func (svg *SVG) Contrast(p float64) {
//}
// Dropshadow emulates the CSS dropshadow filter
//func (svg *SVG) Dropshadow(p float64) {
//}
// Grayscale emulates the CSS grayscale filter
func (svg *SVG) Grayscale() {
svg.FeColorMatrixSaturate(Filterspec{}, 0)
}
// HueRotate emulates the CSS huerotate filter
func (svg *SVG) HueRotate(a float64) {
svg.FeColorMatrixHue(Filterspec{}, a)
}
// Invert emulates the CSS invert filter
func (svg *SVG) Invert() {
svg.FeComponentTransfer()
svg.FeFuncTable("R", []float64{1, 0})
svg.FeFuncTable("G", []float64{1, 0})
svg.FeFuncTable("B", []float64{1, 0})
svg.FeCompEnd()
}
// Saturate emulates the CSS saturate filter
func (svg *SVG) Saturate(p float64) {
svg.FeColorMatrixSaturate(Filterspec{}, p)
}
// Sepia applies a sepia tone, emulating the CSS sepia filter
func (svg *SVG) Sepia() {
var sepiamatrix = [20]float64{
0.280, 0.450, 0.05, 0, 0,
0.140, 0.390, 0.04, 0, 0,
0.080, 0.280, 0.03, 0, 0,
0, 0, 0, 1, 0,
}
svg.FeColorMatrix(Filterspec{}, sepiamatrix)
}
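// Example (sketch, not part of the upstream source): declaring a blur filter
// and applying it to a shape by reference; the id "soften" is illustrative
// and canvas is assumed to come from New.
//
//	canvas.Def()
//	canvas.Filter("soften")
//	canvas.Blur(3)
//	canvas.Fend()
//	canvas.DefEnd()
//	canvas.Circle(100, 100, 50, `filter="url(#soften)"`)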
// Utility
// Grid draws a grid at the specified coordinates, with the given dimensions and spacing, with optional style.
func (svg *SVG) Grid(x int, y int, w int, h int, n int, s ...string) {
if len(s) > 0 {
svg.Gstyle(s[0])
}
for ix := x; ix <= x+w; ix += n {
svg.Line(ix, y, ix, y+h)
}
for iy := y; iy <= y+h; iy += n {
svg.Line(x, iy, x+w, iy)
}
if len(s) > 0 {
svg.Gend()
}
}
// Support functions
// style returns a style attribute string (style="...")
func style(s string) string {
if len(s) > 0 {
return fmt.Sprintf(`style="%s"`, s)
}
return s
}
// pp outputs a series of polygon points
func (svg *SVG) pp(x []int, y []int, tag string) {
svg.print(tag)
if len(x) != len(y) {
svg.print(" ")
return
}
lx := len(x) - 1
for i := 0; i < lx; i++ {
svg.print(coord(x[i], y[i]) + " ")
}
svg.print(coord(x[lx], y[lx]))
}
// endstyle modifies an SVG object, with either a series of name="value" pairs,
// or a single string containing a style
func endstyle(s []string, endtag string) string {
if len(s) > 0 {
nv := ""
for i := 0; i < len(s); i++ {
if strings.Index(s[i], "=") > 0 {
nv += (s[i]) + " "
} else {
nv += style(s[i])
}
}
return nv + endtag
}
return endtag
}
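// Example (sketch, not part of the upstream source): how the two
// trailing-argument forms are rendered by endstyle.
//
//	endstyle([]string{"fill:none;stroke:black"}, emptyclose)
//	// -> `style="fill:none;stroke:black"/>` plus a newline
//	endstyle([]string{`fill="none"`, `stroke="black"`}, ">")
//	// -> `fill="none" stroke="black" >`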
// tt creates an XML element with the given tag, containing s
func (svg *SVG) tt(tag string, s string) {
svg.print("<" + tag + ">")
xml.Escape(svg.Writer, []byte(s))
svg.println("</" + tag + ">")
}
// poly compiles the polygon element
func (svg *SVG) poly(x []int, y []int, tag string, s ...string) {
svg.pp(x, y, "<"+tag+" points=\"")
svg.print(`" ` + endstyle(s, "/>\n"))
}
// onezero returns "0" or "1"
func onezero(flag bool) string {
if flag {
return "1"
}
return "0"
}
// pct returns a percentage, capped at 100
func pct(n uint8) uint8 {
if n > 100 {
return 100
}
return n
}
// islink determines if a string is a script reference
func islink(link string) bool {
return strings.HasPrefix(link, "http://") || strings.HasPrefix(link, "#") ||
strings.HasPrefix(link, "../") || strings.HasPrefix(link, "./")
}
// group returns a group element
func group(tag string, value string) string { return fmt.Sprintf(`<g %s="%s">`, tag, value) }
// scale returns the scale string for the transform
func scale(n float64) string { return fmt.Sprintf(`scale(%g)`, n) }
// scaleXY returns the scale string for the transform
func scaleXY(dx, dy float64) string { return fmt.Sprintf(`scale(%g,%g)`, dx, dy) }
// skewX returns the skewX string for the transform
func skewX(angle float64) string { return fmt.Sprintf(`skewX(%g)`, angle) }
// skewY returns the skewY string for the transform
func skewY(angle float64) string { return fmt.Sprintf(`skewY(%g)`, angle) }
// rotate returns the rotate string for the transform
func rotate(r float64) string { return fmt.Sprintf(`rotate(%g)`, r) }
// translate returns the translate string for the transform
func translate(x, y int) string { return fmt.Sprintf(`translate(%d,%d)`, x, y) }
// coord returns a coordinate string
func coord(x int, y int) string { return fmt.Sprintf(`%d,%d`, x, y) }
// ptag returns the beginning of the path element
func ptag(x int, y int) string { return fmt.Sprintf(`<path d="M%s`, coord(x, y)) }
// loc returns the x and y coordinate attributes
func loc(x int, y int) string { return fmt.Sprintf(`x="%d" y="%d"`, x, y) }
// href returns the href name and attribute
func href(s string) string { return fmt.Sprintf(`xlink:href="%s"`, s) }
// dim returns the dimension string (x, y coordinates and width, height)
func dim(x int, y int, w int, h int) string {
return fmt.Sprintf(`x="%d" y="%d" width="%d" height="%d"`, x, y, w, h)
}
// fsattr returns the XML attribute representation of a filterspec, ignoring empty attributes
func fsattr(s Filterspec) string {
attrs := ""
if len(s.In) > 0 {
attrs += fmt.Sprintf(`in="%s" `, s.In)
}
if len(s.In2) > 0 {
attrs += fmt.Sprintf(`in2="%s" `, s.In2)
}
if len(s.Result) > 0 {
attrs += fmt.Sprintf(`result="%s" `, s.Result)
}
return attrs
}
// tablevalues outputs a series of values as an XML attribute
func (svg *SVG) tablevalues(s string, t []float64) {
svg.printf(` %s="`, s)
for i := 0; i < len(t)-1; i++ {
svg.printf("%g ", t[i])
}
svg.printf(`%g"%s`, t[len(t)-1], emptyclose)
}
// imgchannel validates the image channel indicator
func imgchannel(c string) string {
switch c {
case "R", "G", "B", "A":
return c
case "r", "g", "b", "a":
return strings.ToUpper(c)
case "red", "green", "blue", "alpha":
return strings.ToUpper(c[0:1])
case "Red", "Green", "Blue", "Alpha":
return c[0:1]
}
return "R"
}

12
vendor/github.com/cheggaaa/pb/LICENSE generated vendored Normal file
View File

@ -0,0 +1,12 @@
Copyright (c) 2012-2015, Sergey Cherepanov
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

87
vendor/github.com/cheggaaa/pb/format.go generated vendored Normal file
View File

@ -0,0 +1,87 @@
package pb
import (
"fmt"
"strings"
"time"
)
type Units int
const (
// U_NO are default units, they represent a simple value and are not formatted at all.
U_NO Units = iota
// U_BYTES units are formatted in a human-readable way (B, KB, MB, ...)
U_BYTES
// U_DURATION units are formatted in a human readable way (3h14m15s)
U_DURATION
)
func Format(i int64) *formatter {
return &formatter{n: i}
}
type formatter struct {
n int64
unit Units
width int
perSec bool
}
func (f *formatter) Value(n int64) *formatter {
f.n = n
return f
}
func (f *formatter) To(unit Units) *formatter {
f.unit = unit
return f
}
func (f *formatter) Width(width int) *formatter {
f.width = width
return f
}
func (f *formatter) PerSec() *formatter {
f.perSec = true
return f
}
func (f *formatter) String() (out string) {
switch f.unit {
case U_BYTES:
out = formatBytes(f.n)
case U_DURATION:
d := time.Duration(f.n)
if d > time.Hour*24 {
out = fmt.Sprintf("%dd", d/24/time.Hour)
d -= (d / time.Hour / 24) * (time.Hour * 24)
}
out = fmt.Sprintf("%s%v", out, d)
default:
out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n)
}
if f.perSec {
out += "/s"
}
return
}
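// Example (sketch, not part of the upstream source): formatting raw counts
// for display.
//
//	Format(1500000).To(U_BYTES).String()                    // "1.43 MB"
//	Format(int64(90 * time.Second)).To(U_DURATION).String() // "1m30s"
//	Format(42).Width(6).String()                            // "    42"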
// formatBytes converts a byte count to a human-readable string, e.g. 2 MB, 64.2 KB, 52 B
func formatBytes(i int64) (result string) {
switch {
case i > (1024 * 1024 * 1024 * 1024):
result = fmt.Sprintf("%.02f TB", float64(i)/1024/1024/1024/1024)
case i > (1024 * 1024 * 1024):
result = fmt.Sprintf("%.02f GB", float64(i)/1024/1024/1024)
case i > (1024 * 1024):
result = fmt.Sprintf("%.02f MB", float64(i)/1024/1024)
case i > 1024:
result = fmt.Sprintf("%.02f KB", float64(i)/1024)
default:
result = fmt.Sprintf("%d B", i)
}
result = strings.Trim(result, " ")
return
}

441
vendor/github.com/cheggaaa/pb/pb.go generated vendored Normal file
View File

@ -0,0 +1,441 @@
// Simple console progress bars
package pb
import (
"fmt"
"io"
"math"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"
)
// Current version
const Version = "1.0.5"
const (
// Default refresh rate - 200ms
DEFAULT_REFRESH_RATE = time.Millisecond * 200
FORMAT = "[=>-]"
)
// DEPRECATED
// Variables kept for backward compatibility; they no longer have any effect.
// Use pb.Format and pb.SetRefreshRate instead.
var (
DefaultRefreshRate = DEFAULT_REFRESH_RATE
BarStart, BarEnd, Empty, Current, CurrentN string
)
// Create new progress bar object
func New(total int) *ProgressBar {
return New64(int64(total))
}
// Create new progress bar object using int64 as total
func New64(total int64) *ProgressBar {
pb := &ProgressBar{
Total: total,
RefreshRate: DEFAULT_REFRESH_RATE,
ShowPercent: true,
ShowCounters: true,
ShowBar: true,
ShowTimeLeft: true,
ShowFinalTime: true,
Units: U_NO,
ManualUpdate: false,
finish: make(chan struct{}),
currentValue: -1,
mu: new(sync.Mutex),
}
return pb.Format(FORMAT)
}
// Create new object and start
func StartNew(total int) *ProgressBar {
return New(total).Start()
}
// Callback for custom output
// For example:
// bar.Callback = func(s string) {
// mySuperPrint(s)
// }
//
type Callback func(out string)
type ProgressBar struct {
current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278)
Total int64
RefreshRate time.Duration
ShowPercent, ShowCounters bool
ShowSpeed, ShowTimeLeft, ShowBar bool
ShowFinalTime bool
Output io.Writer
Callback Callback
NotPrint bool
Units Units
Width int
ForceWidth bool
ManualUpdate bool
AutoStat bool
// Default width for the time box.
UnitsWidth int
TimeBoxWidth int
finishOnce sync.Once //Guards isFinish
finish chan struct{}
isFinish bool
startTime time.Time
startValue int64
currentValue int64
prefix, postfix string
mu *sync.Mutex
lastPrint string
BarStart string
BarEnd string
Empty string
Current string
CurrentN string
AlwaysUpdate bool
}
// Start print
func (pb *ProgressBar) Start() *ProgressBar {
pb.startTime = time.Now()
pb.startValue = pb.current
if pb.Total == 0 {
pb.ShowTimeLeft = false
pb.ShowPercent = false
pb.AutoStat = false
}
if !pb.ManualUpdate {
pb.Update() // Initial printing of the bar before running the bar refresher.
go pb.refresher()
}
return pb
}
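// Example (sketch, not part of the upstream source): a basic counting loop;
// doWork is a placeholder for the caller's own work.
//
//	count := 1000
//	bar := StartNew(count)
//	for i := 0; i < count; i++ {
//		doWork()
//		bar.Increment()
//	}
//	bar.FinishPrint("done")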
// Increment current value
func (pb *ProgressBar) Increment() int {
return pb.Add(1)
}
// Get current value
func (pb *ProgressBar) Get() int64 {
c := atomic.LoadInt64(&pb.current)
return c
}
// Set current value
func (pb *ProgressBar) Set(current int) *ProgressBar {
return pb.Set64(int64(current))
}
// Set64 sets the current value as int64
func (pb *ProgressBar) Set64(current int64) *ProgressBar {
atomic.StoreInt64(&pb.current, current)
return pb
}
// Add to current value
func (pb *ProgressBar) Add(add int) int {
return int(pb.Add64(int64(add)))
}
func (pb *ProgressBar) Add64(add int64) int64 {
return atomic.AddInt64(&pb.current, add)
}
// Set prefix string
func (pb *ProgressBar) Prefix(prefix string) *ProgressBar {
pb.prefix = prefix
return pb
}
// Set postfix string
func (pb *ProgressBar) Postfix(postfix string) *ProgressBar {
pb.postfix = postfix
return pb
}
// Set custom format for bar
// Example: bar.Format("[=>_]")
// Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter
func (pb *ProgressBar) Format(format string) *ProgressBar {
var formatEntries []string
if len(format) == 5 {
formatEntries = strings.Split(format, "")
} else {
formatEntries = strings.Split(format, "\x00")
}
if len(formatEntries) == 5 {
pb.BarStart = formatEntries[0]
pb.BarEnd = formatEntries[4]
pb.Empty = formatEntries[3]
pb.Current = formatEntries[1]
pb.CurrentN = formatEntries[2]
}
return pb
}
// Set bar refresh rate
func (pb *ProgressBar) SetRefreshRate(rate time.Duration) *ProgressBar {
pb.RefreshRate = rate
return pb
}
// Set units
// bar.SetUnits(U_NO) - by default
// bar.SetUnits(U_BYTES) - for MB, KB, etc
func (pb *ProgressBar) SetUnits(units Units) *ProgressBar {
pb.Units = units
return pb
}
// Set max width; if the width is bigger than the terminal width, it will be ignored
func (pb *ProgressBar) SetMaxWidth(width int) *ProgressBar {
pb.Width = width
pb.ForceWidth = false
return pb
}
// Set bar width
func (pb *ProgressBar) SetWidth(width int) *ProgressBar {
pb.Width = width
pb.ForceWidth = true
return pb
}
// End print
func (pb *ProgressBar) Finish() {
//Protect multiple calls
pb.finishOnce.Do(func() {
close(pb.finish)
pb.write(atomic.LoadInt64(&pb.current))
if !pb.NotPrint {
fmt.Println()
}
pb.isFinish = true
})
}
// End print and write string 'str'
func (pb *ProgressBar) FinishPrint(str string) {
pb.Finish()
fmt.Println(str)
}
// implement io.Writer
func (pb *ProgressBar) Write(p []byte) (n int, err error) {
n = len(p)
pb.Add(n)
return
}
// implement io.Reader
func (pb *ProgressBar) Read(p []byte) (n int, err error) {
n = len(p)
pb.Add(n)
return
}
// Create new proxy reader over bar
// Takes io.Reader or io.ReadCloser
func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader {
return &Reader{r, pb}
}
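// Example (sketch, not part of the upstream source): copying from a reader
// while reporting byte progress; src, dst, and size are assumed to be
// provided by the caller.
//
//	bar := New64(size).SetUnits(U_BYTES).Start()
//	_, err := io.Copy(dst, bar.NewProxyReader(src))
//	bar.Finish()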
func (pb *ProgressBar) write(current int64) {
width := pb.GetWidth()
var percentBox, countersBox, timeLeftBox, speedBox, barBox, end, out string
// percents
if pb.ShowPercent {
var percent float64
if pb.Total > 0 {
percent = float64(current) / (float64(pb.Total) / float64(100))
} else {
percent = float64(current) / float64(100)
}
percentBox = fmt.Sprintf(" %6.02f%%", percent)
}
// counters
if pb.ShowCounters {
current := Format(current).To(pb.Units).Width(pb.UnitsWidth)
if pb.Total > 0 {
total := Format(pb.Total).To(pb.Units).Width(pb.UnitsWidth)
countersBox = fmt.Sprintf(" %s / %s ", current, total)
} else {
countersBox = fmt.Sprintf(" %s / ? ", current)
}
}
// time left
fromStart := time.Now().Sub(pb.startTime)
currentFromStart := current - pb.startValue
select {
case <-pb.finish:
if pb.ShowFinalTime {
var left time.Duration
if pb.Total > 0 {
left = (fromStart / time.Second) * time.Second
} else {
left = (time.Duration(currentFromStart) / time.Second) * time.Second
}
timeLeftBox = fmt.Sprintf(" %s", left.String())
}
default:
if pb.ShowTimeLeft && currentFromStart > 0 {
perEntry := fromStart / time.Duration(currentFromStart)
var left time.Duration
if pb.Total > 0 {
left = time.Duration(pb.Total-currentFromStart) * perEntry
left = (left / time.Second) * time.Second
} else {
left = time.Duration(currentFromStart) * perEntry
left = (left / time.Second) * time.Second
}
timeLeft := Format(int64(left)).To(U_DURATION).String()
timeLeftBox = fmt.Sprintf(" %s", timeLeft)
}
}
if len(timeLeftBox) < pb.TimeBoxWidth {
timeLeftBox = fmt.Sprintf("%s%s", strings.Repeat(" ", pb.TimeBoxWidth-len(timeLeftBox)), timeLeftBox)
}
// speed
if pb.ShowSpeed && currentFromStart > 0 {
fromStart := time.Now().Sub(pb.startTime)
speed := float64(currentFromStart) / (float64(fromStart) / float64(time.Second))
speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String()
}
barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix)
// bar
if pb.ShowBar {
size := width - barWidth
if size > 0 {
if pb.Total > 0 {
curCount := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size)))
emptCount := size - curCount
barBox = pb.BarStart
if emptCount < 0 {
emptCount = 0
}
if curCount > size {
curCount = size
}
if emptCount <= 0 {
barBox += strings.Repeat(pb.Current, curCount)
} else if curCount > 0 {
barBox += strings.Repeat(pb.Current, curCount-1) + pb.CurrentN
}
barBox += strings.Repeat(pb.Empty, emptCount) + pb.BarEnd
} else {
barBox = pb.BarStart
pos := size - int(current)%int(size)
if pos-1 > 0 {
barBox += strings.Repeat(pb.Empty, pos-1)
}
barBox += pb.Current
if size-pos-1 > 0 {
barBox += strings.Repeat(pb.Empty, size-pos-1)
}
barBox += pb.BarEnd
}
}
}
// check len
out = pb.prefix + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix
if escapeAwareRuneCountInString(out) < width {
end = strings.Repeat(" ", width-utf8.RuneCountInString(out))
}
// and print!
pb.mu.Lock()
pb.lastPrint = out + end
pb.mu.Unlock()
switch {
case pb.isFinish:
return
case pb.Output != nil:
fmt.Fprint(pb.Output, "\r"+out+end)
case pb.Callback != nil:
pb.Callback(out + end)
case !pb.NotPrint:
fmt.Print("\r" + out + end)
}
}
// GetTerminalWidth - returns terminal width for all platforms.
func GetTerminalWidth() (int, error) {
return terminalWidth()
}
func (pb *ProgressBar) GetWidth() int {
if pb.ForceWidth {
return pb.Width
}
width := pb.Width
termWidth, _ := terminalWidth()
if width == 0 || termWidth <= width {
width = termWidth
}
return width
}
// Write the current state of the progressbar
func (pb *ProgressBar) Update() {
c := atomic.LoadInt64(&pb.current)
if pb.AlwaysUpdate || c != pb.currentValue {
pb.write(c)
pb.currentValue = c
}
if pb.AutoStat {
if c == 0 {
pb.startTime = time.Now()
pb.startValue = 0
} else if c >= pb.Total && !pb.isFinish {
pb.Finish()
}
}
}
func (pb *ProgressBar) String() string {
return pb.lastPrint
}
// Internal loop for refreshing the progressbar
func (pb *ProgressBar) refresher() {
for {
select {
case <-pb.finish:
return
case <-time.After(pb.RefreshRate):
pb.Update()
}
}
}
type window struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
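For reference, a minimal usage sketch of the exported surface shown above (GetTerminalWidth, Update, String, and the Output/ManualUpdate fields consulted by write). It is not part of the vendored sources; pb.New, Start, Add and Finish are assumed from the earlier part of pb.go that this excerpt does not reproduce.

package main

import (
    "bytes"
    "fmt"

    "github.com/cheggaaa/pb"
)

func main() {
    if w, err := pb.GetTerminalWidth(); err == nil {
        fmt.Println("detected terminal width:", w)
    }

    bar := pb.New(100) // assumed constructor from pb.go (not shown above)
    var buf bytes.Buffer
    bar.Output = &buf       // write() sends "\r"+rendered line here instead of stdout
    bar.ManualUpdate = true // no refresher goroutine; we call Update ourselves
    bar.Start()             // assumed from pb.go
    for i := 0; i < 100; i++ {
        bar.Add(1)   // assumed from pb.go
        bar.Update() // renders the current state via write()
    }
    bar.Finish()
    fmt.Println(bar.String()) // last rendered line, as stored by write()
}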

11
vendor/github.com/cheggaaa/pb/pb_appengine.go generated vendored Normal file

@ -0,0 +1,11 @@
// +build appengine
package pb
import "errors"
// terminalWidth returns the width of the terminal. It is not supported and
// always fails on App Engine classic, which is a sandboxed PaaS.
func terminalWidth() (int, error) {
return 0, errors.New("Not supported")
}

8
vendor/github.com/cheggaaa/pb/pb_nix.go generated vendored Normal file

@ -0,0 +1,8 @@
// +build linux darwin freebsd netbsd openbsd dragonfly
// +build !appengine
package pb
import "syscall"
const sysIoctl = syscall.SYS_IOCTL

6
vendor/github.com/cheggaaa/pb/pb_solaris.go generated vendored Normal file

@ -0,0 +1,6 @@
// +build solaris
// +build !appengine
package pb
const sysIoctl = 54

141
vendor/github.com/cheggaaa/pb/pb_win.go generated vendored Normal file

@ -0,0 +1,141 @@
// +build windows
package pb
import (
"errors"
"fmt"
"os"
"sync"
"syscall"
"unsafe"
)
var tty = os.Stdin
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
// GetConsoleScreenBufferInfo retrieves information about the
// specified console screen buffer.
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
// GetConsoleMode retrieves the current input mode of a console's
// input buffer or the current output mode of a console screen buffer.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
getConsoleMode = kernel32.NewProc("GetConsoleMode")
// SetConsoleMode sets the input mode of a console's input buffer
// or the output mode of a console screen buffer.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
setConsoleMode = kernel32.NewProc("SetConsoleMode")
// SetConsoleCursorPosition sets the cursor position in the
// specified console screen buffer.
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx
setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
)
type (
// Defines the coordinates of the upper left and lower right corners
// of a rectangle.
// See
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx
smallRect struct {
Left, Top, Right, Bottom int16
}
// Defines the coordinates of a character cell in a console screen
// buffer. The origin of the coordinate system (0,0) is at the top, left cell
// of the buffer.
// See
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx
coordinates struct {
X, Y int16
}
word int16
// Contains information about a console screen buffer.
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx
consoleScreenBufferInfo struct {
dwSize coordinates
dwCursorPosition coordinates
wAttributes word
srWindow smallRect
dwMaximumWindowSize coordinates
}
)
// terminalWidth returns the width of the terminal.
func terminalWidth() (width int, err error) {
var info consoleScreenBufferInfo
_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
if e != 0 {
return 0, error(e)
}
return int(info.dwSize.X) - 1, nil
}
func getCursorPos() (pos coordinates, err error) {
var info consoleScreenBufferInfo
_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
if e != 0 {
return info.dwCursorPosition, error(e)
}
return info.dwCursorPosition, nil
}
func setCursorPos(pos coordinates) error {
_, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0)
if e != 0 {
return error(e)
}
return nil
}
var ErrPoolWasStarted = errors.New("Bar pool was started")
var echoLocked bool
var echoLockMutex sync.Mutex
var oldState word
func lockEcho() (quit chan int, err error) {
echoLockMutex.Lock()
defer echoLockMutex.Unlock()
if echoLocked {
err = ErrPoolWasStarted
return
}
echoLocked = true
if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 {
err = fmt.Errorf("Can't get terminal settings: %v", e)
return
}
newState := oldState
const ENABLE_ECHO_INPUT = 0x0004
const ENABLE_LINE_INPUT = 0x0002
newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT))
if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 {
err = fmt.Errorf("Can't set terminal settings: %v", e)
return
}
return
}
func unlockEcho() (err error) {
echoLockMutex.Lock()
defer echoLockMutex.Unlock()
if !echoLocked {
return
}
echoLocked = false
if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 {
err = fmt.Errorf("Can't set terminal settings")
}
return
}

110
vendor/github.com/cheggaaa/pb/pb_x.go generated vendored Normal file

@ -0,0 +1,110 @@
// +build linux darwin freebsd netbsd openbsd solaris dragonfly
// +build !appengine
package pb
import (
"errors"
"fmt"
"os"
"os/signal"
"runtime"
"sync"
"syscall"
"unsafe"
)
const (
TIOCGWINSZ = 0x5413
TIOCGWINSZ_OSX = 1074295912
)
var tty *os.File
var ErrPoolWasStarted = errors.New("Bar pool was started")
var echoLocked bool
var echoLockMutex sync.Mutex
func init() {
var err error
tty, err = os.Open("/dev/tty")
if err != nil {
tty = os.Stdin
}
}
// terminalWidth returns the width of the terminal.
func terminalWidth() (int, error) {
w := new(window)
tio := syscall.TIOCGWINSZ
if runtime.GOOS == "darwin" {
tio = TIOCGWINSZ_OSX
}
res, _, err := syscall.Syscall(sysIoctl,
tty.Fd(),
uintptr(tio),
uintptr(unsafe.Pointer(w)),
)
if int(res) == -1 {
return 0, err
}
return int(w.Col), nil
}
var oldState syscall.Termios
func lockEcho() (quit chan int, err error) {
echoLockMutex.Lock()
defer echoLockMutex.Unlock()
if echoLocked {
err = ErrPoolWasStarted
return
}
echoLocked = true
fd := tty.Fd()
if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
err = fmt.Errorf("Can't get terminal settings: %v", e)
return
}
newState := oldState
newState.Lflag &^= syscall.ECHO
newState.Lflag |= syscall.ICANON | syscall.ISIG
newState.Iflag |= syscall.ICRNL
if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); e != 0 {
err = fmt.Errorf("Can't set terminal settings: %v", e)
return
}
quit = make(chan int, 1)
go catchTerminate(quit)
return
}
func unlockEcho() (err error) {
echoLockMutex.Lock()
defer echoLockMutex.Unlock()
if !echoLocked {
return
}
echoLocked = false
fd := tty.Fd()
if _, _, e := syscall.Syscall6(sysIoctl, fd, ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); e != 0 {
err = fmt.Errorf("Can't set terminal settings")
}
return
}
// catchTerminate listens for exit signals and restores the terminal state.
func catchTerminate(quit chan int) {
sig := make(chan os.Signal, 1)
signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL)
defer signal.Stop(sig)
select {
case <-quit:
unlockEcho()
case <-sig:
unlockEcho()
}
}

76
vendor/github.com/cheggaaa/pb/pool.go generated vendored Normal file

@ -0,0 +1,76 @@
// +build linux darwin freebsd netbsd openbsd solaris dragonfly windows
package pb
import (
"sync"
"time"
)
// StartPool creates and starts a new pool with the given bars.
// Call pool.Stop() when the work is done.
func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) {
pool = new(Pool)
if err = pool.start(); err != nil {
return
}
pool.Add(pbs...)
return
}
type Pool struct {
RefreshRate time.Duration
bars []*ProgressBar
quit chan int
finishOnce sync.Once
}
// Add progress bars.
func (p *Pool) Add(pbs ...*ProgressBar) {
for _, bar := range pbs {
bar.ManualUpdate = true
bar.NotPrint = true
bar.Start()
p.bars = append(p.bars, bar)
}
}
func (p *Pool) start() (err error) {
p.RefreshRate = DefaultRefreshRate
quit, err := lockEcho()
if err != nil {
return
}
p.quit = make(chan int)
go p.writer(quit)
return
}
func (p *Pool) writer(finish chan int) {
var first = true
for {
select {
case <-time.After(p.RefreshRate):
if p.print(first) {
p.print(false)
finish <- 1
return
}
first = false
case <-p.quit:
finish <- 1
return
}
}
}
// Stop restores the terminal state and closes the pool.
func (p *Pool) Stop() error {
// Wait until one final refresh has passed.
time.Sleep(p.RefreshRate)
p.finishOnce.Do(func() {
close(p.quit)
})
return unlockEcho()
}
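A sketch of driving several bars through the pool defined above. It is not from the vendored sources; pb.New and Increment are assumed from pb.go, which this excerpt does not show.

package main

import (
    "sync"
    "time"

    "github.com/cheggaaa/pb"
)

func main() {
    first := pb.New(200)  // assumed constructor from pb.go
    second := pb.New(200) // assumed constructor from pb.go

    pool, err := pb.StartPool(first, second) // locks echo and starts the writer loop
    if err != nil {
        panic(err)
    }

    var wg sync.WaitGroup
    for _, bar := range []*pb.ProgressBar{first, second} {
        wg.Add(1)
        go func(bar *pb.ProgressBar) {
            defer wg.Done()
            for i := 0; i < 200; i++ {
                bar.Increment() // assumed from pb.go
                time.Sleep(time.Millisecond)
            }
            bar.Finish()
        }(bar)
    }
    wg.Wait()

    pool.Stop() // restores the terminal state
}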

35
vendor/github.com/cheggaaa/pb/pool_win.go generated vendored Normal file

@ -0,0 +1,35 @@
// +build windows
package pb
import (
"fmt"
"log"
)
func (p *Pool) print(first bool) bool {
var out string
if !first {
coords, err := getCursorPos()
if err != nil {
log.Panic(err)
}
coords.Y -= int16(len(p.bars))
coords.X = 0
err = setCursorPos(coords)
if err != nil {
log.Panic(err)
}
}
isFinished := true
for _, bar := range p.bars {
if !bar.isFinish {
isFinished = false
}
bar.Update()
out += fmt.Sprintf("\r%s\n", bar.String())
}
fmt.Print(out)
return isFinished
}

22
vendor/github.com/cheggaaa/pb/pool_x.go generated vendored Normal file

@ -0,0 +1,22 @@
// +build linux darwin freebsd netbsd openbsd solaris dragonfly
package pb
import "fmt"
func (p *Pool) print(first bool) bool {
var out string
if !first {
out = fmt.Sprintf("\033[%dA", len(p.bars))
}
isFinished := true
for _, bar := range p.bars {
if !bar.isFinish {
isFinished = false
}
bar.Update()
out += fmt.Sprintf("\r%s\n", bar.String())
}
fmt.Print(out)
return isFinished
}

25
vendor/github.com/cheggaaa/pb/reader.go generated vendored Normal file

@ -0,0 +1,25 @@
package pb
import (
"io"
)
// Reader is a proxy reader: it implements io.Reader and advances the bar by the number of bytes read.
type Reader struct {
io.Reader
bar *ProgressBar
}
func (r *Reader) Read(p []byte) (n int, err error) {
n, err = r.Reader.Read(p)
r.bar.Add(n)
return
}
// Close closes the wrapped reader when it implements io.Closer.
func (r *Reader) Close() (err error) {
if closer, ok := r.Reader.(io.Closer); ok {
return closer.Close()
}
return
}
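A sketch of the proxy reader in use. It is not from the vendored sources; pb.New and the NewProxyReader constructor are assumed from pb.go, which this excerpt does not show.

package main

import (
    "io"
    "io/ioutil"
    "os"

    "github.com/cheggaaa/pb"
)

func main() {
    f, err := os.Open("some-large-file")
    if err != nil {
        panic(err)
    }
    defer f.Close()
    info, _ := f.Stat()

    bar := pb.New(int(info.Size())) // assumed constructor from pb.go
    bar.ShowSpeed = true            // exported option consulted by write() above
    bar.Start()                     // assumed from pb.go

    // Every Read on the proxy calls bar.Add(n), so the copy drives the bar.
    reader := bar.NewProxyReader(f) // assumed helper from pb.go
    if _, err := io.Copy(ioutil.Discard, reader); err != nil {
        panic(err)
    }
    bar.Finish()
}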

17
vendor/github.com/cheggaaa/pb/runecount.go generated vendored Normal file

@ -0,0 +1,17 @@
package pb
import (
"regexp"
"unicode/utf8"
)
// ctrlFinder matches ANSI control sequences such as color codes (ESC [ <digits> m)
var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d")
func escapeAwareRuneCountInString(s string) int {
n := utf8.RuneCountInString(s)
for _, sm := range ctrlFinder.FindAllString(s, -1) {
n -= len(sm)
}
return n
}
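A small in-package sketch of the helper above (it is unexported, so this only compiles inside package pb). It shows that simple ANSI color sequences are excluded from the width used by write(); note that parameterized sequences such as "\x1b[1;32m" are not matched by ctrlFinder and would still be counted.

package pb

import "fmt"

func exampleEscapeAwareCount() {
    colored := "\x1b[32mOK\x1b[0m" // "OK" wrapped in green and reset sequences
    fmt.Println(len(colored))                          // 11 bytes in total
    fmt.Println(escapeAwareRuneCountInString(colored)) // 2: only "OK" counts toward the bar width
}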

9
vendor/github.com/cheggaaa/pb/termios_bsd.go generated vendored Normal file

@ -0,0 +1,9 @@
// +build darwin freebsd netbsd openbsd dragonfly
// +build !appengine
package pb
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
const ioctlWriteTermios = syscall.TIOCSETA

7
vendor/github.com/cheggaaa/pb/termios_nix.go generated vendored Normal file

@ -0,0 +1,7 @@
// +build linux solaris
// +build !appengine
package pb
const ioctlReadTermios = 0x5401 // syscall.TCGETS
const ioctlWriteTermios = 0x5402 // syscall.TCSETS


@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS


@ -0,0 +1,834 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"io"
)
/*
* Create a new parser object.
*/
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE),
buffer: make([]byte, 0, INPUT_BUFFER_SIZE),
}
return true
}
/*
* Destroy a parser object.
*/
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
/*
* String read handler.
*/
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n := copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
/*
* File read handler.
*/
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
return parser.input_reader.Read(buffer)
}
/*
* Set a string input.
*/
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("input already set")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
/*
* Set a reader input
*/
func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) {
if parser.read_handler != nil {
panic("input already set")
}
parser.read_handler = yaml_file_read_handler
parser.input_reader = reader
}
/*
* Set a generic input.
*/
func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) {
if parser.read_handler != nil {
panic("input already set")
}
parser.read_handler = handler
}
/*
* Set the source encoding.
*/
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("encoding already set")
}
parser.encoding = encoding
}
/*
* Create a new emitter object.
*/
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, OUTPUT_BUFFER_SIZE),
raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE),
states: make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE),
events: make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE),
}
}
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
/*
* String write handler.
*/
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
/*
* File write handler.
*/
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
return err
}
/*
* Set a string output.
*/
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) {
if emitter.write_handler != nil {
panic("output already set")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = buffer
}
/*
* Set a file output.
*/
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("output already set")
}
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
}
/*
* Set a generic output handler.
*/
func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) {
if emitter.write_handler != nil {
panic("output already set")
}
emitter.write_handler = handler
}
/*
* Set the output encoding.
*/
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("encoding already set")
}
emitter.encoding = encoding
}
/*
* Set the canonical output style.
*/
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
/*
* Set the indentation increment.
*/
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
/*
* Set the preferred line width.
*/
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
/*
* Set if unescaped non-ASCII characters are allowed.
*/
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
/*
* Set the preferred line break character.
*/
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
/*
* Destroy a token object.
*/
// yaml_DECLARE(void)
// yaml_token_delete(yaml_token_t *token)
// {
// assert(token); /* Non-NULL token object expected. */
//
// switch (token.type)
// {
// case yaml_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case yaml_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case yaml_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case yaml_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case yaml_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
// }
/*
* Check if a string is a valid UTF-8 sequence.
*
* Check 'reader.c' for more details on UTF-8 encoding.
*/
// static int
// yaml_check_utf8(yaml_char_t *start, size_t length)
// {
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
// }
/*
* Create STREAM-START.
*/
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
event_type: yaml_STREAM_START_EVENT,
encoding: encoding,
}
}
/*
* Create STREAM-END.
*/
func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
event_type: yaml_STREAM_END_EVENT,
}
}
/*
* Create DOCUMENT-START.
*/
func yaml_document_start_event_initialize(event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool) {
*event = yaml_event_t{
event_type: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
}
/*
* Create DOCUMENT-END.
*/
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
event_type: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
}
/*
* Create ALIAS.
*/
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) {
*event = yaml_event_t{
event_type: yaml_ALIAS_EVENT,
anchor: anchor,
}
}
/*
* Create SCALAR.
*/
func yaml_scalar_event_initialize(event *yaml_event_t,
anchor []byte, tag []byte,
value []byte,
plain_implicit bool, quoted_implicit bool,
style yaml_scalar_style_t) {
*event = yaml_event_t{
event_type: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
}
/*
* Create SEQUENCE-START.
*/
func yaml_sequence_start_event_initialize(event *yaml_event_t,
anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) {
*event = yaml_event_t{
event_type: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
/*
* Create SEQUENCE-END.
*/
func yaml_sequence_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
event_type: yaml_SEQUENCE_END_EVENT,
}
}
/*
* Create MAPPING-START.
*/
func yaml_mapping_start_event_initialize(event *yaml_event_t,
anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
event_type: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
/*
* Create MAPPING-END.
*/
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
event_type: yaml_MAPPING_END_EVENT,
}
}
/*
* Destroy an event object.
*/
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
// /*
// * Create a document object.
// */
//
// func yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives []yaml_tag_directive_t,
// start_implicit, end_implicit bool) bool {
//
//
// {
// struct {
// YAML_error_type_t error;
// } context;
// struct {
// yaml_node_t *start;
// yaml_node_t *end;
// yaml_node_t *top;
// } nodes = { NULL, NULL, NULL };
// yaml_version_directive_t *version_directive_copy = NULL;
// struct {
// yaml_tag_directive_t *start;
// yaml_tag_directive_t *end;
// yaml_tag_directive_t *top;
// } tag_directives_copy = { NULL, NULL, NULL };
// yaml_tag_directive_t value = { NULL, NULL };
// YAML_mark_t mark = { 0, 0, 0 };
//
// assert(document); /* Non-NULL document object is expected. */
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end));
// /* Valid tag directives are expected. */
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error;
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t));
// if (!version_directive_copy) goto error;
// version_directive_copy.major = version_directive.major;
// version_directive_copy.minor = version_directive.minor;
// }
//
// if (tag_directives_start != tag_directives_end) {
// yaml_tag_directive_t *tag_directive;
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error;
// for (tag_directive = tag_directives_start;
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle);
// assert(tag_directive.prefix);
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error;
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error;
// value.handle = yaml_strdup(tag_directive.handle);
// value.prefix = yaml_strdup(tag_directive.prefix);
// if (!value.handle || !value.prefix) goto error;
// if (!PUSH(&context, tag_directives_copy, value))
// goto error;
// value.handle = NULL;
// value.prefix = NULL;
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark);
//
// return 1;
//
// error:
// STACK_DEL(&context, nodes);
// yaml_free(version_directive_copy);
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// yaml_tag_directive_t value = POP(&context, tag_directives_copy);
// yaml_free(value.handle);
// yaml_free(value.prefix);
// }
// STACK_DEL(&context, tag_directives_copy);
// yaml_free(value.handle);
// yaml_free(value.prefix);
//
// return 0;
// }
//
// /*
// * Destroy a document object.
// */
//
// yaml_DECLARE(void)
// yaml_document_delete(document *yaml_document_t)
// {
// struct {
// YAML_error_type_t error;
// } context;
// yaml_tag_directive_t *tag_directive;
//
// context.error = yaml_NO_ERROR; /* Eliminate a compiler warning. */
//
// assert(document); /* Non-NULL document object is expected. */
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// yaml_node_t node = POP(&context, document.nodes);
// yaml_free(node.tag);
// switch (node.type) {
// case yaml_SCALAR_NODE:
// yaml_free(node.data.scalar.value);
// break;
// case yaml_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items);
// break;
// case yaml_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs);
// break;
// default:
// assert(0); /* Should not happen. */
// }
// }
// STACK_DEL(&context, document.nodes);
//
// yaml_free(document.version_directive);
// for (tag_directive = document.tag_directives.start;
// tag_directive != document.tag_directives.end;
// tag_directive++) {
// yaml_free(tag_directive.handle);
// yaml_free(tag_directive.prefix);
// }
// yaml_free(document.tag_directives.start);
//
// memset(document, 0, sizeof(yaml_document_t));
// }
//
// /**
// * Get a document node.
// */
//
// yaml_DECLARE(yaml_node_t *)
// yaml_document_get_node(document *yaml_document_t, int index)
// {
// assert(document); /* Non-NULL document object is expected. */
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1;
// }
// return NULL;
// }
//
// /**
// * Get the root object.
// */
//
// yaml_DECLARE(yaml_node_t *)
// yaml_document_get_root_node(document *yaml_document_t)
// {
// assert(document); /* Non-NULL document object is expected. */
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start;
// }
// return NULL;
// }
//
// /*
// * Add a scalar node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_scalar(document *yaml_document_t,
// yaml_char_t *tag, yaml_char_t *value, int length,
// yaml_scalar_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// yaml_char_t *value_copy = NULL;
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
// assert(value); /* Non-NULL value is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (length < 0) {
// length = strlen((char *)value);
// }
//
// if (!yaml_check_utf8(value, length)) goto error;
// value_copy = yaml_malloc(length+1);
// if (!value_copy) goto error;
// memcpy(value_copy, value, length);
// value_copy[length] = '\0';
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// yaml_free(tag_copy);
// yaml_free(value_copy);
//
// return 0;
// }
//
// /*
// * Add a sequence node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_sequence(document *yaml_document_t,
// yaml_char_t *tag, yaml_sequence_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// struct {
// yaml_node_item_t *start;
// yaml_node_item_t *end;
// yaml_node_item_t *top;
// } items = { NULL, NULL, NULL };
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error;
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// STACK_DEL(&context, items);
// yaml_free(tag_copy);
//
// return 0;
// }
//
// /*
// * Add a mapping node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_mapping(document *yaml_document_t,
// yaml_char_t *tag, yaml_mapping_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// struct {
// yaml_node_pair_t *start;
// yaml_node_pair_t *end;
// yaml_node_pair_t *top;
// } pairs = { NULL, NULL, NULL };
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error;
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// STACK_DEL(&context, pairs);
// yaml_free(tag_copy);
//
// return 0;
// }
//
// /*
// * Append an item to a sequence node.
// */
//
// yaml_DECLARE(int)
// yaml_document_append_sequence_item(document *yaml_document_t,
// int sequence, int item)
// {
// struct {
// YAML_error_type_t error;
// } context;
//
// assert(document); /* Non-NULL document is required. */
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top);
// /* Valid sequence id is required. */
// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE);
// /* A sequence node is required. */
// assert(item > 0 && document.nodes.start + item <= document.nodes.top);
// /* Valid item id is required. */
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0;
//
// return 1;
// }
//
// /*
// * Append a pair of a key and a value to a mapping node.
// */
//
// yaml_DECLARE(int)
// yaml_document_append_mapping_pair(document *yaml_document_t,
// int mapping, int key, int value)
// {
// struct {
// YAML_error_type_t error;
// } context;
//
// yaml_node_pair_t pair;
//
// assert(document); /* Non-NULL document is required. */
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top);
// /* Valid mapping id is required. */
// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE);
// /* A mapping node is required. */
// assert(key > 0 && document.nodes.start + key <= document.nodes.top);
// /* Valid key id is required. */
// assert(value > 0 && document.nodes.start + value <= document.nodes.top);
// /* Valid value id is required. */
//
// pair.key = key;
// pair.value = value;
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0;
//
// return 1;
// }
//
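An in-package sketch of the event-driven emitter API initialized above (the functions are unexported, so this only compiles inside package candiedyaml). yaml_emitter_emit is defined in emitter.go, which this excerpt does not show; the call sequence below mirrors the Encoder in encode.go further on.

package candiedyaml

// emitHelloDocument builds a one-scalar YAML document into a byte slice.
func emitHelloDocument() ([]byte, bool) {
    var out []byte
    var emitter yaml_emitter_t
    var event yaml_event_t

    yaml_emitter_initialize(&emitter)
    yaml_emitter_set_output_string(&emitter, &out)

    yaml_stream_start_event_initialize(&event, yaml_UTF8_ENCODING)
    if !yaml_emitter_emit(&emitter, &event) {
        return nil, false
    }
    yaml_document_start_event_initialize(&event, nil, nil, true)
    if !yaml_emitter_emit(&emitter, &event) {
        return nil, false
    }
    yaml_scalar_event_initialize(&event, nil, nil, []byte("hello"), true, true, yaml_PLAIN_SCALAR_STYLE)
    if !yaml_emitter_emit(&emitter, &event) {
        return nil, false
    }
    yaml_document_end_event_initialize(&event, true)
    if !yaml_emitter_emit(&emitter, &event) {
        return nil, false
    }
    yaml_stream_end_event_initialize(&event)
    if !yaml_emitter_emit(&emitter, &event) {
        return nil, false
    }
    return out, true
}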


@ -0,0 +1,622 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"runtime"
"strconv"
"strings"
)
type Unmarshaler interface {
UnmarshalYAML(tag string, value interface{}) error
}
// A Number represents a YAML number literal.
type Number string
// String returns the literal text of the number.
func (n Number) String() string { return string(n) }
// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
return strconv.ParseFloat(string(n), 64)
}
// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
return strconv.ParseInt(string(n), 10, 64)
}
type Decoder struct {
parser yaml_parser_t
event yaml_event_t
replay_events []yaml_event_t
useNumber bool
anchors map[string][]yaml_event_t
tracking_anchors [][]yaml_event_t
}
type ParserError struct {
ErrorType YAML_error_type_t
Context string
ContextMark YAML_mark_t
Problem string
ProblemMark YAML_mark_t
}
func (e *ParserError) Error() string {
return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1)
}
type UnexpectedEventError struct {
Value string
EventType yaml_event_type_t
At YAML_mark_t
}
func (e *UnexpectedEventError) Error() string {
return fmt.Sprintf("yaml: Unexpect event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1)
}
func recovery(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
var tmpError error
switch r := r.(type) {
case error:
tmpError = r
case string:
tmpError = errors.New(r)
default:
tmpError = errors.New("Unknown panic: " + reflect.ValueOf(r).String())
}
*err = tmpError
}
}
func Unmarshal(data []byte, v interface{}) error {
d := NewDecoder(bytes.NewBuffer(data))
return d.Decode(v)
}
func NewDecoder(r io.Reader) *Decoder {
d := &Decoder{
anchors: make(map[string][]yaml_event_t),
tracking_anchors: make([][]yaml_event_t, 1),
}
yaml_parser_initialize(&d.parser)
yaml_parser_set_input_reader(&d.parser, r)
return d
}
func (d *Decoder) Decode(v interface{}) (err error) {
defer recovery(&err)
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return fmt.Errorf("Expected a pointer or nil but was a %s at %s", rv.String(), d.event.start_mark)
}
if d.event.event_type == yaml_NO_EVENT {
d.nextEvent()
if d.event.event_type != yaml_STREAM_START_EVENT {
return errors.New("Invalid stream")
}
d.nextEvent()
}
d.document(rv)
return nil
}
func (d *Decoder) UseNumber() { d.useNumber = true }
func (d *Decoder) error(err error) {
panic(err)
}
func (d *Decoder) nextEvent() {
if d.event.event_type == yaml_STREAM_END_EVENT {
d.error(errors.New("The stream is closed"))
}
if d.replay_events != nil {
d.event = d.replay_events[0]
if len(d.replay_events) == 1 {
d.replay_events = nil
} else {
d.replay_events = d.replay_events[1:]
}
} else {
if !yaml_parser_parse(&d.parser, &d.event) {
yaml_event_delete(&d.event)
d.error(&ParserError{
ErrorType: d.parser.error,
Context: d.parser.context,
ContextMark: d.parser.context_mark,
Problem: d.parser.problem,
ProblemMark: d.parser.problem_mark,
})
}
}
last := len(d.tracking_anchors)
// skip aliases when tracking an anchor
if last > 0 && d.event.event_type != yaml_ALIAS_EVENT {
d.tracking_anchors[last-1] = append(d.tracking_anchors[last-1], d.event)
}
}
func (d *Decoder) document(rv reflect.Value) {
if d.event.event_type != yaml_DOCUMENT_START_EVENT {
d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark))
}
d.nextEvent()
d.parse(rv)
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.error(fmt.Errorf("Expected document end at %s", d.event.start_mark))
}
d.nextEvent()
}
func (d *Decoder) parse(rv reflect.Value) {
if !rv.IsValid() {
// skip ahead since we cannot store
d.valueInterface()
return
}
anchor := string(d.event.anchor)
switch d.event.event_type {
case yaml_SEQUENCE_START_EVENT:
d.begin_anchor(anchor)
d.sequence(rv)
d.end_anchor(anchor)
case yaml_MAPPING_START_EVENT:
d.begin_anchor(anchor)
d.mapping(rv)
d.end_anchor(anchor)
case yaml_SCALAR_EVENT:
d.begin_anchor(anchor)
d.scalar(rv)
d.end_anchor(anchor)
case yaml_ALIAS_EVENT:
d.alias(rv)
case yaml_DOCUMENT_END_EVENT:
default:
d.error(&UnexpectedEventError{
Value: string(d.event.value),
EventType: d.event.event_type,
At: d.event.start_mark,
})
}
}
func (d *Decoder) begin_anchor(anchor string) {
if anchor != "" {
events := []yaml_event_t{d.event}
d.tracking_anchors = append(d.tracking_anchors, events)
}
}
func (d *Decoder) end_anchor(anchor string) {
if anchor != "" {
events := d.tracking_anchors[len(d.tracking_anchors)-1]
d.tracking_anchors = d.tracking_anchors[0 : len(d.tracking_anchors)-1]
// remove the anchor; replayed events shouldn't carry anchors
events[0].anchor = nil
// we went one too many, remove the extra event
events = events[:len(events)-1]
// if nested, append to all the other anchors
for i, e := range d.tracking_anchors {
d.tracking_anchors[i] = append(e, events...)
}
d.anchors[anchor] = events
}
}
func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
// If v is a named type and is addressable,
// start with its address, so that if the type has pointer methods,
// we find them.
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
v = v.Addr()
}
for {
// Load value from interface, but only if the result will be
// usefully addressable.
if v.Kind() == reflect.Interface && !v.IsNil() {
e := v.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
v = e
continue
}
}
if v.Kind() != reflect.Ptr {
break
}
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
break
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(Unmarshaler); ok {
var temp interface{}
return u, reflect.ValueOf(&temp)
}
}
v = v.Elem()
}
return nil, v
}
func (d *Decoder) sequence(v reflect.Value) {
if d.event.event_type != yaml_SEQUENCE_START_EVENT {
d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark))
}
u, pv := d.indirect(v, false)
if u != nil {
defer func() {
if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil {
d.error(err)
}
}()
_, pv = d.indirect(pv, false)
}
v = pv
// Check type of target.
switch v.Kind() {
case reflect.Interface:
if v.NumMethod() == 0 {
// Decoding into nil interface? Switch to non-reflect code.
v.Set(reflect.ValueOf(d.sequenceInterface()))
return
}
// Otherwise it's invalid.
fallthrough
default:
d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark))
case reflect.Array:
case reflect.Slice:
break
}
d.nextEvent()
i := 0
done:
for {
switch d.event.event_type {
case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
break done
}
// Get element of array, growing if necessary.
if v.Kind() == reflect.Slice {
// Grow slice if necessary
if i >= v.Cap() {
newcap := v.Cap() + v.Cap()/2
if newcap < 4 {
newcap = 4
}
newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
reflect.Copy(newv, v)
v.Set(newv)
}
if i >= v.Len() {
v.SetLen(i + 1)
}
}
if i < v.Len() {
// Decode into element.
d.parse(v.Index(i))
} else {
// Ran out of fixed array: skip.
d.parse(reflect.Value{})
}
i++
}
if i < v.Len() {
if v.Kind() == reflect.Array {
// Array. Zero the rest.
z := reflect.Zero(v.Type().Elem())
for ; i < v.Len(); i++ {
v.Index(i).Set(z)
}
} else {
v.SetLen(i)
}
}
if i == 0 && v.Kind() == reflect.Slice {
v.Set(reflect.MakeSlice(v.Type(), 0, 0))
}
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.nextEvent()
}
}
func (d *Decoder) mapping(v reflect.Value) {
u, pv := d.indirect(v, false)
if u != nil {
defer func() {
if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil {
d.error(err)
}
}()
_, pv = d.indirect(pv, false)
}
v = pv
// Decoding into nil interface? Switch to non-reflect code.
if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
v.Set(reflect.ValueOf(d.mappingInterface()))
return
}
// Check type of target: struct or map[X]Y
switch v.Kind() {
case reflect.Struct:
d.mappingStruct(v)
return
case reflect.Map:
default:
d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark))
}
mapt := v.Type()
if v.IsNil() {
v.Set(reflect.MakeMap(mapt))
}
d.nextEvent()
keyt := mapt.Key()
mapElemt := mapt.Elem()
var mapElem reflect.Value
done:
for {
switch d.event.event_type {
case yaml_MAPPING_END_EVENT:
break done
case yaml_DOCUMENT_END_EVENT:
return
}
key := reflect.New(keyt)
d.parse(key.Elem())
if !mapElem.IsValid() {
mapElem = reflect.New(mapElemt).Elem()
} else {
mapElem.Set(reflect.Zero(mapElemt))
}
d.parse(mapElem)
v.SetMapIndex(key.Elem(), mapElem)
}
d.nextEvent()
}
func (d *Decoder) mappingStruct(v reflect.Value) {
structt := v.Type()
fields := cachedTypeFields(structt)
d.nextEvent()
done:
for {
switch d.event.event_type {
case yaml_MAPPING_END_EVENT:
break done
case yaml_DOCUMENT_END_EVENT:
return
}
key := ""
d.parse(reflect.ValueOf(&key))
// Figure out field corresponding to key.
var subv reflect.Value
var f *field
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv = v
for _, i := range f.index {
if subv.Kind() == reflect.Ptr {
if subv.IsNil() {
subv.Set(reflect.New(subv.Type().Elem()))
}
subv = subv.Elem()
}
subv = subv.Field(i)
}
}
d.parse(subv)
}
d.nextEvent()
}
func (d *Decoder) scalar(v reflect.Value) {
val := string(d.event.value)
wantptr := null_values[val]
u, pv := d.indirect(v, wantptr)
var tag string
if u != nil {
defer func() {
if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil {
d.error(err)
}
}()
_, pv = d.indirect(pv, wantptr)
}
v = pv
var err error
tag, err = resolve(d.event, v, d.useNumber)
if err != nil {
d.error(err)
}
d.nextEvent()
}
func (d *Decoder) alias(rv reflect.Value) {
val, ok := d.anchors[string(d.event.anchor)]
if !ok {
d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark))
}
d.replay_events = val
d.nextEvent()
d.parse(rv)
}
func (d *Decoder) valueInterface() interface{} {
var v interface{}
anchor := string(d.event.anchor)
switch d.event.event_type {
case yaml_SEQUENCE_START_EVENT:
d.begin_anchor(anchor)
v = d.sequenceInterface()
case yaml_MAPPING_START_EVENT:
d.begin_anchor(anchor)
v = d.mappingInterface()
case yaml_SCALAR_EVENT:
d.begin_anchor(anchor)
v = d.scalarInterface()
case yaml_ALIAS_EVENT:
rv := reflect.ValueOf(&v)
d.alias(rv)
return v
case yaml_DOCUMENT_END_EVENT:
d.error(&UnexpectedEventError{
Value: string(d.event.value),
EventType: d.event.event_type,
At: d.event.start_mark,
})
}
d.end_anchor(anchor)
return v
}
func (d *Decoder) scalarInterface() interface{} {
_, v := resolveInterface(d.event, d.useNumber)
d.nextEvent()
return v
}
// sequenceInterface is like sequence but returns []interface{}.
func (d *Decoder) sequenceInterface() []interface{} {
var v = make([]interface{}, 0)
d.nextEvent()
done:
for {
switch d.event.event_type {
case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
break done
}
v = append(v, d.valueInterface())
}
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.nextEvent()
}
return v
}
// mappingInterface is like mapping but returns map[interface{}]interface{}.
func (d *Decoder) mappingInterface() map[interface{}]interface{} {
m := make(map[interface{}]interface{})
d.nextEvent()
done:
for {
switch d.event.event_type {
case yaml_MAPPING_END_EVENT, yaml_DOCUMENT_END_EVENT:
break done
}
key := d.valueInterface()
// Read value.
m[key] = d.valueInterface()
}
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.nextEvent()
}
return m
}
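A usage sketch of the decoder defined above. It is not part of the vendored sources; the struct and the YAML literal are illustrative only.

package main

import (
    "bytes"
    "fmt"

    "github.com/cloudfoundry-incubator/candiedyaml"
)

type config struct {
    Name  string
    Ports []int
    Tags  []string
}

func main() {
    data := []byte("name: etcd\nports:\n- 2379\n- 2380\ntags:\n- kv\n- raft\n")

    // One-shot form; field names are matched case-insensitively by mappingStruct.
    var c config
    if err := candiedyaml.Unmarshal(data, &c); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", c)

    // Streaming form; UseNumber requests that numeric scalars be kept as
    // candiedyaml.Number when decoding into interface{} values.
    d := candiedyaml.NewDecoder(bytes.NewReader(data))
    d.UseNumber()
    var generic interface{}
    if err := d.Decode(&generic); err != nil {
        panic(err)
    }
    fmt.Println(generic)
}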

File diff suppressed because it is too large


@ -0,0 +1,395 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"encoding/base64"
"io"
"math"
"reflect"
"regexp"
"sort"
"strconv"
"time"
)
var (
timeTimeType = reflect.TypeOf(time.Time{})
marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
numberType = reflect.TypeOf(Number(""))
nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]")
multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029")
shortTags = map[string]string{
yaml_NULL_TAG: "!!null",
yaml_BOOL_TAG: "!!bool",
yaml_STR_TAG: "!!str",
yaml_INT_TAG: "!!int",
yaml_FLOAT_TAG: "!!float",
yaml_TIMESTAMP_TAG: "!!timestamp",
yaml_SEQ_TAG: "!!seq",
yaml_MAP_TAG: "!!map",
yaml_BINARY_TAG: "!!binary",
}
)
type Marshaler interface {
MarshalYAML() (tag string, value interface{}, err error)
}
// An Encoder writes YAML documents to an output stream.
type Encoder struct {
w io.Writer
emitter yaml_emitter_t
event yaml_event_t
flow bool
err error
}
func Marshal(v interface{}) ([]byte, error) {
b := bytes.Buffer{}
e := NewEncoder(&b)
err := e.Encode(v)
return b.Bytes(), err
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
e := &Encoder{w: w}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, e.w)
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
return e
}
func (e *Encoder) Encode(v interface{}) (err error) {
defer recovery(&err)
if e.err != nil {
return e.err
}
e.marshal("", reflect.ValueOf(v), true)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
e.emitter.open_ended = false
yaml_stream_end_event_initialize(&e.event)
e.emit()
return nil
}
func (e *Encoder) emit() {
if !yaml_emitter_emit(&e.emitter, &e.event) {
panic("bad emit")
}
}
func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) {
vt := v.Type()
if vt.Implements(marshalerType) {
e.emitMarshaler(tag, v)
return
}
if vt.Kind() != reflect.Ptr && allowAddr {
if reflect.PtrTo(vt).Implements(marshalerType) {
e.emitAddrMarshaler(tag, v)
return
}
}
switch v.Kind() {
case reflect.Interface:
if v.IsNil() {
e.emitNil()
} else {
e.marshal(tag, v.Elem(), allowAddr)
}
case reflect.Map:
e.emitMap(tag, v)
case reflect.Ptr:
if v.IsNil() {
e.emitNil()
} else {
e.marshal(tag, v.Elem(), true)
}
case reflect.Struct:
e.emitStruct(tag, v)
case reflect.Slice:
e.emitSlice(tag, v)
case reflect.String:
e.emitString(tag, v)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
e.emitInt(tag, v)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.emitUint(tag, v)
case reflect.Float32, reflect.Float64:
e.emitFloat(tag, v)
case reflect.Bool:
e.emitBool(tag, v)
default:
panic("Can't marshal type yet: " + v.Type().String())
}
}
func (e *Encoder) emitMap(tag string, v reflect.Value) {
e.mapping(tag, func() {
var keys stringValues = v.MapKeys()
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k, true)
e.marshal("", v.MapIndex(k), true)
}
})
}
func (e *Encoder) emitStruct(tag string, v reflect.Value) {
if v.Type() == timeTimeType {
e.emitTime(tag, v)
return
}
fields := cachedTypeFields(v.Type())
e.mapping(tag, func() {
for _, f := range fields {
fv := fieldByIndex(v, f.index)
if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
continue
}
e.marshal("", reflect.ValueOf(f.name), true)
e.flow = f.flow
e.marshal("", fv, true)
}
})
}
func (e *Encoder) emitTime(tag string, v reflect.Value) {
t := v.Interface().(time.Time)
bytes, _ := t.MarshalText()
e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func (e *Encoder) mapping(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
func (e *Encoder) emitSlice(tag string, v reflect.Value) {
if v.Type() == byteSliceType {
e.emitBase64(tag, v)
return
}
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
n := v.Len()
for i := 0; i < n; i++ {
e.marshal("", v.Index(i), true)
}
yaml_sequence_end_event_initialize(&e.event)
e.emit()
}
func (e *Encoder) emitBase64(tag string, v reflect.Value) {
if v.IsNil() {
e.emitNil()
return
}
s := v.Bytes()
dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
base64.StdEncoding.Encode(dst, s)
e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE)
}
func (e *Encoder) emitString(tag string, v reflect.Value) {
var style yaml_scalar_style_t
s := v.String()
if nonPrintable.MatchString(s) {
e.emitBase64(tag, v)
return
}
if v.Type() == numberType {
style = yaml_PLAIN_SCALAR_STYLE
} else {
event := yaml_event_t{
implicit: true,
value: []byte(s),
}
rtag, _ := resolveInterface(event, false)
if tag == "" && rtag != yaml_STR_TAG {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if multiline.MatchString(s) {
style = yaml_LITERAL_SCALAR_STYLE
} else {
style = yaml_PLAIN_SCALAR_STYLE
}
}
e.emitScalar(s, "", tag, style)
}
func (e *Encoder) emitBool(tag string, v reflect.Value) {
s := strconv.FormatBool(v.Bool())
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitInt(tag string, v reflect.Value) {
s := strconv.FormatInt(v.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitUint(tag string, v reflect.Value) {
s := strconv.FormatUint(v.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitFloat(tag string, v reflect.Value) {
f := v.Float()
var s string
switch {
case math.IsNaN(f):
s = ".nan"
case math.IsInf(f, 1):
s = "+.inf"
case math.IsInf(f, -1):
s = "-.inf"
default:
s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits())
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitNil() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
if !implicit {
style = yaml_PLAIN_SCALAR_STYLE
}
stag := shortTags[tag]
if stag == "" {
stag = tag
}
yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style)
e.emit()
}
func (e *Encoder) emitMarshaler(tag string, v reflect.Value) {
if v.Kind() == reflect.Ptr && v.IsNil() {
e.emitNil()
return
}
m := v.Interface().(Marshaler)
if m == nil {
e.emitNil()
return
}
t, val, err := m.MarshalYAML()
if err != nil {
panic(err)
}
if val == nil {
e.emitNil()
return
}
e.marshal(t, reflect.ValueOf(val), false)
}
func (e *Encoder) emitAddrMarshaler(tag string, v reflect.Value) {
if !v.CanAddr() {
e.marshal(tag, v, false)
return
}
va := v.Addr()
if va.IsNil() {
e.emitNil()
return
}
m := v.Interface().(Marshaler)
t, val, err := m.MarshalYAML()
if err != nil {
panic(err)
}
if val == nil {
e.emitNil()
return
}
e.marshal(t, reflect.ValueOf(val), false)
}
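// Illustrative sketch of a custom marshaler (assuming, from the calls above,
// that the package's Marshaler interface is MarshalYAML() (string, interface{}, error),
// returning tag, value, error):
//
//	type Celsius float64
//
//	func (c Celsius) MarshalYAML() (string, interface{}, error) {
//		// Emit the temperature as a plain string scalar with no explicit tag.
//		return "", strconv.FormatFloat(float64(c), 'f', 1, 64) + "C", nil
//	}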

File diff suppressed because it is too large


@ -0,0 +1,465 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"io"
)
/*
* Set the reader error and return 0.
*/
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string,
offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
/*
* Byte order marks.
*/
const (
BOM_UTF8 = "\xef\xbb\xbf"
BOM_UTF16LE = "\xff\xfe"
BOM_UTF16BE = "\xfe\xff"
)
/*
* Determine the input stream encoding by checking the BOM symbol. If no BOM is
* found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
*/
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
/* Ensure that we had enough bytes in the raw buffer. */
for !parser.eof &&
len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
/* Determine the encoding. */
raw := parser.raw_buffer
pos := parser.raw_buffer_pos
remaining := len(raw) - pos
if remaining >= 2 &&
raw[pos] == BOM_UTF16LE[0] && raw[pos+1] == BOM_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if remaining >= 2 &&
raw[pos] == BOM_UTF16BE[0] && raw[pos+1] == BOM_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if remaining >= 3 &&
raw[pos] == BOM_UTF8[0] && raw[pos+1] == BOM_UTF8[1] && raw[pos+2] == BOM_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
/*
* Update the raw buffer.
*/
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
/* Return if the raw buffer is full. */
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
/* Return on EOF. */
if parser.eof {
return true
}
/* Move the remaining bytes in the raw buffer to the beginning. */
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
/* Call the read handler to fill the buffer. */
size_read, err := parser.read_handler(parser,
parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(),
parser.offset, -1)
}
return true
}
/*
* Ensure that the buffer contains at least `length` characters.
* Return 1 on success, 0 on failure.
*
 * The length is supposed to be significantly less than the buffer size.
*/
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
/* Read handler must be set. */
if parser.read_handler == nil {
panic("read handler must be set")
}
/* If the EOF flag is set and the raw buffer is empty, do nothing. */
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
}
/* Return if the buffer contains enough characters. */
if parser.unread >= length {
return true
}
/* Determine the input encoding if it is not known yet. */
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
/* Move the unread characters to the beginning of the buffer. */
buffer_end := len(parser.buffer)
if 0 < parser.buffer_pos &&
parser.buffer_pos < buffer_end {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_end -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_end {
buffer_end = 0
parser.buffer_pos = 0
}
parser.buffer = parser.buffer[:cap(parser.buffer)]
/* Fill the buffer until it has enough characters. */
first := true
for parser.unread < length {
/* Fill the raw buffer if necessary. */
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_end]
return false
}
}
first = false
/* Decode the raw buffer. */
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var w int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
incomplete := false
/* Decode the next character. */
switch parser.encoding {
case yaml_UTF8_ENCODING:
/*
* Decode a UTF-8 character. Check RFC 3629
* (http://www.ietf.org/rfc/rfc3629.txt) for more details.
*
* The following table (taken from the RFC) is used for
* decoding.
*
* Char. number range | UTF-8 octet sequence
* (hexadecimal) | (binary)
* --------------------+------------------------------------
* 0000 0000-0000 007F | 0xxxxxxx
* 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
* 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
* 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
*
* Additionally, the characters in the range 0xD800-0xDFFF
* are prohibited as they are reserved for use with UTF-16
* surrogate pairs.
*/
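/*
 * Worked example (illustrative): the raw bytes 0xC3 0xA9 encode U+00E9 ('é').
 * The leading octet 0xC3 matches 110xxxxx, so w = 2 and the initial value is
 * 0xC3 & 0x1F = 0x03; the trailing octet contributes 0xA9 & 0x3F = 0x29,
 * giving (0x03 << 6) + 0x29 = 0xE9.
 */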
/* Determine the length of the UTF-8 sequence. */
octet := parser.raw_buffer[parser.raw_buffer_pos]
w = width(octet)
/* Check if the leading octet is valid. */
if w == 0 {
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
/* Check if the raw buffer contains an incomplete character. */
if w > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
incomplete = true
break
}
/* Decode the leading octet. */
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
/* Check and decode the trailing octets. */
for k := 1; k < w; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
/* Check if the octet is valid. */
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
/* Decode the octet. */
value = (value << 6) + rune(octet&0x3F)
}
/* Check the length of the sequence against the value. */
switch {
case w == 1:
case w == 2 && value >= 0x80:
case w == 3 && value >= 0x800:
case w == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
/* Check the range of the value. */
if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING,
yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
/*
* The UTF-16 encoding is not as simple as one might
* naively think. Check RFC 2781
* (http://www.ietf.org/rfc/rfc2781.txt).
*
* Normally, two subsequent bytes describe a Unicode
* character. However a special technique (called a
* surrogate pair) is used for specifying character
* values larger than 0xFFFF.
*
* A surrogate pair consists of two pseudo-characters:
* high surrogate area (0xD800-0xDBFF)
* low surrogate area (0xDC00-0xDFFF)
*
* The following formulas are used for decoding
* and encoding characters using surrogate pairs:
*
* U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
* U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
* W1 = 110110yyyyyyyyyy
* W2 = 110111xxxxxxxxxx
*
* where U is the character value, W1 is the high surrogate
* area, W2 is the low surrogate area.
*/
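/*
 * Worked example (illustrative): U+1F600 has U' = 0xF600, so
 * W1 = 0xD800 + (0xF600 >> 10) = 0xD83D and
 * W2 = 0xDC00 + (0xF600 & 0x3FF) = 0xDE00.
 * The decoding below reverses this:
 * 0x10000 + ((0xD83D & 0x3FF) << 10) + (0xDE00 & 0x3FF) = 0x1F600.
 */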
/* Check for incomplete UTF-16 character. */
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
incomplete = true
break
}
/* Get the character. */
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
/* Check for unexpected low surrogate area. */
if (value & 0xFC00) == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
/* Check for a high surrogate area. */
if (value & 0xFC00) == 0xD800 {
w = 4
/* Check for incomplete surrogate pair. */
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
incomplete = true
break
}
/* Get the next character. */
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
/* Check for a low surrogate area. */
if (value2 & 0xFC00) != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
/* Generate the value of the surrogate pair. */
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
w = 2
}
break
default:
panic("Impossible") /* Impossible. */
}
/* Check if the raw buffer contains enough bytes to form a character. */
if incomplete {
break
}
/*
* Check if the character is in the allowed range:
* #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
* | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
* | [#x10000-#x10FFFF] (32 bit)
*/
if !(value == 0x09 || value == 0x0A || value == 0x0D ||
(value >= 0x20 && value <= 0x7E) ||
(value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) ||
(value >= 0xE000 && value <= 0xFFFD) ||
(value >= 0x10000 && value <= 0x10FFFF)) {
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
/* Move the raw pointers. */
parser.raw_buffer_pos += w
parser.offset += w
/* Finally put the character into the buffer. */
/* 0000 0000-0000 007F . 0xxxxxxx */
if value <= 0x7F {
parser.buffer[buffer_end] = byte(value)
} else if value <= 0x7FF {
/* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F))
} else if value <= 0xFFFF {
/* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F))
} else {
/* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F))
}
buffer_end += w
parser.unread++
}
/* On EOF, put NUL into the buffer and return. */
if parser.eof {
parser.buffer[buffer_end] = 0
buffer_end++
parser.buffer = parser.buffer[:buffer_end]
parser.unread++
return true
}
}
parser.buffer = parser.buffer[:buffer_end]
return true
}


@ -0,0 +1,449 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"encoding/base64"
"fmt"
"math"
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
var byteSliceType = reflect.TypeOf([]byte(nil))
var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)}
var bool_values map[string]bool
var null_values map[string]bool
var signs = []byte{'-', '+'}
var nulls = []byte{'~', 'n', 'N'}
var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'}
var timestamp_regexp *regexp.Regexp
var ymd_regexp *regexp.Regexp
func init() {
bool_values = make(map[string]bool)
bool_values["y"] = true
bool_values["yes"] = true
bool_values["n"] = false
bool_values["no"] = false
bool_values["true"] = true
bool_values["false"] = false
bool_values["on"] = true
bool_values["off"] = false
null_values = make(map[string]bool)
null_values["~"] = true
null_values["null"] = true
null_values["Null"] = true
null_values["NULL"] = true
timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$")
ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$")
}
func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) {
val := string(event.value)
if null_values[val] {
v.Set(reflect.Zero(v.Type()))
return yaml_NULL_TAG, nil
}
switch v.Kind() {
case reflect.String:
if useNumber && v.Type() == numberType {
tag, i := resolveInterface(event, useNumber)
if n, ok := i.(Number); ok {
v.Set(reflect.ValueOf(n))
return tag, nil
}
return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark)
}
return resolve_string(val, v, event)
case reflect.Bool:
return resolve_bool(val, v, event)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return resolve_int(val, v, useNumber, event)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return resolve_uint(val, v, useNumber, event)
case reflect.Float32, reflect.Float64:
return resolve_float(val, v, useNumber, event)
case reflect.Interface:
_, i := resolveInterface(event, useNumber)
if i != nil {
v.Set(reflect.ValueOf(i))
} else {
v.Set(reflect.Zero(v.Type()))
}
case reflect.Struct:
return resolve_time(val, v, event)
case reflect.Slice:
if v.Type() != byteSliceType {
return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark)
}
b, err := decode_binary(event.value, event)
if err != nil {
return "", err
}
v.Set(reflect.ValueOf(b))
default:
return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark)
}
return yaml_STR_TAG, nil
}
func hasBinaryTag(event yaml_event_t) bool {
for _, tag := range binary_tags {
if bytes.Equal(event.tag, tag) {
return true
}
}
return false
}
func decode_binary(value []byte, event yaml_event_t) ([]byte, error) {
b := make([]byte, base64.StdEncoding.DecodedLen(len(value)))
n, err := base64.StdEncoding.Decode(b, value)
if err != nil {
return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark)
}
return b[:n], nil
}
func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) {
if len(event.tag) > 0 {
if hasBinaryTag(event) {
b, err := decode_binary(event.value, event)
if err != nil {
return "", err
}
val = string(b)
}
}
v.SetString(val)
return yaml_STR_TAG, nil
}
func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) {
b, found := bool_values[strings.ToLower(val)]
if !found {
return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark)
}
v.SetBool(b)
return yaml_BOOL_TAG, nil
}
func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
original := val
val = strings.Replace(val, "_", "", -1)
var value uint64
isNumberValue := v.Type() == numberType
sign := int64(1)
if val[0] == '-' {
sign = -1
val = val[1:]
} else if val[0] == '+' {
val = val[1:]
}
base := 0
if val == "0" {
if isNumberValue {
v.SetString("0")
} else {
v.Set(reflect.Zero(v.Type()))
}
return yaml_INT_TAG, nil
}
if strings.HasPrefix(val, "0o") {
base = 8
val = val[2:]
}
value, err := strconv.ParseUint(val, base, 64)
if err != nil {
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
}
var val64 int64
if value <= math.MaxInt64 {
val64 = int64(value)
if sign == -1 {
val64 = -val64
}
} else if sign == -1 && value == uint64(math.MaxInt64)+1 {
val64 = math.MinInt64
} else {
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
}
if isNumberValue {
v.SetString(strconv.FormatInt(val64, 10))
} else {
if v.OverflowInt(val64) {
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
}
v.SetInt(val64)
}
return yaml_INT_TAG, nil
}
func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
original := val
val = strings.Replace(val, "_", "", -1)
var value uint64
isNumberValue := v.Type() == numberType
if val[0] == '-' {
return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark)
}
if val[0] == '+' {
val = val[1:]
}
base := 0
if val == "0" {
if isNumberValue {
v.SetString("0")
} else {
v.Set(reflect.Zero(v.Type()))
}
return yaml_INT_TAG, nil
}
if strings.HasPrefix(val, "0o") {
base = 8
val = val[2:]
}
value, err := strconv.ParseUint(val, base, 64)
if err != nil {
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
}
if isNumberValue {
v.SetString(strconv.FormatUint(value, 10))
} else {
if v.OverflowUint(value) {
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
}
v.SetUint(value)
}
return yaml_INT_TAG, nil
}
func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
val = strings.Replace(val, "_", "", -1)
var value float64
isNumberValue := v.Type() == numberType
typeBits := 64
if !isNumberValue {
typeBits = v.Type().Bits()
}
sign := 1
if val[0] == '-' {
sign = -1
val = val[1:]
} else if val[0] == '+' {
val = val[1:]
}
valLower := strings.ToLower(val)
if valLower == ".inf" {
value = math.Inf(sign)
} else if valLower == ".nan" {
value = math.NaN()
} else {
var err error
value, err = strconv.ParseFloat(val, typeBits)
value *= float64(sign)
if err != nil {
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
}
}
if isNumberValue {
v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits))
} else {
if v.OverflowFloat(value) {
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
}
v.SetFloat(value)
}
return yaml_FLOAT_TAG, nil
}
func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) {
var parsedTime time.Time
matches := ymd_regexp.FindStringSubmatch(val)
if len(matches) > 0 {
year, _ := strconv.Atoi(matches[1])
month, _ := strconv.Atoi(matches[2])
day, _ := strconv.Atoi(matches[3])
parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
} else {
matches = timestamp_regexp.FindStringSubmatch(val)
if len(matches) == 0 {
return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark)
}
year, _ := strconv.Atoi(matches[1])
month, _ := strconv.Atoi(matches[2])
day, _ := strconv.Atoi(matches[3])
hour, _ := strconv.Atoi(matches[4])
min, _ := strconv.Atoi(matches[5])
sec, _ := strconv.Atoi(matches[6])
nsec := 0
if matches[7] != "" {
millis, _ := strconv.Atoi(matches[7])
nsec = int(time.Duration(millis) * time.Millisecond)
}
loc := time.UTC
if matches[8] != "" {
sign := matches[8][0]
hr, _ := strconv.Atoi(matches[8][1:])
min := 0
if matches[9] != "" {
min, _ = strconv.Atoi(matches[9])
}
zoneOffset := (hr*60 + min) * 60
if sign == '-' {
zoneOffset = -zoneOffset
}
loc = time.FixedZone("", zoneOffset)
}
parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc)
}
v.Set(reflect.ValueOf(parsedTime))
return "", nil
}
func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) {
val := string(event.value)
if len(event.tag) == 0 && !event.implicit {
return "", val
}
if len(val) == 0 {
return yaml_NULL_TAG, nil
}
var result interface{}
sign := false
c := val[0]
switch {
case bytes.IndexByte(signs, c) != -1:
sign = true
fallthrough
case c >= '0' && c <= '9':
i := int64(0)
result = &i
if useNumber {
var n Number
result = &n
}
v := reflect.ValueOf(result).Elem()
if _, err := resolve_int(val, v, useNumber, event); err == nil {
return yaml_INT_TAG, v.Interface()
}
f := float64(0)
result = &f
if useNumber {
var n Number
result = &n
}
v = reflect.ValueOf(result).Elem()
if _, err := resolve_float(val, v, useNumber, event); err == nil {
return yaml_FLOAT_TAG, v.Interface()
}
if !sign {
t := time.Time{}
if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil {
return "", t
}
}
case bytes.IndexByte(nulls, c) != -1:
if null_values[val] {
return yaml_NULL_TAG, nil
}
b := false
if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
return yaml_BOOL_TAG, b
}
case c == '.':
f := float64(0)
result = &f
if useNumber {
var n Number
result = &n
}
v := reflect.ValueOf(result).Elem()
if _, err := resolve_float(val, v, useNumber, event); err == nil {
return yaml_FLOAT_TAG, v.Interface()
}
case bytes.IndexByte(bools, c) != -1:
b := false
if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
return yaml_BOOL_TAG, b
}
}
if hasBinaryTag(event) {
bytes, err := decode_binary(event.value, event)
if err == nil {
return yaml_BINARY_TAG, bytes
}
}
return yaml_STR_TAG, val
}
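// Illustrative results (not part of the vendored source) for plain, implicitly
// tagged scalars resolved through resolveInterface with useNumber == false:
//
//	"yes"   -> yaml_BOOL_TAG,  true
//	"0x1F"  -> yaml_INT_TAG,   int64(31)
//	".5"    -> yaml_FLOAT_TAG, 0.5
//	"~"     -> yaml_NULL_TAG,  nil
//	"hello" -> yaml_STR_TAG,   "hello"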


@ -0,0 +1,62 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"fmt"
"os"
)
func Run_parser(cmd string, args []string) {
for i := 0; i < len(args); i++ {
fmt.Printf("[%d] Scanning '%s'", i, args[i])
file, err := os.Open(args[i])
if err != nil {
panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error()))
}
parser := yaml_parser_t{}
yaml_parser_initialize(&parser)
yaml_parser_set_input_reader(&parser, file)
failed := false
token := yaml_token_t{}
count := 0
for {
if !yaml_parser_scan(&parser, &token) {
failed = true
break
}
if token.token_type == yaml_STREAM_END_TOKEN {
break
}
count++
}
file.Close()
msg := "SUCCESS"
if failed {
msg = "FAILED"
if parser.error != yaml_NO_ERROR {
m := parser.problem_mark
fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n",
parser.context, parser.problem, m.line, m.column)
}
}
fmt.Printf("%s (%d tokens)\n", msg, count)
}
}
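// Illustrative invocation (not part of the vendored source; the cmd argument
// is unused by the function and the file names are hypothetical):
//
//	Run_parser("parse", []string{"a.yml", "b.yml"})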

File diff suppressed because it is too large


@ -0,0 +1,360 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"reflect"
"sort"
"strings"
"sync"
"unicode"
)
// A field represents a single field found in a struct.
type field struct {
name string
tag bool
index []int
typ reflect.Type
omitEmpty bool
flow bool
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from yaml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that YAML should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
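// For example (illustrative), given
//
//	type Inner struct {
//		B int `yaml:"b"`
//	}
//	type Outer struct {
//		A int
//		Inner
//	}
//
// typeFields reports a field named "A" (index [0]) and, through the embedded
// Inner, a field named "b" (index [1 0]).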
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
tag := sf.Tag.Get("yaml")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft,
opts.Contains("omitempty"), opts.Contains("flow")})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, field{name: ft.Name(), index: index, typ: ft})
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with YAML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// YAML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
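// For example (illustrative): if two fields resolve to the same name at the
// same depth, a tagged field dominates an untagged one; two tagged (or two
// untagged) fields at that depth are a conflict and the name is dropped.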
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
// tagOptions is the string following a comma in a struct field's "yaml"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
return true
}
func fieldByIndex(v reflect.Value, index []int) reflect.Value {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}
}
v = v.Elem()
}
v = v.Field(i)
}
return v
}
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
// stringValues is a slice of reflect.Value holding map keys, typically strings.
// It implements the methods to sort by string.
type stringValues []reflect.Value
func (sv stringValues) Len() int { return len(sv) }
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
func (sv stringValues) Less(i, j int) bool {
av, ak := getElem(sv[i])
bv, bk := getElem(sv[j])
if ak == reflect.String && bk == reflect.String {
return av.String() < bv.String()
}
return ak < bk
}
func getElem(v reflect.Value) (reflect.Value, reflect.Kind) {
k := v.Kind()
for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() {
v = v.Elem()
k = v.Kind()
}
return v, k
}
// parseTag splits a struct field's yaml tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains the given option name. The name must appear whole,
// delimited by commas or the string boundaries.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
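// Illustrative examples (not part of the vendored source):
//
//	name, opts := parseTag("b,omitempty,flow") // name == "b", opts == "omitempty,flow"
//	opts.Contains("omitempty")                 // true
//	opts.Contains("omit")                      // false: only whole option names match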


@ -0,0 +1,128 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
/*
* Set the writer error and return 0.
*/
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
/*
* Flush the output buffer.
*/
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("Write handler must be set") /* Write handler must be set. */
}
if emitter.encoding == yaml_ANY_ENCODING {
panic("Encoding must be set") /* Output encoding must be set. */
}
/* Check if the buffer is empty. */
if emitter.buffer_pos == 0 {
return true
}
/* If the output encoding is UTF-8, we don't need to recode the buffer. */
if emitter.encoding == yaml_UTF8_ENCODING {
if err := emitter.write_handler(emitter,
emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
/* Recode the buffer into the raw buffer. */
var low, high int
if emitter.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
pos := 0
for pos < emitter.buffer_pos {
/*
* See the "reader.c" code for more details on UTF-8 encoding. Note
* that we assume that the buffer contains a valid UTF-8 sequence.
*/
/* Read the next UTF-8 character. */
octet := emitter.buffer[pos]
var w int
var value rune
switch {
case octet&0x80 == 0x00:
w, value = 1, rune(octet&0x7F)
case octet&0xE0 == 0xC0:
w, value = 2, rune(octet&0x1F)
case octet&0xF0 == 0xE0:
w, value = 3, rune(octet&0x0F)
case octet&0xF8 == 0xF0:
w, value = 4, rune(octet&0x07)
}
for k := 1; k < w; k++ {
octet = emitter.buffer[pos+k]
value = (value << 6) + (rune(octet) & 0x3F)
}
pos += w
/* Write the character. */
if value < 0x10000 {
var b [2]byte
b[high] = byte(value >> 8)
b[low] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
} else {
/* Write the character using a surrogate pair (check "reader.c"). */
var b [4]byte
value -= 0x10000
b[high] = byte(0xD8 + (value >> 18))
b[low] = byte((value >> 10) & 0xFF)
b[high+2] = byte(0xDC + ((value >> 8) & 0x3)) // low surrogate high byte: 0xDC | bits 8-9 of value
b[low+2] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
}
}
/* Write the raw buffer. */
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}


@ -0,0 +1,22 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
const (
yaml_VERSION_MAJOR = 0
yaml_VERSION_MINOR = 1
yaml_VERSION_PATCH = 6
yaml_VERSION_STRING = "0.1.6"
)


@ -0,0 +1,891 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
const (
INPUT_RAW_BUFFER_SIZE = 1024
/*
* The size of the input buffer.
*
* It should be possible to decode the whole raw buffer.
*/
INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3)
/*
* The size of the output buffer.
*/
OUTPUT_BUFFER_SIZE = 512
/*
* The size of the output raw buffer.
*
* It should be possible to encode the whole output buffer.
*/
OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2)
INITIAL_STACK_SIZE = 16
INITIAL_QUEUE_SIZE = 16
)
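// Rough sizing rationale (informal reading, not stated in the original source):
// a UTF-16 code unit (2 raw bytes) decodes to at most 3 bytes of UTF-8, so a
// decoded buffer 3x the raw size always has room; in the other direction an
// ASCII byte re-encodes to 2 bytes of UTF-16, hence the 2x (+2 of slack) raw
// output buffer.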
func width(b byte) int {
if b&0x80 == 0 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) {
w := width(src[*src_pos])
switch w {
case 4:
dest[*dest_pos+3] = src[*src_pos+3]
fallthrough
case 3:
dest[*dest_pos+2] = src[*src_pos+2]
fallthrough
case 2:
dest[*dest_pos+1] = src[*src_pos+1]
fallthrough
case 1:
dest[*dest_pos] = src[*src_pos]
default:
panic("invalid width")
}
*dest_pos += w
*src_pos += w
}
// /*
// * Check if the character at the specified position is an alphabetical
// * character, a digit, '_', or '-'.
// */
func is_alpha(b byte) bool {
return (b >= '0' && b <= '9') ||
(b >= 'A' && b <= 'Z') ||
(b >= 'a' && b <= 'z') ||
b == '_' || b == '-'
}
// /*
// * Check if the character at the specified position is a digit.
// */
//
func is_digit(b byte) bool {
return b >= '0' && b <= '9'
}
// /*
// * Get the value of a digit.
// */
//
func as_digit(b byte) int {
return int(b) - '0'
}
// /*
// * Check if the character at the specified position is a hex-digit.
// */
//
func is_hex(b byte) bool {
return (b >= '0' && b <= '9') ||
(b >= 'A' && b <= 'F') ||
(b >= 'a' && b <= 'f')
}
//
// /*
// * Get the value of a hex-digit.
// */
//
func as_hex(b byte) int {
if b >= 'A' && b <= 'F' {
return int(b) - 'A' + 10
} else if b >= 'a' && b <= 'f' {
return int(b) - 'a' + 10
}
return int(b) - '0'
}
// #define AS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
// ((string).pointer[offset] - (yaml_char_t) '0'))
// /*
// * Check if the character is a line break, space, tab, or NUL.
// */
func is_blankz_at(b []byte, i int) bool {
return is_blank(b[i]) || is_breakz_at(b, i)
}
// /*
// * Check if the character at the specified position is a line break.
// */
func is_break_at(b []byte, i int) bool {
return b[i] == '\r' || /* CR (#xD)*/
b[i] == '\n' || /* LF (#xA) */
(b[i] == 0xC2 && b[i+1] == 0x85) || /* NEL (#x85) */
(b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8) || /* LS (#x2028) */
(b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) /* PS (#x2029) */
}
func is_breakz_at(b []byte, i int) bool {
return is_break_at(b, i) || is_z(b[i])
}
func is_crlf_at(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// /*
// * Check if the character at the specified position is NUL.
// */
func is_z(b byte) bool {
return b == 0x0
}
// /*
// * Check if the character at the specified position is space.
// */
func is_space(b byte) bool {
return b == ' '
}
//
// /*
// * Check if the character at the specified position is tab.
// */
func is_tab(b byte) bool {
return b == '\t'
}
// /*
// * Check if the character at the specified position is blank (space or tab).
// */
func is_blank(b byte) bool {
return is_space(b) || is_tab(b)
}
// /*
// * Check if the character is ASCII.
// */
func is_ascii(b byte) bool {
return b <= '\x7f'
}
// /*
// * Check if the character can be printed unescaped.
// */
func is_printable_at(b []byte, i int) bool {
return ((b[i] == 0x0A) || /* . == #x0A */
(b[i] >= 0x20 && b[i] <= 0x7E) || /* #x20 <= . <= #x7E */
(b[i] == 0xC2 && b[i+1] >= 0xA0) || /* #0xA0 <= . <= #xD7FF */
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && /* && . != #xFEFF */
!(b[i+1] == 0xBB && b[i+2] == 0xBF) &&
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
// collapse the slice
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
// move the tokens down
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
// readjust the length
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// /*
// * Check if the character at the specified position is BOM.
// */
//
func is_bom_at(b []byte, i int) bool {
return b[i] == 0xEF && b[i+1] == 0xBB && b[i+2] == 0xBF
}
//
// #ifdef HAVE_CONFIG_H
// #include <config.h>
// #endif
//
// #include "./yaml.h"
//
// #include <assert.h>
// #include <limits.h>
//
// /*
// * Memory management.
// */
//
// yaml_DECLARE(void *)
// yaml_malloc(size_t size);
//
// yaml_DECLARE(void *)
// yaml_realloc(void *ptr, size_t size);
//
// yaml_DECLARE(void)
// yaml_free(void *ptr);
//
// yaml_DECLARE(yaml_char_t *)
// yaml_strdup(const yaml_char_t *);
//
// /*
// * Reader: Ensure that the buffer contains at least `length` characters.
// */
//
// yaml_DECLARE(int)
// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length);
//
// /*
// * Scanner: Ensure that the token stack contains at least one token ready.
// */
//
// yaml_DECLARE(int)
// yaml_parser_fetch_more_tokens(yaml_parser_t *parser);
//
// /*
// * The size of the input raw buffer.
// */
//
// #define INPUT_RAW_BUFFER_SIZE 16384
//
// /*
// * The size of the input buffer.
// *
// * It should be possible to decode the whole raw buffer.
// */
//
// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3)
//
// /*
// * The size of the output buffer.
// */
//
// #define OUTPUT_BUFFER_SIZE 16384
//
// /*
// * The size of the output raw buffer.
// *
// * It should be possible to encode the whole output buffer.
// */
//
// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2)
//
// /*
// * The size of other stacks and queues.
// */
//
// #define INITIAL_STACK_SIZE 16
// #define INITIAL_QUEUE_SIZE 16
// #define INITIAL_STRING_SIZE 16
//
// /*
// * Buffer management.
// */
//
// #define BUFFER_INIT(context,buffer,size) \
// (((buffer).start = yaml_malloc(size)) ? \
// ((buffer).last = (buffer).pointer = (buffer).start, \
// (buffer).end = (buffer).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define BUFFER_DEL(context,buffer) \
// (yaml_free((buffer).start), \
// (buffer).start = (buffer).pointer = (buffer).end = 0)
//
// /*
// * String management.
// */
//
// typedef struct {
// yaml_char_t *start;
// yaml_char_t *end;
// yaml_char_t *pointer;
// } yaml_string_t;
//
// yaml_DECLARE(int)
// yaml_string_extend(yaml_char_t **start,
// yaml_char_t **pointer, yaml_char_t **end);
//
// yaml_DECLARE(int)
// yaml_string_join(
// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end,
// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end);
//
// #define NULL_STRING { NULL, NULL, NULL }
//
// #define STRING(string,length) { (string), (string)+(length), (string) }
//
// #define STRING_ASSIGN(value,string,length) \
// ((value).start = (string), \
// (value).end = (string)+(length), \
// (value).pointer = (string))
//
// #define STRING_INIT(context,string,size) \
// (((string).start = yaml_malloc(size)) ? \
// ((string).pointer = (string).start, \
// (string).end = (string).start+(size), \
// memset((string).start, 0, (size)), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define STRING_DEL(context,string) \
// (yaml_free((string).start), \
// (string).start = (string).pointer = (string).end = 0)
//
// #define STRING_EXTEND(context,string) \
// (((string).pointer+5 < (string).end) \
// || yaml_string_extend(&(string).start, \
// &(string).pointer, &(string).end))
//
// #define CLEAR(context,string) \
// ((string).pointer = (string).start, \
// memset((string).start, 0, (string).end-(string).start))
//
// #define JOIN(context,string_a,string_b) \
// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \
// &(string_a).end, &(string_b).start, \
// &(string_b).pointer, &(string_b).end)) ? \
// ((string_b).pointer = (string_b).start, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// /*
// * String check operations.
// */
//
// /*
// * Check the octet at the specified position.
// */
//
// #define CHECK_AT(string,octet,offset) \
// ((string).pointer[offset] == (yaml_char_t)(octet))
//
// /*
// * Check the current octet in the buffer.
// */
//
// #define CHECK(string,octet) CHECK_AT((string),(octet),0)
//
// /*
// * Check if the character at the specified position is an alphabetical
// * character, a digit, '_', or '-'.
// */
//
// #define IS_ALPHA_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9') || \
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'Z') || \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'z') || \
// (string).pointer[offset] == '_' || \
// (string).pointer[offset] == '-')
//
// #define IS_ALPHA(string) IS_ALPHA_AT((string),0)
//
// /*
// * Check if the character at the specified position is a digit.
// */
//
// #define IS_DIGIT_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9'))
//
// #define IS_DIGIT(string) IS_DIGIT_AT((string),0)
//
// /*
// * Get the value of a digit.
// */
//
// #define AS_DIGIT_AT(string,offset) \
// ((string).pointer[offset] - (yaml_char_t) '0')
//
// #define AS_DIGIT(string) AS_DIGIT_AT((string),0)
//
// /*
// * Check if the character at the specified position is a hex-digit.
// */
//
// #define IS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9') || \
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') || \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f'))
//
// #define IS_HEX(string) IS_HEX_AT((string),0)
//
// /*
// * Get the value of a hex-digit.
// */
//
// #define AS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
// ((string).pointer[offset] - (yaml_char_t) '0'))
//
// #define AS_HEX(string) AS_HEX_AT((string),0)
//
// /*
// * Check if the character is ASCII.
// */
//
// #define IS_ASCII_AT(string,offset) \
// ((string).pointer[offset] <= (yaml_char_t) '\x7F')
//
// #define IS_ASCII(string) IS_ASCII_AT((string),0)
//
// /*
// * Check if the character can be printed unescaped.
// */
//
// #define IS_PRINTABLE_AT(string,offset) \
// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \
// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \
// && (string).pointer[offset] <= 0x7E) \
// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \
// && (string).pointer[offset+1] >= 0xA0) \
// || ((string).pointer[offset] > 0xC2 \
// && (string).pointer[offset] < 0xED) \
// || ((string).pointer[offset] == 0xED \
// && (string).pointer[offset+1] < 0xA0) \
// || ((string).pointer[offset] == 0xEE) \
// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \
// && !((string).pointer[offset+1] == 0xBB /* && . != #xFEFF */ \
// && (string).pointer[offset+2] == 0xBF) \
// && !((string).pointer[offset+1] == 0xBF \
// && ((string).pointer[offset+2] == 0xBE \
// || (string).pointer[offset+2] == 0xBF))))
//
// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0)
//
// /*
// * Check if the character at the specified position is NUL.
// */
//
// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset))
//
// #define IS_Z(string) IS_Z_AT((string),0)
//
// /*
// * Check if the character at the specified position is BOM.
// */
//
// #define IS_BOM_AT(string,offset) \
// (CHECK_AT((string),'\xEF',(offset)) \
// && CHECK_AT((string),'\xBB',(offset)+1) \
// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */
//
// #define IS_BOM(string) IS_BOM_AT(string,0)
//
// /*
// * Check if the character at the specified position is space.
// */
//
// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset))
//
// #define IS_SPACE(string) IS_SPACE_AT((string),0)
//
// /*
// * Check if the character at the specified position is tab.
// */
//
// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset))
//
// #define IS_TAB(string) IS_TAB_AT((string),0)
//
// /*
// * Check if the character at the specified position is blank (space or tab).
// */
//
// #define IS_BLANK_AT(string,offset) \
// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset)))
//
// #define IS_BLANK(string) IS_BLANK_AT((string),0)
//
// /*
// * Check if the character at the specified position is a line break.
// */
//
// #define IS_BREAK_AT(string,offset) \
// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \
// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \
// || (CHECK_AT((string),'\xC2',(offset)) \
// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \
// || (CHECK_AT((string),'\xE2',(offset)) \
// && CHECK_AT((string),'\x80',(offset)+1) \
// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \
// || (CHECK_AT((string),'\xE2',(offset)) \
// && CHECK_AT((string),'\x80',(offset)+1) \
// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */
//
// #define IS_BREAK(string) IS_BREAK_AT((string),0)
//
// #define IS_CRLF_AT(string,offset) \
// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1))
//
// #define IS_CRLF(string) IS_CRLF_AT((string),0)
//
// /*
// * Check if the character is a line break or NUL.
// */
//
// #define IS_BREAKZ_AT(string,offset) \
// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset)))
//
// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0)
//
// /*
// * Check if the character is a line break, space, or NUL.
// */
//
// #define IS_SPACEZ_AT(string,offset) \
// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
//
// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0)
//
// /*
// * Check if the character is a line break, space, tab, or NUL.
// */
//
// #define IS_BLANKZ_AT(string,offset) \
// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
//
// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0)
//
// /*
// * Determine the width of the character.
// */
//
// #define WIDTH_AT(string,offset) \
// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \
// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \
// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \
// ((string).pointer[offset] & 0xF8) == 0xF0 ? 4 : 0)
//
// #define WIDTH(string) WIDTH_AT((string),0)
//
// /*
// * Move the string pointer to the next character.
// */
//
// #define MOVE(string) ((string).pointer += WIDTH((string)))
//
// /*
// * Copy a character and move the pointers of both strings.
// */
//
// #define COPY(string_a,string_b) \
// ((*(string_b).pointer & 0x80) == 0x00 ? \
// (*((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xE0) == 0xC0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xF0) == 0xE0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xF8) == 0xF0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : 0)
//
// /*
// * Stack and queue management.
// */
//
// yaml_DECLARE(int)
// yaml_stack_extend(void **start, void **top, void **end);
//
// yaml_DECLARE(int)
// yaml_queue_extend(void **start, void **head, void **tail, void **end);
//
// #define STACK_INIT(context,stack,size) \
// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \
// ((stack).top = (stack).start, \
// (stack).end = (stack).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define STACK_DEL(context,stack) \
// (yaml_free((stack).start), \
// (stack).start = (stack).top = (stack).end = 0)
//
// #define STACK_EMPTY(context,stack) \
// ((stack).start == (stack).top)
//
// #define PUSH(context,stack,value) \
// (((stack).top != (stack).end \
// || yaml_stack_extend((void **)&(stack).start, \
// (void **)&(stack).top, (void **)&(stack).end)) ? \
// (*((stack).top++) = value, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define POP(context,stack) \
// (*(--(stack).top))
//
// #define QUEUE_INIT(context,queue,size) \
// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \
// ((queue).head = (queue).tail = (queue).start, \
// (queue).end = (queue).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define QUEUE_DEL(context,queue) \
// (yaml_free((queue).start), \
// (queue).start = (queue).head = (queue).tail = (queue).end = 0)
//
// #define QUEUE_EMPTY(context,queue) \
// ((queue).head == (queue).tail)
//
// #define ENQUEUE(context,queue,value) \
// (((queue).tail != (queue).end \
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
// (*((queue).tail++) = value, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define DEQUEUE(context,queue) \
// (*((queue).head++))
//
// #define QUEUE_INSERT(context,queue,index,value) \
// (((queue).tail != (queue).end \
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
// (memmove((queue).head+(index)+1,(queue).head+(index), \
// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \
// *((queue).head+(index)) = value, \
// (queue).tail++, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// /*
// * Token initializers.
// */
//
// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \
// (memset(&(token), 0, sizeof(yaml_token_t)), \
// (token).type = (token_type), \
// (token).start_mark = (token_start_mark), \
// (token).end_mark = (token_end_mark))
//
// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \
// (token).data.stream_start.encoding = (token_encoding))
//
// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark)))
//
// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \
// (token).data.alias.value = (token_value))
//
// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \
// (token).data.anchor.value = (token_value))
//
// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \
// (token).data.tag.handle = (token_handle), \
// (token).data.tag.suffix = (token_suffix))
//
// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \
// (token).data.scalar.value = (token_value), \
// (token).data.scalar.length = (token_length), \
// (token).data.scalar.style = (token_style))
//
// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
// (token).data.version_directive.major = (token_major), \
// (token).data.version_directive.minor = (token_minor))
//
// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
// (token).data.tag_directive.handle = (token_handle), \
// (token).data.tag_directive.prefix = (token_prefix))
//
// /*
// * Event initializers.
// */
//
// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \
// (memset(&(event), 0, sizeof(yaml_event_t)), \
// (event).type = (event_type), \
// (event).start_mark = (event_start_mark), \
// (event).end_mark = (event_end_mark))
//
// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \
// (event).data.stream_start.encoding = (event_encoding))
//
// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark)))
//
// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \
// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \
// (event).data.document_start.version_directive = (event_version_directive), \
// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \
// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \
// (event).data.document_start.implicit = (event_implicit))
//
// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \
// (event).data.document_end.implicit = (event_implicit))
//
// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \
// (event).data.alias.anchor = (event_anchor))
//
// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \
// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \
// (event).data.scalar.anchor = (event_anchor), \
// (event).data.scalar.tag = (event_tag), \
// (event).data.scalar.value = (event_value), \
// (event).data.scalar.length = (event_length), \
// (event).data.scalar.plain_implicit = (event_plain_implicit), \
// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \
// (event).data.scalar.style = (event_style))
//
// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \
// event_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \
// (event).data.sequence_start.anchor = (event_anchor), \
// (event).data.sequence_start.tag = (event_tag), \
// (event).data.sequence_start.implicit = (event_implicit), \
// (event).data.sequence_start.style = (event_style))
//
// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark)))
//
// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \
// event_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \
// (event).data.mapping_start.anchor = (event_anchor), \
// (event).data.mapping_start.tag = (event_tag), \
// (event).data.mapping_start.implicit = (event_implicit), \
// (event).data.mapping_start.style = (event_style))
//
// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark)))
//
// /*
// * Document initializer.
// */
//
// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \
// document_version_directive,document_tag_directives_start, \
// document_tag_directives_end,document_start_implicit, \
// document_end_implicit,document_start_mark,document_end_mark) \
// (memset(&(document), 0, sizeof(yaml_document_t)), \
// (document).nodes.start = (document_nodes_start), \
// (document).nodes.end = (document_nodes_end), \
// (document).nodes.top = (document_nodes_start), \
// (document).version_directive = (document_version_directive), \
// (document).tag_directives.start = (document_tag_directives_start), \
// (document).tag_directives.end = (document_tag_directives_end), \
// (document).start_implicit = (document_start_implicit), \
// (document).end_implicit = (document_end_implicit), \
// (document).start_mark = (document_start_mark), \
// (document).end_mark = (document_end_mark))
//
// /*
// * Node initializers.
// */
//
// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \
// (memset(&(node), 0, sizeof(yaml_node_t)), \
// (node).type = (node_type), \
// (node).tag = (node_tag), \
// (node).start_mark = (node_start_mark), \
// (node).end_mark = (node_end_mark))
//
// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.scalar.value = (node_value), \
// (node).data.scalar.length = (node_length), \
// (node).data.scalar.style = (node_style))
//
// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.sequence.items.start = (node_items_start), \
// (node).data.sequence.items.end = (node_items_end), \
// (node).data.sequence.items.top = (node_items_start), \
// (node).data.sequence.style = (node_style))
//
// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.mapping.pairs.start = (node_pairs_start), \
// (node).data.mapping.pairs.end = (node_pairs_end), \
// (node).data.mapping.pairs.top = (node_pairs_start), \
// (node).data.mapping.style = (node_style))
//
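The commented-out macros above are carried over from libyaml for reference; in the Go port the same UTF-8 width check is performed directly on byte slices. As a minimal illustrative sketch only (the function name utf8Width is not part of the vendored source), the logic of the WIDTH macro can be written as:
// utf8Width mirrors the WIDTH macro above: it inspects the leading byte of a
// UTF-8 sequence and reports how many bytes the encoded character occupies
// (0 for a continuation or invalid leading byte).
func utf8Width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1 // 0xxxxxxx: single-byte (ASCII)
	case b&0xE0 == 0xC0:
		return 2 // 110xxxxx: two-byte sequence
	case b&0xF0 == 0xE0:
		return 3 // 1110xxxx: three-byte sequence
	case b&0xF8 == 0xF0:
		return 4 // 11110xxx: four-byte sequence
	default:
		return 0
	}
}
Advancing a read position by this width is the slice-based counterpart of the MOVE macro.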


@ -0,0 +1,953 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"fmt"
"io"
)
/** The version directive data. */
type yaml_version_directive_t struct {
major int // The major version number
minor int // The minor version number
}
/** The tag directive data. */
type yaml_tag_directive_t struct {
handle []byte // The tag handle
prefix []byte // The tag prefix
}
/** The stream encoding. */
type yaml_encoding_t int
const (
/** Let the parser choose the encoding. */
yaml_ANY_ENCODING yaml_encoding_t = iota
/** The default UTF-8 encoding. */
yaml_UTF8_ENCODING
/** The UTF-16-LE encoding with BOM. */
yaml_UTF16LE_ENCODING
/** The UTF-16-BE encoding with BOM. */
yaml_UTF16BE_ENCODING
)
/** Line break types. */
type yaml_break_t int
const (
yaml_ANY_BREAK yaml_break_t = iota /** Let the parser choose the break type. */
yaml_CR_BREAK /** Use CR for line breaks (Mac style). */
yaml_LN_BREAK /** Use LN for line breaks (Unix style). */
yaml_CRLN_BREAK /** Use CR LN for line breaks (DOS style). */
)
/** Many bad things could happen with the parser and emitter. */
type YAML_error_type_t int
const (
/** No error is produced. */
yaml_NO_ERROR YAML_error_type_t = iota
/** Cannot allocate or reallocate a block of memory. */
yaml_MEMORY_ERROR
/** Cannot read or decode the input stream. */
yaml_READER_ERROR
/** Cannot scan the input stream. */
yaml_SCANNER_ERROR
/** Cannot parse the input stream. */
yaml_PARSER_ERROR
/** Cannot compose a YAML document. */
yaml_COMPOSER_ERROR
/** Cannot write to the output stream. */
yaml_WRITER_ERROR
/** Cannot emit a YAML stream. */
yaml_EMITTER_ERROR
)
/** The pointer position. */
type YAML_mark_t struct {
/** The position index. */
index int
/** The position line. */
line int
/** The position column. */
column int
}
func (m YAML_mark_t) String() string {
return fmt.Sprintf("line %d, column %d", m.line, m.column)
}
/** @} */
/**
* @defgroup styles Node Styles
* @{
*/
type yaml_style_t int
/** Scalar styles. */
type yaml_scalar_style_t yaml_style_t
const (
/** Let the emitter choose the style. */
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
/** The plain scalar style. */
yaml_PLAIN_SCALAR_STYLE
/** The single-quoted scalar style. */
yaml_SINGLE_QUOTED_SCALAR_STYLE
/** The double-quoted scalar style. */
yaml_DOUBLE_QUOTED_SCALAR_STYLE
/** The literal scalar style. */
yaml_LITERAL_SCALAR_STYLE
/** The folded scalar style. */
yaml_FOLDED_SCALAR_STYLE
)
/** Sequence styles. */
type yaml_sequence_style_t yaml_style_t
const (
/** Let the emitter choose the style. */
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
/** The block sequence style. */
yaml_BLOCK_SEQUENCE_STYLE
/** The flow sequence style. */
yaml_FLOW_SEQUENCE_STYLE
)
/** Mapping styles. */
type yaml_mapping_style_t yaml_style_t
const (
/** Let the emitter choose the style. */
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
/** The block mapping style. */
yaml_BLOCK_MAPPING_STYLE
/** The flow mapping style. */
yaml_FLOW_MAPPING_STYLE
/* yaml_FLOW_SET_MAPPING_STYLE */
)
/** @} */
/**
* @defgroup tokens Tokens
* @{
*/
/** Token types. */
type yaml_token_type_t int
const (
/** An empty token. */
yaml_NO_TOKEN yaml_token_type_t = iota
/** A STREAM-START token. */
yaml_STREAM_START_TOKEN
/** A STREAM-END token. */
yaml_STREAM_END_TOKEN
/** A VERSION-DIRECTIVE token. */
yaml_VERSION_DIRECTIVE_TOKEN
/** A TAG-DIRECTIVE token. */
yaml_TAG_DIRECTIVE_TOKEN
/** A DOCUMENT-START token. */
yaml_DOCUMENT_START_TOKEN
/** A DOCUMENT-END token. */
yaml_DOCUMENT_END_TOKEN
/** A BLOCK-SEQUENCE-START token. */
yaml_BLOCK_SEQUENCE_START_TOKEN
/** A BLOCK-MAPPING-START token. */
yaml_BLOCK_MAPPING_START_TOKEN
/** A BLOCK-END token. */
yaml_BLOCK_END_TOKEN
/** A FLOW-SEQUENCE-START token. */
yaml_FLOW_SEQUENCE_START_TOKEN
/** A FLOW-SEQUENCE-END token. */
yaml_FLOW_SEQUENCE_END_TOKEN
/** A FLOW-MAPPING-START token. */
yaml_FLOW_MAPPING_START_TOKEN
/** A FLOW-MAPPING-END token. */
yaml_FLOW_MAPPING_END_TOKEN
/** A BLOCK-ENTRY token. */
yaml_BLOCK_ENTRY_TOKEN
/** A FLOW-ENTRY token. */
yaml_FLOW_ENTRY_TOKEN
/** A KEY token. */
yaml_KEY_TOKEN
/** A VALUE token. */
yaml_VALUE_TOKEN
/** An ALIAS token. */
yaml_ALIAS_TOKEN
/** An ANCHOR token. */
yaml_ANCHOR_TOKEN
/** A TAG token. */
yaml_TAG_TOKEN
/** A SCALAR token. */
yaml_SCALAR_TOKEN
)
/** The token structure. */
type yaml_token_t struct {
/** The token type. */
token_type yaml_token_type_t
/** The token data. */
/** The stream start (for @c yaml_STREAM_START_TOKEN). */
encoding yaml_encoding_t
/** The alias (for @c yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN). */
/** The anchor (for @c ). */
/** The scalar value (for @c ). */
value []byte
/** The tag suffix. */
suffix []byte
/** The scalar value (for @c yaml_SCALAR_TOKEN). */
/** The scalar style. */
style yaml_scalar_style_t
/** The version directive (for @c yaml_VERSION_DIRECTIVE_TOKEN). */
version_directive yaml_version_directive_t
/** The tag directive (for @c yaml_TAG_DIRECTIVE_TOKEN). */
prefix []byte
/** The beginning of the token. */
start_mark YAML_mark_t
/** The end of the token. */
end_mark YAML_mark_t
major, minor int
}
/**
* @defgroup events Events
* @{
*/
/** Event types. */
type yaml_event_type_t int
const (
/** An empty event. */
yaml_NO_EVENT yaml_event_type_t = iota
/** A STREAM-START event. */
yaml_STREAM_START_EVENT
/** A STREAM-END event. */
yaml_STREAM_END_EVENT
/** A DOCUMENT-START event. */
yaml_DOCUMENT_START_EVENT
/** A DOCUMENT-END event. */
yaml_DOCUMENT_END_EVENT
/** An ALIAS event. */
yaml_ALIAS_EVENT
/** A SCALAR event. */
yaml_SCALAR_EVENT
/** A SEQUENCE-START event. */
yaml_SEQUENCE_START_EVENT
/** A SEQUENCE-END event. */
yaml_SEQUENCE_END_EVENT
/** A MAPPING-START event. */
yaml_MAPPING_START_EVENT
/** A MAPPING-END event. */
yaml_MAPPING_END_EVENT
)
/** The event structure. */
type yaml_event_t struct {
/** The event type. */
event_type yaml_event_type_t
/** The stream parameters (for @c yaml_STREAM_START_EVENT). */
encoding yaml_encoding_t
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT). */
version_directive *yaml_version_directive_t
/** The beginning and end of the tag directives list. */
tag_directives []yaml_tag_directive_t
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT,yaml_MAPPING_START_EVENT). */
/** Is the document indicator implicit? */
implicit bool
/** The alias parameters (for @c yaml_ALIAS_EVENT,yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** The anchor. */
anchor []byte
/** The scalar parameters (for @c yaml_SCALAR_EVENT,yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** The tag. */
tag []byte
/** The scalar value. */
value []byte
/** Is the tag optional for the plain style? */
plain_implicit bool
/** Is the tag optional for any non-plain style? */
quoted_implicit bool
/** The sequence parameters (for @c yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** The sequence style. */
/** The scalar style. */
style yaml_style_t
/** The beginning of the event. */
start_mark, end_mark YAML_mark_t
}
/**
* @defgroup nodes Nodes
* @{
*/
const (
/** The tag @c !!null with the only possible value: @c null. */
yaml_NULL_TAG = "tag:yaml.org,2002:null"
/** The tag @c !!bool with the values: @c true and @c false. */
yaml_BOOL_TAG = "tag:yaml.org,2002:bool"
/** The tag @c !!str for string values. */
yaml_STR_TAG = "tag:yaml.org,2002:str"
/** The tag @c !!int for integer values. */
yaml_INT_TAG = "tag:yaml.org,2002:int"
/** The tag @c !!float for float values. */
yaml_FLOAT_TAG = "tag:yaml.org,2002:float"
/** The tag @c !!timestamp for date and time values. */
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp"
/** The tag @c !!seq is used to denote sequences. */
yaml_SEQ_TAG = "tag:yaml.org,2002:seq"
/** The tag @c !!map is used to denote mapping. */
yaml_MAP_TAG = "tag:yaml.org,2002:map"
/** The default scalar tag is @c !!str. */
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG
/** The default sequence tag is @c !!seq. */
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG
/** The default mapping tag is @c !!map. */
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
)
/** Node types. */
type yaml_node_type_t int
const (
/** An empty node. */
yaml_NO_NODE yaml_node_type_t = iota
/** A scalar node. */
yaml_SCALAR_NODE
/** A sequence node. */
yaml_SEQUENCE_NODE
/** A mapping node. */
yaml_MAPPING_NODE
)
/** An element of a sequence node. */
type yaml_node_item_t int
/** An element of a mapping node. */
type yaml_node_pair_t struct {
/** The key of the element. */
key int
/** The value of the element. */
value int
}
/** The node structure. */
type yaml_node_t struct {
/** The node type. */
node_type yaml_node_type_t
/** The node tag. */
tag []byte
/** The scalar parameters (for @c yaml_SCALAR_NODE). */
scalar struct {
/** The scalar value. */
value []byte
/** The scalar style. */
style yaml_scalar_style_t
}
/** The sequence parameters (for @c yaml_SEQUENCE_NODE). */
sequence struct {
/** The stack of sequence items. */
items []yaml_node_item_t
/** The sequence style. */
style yaml_sequence_style_t
}
/** The mapping parameters (for @c yaml_MAPPING_NODE). */
mapping struct {
/** The stack of mapping pairs (key, value). */
pairs []yaml_node_pair_t
/** The mapping style. */
style yaml_mapping_style_t
}
/** The beginning of the node. */
start_mark YAML_mark_t
/** The end of the node. */
end_mark YAML_mark_t
}
/** The document structure. */
type yaml_document_t struct {
/** The document nodes. */
nodes []yaml_node_t
/** The version directive. */
version_directive *yaml_version_directive_t
/** The list of tag directives. */
tags []yaml_tag_directive_t
/** Is the document start indicator implicit? */
start_implicit bool
/** Is the document end indicator implicit? */
end_implicit bool
/** The beginning of the document. */
start_mark YAML_mark_t
/** The end of the document. */
end_mark YAML_mark_t
}
/**
* The prototype of a read handler.
*
* The read handler is called when the parser needs to read more bytes from the
* source. The handler should write not more than @a size bytes to the @a
* buffer. The number of written bytes should be set to the @a length variable.
*
* @param[in,out] data A pointer to an application data specified by
* yaml_parser_set_input().
* @param[out] buffer The buffer to write the data from the source.
* @param[in] size The size of the buffer.
* @param[out] size_read The actual number of bytes read from the source.
*
* @returns On success, the handler should return @c 1. If the handler failed,
* the returned value should be @c 0. On EOF, the handler should set the
* @a size_read to @c 0 and return @c 1.
*/
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
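As an illustrative sketch only (not part of the vendored file), a read handler matching this Go signature can simply delegate to the parser's io.Reader, which the reader fields below hold:
// readHandlerFromReader fills buffer from the parser's underlying io.Reader
// and reports how many bytes were read; an io.EOF error signals end of input.
func readHandlerFromReader(parser *yaml_parser_t, buffer []byte) (n int, err error) {
	return parser.input_reader.Read(buffer)
}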
/**
* This structure holds information about a potential simple key.
*/
type yaml_simple_key_t struct {
/** Is a simple key possible? */
possible bool
/** Is a simple key required? */
required bool
/** The number of the token. */
token_number int
/** The position mark. */
mark YAML_mark_t
}
/**
* The states of the parser.
*/
type yaml_parser_state_t int
const (
/** Expect STREAM-START. */
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
/** Expect the beginning of an implicit document. */
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
/** Expect DOCUMENT-START. */
yaml_PARSE_DOCUMENT_START_STATE
/** Expect the content of a document. */
yaml_PARSE_DOCUMENT_CONTENT_STATE
/** Expect DOCUMENT-END. */
yaml_PARSE_DOCUMENT_END_STATE
/** Expect a block node. */
yaml_PARSE_BLOCK_NODE_STATE
/** Expect a block node or indentless sequence. */
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE
/** Expect a flow node. */
yaml_PARSE_FLOW_NODE_STATE
/** Expect the first entry of a block sequence. */
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
/** Expect an entry of a block sequence. */
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
/** Expect an entry of an indentless sequence. */
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
/** Expect the first key of a block mapping. */
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
/** Expect a block mapping key. */
yaml_PARSE_BLOCK_MAPPING_KEY_STATE
/** Expect a block mapping value. */
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
/** Expect the first entry of a flow sequence. */
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
/** Expect an entry of a flow sequence. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
/** Expect a key of an ordered mapping. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
/** Expect a value of an ordered mapping. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
/** Expect the end of an ordered mapping entry. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
/** Expect the first key of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
/** Expect a key of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_KEY_STATE
/** Expect a value of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_VALUE_STATE
/** Expect an empty value of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE
/** Expect nothing. */
yaml_PARSE_END_STATE
)
/**
* This structure holds aliases data.
*/
type yaml_alias_data_t struct {
/** The anchor. */
anchor []byte
/** The node id. */
index int
/** The anchor mark. */
mark YAML_mark_t
}
/**
* The parser structure.
*
* All members are internal. Manage the structure using the @c yaml_parser_
* family of functions.
*/
type yaml_parser_t struct {
/**
* @name Error handling
* @{
*/
/** Error type. */
error YAML_error_type_t
/** Error description. */
problem string
/** The byte about which the problem occurred. */
problem_offset int
/** The problematic value (@c -1 is none). */
problem_value int
/** The problem position. */
problem_mark YAML_mark_t
/** The error context. */
context string
/** The context position. */
context_mark YAML_mark_t
/**
* @}
*/
/**
* @name Reader stuff
* @{
*/
/** Read handler. */
read_handler yaml_read_handler_t
/** Reader input data. */
input_reader io.Reader
input []byte
input_pos int
/** EOF flag */
eof bool
/** The working buffer. */
buffer []byte
buffer_pos int
/* The number of unread characters in the buffer. */
unread int
/** The raw buffer. */
raw_buffer []byte
raw_buffer_pos int
/** The input encoding. */
encoding yaml_encoding_t
/** The offset of the current position (in bytes). */
offset int
/** The mark of the current position. */
mark YAML_mark_t
/**
* @}
*/
/**
* @name Scanner stuff
* @{
*/
/** Have we started to scan the input stream? */
stream_start_produced bool
/** Have we reached the end of the input stream? */
stream_end_produced bool
/** The number of unclosed '[' and '{' indicators. */
flow_level int
/** The tokens queue. */
tokens []yaml_token_t
tokens_head int
/** The number of tokens fetched from the queue. */
tokens_parsed int
/* Does the tokens queue contain a token ready for dequeueing. */
token_available bool
/** The indentation levels stack. */
indents []int
/** The current indentation level. */
indent int
/** May a simple key occur at the current position? */
simple_key_allowed bool
/** The stack of simple keys. */
simple_keys []yaml_simple_key_t
/**
* @}
*/
/**
* @name Parser stuff
* @{
*/
/** The parser states stack. */
states []yaml_parser_state_t
/** The current parser state. */
state yaml_parser_state_t
/** The stack of marks. */
marks []YAML_mark_t
/** The list of TAG directives. */
tag_directives []yaml_tag_directive_t
/**
* @}
*/
/**
* @name Dumper stuff
* @{
*/
/** The alias data. */
aliases []yaml_alias_data_t
/** The currently parsed document. */
document *yaml_document_t
/**
* @}
*/
}
/**
* The prototype of a write handler.
*
* The write handler is called when the emitter needs to flush the accumulated
* characters to the output. The handler should write @a size bytes of the
* @a buffer to the output.
*
* @param[in,out] data A pointer to an application data specified by
* yaml_emitter_set_output().
* @param[in] buffer The buffer with bytes to be written.
* @param[in] size The size of the buffer.
*
* @returns On success, the handler should return @c 1. If the handler failed,
* the returned value should be @c 0.
*/
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
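Similarly, as a sketch under the same caveat (not part of the vendored file), a write handler for this signature might just forward the emitter's buffer to its io.Writer:
// writeHandlerToWriter flushes the accumulated bytes to the emitter's output
// writer and returns any write error.
func writeHandlerToWriter(emitter *yaml_emitter_t, buffer []byte) error {
	_, err := emitter.output_writer.Write(buffer)
	return err
}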
/** The emitter states. */
type yaml_emitter_state_t int
const (
/** Expect STREAM-START. */
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
/** Expect the first DOCUMENT-START or STREAM-END. */
yaml_EMIT_FIRST_DOCUMENT_START_STATE
/** Expect DOCUMENT-START or STREAM-END. */
yaml_EMIT_DOCUMENT_START_STATE
/** Expect the content of a document. */
yaml_EMIT_DOCUMENT_CONTENT_STATE
/** Expect DOCUMENT-END. */
yaml_EMIT_DOCUMENT_END_STATE
/** Expect the first item of a flow sequence. */
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
/** Expect an item of a flow sequence. */
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE
/** Expect the first key of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
/** Expect a key of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_KEY_STATE
/** Expect a value for a simple key of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE
/** Expect a value of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_VALUE_STATE
/** Expect the first item of a block sequence. */
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
/** Expect an item of a block sequence. */
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE
/** Expect the first key of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
/** Expect the key of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_KEY_STATE
/** Expect a value for a simple key of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE
/** Expect a value of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE
/** Expect nothing. */
yaml_EMIT_END_STATE
)
/**
* The emitter structure.
*
* All members are internal. Manage the structure using the @c yaml_emitter_
* family of functions.
*/
type yaml_emitter_t struct {
/**
* @name Error handling
* @{
*/
/** Error type. */
error YAML_error_type_t
/** Error description. */
problem string
/**
* @}
*/
/**
* @name Writer stuff
* @{
*/
/** Write handler. */
write_handler yaml_write_handler_t
/** Standard (string or file) output data. */
output_buffer *[]byte
output_writer io.Writer
/** The working buffer. */
buffer []byte
buffer_pos int
/** The raw buffer. */
raw_buffer []byte
raw_buffer_pos int
/** The stream encoding. */
encoding yaml_encoding_t
/**
* @}
*/
/**
* @name Emitter stuff
* @{
*/
/** Is the output in the canonical style? */
canonical bool
/** The number of indentation spaces. */
best_indent int
/** The preferred width of the output lines. */
best_width int
/** Allow unescaped non-ASCII characters? */
unicode bool
/** The preferred line break. */
line_break yaml_break_t
/** The stack of states. */
states []yaml_emitter_state_t
/** The current emitter state. */
state yaml_emitter_state_t
/** The event queue. */
events []yaml_event_t
events_head int
/** The stack of indentation levels. */
indents []int
/** The list of tag directives. */
tag_directives []yaml_tag_directive_t
/** The current indentation level. */
indent int
/** The current flow level. */
flow_level int
/** Is it the document root context? */
root_context bool
/** Is it a sequence context? */
sequence_context bool
/** Is it a mapping context? */
mapping_context bool
/** Is it a simple mapping key context? */
simple_key_context bool
/** The current line. */
line int
/** The current column. */
column int
/** Was the last character a whitespace? */
whitespace bool
/** Was the last character an indentation character (' ', '-', '?', ':')? */
indention bool
/** Is an explicit document end required? */
open_ended bool
/** Anchor analysis. */
anchor_data struct {
/** The anchor value. */
anchor []byte
/** Is it an alias? */
alias bool
}
/** Tag analysis. */
tag_data struct {
/** The tag handle. */
handle []byte
/** The tag suffix. */
suffix []byte
}
/** Scalar analysis. */
scalar_data struct {
/** The scalar value. */
value []byte
/** Does the scalar contain line breaks? */
multiline bool
/** Can the scalar be expressed in the flow plain style? */
flow_plain_allowed bool
/** Can the scalar be expressed in the block plain style? */
block_plain_allowed bool
/** Can the scalar be expressed in the single quoted style? */
single_quoted_allowed bool
/** Can the scalar be expressed in the literal or folded styles? */
block_allowed bool
/** The output style. */
style yaml_scalar_style_t
}
/**
* @}
*/
/**
* @name Dumper stuff
* @{
*/
/** Was the stream already opened? */
opened bool
/** Was the stream already closed? */
closed bool
/** The information associated with the document nodes. */
anchors *struct {
/** The number of references. */
references int
/** The anchor id. */
anchor int
/** Has the node been emitted? */
serialized bool
}
/** The last assigned anchor id. */
last_anchor_id int
/** The currently emitted document. */
document *yaml_document_t
/**
* @}
*/
}

202
vendor/github.com/coreos/etcd/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

5
vendor/github.com/coreos/etcd/NOTICE generated vendored Normal file

@ -0,0 +1,5 @@
CoreOS Project
Copyright 2014 CoreOS, Inc
This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).

820
vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go generated vendored Normal file

@ -0,0 +1,820 @@
// Code generated by protoc-gen-gogo.
// source: auth.proto
// DO NOT EDIT!
/*
Package authpb is a generated protocol buffer package.
It is generated from these files:
auth.proto
It has these top-level messages:
User
Permission
Role
*/
package authpb
import (
"fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
type Permission_Type int32
const (
READ Permission_Type = 0
WRITE Permission_Type = 1
READWRITE Permission_Type = 2
)
var Permission_Type_name = map[int32]string{
0: "READ",
1: "WRITE",
2: "READWRITE",
}
var Permission_Type_value = map[string]int32{
"READ": 0,
"WRITE": 1,
"READWRITE": 2,
}
func (x Permission_Type) String() string {
return proto.EnumName(Permission_Type_name, int32(x))
}
func (Permission_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1, 0} }
// User is a single entry in the bucket authUsers
type User struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
Roles []string `protobuf:"bytes,3,rep,name=roles" json:"roles,omitempty"`
}
func (m *User) Reset() { *m = User{} }
func (m *User) String() string { return proto.CompactTextString(m) }
func (*User) ProtoMessage() {}
func (*User) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{0} }
// Permission is a single entity
type Permission struct {
PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
}
func (m *Permission) Reset() { *m = Permission{} }
func (m *Permission) String() string { return proto.CompactTextString(m) }
func (*Permission) ProtoMessage() {}
func (*Permission) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{1} }
// Role is a single entry in the bucket authRoles
type Role struct {
Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission" json:"keyPermission,omitempty"`
}
func (m *Role) Reset() { *m = Role{} }
func (m *Role) String() string { return proto.CompactTextString(m) }
func (*Role) ProtoMessage() {}
func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorAuth, []int{2} }
func init() {
proto.RegisterType((*User)(nil), "authpb.User")
proto.RegisterType((*Permission)(nil), "authpb.Permission")
proto.RegisterType((*Role)(nil), "authpb.Role")
proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
}
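For orientation, a hypothetical round-trip through the generated Marshal/Unmarshal methods could look like the sketch below; the function name and the user data are illustrative and not part of the generated file.
// exampleUserRoundTrip marshals a User to the protobuf wire format and
// decodes it back, returning any marshal or unmarshal error.
func exampleUserRoundTrip() error {
	u := &User{Name: []byte("alice"), Roles: []string{"root"}}
	data, err := u.Marshal()
	if err != nil {
		return err
	}
	var decoded User
	return decoded.Unmarshal(data)
}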
func (m *User) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *User) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
data[i] = 0xa
i++
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
if len(m.Password) > 0 {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(len(m.Password)))
i += copy(data[i:], m.Password)
}
if len(m.Roles) > 0 {
for _, s := range m.Roles {
data[i] = 0x1a
i++
l = len(s)
for l >= 1<<7 {
data[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
data[i] = uint8(l)
i++
i += copy(data[i:], s)
}
}
return i, nil
}
func (m *Permission) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Permission) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.PermType != 0 {
data[i] = 0x8
i++
i = encodeVarintAuth(data, i, uint64(m.PermType))
}
if len(m.Key) > 0 {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(len(m.Key)))
i += copy(data[i:], m.Key)
}
if len(m.RangeEnd) > 0 {
data[i] = 0x1a
i++
i = encodeVarintAuth(data, i, uint64(len(m.RangeEnd)))
i += copy(data[i:], m.RangeEnd)
}
return i, nil
}
func (m *Role) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Role) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
data[i] = 0xa
i++
i = encodeVarintAuth(data, i, uint64(len(m.Name)))
i += copy(data[i:], m.Name)
}
if len(m.KeyPermission) > 0 {
for _, msg := range m.KeyPermission {
data[i] = 0x12
i++
i = encodeVarintAuth(data, i, uint64(msg.Size()))
n, err := msg.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func encodeFixed64Auth(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Auth(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintAuth(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
func (m *User) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
l = len(m.Password)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
if len(m.Roles) > 0 {
for _, s := range m.Roles {
l = len(s)
n += 1 + l + sovAuth(uint64(l))
}
}
return n
}
func (m *Permission) Size() (n int) {
var l int
_ = l
if m.PermType != 0 {
n += 1 + sovAuth(uint64(m.PermType))
}
l = len(m.Key)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
l = len(m.RangeEnd)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
return n
}
func (m *Role) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovAuth(uint64(l))
}
if len(m.KeyPermission) > 0 {
for _, e := range m.KeyPermission {
l = e.Size()
n += 1 + l + sovAuth(uint64(l))
}
}
return n
}
func sovAuth(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozAuth(x uint64) (n int) {
return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *User) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: User: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
if m.Name == nil {
m.Name = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Password = append(m.Password[:0], data[iNdEx:postIndex]...)
if m.Password == nil {
m.Password = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Roles = append(m.Roles, string(data[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Permission) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Permission: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
}
m.PermType = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.PermType |= (Permission_Type(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.RangeEnd = append(m.RangeEnd[:0], data[iNdEx:postIndex]...)
if m.RangeEnd == nil {
m.RangeEnd = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Role) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Role: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = append(m.Name[:0], data[iNdEx:postIndex]...)
if m.Name == nil {
m.Name = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowAuth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthAuth
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.KeyPermission = append(m.KeyPermission, &Permission{})
if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipAuth(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthAuth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipAuth(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthAuth
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowAuth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipAuth(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
)
var fileDescriptorAuth = []byte{
// 288 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
}
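
The Unmarshal and skipAuth routines above are hand-rolled protobuf wire-format decoding: each field key is a varint whose low three bits encode the wire type and whose remaining bits encode the field number, and wire type 2 values carry a varint length prefix. A minimal standalone sketch of that decoding, with an illustrative single-byte key:

package main

import "fmt"

// decodeVarint reads a base-128 varint from data, returning the decoded
// value and the number of bytes consumed (0 if the input is truncated or
// the value would overflow 64 bits).
func decodeVarint(data []byte) (uint64, int) {
	var v uint64
	for i, shift := 0, uint(0); i < len(data); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0
		}
		b := data[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0
}

func main() {
	// 0x1a is a single-byte field key: field number 3, wire type 2
	// (length-delimited), the same layout skipAuth switches on above.
	key, n := decodeVarint([]byte{0x1a})
	fmt.Printf("consumed %d byte(s): field=%d wireType=%d\n", n, key>>3, key&0x7)
}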

237
vendor/github.com/coreos/etcd/client/auth_role.go generated vendored Normal file
View File

@ -0,0 +1,237 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"golang.org/x/net/context"
)
type Role struct {
Role string `json:"role"`
Permissions Permissions `json:"permissions"`
Grant *Permissions `json:"grant,omitempty"`
Revoke *Permissions `json:"revoke,omitempty"`
}
type Permissions struct {
KV rwPermission `json:"kv"`
}
type rwPermission struct {
Read []string `json:"read"`
Write []string `json:"write"`
}
type PermissionType int
const (
ReadPermission PermissionType = iota
WritePermission
ReadWritePermission
)
// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
// interact with etcd's role creation and modification features.
func NewAuthRoleAPI(c Client) AuthRoleAPI {
return &httpAuthRoleAPI{
client: c,
}
}
type AuthRoleAPI interface {
// AddRole adds a role.
AddRole(ctx context.Context, role string) error
// RemoveRole removes a role.
RemoveRole(ctx context.Context, role string) error
// GetRole retrieves role details.
GetRole(ctx context.Context, role string) (*Role, error)
// GrantRoleKV grants a role some permission prefixes for the KV store.
GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
// ListRoles lists roles.
ListRoles(ctx context.Context) ([]string, error)
}
type httpAuthRoleAPI struct {
client httpClient
}
type authRoleAPIAction struct {
verb string
name string
role *Role
}
type authRoleAPIList struct{}
func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "roles", l.name)
if l.role == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.role)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var roleList struct {
Roles []Role `json:"roles"`
}
if err = json.Unmarshal(body, &roleList); err != nil {
return nil, err
}
ret := make([]string, 0, len(roleList.Roles))
for _, r := range roleList.Roles {
ret = append(ret, r.Role)
}
return ret, nil
}
func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
role := &Role{
Role: rolename,
}
return r.addRemoveRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
return r.addRemoveRole(ctx, &authRoleAPIAction{
verb: "DELETE",
name: rolename,
})
}
func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
return r.modRole(ctx, &authRoleAPIAction{
verb: "GET",
name: rolename,
})
}
func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
var out rwPermission
switch permType {
case ReadPermission:
out.Read = prefixes
case WritePermission:
out.Write = prefixes
case ReadWritePermission:
out.Read = prefixes
out.Write = prefixes
}
return out
}
func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Grant: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
rwp := buildRWPermission(prefixes, permType)
role := &Role{
Role: rolename,
Revoke: &Permissions{
KV: rwp,
},
}
return r.modRole(ctx, &authRoleAPIAction{
verb: "PUT",
name: rolename,
role: role,
})
}
func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
resp, body, err := r.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var role Role
if err = json.Unmarshal(body, &role); err != nil {
return nil, err
}
return &role, nil
}
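
A minimal usage sketch of the role API above, assuming an etcd cluster reachable at http://127.0.0.1:2379; the credentials, role name, and key prefix are illustrative:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
		Username:  "root", // illustrative credentials
		Password:  "rootpw",
	})
	if err != nil {
		log.Fatal(err)
	}
	roles := client.NewAuthRoleAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Create a role, then grant it read/write access under a key prefix.
	if err := roles.AddRole(ctx, "frontend"); err != nil {
		log.Fatal(err)
	}
	if _, err := roles.GrantRoleKV(ctx, "frontend", []string{"/app/*"}, client.ReadWritePermission); err != nil {
		log.Fatal(err)
	}

	names, err := roles.ListRoles(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("roles:", names)
}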

320
vendor/github.com/coreos/etcd/client/auth_user.go generated vendored Normal file
View File

@ -0,0 +1,320 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"path"
"golang.org/x/net/context"
)
var (
defaultV2AuthPrefix = "/v2/auth"
)
type User struct {
User string `json:"user"`
Password string `json:"password,omitempty"`
Roles []string `json:"roles"`
Grant []string `json:"grant,omitempty"`
Revoke []string `json:"revoke,omitempty"`
}
// userListEntry is the user representation given by the server for ListUsers
type userListEntry struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
type UserRoles struct {
User string `json:"user"`
Roles []Role `json:"roles"`
}
func v2AuthURL(ep url.URL, action string, name string) *url.URL {
if name != "" {
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
return &ep
}
ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
return &ep
}
// NewAuthAPI constructs a new AuthAPI that uses HTTP to
// interact with etcd's general auth features.
func NewAuthAPI(c Client) AuthAPI {
return &httpAuthAPI{
client: c,
}
}
type AuthAPI interface {
// Enable auth.
Enable(ctx context.Context) error
// Disable auth.
Disable(ctx context.Context) error
}
type httpAuthAPI struct {
client httpClient
}
func (s *httpAuthAPI) Enable(ctx context.Context) error {
return s.enableDisable(ctx, &authAPIAction{"PUT"})
}
func (s *httpAuthAPI) Disable(ctx context.Context) error {
return s.enableDisable(ctx, &authAPIAction{"DELETE"})
}
func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
resp, body, err := s.client.Do(ctx, req)
if err != nil {
return err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
type authAPIAction struct {
verb string
}
func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "enable", "")
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
type authError struct {
Message string `json:"message"`
Code int `json:"-"`
}
func (e authError) Error() string {
return e.Message
}
// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
// interact with etcd's user creation and modification features.
func NewAuthUserAPI(c Client) AuthUserAPI {
return &httpAuthUserAPI{
client: c,
}
}
type AuthUserAPI interface {
// AddUser adds a user.
AddUser(ctx context.Context, username string, password string) error
// RemoveUser removes a user.
RemoveUser(ctx context.Context, username string) error
// GetUser retrieves user details.
GetUser(ctx context.Context, username string) (*User, error)
// GrantUser grants a user some permission roles.
GrantUser(ctx context.Context, username string, roles []string) (*User, error)
// RevokeUser revokes some permission roles from a user.
RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
// ChangePassword changes the user's password.
ChangePassword(ctx context.Context, username string, password string) (*User, error)
// ListUsers lists the users.
ListUsers(ctx context.Context) ([]string, error)
}
type httpAuthUserAPI struct {
client httpClient
}
type authUserAPIAction struct {
verb string
username string
user *User
}
type authUserAPIList struct{}
func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "users", "")
req, _ := http.NewRequest("GET", u.String(), nil)
req.Header.Set("Content-Type", "application/json")
return req
}
func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
u := v2AuthURL(ep, "users", l.username)
if l.user == nil {
req, _ := http.NewRequest(l.verb, u.String(), nil)
return req
}
b, err := json.Marshal(l.user)
if err != nil {
panic(err)
}
body := bytes.NewReader(b)
req, _ := http.NewRequest(l.verb, u.String(), body)
req.Header.Set("Content-Type", "application/json")
return req
}
func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
resp, body, err := u.client.Do(ctx, &authUserAPIList{})
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var userList struct {
Users []userListEntry `json:"users"`
}
if err = json.Unmarshal(body, &userList); err != nil {
return nil, err
}
ret := make([]string, 0, len(userList.Users))
for _, u := range userList.Users {
ret = append(ret, u.User)
}
return ret, nil
}
func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
user := &User{
User: username,
Password: password,
}
return u.addRemoveUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
return u.addRemoveUser(ctx, &authUserAPIAction{
verb: "DELETE",
username: username,
})
}
func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
resp, body, err := u.client.Do(ctx, req)
if err != nil {
return err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
return sec
}
return nil
}
func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
return u.modUser(ctx, &authUserAPIAction{
verb: "GET",
username: username,
})
}
func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
user := &User{
User: username,
Grant: roles,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
user := &User{
User: username,
Revoke: roles,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
user := &User{
User: username,
Password: password,
}
return u.modUser(ctx, &authUserAPIAction{
verb: "PUT",
username: username,
user: user,
})
}
func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
resp, body, err := u.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var user User
if err = json.Unmarshal(body, &user); err != nil {
var userR UserRoles
if urerr := json.Unmarshal(body, &userR); urerr != nil {
return nil, err
}
user.User = userR.User
for _, r := range userR.Roles {
user.Roles = append(user.Roles, r.Role)
}
}
return &user, nil
}
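
A minimal usage sketch of the user and auth APIs above, under the same assumed local endpoint; the user name, password, and role are illustrative:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
	})
	if err != nil {
		log.Fatal(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	users := client.NewAuthUserAPI(c)

	// Create a user and attach an existing role to it.
	if err := users.AddUser(ctx, "alice", "s3cret"); err != nil {
		log.Fatal(err)
	}
	if _, err := users.GrantUser(ctx, "alice", []string{"frontend"}); err != nil {
		log.Fatal(err)
	}

	// Enabling auth is a separate, cluster-wide switch (etcd expects a root
	// user to exist before auth can be turned on).
	if err := client.NewAuthAPI(c).Enable(ctx); err != nil {
		log.Fatal(err)
	}
}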

18
vendor/github.com/coreos/etcd/client/cancelreq.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// borrowed from golang/net/context/ctxhttp/cancelreq.go
package client
import "net/http"
func requestCanceler(tr CancelableTransport, req *http.Request) func() {
ch := make(chan struct{})
req.Cancel = ch
return func() {
close(ch)
}
}

609
vendor/github.com/coreos/etcd/client/client.go generated vendored Normal file
View File

@ -0,0 +1,609 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"reflect"
"sort"
"strconv"
"sync"
"time"
"golang.org/x/net/context"
)
var (
ErrNoEndpoints = errors.New("client: no endpoints available")
ErrTooManyRedirects = errors.New("client: too many redirects")
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
// oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
// that Do() will not retry a request
oneShotCtxValue interface{}
)
var DefaultRequestTimeout = 5 * time.Second
var DefaultTransport CancelableTransport = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
}
type EndpointSelectionMode int
const (
// EndpointSelectionRandom is the default value of the 'SelectionMode'.
// As the name implies, the client object will pick a node from the members
// of the cluster in a random fashion. If the cluster has three members, A, B,
// and C, the client may pick any one of the three as its request
// destination.
EndpointSelectionRandom EndpointSelectionMode = iota
// If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
// requests are sent directly to the cluster leader. This reduces
// forwarding roundtrips compared to making requests to etcd followers
// who then forward them to the cluster leader. In the event of a leader
// failure, however, clients configured this way cannot prioritize among
// the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
// to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
// maintain its knowledge of current cluster state.
//
// This mode should be used with Client.AutoSync().
EndpointSelectionPrioritizeLeader
)
type Config struct {
// Endpoints defines a set of URLs (schemes, hosts and ports only)
// that can be used to communicate with a logical etcd cluster. For
// example, a three-node cluster could be provided like so:
//
// Endpoints: []string{
// "http://node1.example.com:2379",
// "http://node2.example.com:2379",
// "http://node3.example.com:2379",
// }
//
// If multiple endpoints are provided, the Client will attempt to
// use them all in the event that one or more of them are unusable.
//
// If Client.Sync is ever called, the Client may cache an alternate
// set of endpoints to continue operation.
Endpoints []string
// Transport is used by the Client to drive HTTP requests. If not
// provided, DefaultTransport will be used.
Transport CancelableTransport
// CheckRedirect specifies the policy for handling HTTP redirects.
// If CheckRedirect is not nil, the Client calls it before
// following an HTTP redirect. The sole argument is the number of
// requests that have already been made. If CheckRedirect returns
// an error, Client.Do will not make any further requests and return
// the error back to the caller.
//
// If CheckRedirect is nil, the Client uses its default policy,
// which is to stop after 10 consecutive requests.
CheckRedirect CheckRedirectFunc
// Username specifies the user credential to add as an authorization header
Username string
// Password is the password for the specified user to add as an authorization header
// to the request.
Password string
// HeaderTimeoutPerRequest specifies the time limit to wait for a response
// header in a single request made by the Client. The timeout includes
// connection time, any redirects, and header wait time.
//
// For non-watch GET requests, the server returns the response body immediately.
// For PUT/POST/DELETE requests, the server attempts to commit the request
// before responding, which is expected to take `100ms + 2 * RTT`.
// For watch requests, the server returns the header immediately to signal that
// the watch has started. If the server sits behind a proxy, however, the response
// header may be cached by the proxy, so the Client cannot rely on this behavior.
//
// In particular, wait (watch) requests ignore this timeout.
//
// One API call may send multiple requests to different etcd servers until it
// succeeds. Use context of the API to specify the overall timeout.
//
// A HeaderTimeoutPerRequest of zero means no timeout.
HeaderTimeoutPerRequest time.Duration
// SelectionMode is an EndpointSelectionMode enum that specifies the
// policy for choosing the etcd cluster node to which requests are sent.
SelectionMode EndpointSelectionMode
}
func (cfg *Config) transport() CancelableTransport {
if cfg.Transport == nil {
return DefaultTransport
}
return cfg.Transport
}
func (cfg *Config) checkRedirect() CheckRedirectFunc {
if cfg.CheckRedirect == nil {
return DefaultCheckRedirect
}
return cfg.CheckRedirect
}
// CancelableTransport mimics net/http.Transport, but requires that
// the object also support request cancellation.
type CancelableTransport interface {
http.RoundTripper
CancelRequest(req *http.Request)
}
type CheckRedirectFunc func(via int) error
// DefaultCheckRedirect follows up to 10 redirects, but no more.
var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
if via > 10 {
return ErrTooManyRedirects
}
return nil
}
type Client interface {
// Sync updates the internal cache of the etcd cluster's membership.
Sync(context.Context) error
// AutoSync periodically calls Sync() every given interval.
// The recommended sync interval is 10 seconds to 1 minute: it adds
// little overhead on the server while letting the client catch up with
// cluster changes promptly.
//
// Example usage:
//
// for {
// err := client.AutoSync(ctx, 10*time.Second)
// if err == context.DeadlineExceeded || err == context.Canceled {
// break
// }
// log.Print(err)
// }
AutoSync(context.Context, time.Duration) error
// Endpoints returns a copy of the current set of API endpoints used
// by Client to resolve HTTP requests. If Sync has ever been called,
// this may differ from the initial Endpoints provided in the Config.
Endpoints() []string
// SetEndpoints sets the set of API endpoints used by Client to resolve
// HTTP requests. If the given endpoints are not valid, an error will be
// returned.
SetEndpoints(eps []string) error
httpClient
}
func New(cfg Config) (Client, error) {
c := &httpClusterClient{
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
selectionMode: cfg.SelectionMode,
}
if cfg.Username != "" {
c.credentials = &credentials{
username: cfg.Username,
password: cfg.Password,
}
}
if err := c.SetEndpoints(cfg.Endpoints); err != nil {
return nil, err
}
return c, nil
}
type httpClient interface {
Do(context.Context, httpAction) (*http.Response, []byte, error)
}
func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
return func(ep url.URL) httpClient {
return &redirectFollowingHTTPClient{
checkRedirect: cr,
client: &simpleHTTPClient{
transport: tr,
endpoint: ep,
headerTimeout: headerTimeout,
},
}
}
}
type credentials struct {
username string
password string
}
type httpClientFactory func(url.URL) httpClient
type httpAction interface {
HTTPRequest(url.URL) *http.Request
}
type httpClusterClient struct {
clientFactory httpClientFactory
endpoints []url.URL
pinned int
credentials *credentials
sync.RWMutex
rand *rand.Rand
selectionMode EndpointSelectionMode
}
func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
mAPI := NewMembersAPI(c)
leader, err := mAPI.Leader(context.Background())
if err != nil {
return "", err
}
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
}
func (c *httpClusterClient) SetEndpoints(eps []string) error {
if len(eps) == 0 {
return ErrNoEndpoints
}
neps := make([]url.URL, len(eps))
for i, ep := range eps {
u, err := url.Parse(ep)
if err != nil {
return err
}
neps[i] = *u
}
switch c.selectionMode {
case EndpointSelectionRandom:
c.endpoints = shuffleEndpoints(c.rand, neps)
c.pinned = 0
case EndpointSelectionPrioritizeLeader:
c.endpoints = neps
lep, err := c.getLeaderEndpoint()
if err != nil {
return ErrNoLeaderEndpoint
}
for i := range c.endpoints {
if c.endpoints[i].String() == lep {
c.pinned = i
break
}
}
// If the endpoints do not contain the leader endpoint, just keep c.pinned = 0.
// Requests will then be forwarded from a follower to the leader, which still works.
default:
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
}
return nil
}
func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
action := act
c.RLock()
leps := len(c.endpoints)
eps := make([]url.URL, leps)
n := copy(eps, c.endpoints)
pinned := c.pinned
if c.credentials != nil {
action = &authedAction{
act: act,
credentials: *c.credentials,
}
}
c.RUnlock()
if leps == 0 {
return nil, nil, ErrNoEndpoints
}
if leps != n {
return nil, nil, errors.New("unable to pick endpoint: copy failed")
}
var resp *http.Response
var body []byte
var err error
cerr := &ClusterError{}
isOneShot := ctx.Value(&oneShotCtxValue) != nil
for i := pinned; i < leps+pinned; i++ {
k := i % leps
hc := c.clientFactory(eps[k])
resp, body, err = hc.Do(ctx, action)
if err != nil {
cerr.Errors = append(cerr.Errors, err)
if err == ctx.Err() {
return nil, nil, ctx.Err()
}
if err == context.Canceled || err == context.DeadlineExceeded {
return nil, nil, err
}
if isOneShot {
return nil, nil, err
}
continue
}
if resp.StatusCode/100 == 5 {
switch resp.StatusCode {
case http.StatusInternalServerError, http.StatusServiceUnavailable:
// TODO: make sure this is a no leader response
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
default:
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
}
if isOneShot {
return nil, nil, cerr.Errors[0]
}
continue
}
if k != pinned {
c.Lock()
c.pinned = k
c.Unlock()
}
return resp, body, nil
}
return nil, nil, cerr
}
func (c *httpClusterClient) Endpoints() []string {
c.RLock()
defer c.RUnlock()
eps := make([]string, len(c.endpoints))
for i, ep := range c.endpoints {
eps[i] = ep.String()
}
return eps
}
func (c *httpClusterClient) Sync(ctx context.Context) error {
mAPI := NewMembersAPI(c)
ms, err := mAPI.List(ctx)
if err != nil {
return err
}
c.Lock()
defer c.Unlock()
var eps []string
for _, m := range ms {
eps = append(eps, m.ClientURLs...)
}
sort.Sort(sort.StringSlice(eps))
ceps := make([]string, len(c.endpoints))
for i, cep := range c.endpoints {
ceps[i] = cep.String()
}
sort.Sort(sort.StringSlice(ceps))
// fast path if nothing has changed; this lets the client keep its
// pinned endpoint when the cluster membership is unchanged
if reflect.DeepEqual(eps, ceps) {
return nil
}
return c.SetEndpoints(eps)
}
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
err := c.Sync(ctx)
if err != nil {
return err
}
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
}
type roundTripResponse struct {
resp *http.Response
err error
}
type simpleHTTPClient struct {
transport CancelableTransport
endpoint url.URL
headerTimeout time.Duration
}
func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
req := act.HTTPRequest(c.endpoint)
if err := printcURL(req); err != nil {
return nil, nil, err
}
isWait := false
if req != nil && req.URL != nil {
ws := req.URL.Query().Get("wait")
if len(ws) != 0 {
var err error
isWait, err = strconv.ParseBool(ws)
if err != nil {
return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
}
}
}
var hctx context.Context
var hcancel context.CancelFunc
if !isWait && c.headerTimeout > 0 {
hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
} else {
hctx, hcancel = context.WithCancel(ctx)
}
defer hcancel()
reqcancel := requestCanceler(c.transport, req)
rtchan := make(chan roundTripResponse, 1)
go func() {
resp, err := c.transport.RoundTrip(req)
rtchan <- roundTripResponse{resp: resp, err: err}
close(rtchan)
}()
var resp *http.Response
var err error
select {
case rtresp := <-rtchan:
resp, err = rtresp.resp, rtresp.err
case <-hctx.Done():
// cancel and wait for request to actually exit before continuing
reqcancel()
rtresp := <-rtchan
resp = rtresp.resp
switch {
case ctx.Err() != nil:
err = ctx.Err()
case hctx.Err() != nil:
err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
default:
panic("failed to get error from context")
}
}
// always check for resp nil-ness to deal with possible
// race conditions between channels above
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
if err != nil {
return nil, nil, err
}
var body []byte
done := make(chan struct{})
go func() {
body, err = ioutil.ReadAll(resp.Body)
done <- struct{}{}
}()
select {
case <-ctx.Done():
resp.Body.Close()
<-done
return nil, nil, ctx.Err()
case <-done:
}
return resp, body, err
}
type authedAction struct {
act httpAction
credentials credentials
}
func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
r := a.act.HTTPRequest(url)
r.SetBasicAuth(a.credentials.username, a.credentials.password)
return r
}
type redirectFollowingHTTPClient struct {
client httpClient
checkRedirect CheckRedirectFunc
}
func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
next := act
for i := 0; i < 100; i++ {
if i > 0 {
if err := r.checkRedirect(i); err != nil {
return nil, nil, err
}
}
resp, body, err := r.client.Do(ctx, next)
if err != nil {
return nil, nil, err
}
if resp.StatusCode/100 == 3 {
hdr := resp.Header.Get("Location")
if hdr == "" {
return nil, nil, fmt.Errorf("Location header not set")
}
loc, err := url.Parse(hdr)
if err != nil {
return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
}
next = &redirectedHTTPAction{
action: act,
location: *loc,
}
continue
}
return resp, body, nil
}
return nil, nil, errTooManyRedirectChecks
}
type redirectedHTTPAction struct {
action httpAction
location url.URL
}
func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
orig := r.action.HTTPRequest(ep)
orig.URL = &r.location
return orig
}
func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
p := r.Perm(len(eps))
neps := make([]url.URL, len(eps))
for i, k := range p {
neps[i] = eps[k]
}
return neps
}
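
A minimal sketch of wiring the pieces of client.go together, pairing EndpointSelectionPrioritizeLeader with the AutoSync loop suggested in the Client interface comments; the endpoint URLs are illustrative:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	cfg := client.Config{
		Endpoints: []string{
			"http://node1.example.com:2379",
			"http://node2.example.com:2379",
			"http://node3.example.com:2379",
		},
		Transport:               client.DefaultTransport,
		HeaderTimeoutPerRequest: time.Second,
		SelectionMode:           client.EndpointSelectionPrioritizeLeader,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// PrioritizeLeader needs periodic syncs to keep the pinned leader fresh;
	// this mirrors the loop sketched in the Client interface comments.
	go func() {
		for {
			err := c.AutoSync(context.Background(), 10*time.Second)
			if err == context.DeadlineExceeded || err == context.Canceled {
				break
			}
			log.Print(err)
		}
	}()

	log.Println("endpoints in use:", c.Endpoints())
}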

37
vendor/github.com/coreos/etcd/client/cluster_error.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import "fmt"
type ClusterError struct {
Errors []error
}
func (ce *ClusterError) Error() string {
s := ErrClusterUnavailable.Error()
for i, e := range ce.Errors {
s += fmt.Sprintf("; error #%d: %s\n", i, e)
}
return s
}
func (ce *ClusterError) Detail() string {
s := ""
for i, e := range ce.Errors {
s += fmt.Sprintf("error #%d: %s\n", i, e)
}
return s
}

70
vendor/github.com/coreos/etcd/client/curl.go generated vendored Normal file
View File

@ -0,0 +1,70 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"os"
)
var (
cURLDebug = false
)
func EnablecURLDebug() {
cURLDebug = true
}
func DisablecURLDebug() {
cURLDebug = false
}
// printcURL prints the cURL equivalent request to stderr.
// It returns an error if the body of the request cannot
// be read.
// The caller MUST cancel the request if there is an error.
func printcURL(req *http.Request) error {
if !cURLDebug {
return nil
}
var (
command string
b []byte
err error
)
if req.URL != nil {
command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
}
if req.Body != nil {
b, err = ioutil.ReadAll(req.Body)
if err != nil {
return err
}
command += fmt.Sprintf(" -d %q", string(b))
}
fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
// reset body
body := bytes.NewBuffer(b)
req.Body = ioutil.NopCloser(body)
return nil
}

21
vendor/github.com/coreos/etcd/client/discover.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
// Discoverer is an interface that wraps the Discover method.
type Discoverer interface {
// Discover looks up the etcd servers for the domain.
Discover(domain string) ([]string, error)
}

73
vendor/github.com/coreos/etcd/client/doc.go generated vendored Normal file
View File

@ -0,0 +1,73 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package client provides bindings for the etcd APIs.
Create a Config and exchange it for a Client:
import (
"net/http"
"github.com/coreos/etcd/client"
"golang.org/x/net/context"
)
cfg := client.Config{
Endpoints: []string{"http://127.0.0.1:2379"},
Transport: DefaultTransport,
}
c, err := client.New(cfg)
if err != nil {
// handle error
}
Clients are safe for concurrent use by multiple goroutines.
Create a KeysAPI using the Client, then use it to interact with etcd:
kAPI := client.NewKeysAPI(c)
// create a new key /foo with the value "bar"
_, err = kAPI.Create(context.Background(), "/foo", "bar")
if err != nil {
// handle error
}
// delete the newly created key only if the value is still "bar"
_, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
if err != nil {
// handle error
}
Use a custom context to set timeouts on your operations:
import "time"
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// set a new key, ignoring its previous state
_, err := kAPI.Set(ctx, "/ping", "pong", nil)
if err != nil {
if err == context.DeadlineExceeded {
// request took longer than 5s
} else {
// handle error
}
}
*/
package client

1000
vendor/github.com/coreos/etcd/client/keys.generated.go generated vendored Normal file

File diff suppressed because it is too large

668
vendor/github.com/coreos/etcd/client/keys.go generated vendored Normal file
View File

@ -0,0 +1,668 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/coreos/etcd/pkg/pathutil"
"github.com/ugorji/go/codec"
"golang.org/x/net/context"
)
const (
ErrorCodeKeyNotFound = 100
ErrorCodeTestFailed = 101
ErrorCodeNotFile = 102
ErrorCodeNotDir = 104
ErrorCodeNodeExist = 105
ErrorCodeRootROnly = 107
ErrorCodeDirNotEmpty = 108
ErrorCodeUnauthorized = 110
ErrorCodePrevValueRequired = 201
ErrorCodeTTLNaN = 202
ErrorCodeIndexNaN = 203
ErrorCodeInvalidField = 209
ErrorCodeInvalidForm = 210
ErrorCodeRaftInternal = 300
ErrorCodeLeaderElect = 301
ErrorCodeWatcherCleared = 400
ErrorCodeEventIndexCleared = 401
)
type Error struct {
Code int `json:"errorCode"`
Message string `json:"message"`
Cause string `json:"cause"`
Index uint64 `json:"index"`
}
func (e Error) Error() string {
return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
}
var (
ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.")
ErrEmptyBody = errors.New("client: response body is empty")
)
// PrevExistType is used to define an existence condition when setting
// or deleting Nodes.
type PrevExistType string
const (
PrevIgnore = PrevExistType("")
PrevExist = PrevExistType("true")
PrevNoExist = PrevExistType("false")
)
var (
defaultV2KeysPrefix = "/v2/keys"
)
// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
// API over HTTP.
func NewKeysAPI(c Client) KeysAPI {
return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
}
// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
// to provide a custom base URL path. This should only be used in
// very rare cases.
func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
return &httpKeysAPI{
client: c,
prefix: p,
}
}
type KeysAPI interface {
// Get retrieves a set of Nodes from etcd
Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
// Set assigns a new value to a Node identified by a given key. The caller
// may define a set of conditions in the SetOptions. If SetOptions.Dir=true
// then value is ignored.
Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
// Delete removes a Node identified by the given key, optionally destroying
// all of its children as well. The caller may define a set of required
// conditions in a DeleteOptions object.
Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
// Create is an alias for Set w/ PrevExist=false
Create(ctx context.Context, key, value string) (*Response, error)
// CreateInOrder is used to atomically create in-order keys within the given directory.
CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
// Update is an alias for Set w/ PrevExist=true
Update(ctx context.Context, key, value string) (*Response, error)
// Watcher builds a new Watcher targeted at a specific Node identified
// by the given key. The Watcher may be configured at creation time
// through a WatcherOptions object. The returned Watcher is designed
// to emit events that happen to a Node, and optionally to its children.
Watcher(key string, opts *WatcherOptions) Watcher
}
type WatcherOptions struct {
// AfterIndex defines the index after-which the Watcher should
// start emitting events. For example, if a value of 5 is
// provided, the first event will have an index >= 6.
//
// Setting AfterIndex to 0 (default) means that the Watcher
// should start watching for events starting at the current
// index, whatever that may be.
AfterIndex uint64
// Recursive specifies whether or not the Watcher should emit
// events that occur in children of the given keyspace. If set
// to false (default), events will be limited to those that
// occur for the exact key.
Recursive bool
}
type CreateInOrderOptions struct {
// TTL defines a period of time after-which the Node should
// expire and no longer exist. Values <= 0 are ignored. Given
// that the zero-value is ignored, TTL cannot be used to set
// a TTL of 0.
TTL time.Duration
}
type SetOptions struct {
// PrevValue specifies what the current value of the Node must
// be in order for the Set operation to succeed.
//
// Leaving this field empty means that the caller wishes to
// ignore the current value of the Node. This cannot be used
// to compare the Node's current value to an empty string.
//
// PrevValue is ignored if Dir=true
PrevValue string
// PrevIndex indicates what the current ModifiedIndex of the
// Node must be in order for the Set operation to succeed.
//
// If PrevIndex is set to 0 (default), no comparison is made.
PrevIndex uint64
// PrevExist specifies whether the Node must currently exist
// (PrevExist) or not (PrevNoExist). If the caller does not
// care about existence, set PrevExist to PrevIgnore, or simply
// leave it unset.
PrevExist PrevExistType
// TTL defines a period of time after-which the Node should
// expire and no longer exist. Values <= 0 are ignored. Given
// that the zero-value is ignored, TTL cannot be used to set
// a TTL of 0.
TTL time.Duration
// Refresh set to true means a TTL value can be updated
// without firing a watch or changing the node value. A
// value must not be provided when refreshing a key.
Refresh bool
// Dir specifies whether or not this Node should be created as a directory.
Dir bool
}
type GetOptions struct {
// Recursive defines whether or not all children of the Node
// should be returned.
Recursive bool
// Sort instructs the server whether or not to sort the Nodes.
// If true, the Nodes are sorted alphabetically by key in
// ascending order (A to Z). If false (default), the Nodes will
// not be sorted and the ordering used should not be considered
// predictable.
Sort bool
// Quorum specifies whether to fetch the latest committed value that
// has been applied through a quorum of members, which ensures external
// consistency (or linearizability).
Quorum bool
}
type DeleteOptions struct {
// PrevValue specifies what the current value of the Node must
// be in order for the Delete operation to succeed.
//
// Leaving this field empty means that the caller wishes to
// ignore the current value of the Node. This cannot be used
// to compare the Node's current value to an empty string.
PrevValue string
// PrevIndex indicates what the current ModifiedIndex of the
// Node must be in order for the Delete operation to succeed.
//
// If PrevIndex is set to 0 (default), no comparison is made.
PrevIndex uint64
// Recursive defines whether or not all children of the Node
// should be deleted. If set to true, all children of the Node
// identified by the given key will be deleted. If left unset
// or explicitly set to false, only a single Node will be
// deleted.
Recursive bool
// Dir specifies whether or not this Node should be removed as a directory.
Dir bool
}
type Watcher interface {
// Next blocks until an etcd event occurs, then returns a Response
// representing that event. The behavior of Next depends on the
// WatcherOptions used to construct the Watcher. Next is designed to
// be called repeatedly, each time blocking until a subsequent event
// is available.
//
// If the provided context is cancelled, Next will return a non-nil
// error. Any other failures encountered while waiting for the next
// event (connection issues, deserialization failures, etc) will
// also result in a non-nil error.
Next(context.Context) (*Response, error)
}
type Response struct {
// Action is the name of the operation that occurred. Possible values
// include get, set, delete, update, create, compareAndSwap,
// compareAndDelete and expire.
Action string `json:"action"`
// Node represents the state of the relevant etcd Node.
Node *Node `json:"node"`
// PrevNode represents the previous state of the Node. PrevNode is non-nil
// only if the Node existed before the action occurred and the action
// caused a change to the Node.
PrevNode *Node `json:"prevNode"`
// Index holds the cluster-level index at the time the Response was generated.
// This index is not tied to the Node(s) contained in this Response.
Index uint64 `json:"-"`
}
type Node struct {
// Key represents the unique location of this Node (e.g. "/foo/bar").
Key string `json:"key"`
// Dir reports whether node describes a directory.
Dir bool `json:"dir,omitempty"`
// Value is the current data stored on this Node. If this Node
// is a directory, Value will be empty.
Value string `json:"value"`
// Nodes holds the children of this Node, only if this Node is a directory.
// This slice will be arbitrarily deep (children, grandchildren, great-
// grandchildren, etc.) if a recursive Get or Watch request were made.
Nodes Nodes `json:"nodes"`
// CreatedIndex is the etcd index at-which this Node was created.
CreatedIndex uint64 `json:"createdIndex"`
// ModifiedIndex is the etcd index at-which this Node was last modified.
ModifiedIndex uint64 `json:"modifiedIndex"`
// Expiration is the server side expiration time of the key.
Expiration *time.Time `json:"expiration,omitempty"`
// TTL is the time to live of the key, in seconds.
TTL int64 `json:"ttl,omitempty"`
}
func (n *Node) String() string {
return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
}
// TTLDuration returns the Node's TTL as a time.Duration object
func (n *Node) TTLDuration() time.Duration {
return time.Duration(n.TTL) * time.Second
}
type Nodes []*Node
// interfaces for sorting
func (ns Nodes) Len() int { return len(ns) }
func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
type httpKeysAPI struct {
client httpClient
prefix string
}
func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
act := &setAction{
Prefix: k.prefix,
Key: key,
Value: val,
}
if opts != nil {
act.PrevValue = opts.PrevValue
act.PrevIndex = opts.PrevIndex
act.PrevExist = opts.PrevExist
act.TTL = opts.TTL
act.Refresh = opts.Refresh
act.Dir = opts.Dir
}
doCtx := ctx
if act.PrevExist == PrevNoExist {
doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
}
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
}
func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
act := &createInOrderAction{
Prefix: k.prefix,
Dir: dir,
Value: val,
}
if opts != nil {
act.TTL = opts.TTL
}
resp, body, err := k.client.Do(ctx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
}
func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
act := &deleteAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.PrevValue = opts.PrevValue
act.PrevIndex = opts.PrevIndex
act.Dir = opts.Dir
act.Recursive = opts.Recursive
}
doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
resp, body, err := k.client.Do(doCtx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
act := &getAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.Recursive = opts.Recursive
act.Sorted = opts.Sort
act.Quorum = opts.Quorum
}
resp, body, err := k.client.Do(ctx, act)
if err != nil {
return nil, err
}
return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
}
func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
act := waitAction{
Prefix: k.prefix,
Key: key,
}
if opts != nil {
act.Recursive = opts.Recursive
if opts.AfterIndex > 0 {
act.WaitIndex = opts.AfterIndex + 1
}
}
return &httpWatcher{
client: k.client,
nextWait: act,
}
}
type httpWatcher struct {
client httpClient
nextWait waitAction
}
func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
for {
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
if err != nil {
return nil, err
}
resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
if err != nil {
if err == ErrEmptyBody {
continue
}
return nil, err
}
hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
return resp, nil
}
}
// v2KeysURL forms a URL representing the location of a key.
// The endpoint argument represents the base URL of an etcd
// server. The prefix is the path needed to route from the
// provided endpoint's path to the root of the keys API
// (typically "/v2/keys").
func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
// We concatenate all parts together manually. We cannot use
// path.Join because it does not preserve a trailing slash.
// We call CanonicalURLPath to further clean up the path.
if prefix != "" && prefix[0] != '/' {
prefix = "/" + prefix
}
if key != "" && key[0] != '/' {
key = "/" + key
}
ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
return &ep
}
type getAction struct {
Prefix string
Key string
Recursive bool
Sorted bool
Quorum bool
}
func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, g.Prefix, g.Key)
params := u.Query()
params.Set("recursive", strconv.FormatBool(g.Recursive))
params.Set("sorted", strconv.FormatBool(g.Sorted))
params.Set("quorum", strconv.FormatBool(g.Quorum))
u.RawQuery = params.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type waitAction struct {
Prefix string
Key string
WaitIndex uint64
Recursive bool
}
func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, w.Prefix, w.Key)
params := u.Query()
params.Set("wait", "true")
params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
params.Set("recursive", strconv.FormatBool(w.Recursive))
u.RawQuery = params.Encode()
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type setAction struct {
Prefix string
Key string
Value string
PrevValue string
PrevIndex uint64
PrevExist PrevExistType
TTL time.Duration
Refresh bool
Dir bool
}
func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Key)
params := u.Query()
form := url.Values{}
// we're either creating a directory or setting a key
if a.Dir {
params.Set("dir", strconv.FormatBool(a.Dir))
} else {
// These options are only valid for setting a key
if a.PrevValue != "" {
params.Set("prevValue", a.PrevValue)
}
form.Add("value", a.Value)
}
// Options which apply to both setting a key and creating a dir
if a.PrevIndex != 0 {
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
}
if a.PrevExist != PrevIgnore {
params.Set("prevExist", string(a.PrevExist))
}
if a.TTL > 0 {
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
}
if a.Refresh {
form.Add("refresh", "true")
}
u.RawQuery = params.Encode()
body := strings.NewReader(form.Encode())
req, _ := http.NewRequest("PUT", u.String(), body)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
type deleteAction struct {
Prefix string
Key string
PrevValue string
PrevIndex uint64
Dir bool
Recursive bool
}
func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Key)
params := u.Query()
if a.PrevValue != "" {
params.Set("prevValue", a.PrevValue)
}
if a.PrevIndex != 0 {
params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
}
if a.Dir {
params.Set("dir", "true")
}
if a.Recursive {
params.Set("recursive", "true")
}
u.RawQuery = params.Encode()
req, _ := http.NewRequest("DELETE", u.String(), nil)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
type createInOrderAction struct {
Prefix string
Dir string
Value string
TTL time.Duration
}
func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
u := v2KeysURL(ep, a.Prefix, a.Dir)
form := url.Values{}
form.Add("value", a.Value)
if a.TTL > 0 {
form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
}
body := strings.NewReader(form.Encode())
req, _ := http.NewRequest("POST", u.String(), body)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
return req
}
func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
switch code {
case http.StatusOK, http.StatusCreated:
if len(body) == 0 {
return nil, ErrEmptyBody
}
res, err = unmarshalSuccessfulKeysResponse(header, body)
default:
err = unmarshalFailedKeysResponse(body)
}
return
}
func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
var res Response
err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res)
if err != nil {
return nil, ErrInvalidJSON
}
if header.Get("X-Etcd-Index") != "" {
res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
if err != nil {
return nil, err
}
}
return &res, nil
}
func unmarshalFailedKeysResponse(body []byte) error {
var etcdErr Error
if err := json.Unmarshal(body, &etcdErr); err != nil {
return ErrInvalidJSON
}
return etcdErr
}
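
A minimal sketch of the keys API above, covering a conditional create, a quorum read, and a watch, assuming a local etcd endpoint; the key and values are illustrative:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"},
	})
	if err != nil {
		log.Fatal(err)
	}
	kapi := client.NewKeysAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Create /foo only if it does not already exist, then read it back
	// with a quorum (linearizable) read.
	if _, err := kapi.Set(ctx, "/foo", "bar", &client.SetOptions{PrevExist: client.PrevNoExist}); err != nil {
		log.Fatal(err)
	}
	resp, err := kapi.Get(ctx, "/foo", &client.GetOptions{Quorum: true})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s = %s (modifiedIndex=%d)", resp.Node.Key, resp.Node.Value, resp.Node.ModifiedIndex)

	// Block until the next change under /foo (or its children).
	w := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Index, Recursive: true})
	ev, err := w.Next(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Println("event:", ev.Action, ev.Node.Key)
}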

304
vendor/github.com/coreos/etcd/client/members.go generated vendored Normal file
View File

@ -0,0 +1,304 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/url"
"path"
"golang.org/x/net/context"
"github.com/coreos/etcd/pkg/types"
)
var (
defaultV2MembersPrefix = "/v2/members"
defaultLeaderSuffix = "/leader"
)
type Member struct {
// ID is the unique identifier of this Member.
ID string `json:"id"`
// Name is a human-readable, non-unique identifier of this Member.
Name string `json:"name"`
// PeerURLs represents the HTTP(S) endpoints this Member uses to
// participate in etcd's consensus protocol.
PeerURLs []string `json:"peerURLs"`
// ClientURLs represents the HTTP(S) endpoints on which this Member
// serves its client-facing APIs.
ClientURLs []string `json:"clientURLs"`
}
type memberCollection []Member
func (c *memberCollection) UnmarshalJSON(data []byte) error {
d := struct {
Members []Member
}{}
if err := json.Unmarshal(data, &d); err != nil {
return err
}
if d.Members == nil {
*c = make([]Member, 0)
return nil
}
*c = d.Members
return nil
}
type memberCreateOrUpdateRequest struct {
PeerURLs types.URLs
}
func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
s := struct {
PeerURLs []string `json:"peerURLs"`
}{
PeerURLs: make([]string, len(m.PeerURLs)),
}
for i, u := range m.PeerURLs {
s.PeerURLs[i] = u.String()
}
return json.Marshal(&s)
}
// NewMembersAPI constructs a new MembersAPI that uses HTTP to
// interact with etcd's membership API.
func NewMembersAPI(c Client) MembersAPI {
return &httpMembersAPI{
client: c,
}
}
type MembersAPI interface {
// List enumerates the current cluster membership.
List(ctx context.Context) ([]Member, error)
// Add instructs etcd to accept a new Member into the cluster.
Add(ctx context.Context, peerURL string) (*Member, error)
// Remove removes an existing Member from the cluster.
Remove(ctx context.Context, mID string) error
// Update instructs etcd to update an existing Member in the cluster.
Update(ctx context.Context, mID string, peerURLs []string) error
// Leader gets the current leader of the cluster.
Leader(ctx context.Context) (*Member, error)
}
type httpMembersAPI struct {
client httpClient
}
func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
req := &membersAPIActionList{}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var mCollection memberCollection
if err := json.Unmarshal(body, &mCollection); err != nil {
return nil, err
}
return []Member(mCollection), nil
}
func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
urls, err := types.NewURLs([]string{peerURL})
if err != nil {
return nil, err
}
req := &membersAPIActionAdd{peerURLs: urls}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
return nil, err
}
if resp.StatusCode != http.StatusCreated {
var merr membersError
if err := json.Unmarshal(body, &merr); err != nil {
return nil, err
}
return nil, merr
}
var memb Member
if err := json.Unmarshal(body, &memb); err != nil {
return nil, err
}
return &memb, nil
}
func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
urls, err := types.NewURLs(peerURLs)
if err != nil {
return err
}
req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
return err
}
if resp.StatusCode != http.StatusNoContent {
var merr membersError
if err := json.Unmarshal(body, &merr); err != nil {
return err
}
return merr
}
return nil
}
func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
req := &membersAPIActionRemove{memberID: memberID}
resp, _, err := m.client.Do(ctx, req)
if err != nil {
return err
}
return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
}
func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
req := &membersAPIActionLeader{}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var leader Member
if err := json.Unmarshal(body, &leader); err != nil {
return nil, err
}
return &leader, nil
}
type membersAPIActionList struct{}
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
type membersAPIActionRemove struct {
memberID string
}
func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, d.memberID)
req, _ := http.NewRequest("DELETE", u.String(), nil)
return req
}
type membersAPIActionAdd struct {
peerURLs types.URLs
}
func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
b, _ := json.Marshal(&m)
req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
req.Header.Set("Content-Type", "application/json")
return req
}
type membersAPIActionUpdate struct {
memberID string
peerURLs types.URLs
}
func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
u.Path = path.Join(u.Path, a.memberID)
b, _ := json.Marshal(&m)
req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
req.Header.Set("Content-Type", "application/json")
return req
}
func assertStatusCode(got int, want ...int) (err error) {
for _, w := range want {
if w == got {
return nil
}
}
return fmt.Errorf("unexpected status code %d", got)
}
type membersAPIActionLeader struct{}
func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, defaultLeaderSuffix)
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
// v2MembersURL adds the necessary path to the provided endpoint
// to route requests to the default v2 members API.
func v2MembersURL(ep url.URL) *url.URL {
ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
return &ep
}
type membersError struct {
Message string `json:"message"`
Code int `json:"-"`
}
func (e membersError) Error() string {
return e.Message
}
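For reference, a minimal usage sketch of the membership API vendored above (not part of the vendored file). It assumes client.New and client.Config from the rest of this vendored v2 client package and a placeholder endpoint at http://127.0.0.1:2379.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	c, err := client.New(client.Config{Endpoints: []string{"http://127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	mapi := client.NewMembersAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// List the current cluster membership.
	members, err := mapi.List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range members {
		fmt.Println(m.ID, m.Name, m.PeerURLs, m.ClientURLs)
	}
}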

65
vendor/github.com/coreos/etcd/client/srv.go generated vendored Normal file
View File

@ -0,0 +1,65 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"fmt"
"net"
"net/url"
)
var (
// indirection for testing
lookupSRV = net.LookupSRV
)
type srvDiscover struct{}
// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
func NewSRVDiscover() Discoverer {
return &srvDiscover{}
}
// Discover looks up the etcd servers for the domain.
func (d *srvDiscover) Discover(domain string) ([]string, error) {
var urls []*url.URL
updateURLs := func(service, scheme string) error {
_, addrs, err := lookupSRV(service, "tcp", domain)
if err != nil {
return err
}
for _, srv := range addrs {
urls = append(urls, &url.URL{
Scheme: scheme,
Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
})
}
return nil
}
errHTTPS := updateURLs("etcd-client-ssl", "https")
errHTTP := updateURLs("etcd-client", "http")
if errHTTPS != nil && errHTTP != nil {
return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
}
endpoints := make([]string, len(urls))
for i := range urls {
endpoints[i] = urls[i].String()
}
return endpoints, nil
}
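A minimal sketch of using the SRV discoverer above (not part of the vendored file). It assumes _etcd-client._tcp (or _etcd-client-ssl._tcp) SRV records are published for a placeholder domain.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
)

func main() {
	d := client.NewSRVDiscover()
	// Discover resolves _etcd-client-ssl._tcp.example.com and
	// _etcd-client._tcp.example.com into endpoint URLs.
	endpoints, err := d.Discover("example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(endpoints)
}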

53
vendor/github.com/coreos/etcd/client/util.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"regexp"
)
var (
roleNotFoundRegExp *regexp.Regexp
userNotFoundRegExp *regexp.Regexp
)
func init() {
roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
}
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
func IsKeyNotFound(err error) bool {
if cErr, ok := err.(Error); ok {
return cErr.Code == ErrorCodeKeyNotFound
}
return false
}
// IsRoleNotFound returns true if the error means the role was not found in the v2 API.
func IsRoleNotFound(err error) bool {
if ae, ok := err.(authError); ok {
return roleNotFoundRegExp.MatchString(ae.Message)
}
return false
}
// IsUserNotFound returns true if the error means the user was not found in the v2 API.
func IsUserNotFound(err error) bool {
if ae, ok := err.(authError); ok {
return userNotFoundRegExp.MatchString(ae.Message)
}
return false
}
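A hedged sketch of how the error-classification helpers above are typically used (not part of the vendored file). The KeysAPI and its Get call come from the rest of this vendored v2 client package; the function name is illustrative.

package example

import (
	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

// getOrDefault reads a key through the v2 KeysAPI and falls back to a
// default value when the key does not exist.
func getOrDefault(ctx context.Context, kapi client.KeysAPI, key, def string) (string, error) {
	resp, err := kapi.Get(ctx, key, nil)
	if client.IsKeyNotFound(err) {
		return def, nil
	}
	if err != nil {
		return "", err
	}
	return resp.Node.Value, nil
}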

229
vendor/github.com/coreos/etcd/clientv3/auth.go generated vendored Normal file
View File

@ -0,0 +1,229 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"fmt"
"strings"
"github.com/coreos/etcd/auth/authpb"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
AuthEnableResponse pb.AuthEnableResponse
AuthDisableResponse pb.AuthDisableResponse
AuthenticateResponse pb.AuthenticateResponse
AuthUserAddResponse pb.AuthUserAddResponse
AuthUserDeleteResponse pb.AuthUserDeleteResponse
AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
AuthUserGetResponse pb.AuthUserGetResponse
AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
AuthRoleAddResponse pb.AuthRoleAddResponse
AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
AuthRoleGetResponse pb.AuthRoleGetResponse
AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
AuthUserListResponse pb.AuthUserListResponse
AuthRoleListResponse pb.AuthRoleListResponse
PermissionType authpb.Permission_Type
)
const (
PermRead = authpb.READ
PermWrite = authpb.WRITE
PermReadWrite = authpb.READWRITE
)
type Auth interface {
// AuthEnable enables auth of an etcd cluster.
AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
// AuthDisable disables auth of an etcd cluster.
AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
// UserAdd adds a new user to an etcd cluster.
UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
// UserDelete deletes a user from an etcd cluster.
UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
// UserChangePassword changes a password of a user.
UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
// UserGrantRole grants a role to a user.
UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
// UserGet gets detailed information about a user.
UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
// UserList gets a list of all users.
UserList(ctx context.Context) (*AuthUserListResponse, error)
// UserRevokeRole revokes a role of a user.
UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
// RoleAdd adds a new role to an etcd cluster.
RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
// RoleGrantPermission grants a permission to a role.
RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
// RoleGet gets detailed information about a role.
RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
// RoleList gets a list of all roles.
RoleList(ctx context.Context) (*AuthRoleListResponse, error)
// RoleRevokePermission revokes a permission from a role.
RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
// RoleDelete deletes a role.
RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
}
type auth struct {
c *Client
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
}
func NewAuth(c *Client) Auth {
conn := c.ActiveConnection()
return &auth{
conn: c.ActiveConnection(),
remote: pb.NewAuthClient(conn),
c: c,
}
}
func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{})
return (*AuthEnableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{})
return (*AuthDisableResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password})
return (*AuthUserAddResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name})
return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password})
return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role})
return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false))
return (*AuthUserGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false))
return (*AuthUserListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role})
return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name})
return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
perm := &authpb.Permission{
Key: []byte(key),
RangeEnd: []byte(rangeEnd),
PermType: authpb.Permission_Type(permType),
}
resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm})
return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false))
return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false))
return (*AuthRoleListResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd})
return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
}
func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role})
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
}
func StrToPermissionType(s string) (PermissionType, error) {
val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
if ok {
return PermissionType(val), nil
}
return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
}
type authenticator struct {
conn *grpc.ClientConn // conn in-use
remote pb.AuthClient
}
func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false))
return (*AuthenticateResponse)(resp), toErr(ctx, err)
}
func (auth *authenticator) close() {
auth.conn.Close()
}
func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return nil, err
}
return &authenticator{
conn: conn,
remote: pb.NewAuthClient(conn),
}, nil
}
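A minimal sketch of the Auth interface above (not part of the vendored file). It uses clientv3.New and clientv3.Config from client.go and config.go later in this commit; the endpoint, user name, and password are placeholders.

package example

import (
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// enableAuth creates a root user with the root role and then turns
// authentication on for the cluster.
func enableAuth(ctx context.Context) error {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		return err
	}
	defer cli.Close()

	if _, err := cli.Auth.UserAdd(ctx, "root", "rootpw"); err != nil {
		return err
	}
	if _, err := cli.Auth.RoleAdd(ctx, "root"); err != nil {
		return err
	}
	if _, err := cli.Auth.UserGrantRole(ctx, "root", "root"); err != nil {
		return err
	}
	_, err = cli.Auth.AuthEnable(ctx)
	return err
}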

147
vendor/github.com/coreos/etcd/clientv3/balancer.go generated vendored Normal file
View File

@ -0,0 +1,147 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"net/url"
"strings"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// simpleBalancer does the bare minimum to expose multiple eps
// to the grpc reconnection code path
type simpleBalancer struct {
// addrs are the client's endpoints for grpc
addrs []grpc.Address
// notifyCh notifies grpc of the set of addresses for connecting
notifyCh chan []grpc.Address
// readyc closes once the first connection is up
readyc chan struct{}
readyOnce sync.Once
// mu protects upEps, pinAddr, and connectingAddr
mu sync.RWMutex
// upEps holds the current endpoints that have an active connection
upEps map[string]struct{}
// upc closes when upEps transitions from empty to non-empty or the balancer closes.
upc chan struct{}
// pinAddr is the currently pinned address; set to the empty string on
// initialization and shutdown.
pinAddr string
}
func newSimpleBalancer(eps []string) *simpleBalancer {
notifyCh := make(chan []grpc.Address, 1)
addrs := make([]grpc.Address, len(eps))
for i := range eps {
addrs[i].Addr = getHost(eps[i])
}
notifyCh <- addrs
sb := &simpleBalancer{
addrs: addrs,
notifyCh: notifyCh,
readyc: make(chan struct{}),
upEps: make(map[string]struct{}),
upc: make(chan struct{}),
}
return sb
}
func (b *simpleBalancer) Start(target string) error { return nil }
func (b *simpleBalancer) ConnectNotify() <-chan struct{} {
b.mu.Lock()
defer b.mu.Unlock()
return b.upc
}
func (b *simpleBalancer) Up(addr grpc.Address) func(error) {
b.mu.Lock()
if len(b.upEps) == 0 {
// notify waiting Get()s and pin first connected address
close(b.upc)
b.pinAddr = addr.Addr
}
b.upEps[addr.Addr] = struct{}{}
b.mu.Unlock()
// notify client that a connection is up
b.readyOnce.Do(func() { close(b.readyc) })
return func(err error) {
b.mu.Lock()
delete(b.upEps, addr.Addr)
if len(b.upEps) == 0 && b.pinAddr != "" {
b.upc = make(chan struct{})
} else if b.pinAddr == addr.Addr {
// choose new random up endpoint
for k := range b.upEps {
b.pinAddr = k
break
}
}
b.mu.Unlock()
}
}
func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) {
var addr string
for {
b.mu.RLock()
ch := b.upc
b.mu.RUnlock()
select {
case <-ch:
case <-ctx.Done():
return grpc.Address{Addr: ""}, nil, ctx.Err()
}
b.mu.RLock()
addr = b.pinAddr
upEps := len(b.upEps)
b.mu.RUnlock()
if addr == "" {
return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing
}
if upEps > 0 {
break
}
}
return grpc.Address{Addr: addr}, func() {}, nil
}
func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh }
func (b *simpleBalancer) Close() error {
b.mu.Lock()
close(b.notifyCh)
// terminate all waiting Get()s
b.pinAddr = ""
if len(b.upEps) == 0 {
close(b.upc)
}
b.mu.Unlock()
return nil
}
func getHost(ep string) string {
url, uerr := url.Parse(ep)
if uerr != nil || !strings.Contains(ep, "://") {
return ep
}
return url.Host
}

324
vendor/github.com/coreos/etcd/clientv3/client.go generated vendored Normal file
View File

@ -0,0 +1,324 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"log"
"net"
"net/url"
"strings"
"time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
)
var (
ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
)
// Client provides and manages an etcd v3 client session.
type Client struct {
Cluster
KV
Lease
Watcher
Auth
Maintenance
conn *grpc.ClientConn
cfg Config
creds *credentials.TransportCredentials
balancer *simpleBalancer
retryWrapper retryRpcFunc
ctx context.Context
cancel context.CancelFunc
// Username is a username for authentication
Username string
// Password is a password for authentication
Password string
}
// New creates a new etcdv3 client from a given configuration.
func New(cfg Config) (*Client, error) {
if len(cfg.Endpoints) == 0 {
return nil, ErrNoAvailableEndpoints
}
return newClient(&cfg)
}
// NewFromURL creates a new etcdv3 client from a URL.
func NewFromURL(url string) (*Client, error) {
return New(Config{Endpoints: []string{url}})
}
// NewFromConfigFile creates a new etcdv3 client from a configuration file.
func NewFromConfigFile(path string) (*Client, error) {
cfg, err := configFromFile(path)
if err != nil {
return nil, err
}
return New(*cfg)
}
// Close shuts down the client's etcd connections.
func (c *Client) Close() error {
c.cancel()
return toErr(c.ctx, c.conn.Close())
}
// Ctx is a context for "out of band" messages (e.g., for sending
// "clean up" message when another context is canceled). It is
// canceled on client Close().
func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
func (c *Client) Endpoints() []string { return c.cfg.Endpoints }
type authTokenCredential struct {
token string
}
func (cred authTokenCredential) RequireTransportSecurity() bool {
return false
}
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
return map[string]string{
"token": cred.token,
}, nil
}
func (c *Client) dialTarget(endpoint string) (proto string, host string, creds *credentials.TransportCredentials) {
proto = "tcp"
host = endpoint
creds = c.creds
url, uerr := url.Parse(endpoint)
if uerr != nil || !strings.Contains(endpoint, "://") {
return
}
// strip scheme:// prefix since grpc dials by host
host = url.Host
switch url.Scheme {
case "unix":
proto = "unix"
case "http":
creds = nil
case "https":
if creds != nil {
break
}
tlsconfig := &tls.Config{}
emptyCreds := credentials.NewTLS(tlsconfig)
creds = &emptyCreds
default:
return "", "", nil
}
return
}
// dialSetupOpts gives the dial opts prior to any authentication
func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts []grpc.DialOption) {
if c.cfg.DialTimeout > 0 {
opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)}
}
opts = append(opts, dopts...)
// grpc issues TLS cert checks using the string passed into dial so
// that string must be the host. To recover the full scheme://host URL,
// have a map from hosts to the original endpoint.
host2ep := make(map[string]string)
for i := range c.cfg.Endpoints {
_, host, _ := c.dialTarget(c.cfg.Endpoints[i])
host2ep[host] = c.cfg.Endpoints[i]
}
f := func(host string, t time.Duration) (net.Conn, error) {
proto, host, _ := c.dialTarget(host2ep[host])
if proto == "" {
return nil, fmt.Errorf("unknown scheme for %q", host)
}
select {
case <-c.ctx.Done():
return nil, c.ctx.Err()
default:
}
return net.DialTimeout(proto, host, t)
}
opts = append(opts, grpc.WithDialer(f))
_, _, creds := c.dialTarget(endpoint)
if creds != nil {
opts = append(opts, grpc.WithTransportCredentials(*creds))
} else {
opts = append(opts, grpc.WithInsecure())
}
return opts
}
// Dial connects to a single endpoint using the client's config.
func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
return c.dial(endpoint)
}
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
opts := c.dialSetupOpts(endpoint, dopts...)
host := getHost(endpoint)
if c.Username != "" && c.Password != "" {
// use dial options without dopts to avoid reusing the client balancer
auth, err := newAuthenticator(host, c.dialSetupOpts(endpoint))
if err != nil {
return nil, err
}
defer auth.close()
resp, err := auth.authenticate(c.ctx, c.Username, c.Password)
if err != nil {
return nil, err
}
opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))
}
conn, err := grpc.Dial(host, opts...)
if err != nil {
return nil, err
}
return conn, nil
}
// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewContext(ctx, md)
}
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{}
}
var creds *credentials.TransportCredentials
if cfg.TLS != nil {
c := credentials.NewTLS(cfg.TLS)
creds = &c
}
// use a temporary skeleton client to bootstrap first connection
ctx, cancel := context.WithCancel(context.TODO())
client := &Client{
conn: nil,
cfg: *cfg,
creds: creds,
ctx: ctx,
cancel: cancel,
}
if cfg.Username != "" && cfg.Password != "" {
client.Username = cfg.Username
client.Password = cfg.Password
}
client.balancer = newSimpleBalancer(cfg.Endpoints)
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer))
if err != nil {
return nil, err
}
client.conn = conn
client.retryWrapper = client.newRetryWrapper()
// wait for a connection
if cfg.DialTimeout > 0 {
hasConn := false
waitc := time.After(cfg.DialTimeout)
select {
case <-client.balancer.readyc:
hasConn = true
case <-ctx.Done():
case <-waitc:
}
if !hasConn {
client.cancel()
conn.Close()
return nil, grpc.ErrClientConnTimeout
}
}
client.Cluster = NewCluster(client)
client.KV = NewKV(client)
client.Lease = NewLease(client)
client.Watcher = NewWatcher(client)
client.Auth = NewAuth(client)
client.Maintenance = NewMaintenance(client)
if cfg.Logger != nil {
logger.Set(cfg.Logger)
} else {
// disable client side grpc by default
logger.Set(log.New(ioutil.Discard, "", 0))
}
return client, nil
}
// ActiveConnection returns the current in-use connection
func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
// isHaltErr returns true if the given error and context indicate no forward
// progress can be made, even after reconnecting.
func isHaltErr(ctx context.Context, err error) bool {
if ctx != nil && ctx.Err() != nil {
return true
}
if err == nil {
return false
}
eErr := rpctypes.Error(err)
if _, ok := eErr.(rpctypes.EtcdError); ok {
return eErr != rpctypes.ErrStopped && eErr != rpctypes.ErrNoLeader
}
// treat etcdserver errors not recognized by the client as halting
return isConnClosing(err) || strings.Contains(err.Error(), "etcdserver:")
}
// isConnClosing returns true if the error matches a grpc client closing error
func isConnClosing(err error) bool {
return strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error())
}
func toErr(ctx context.Context, err error) error {
if err == nil {
return nil
}
err = rpctypes.Error(err)
switch {
case ctx.Err() != nil && strings.Contains(err.Error(), "context"):
err = ctx.Err()
case strings.Contains(err.Error(), ErrNoAvailableEndpoints.Error()):
err = ErrNoAvailableEndpoints
case strings.Contains(err.Error(), grpc.ErrClientConnClosing.Error()):
err = grpc.ErrClientConnClosing
}
return err
}
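A minimal sketch of constructing an authenticated client and using WithRequireLeader from client.go above (not part of the vendored file). Endpoints and credentials are placeholders, and the helper name is illustrative.

package example

import (
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// newAuthedClient dials the cluster with username/password credentials and
// issues a request that only succeeds while the cluster has a leader.
func newAuthedClient() (*clientv3.Client, error) {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"10.0.0.1:2379", "10.0.0.2:2379"},
		DialTimeout: 5 * time.Second,
		Username:    "root",
		Password:    "rootpw",
	})
	if err != nil {
		return nil, err
	}
	// Requests on this context require an elected leader on the cluster side.
	ctx := clientv3.WithRequireLeader(context.Background())
	if _, err := cli.MemberList(ctx); err != nil {
		cli.Close()
		return nil, err
	}
	return cli, nil
}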

102
vendor/github.com/coreos/etcd/clientv3/cluster.go generated vendored Normal file
View File

@ -0,0 +1,102 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
Member pb.Member
MemberListResponse pb.MemberListResponse
MemberAddResponse pb.MemberAddResponse
MemberRemoveResponse pb.MemberRemoveResponse
MemberUpdateResponse pb.MemberUpdateResponse
)
type Cluster interface {
// MemberList lists the current cluster membership.
MemberList(ctx context.Context) (*MemberListResponse, error)
// MemberAdd adds a new member into the cluster.
MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
// MemberRemove removes an existing member from the cluster.
MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
// MemberUpdate updates the peer addresses of the member.
MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
}
type cluster struct {
remote pb.ClusterClient
}
func NewCluster(c *Client) Cluster {
return &cluster{remote: RetryClusterClient(c)}
}
func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
resp, err := c.remote.MemberAdd(ctx, r)
if err == nil {
return (*MemberAddResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
return nil, toErr(ctx, err)
}
func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
r := &pb.MemberRemoveRequest{ID: id}
resp, err := c.remote.MemberRemove(ctx, r)
if err == nil {
return (*MemberRemoveResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
return nil, toErr(ctx, err)
}
func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
// it is safe to retry on update.
for {
r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
resp, err := c.remote.MemberUpdate(ctx, r)
if err == nil {
return (*MemberUpdateResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
// it is safe to retry on list.
for {
resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false))
if err == nil {
return (*MemberListResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
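A minimal sketch of the Cluster interface above (not part of the vendored file); cli is an already-constructed *clientv3.Client and the function name is illustrative.

package example

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// addMember adds a new member by its peer URL and prints the resulting
// cluster membership.
func addMember(ctx context.Context, cli *clientv3.Client, peerURL string) error {
	if _, err := cli.MemberAdd(ctx, []string{peerURL}); err != nil {
		return err
	}
	resp, err := cli.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Println(m.ID, m.Name, m.PeerURLs)
	}
	return nil
}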

53
vendor/github.com/coreos/etcd/clientv3/compact_op.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
// CompactOp represents a compact operation.
type CompactOp struct {
revision int64
physical bool
}
// CompactOption configures compact operation.
type CompactOption func(*CompactOp)
func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
for _, opt := range opts {
opt(op)
}
}
// OpCompact wraps a slice of CompactOptions to create a CompactOp.
func OpCompact(rev int64, opts ...CompactOption) CompactOp {
ret := CompactOp{revision: rev}
ret.applyCompactOpts(opts)
return ret
}
func (op CompactOp) toRequest() *pb.CompactionRequest {
return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
}
// WithCompactPhysical makes compact RPC call wait until
// the compaction is physically applied to the local database
// such that compacted entries are totally removed from the
// backend database.
func WithCompactPhysical() CompactOption {
return func(op *CompactOp) { op.physical = true }
}
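A hedged sketch of the compact options above (not part of the vendored file); cli is an already-constructed *clientv3.Client, and the key and function name are placeholders.

package example

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// compactToCurrent compacts the key-value history up to the current revision
// and waits until the compaction is physically applied to the backend.
func compactToCurrent(ctx context.Context, cli *clientv3.Client) error {
	resp, err := cli.Get(ctx, "sample_key")
	if err != nil {
		return err
	}
	_, err = cli.Compact(ctx, resp.Header.Revision, clientv3.WithCompactPhysical())
	return err
}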

91
vendor/github.com/coreos/etcd/clientv3/compare.go generated vendored Normal file
View File

@ -0,0 +1,91 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
type CompareTarget int
type CompareResult int
const (
CompareVersion CompareTarget = iota
CompareCreated
CompareModified
CompareValue
)
type Cmp pb.Compare
func Compare(cmp Cmp, result string, v interface{}) Cmp {
var r pb.Compare_CompareResult
switch result {
case "=":
r = pb.Compare_EQUAL
case ">":
r = pb.Compare_GREATER
case "<":
r = pb.Compare_LESS
default:
panic("Unknown result op")
}
cmp.Result = r
switch cmp.Target {
case pb.Compare_VALUE:
val, ok := v.(string)
if !ok {
panic("bad compare value")
}
cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
case pb.Compare_VERSION:
cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
case pb.Compare_CREATE:
cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
case pb.Compare_MOD:
cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
default:
panic("Unknown compare type")
}
return cmp
}
func Value(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
}
func Version(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
}
func CreateRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
}
func ModRevision(key string) Cmp {
return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
}
func mustInt64(val interface{}) int64 {
if v, ok := val.(int64); ok {
return v
}
if v, ok := val.(int); ok {
return int64(v)
}
panic("bad value")
}
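A minimal sketch of the comparison builders above used inside a transaction (not part of the vendored file). Txn and OpPut come from txn.go and op.go elsewhere in this vendored package; the function name is illustrative.

package example

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// putIfUnmodified writes newVal only if key is still at version 1.
func putIfUnmodified(ctx context.Context, cli *clientv3.Client, key, newVal string) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Version(key), "=", 1)).
		Then(clientv3.OpPut(key, newVal)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}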

110
vendor/github.com/coreos/etcd/clientv3/config.go generated vendored Normal file
View File

@ -0,0 +1,110 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"time"
"github.com/coreos/etcd/pkg/tlsutil"
"github.com/ghodss/yaml"
)
type Config struct {
// Endpoints is a list of URLs
Endpoints []string
// DialTimeout is the timeout for failing to establish a connection.
DialTimeout time.Duration
// TLS holds the client secure credentials, if any.
TLS *tls.Config
// Logger is the logger used by client library.
Logger Logger
// Username is a username for authentication
Username string
// Password is a password for authentication
Password string
}
type yamlConfig struct {
Endpoints []string `json:"endpoints"`
DialTimeout time.Duration `json:"dial-timeout"`
InsecureTransport bool `json:"insecure-transport"`
InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"`
Certfile string `json:"cert-file"`
Keyfile string `json:"key-file"`
CAfile string `json:"ca-file"`
}
func configFromFile(fpath string) (*Config, error) {
b, err := ioutil.ReadFile(fpath)
if err != nil {
return nil, err
}
yc := &yamlConfig{}
err = yaml.Unmarshal(b, yc)
if err != nil {
return nil, err
}
cfg := &Config{
Endpoints: yc.Endpoints,
DialTimeout: yc.DialTimeout,
}
if yc.InsecureTransport {
cfg.TLS = nil
return cfg, nil
}
var (
cert *tls.Certificate
cp *x509.CertPool
)
if yc.Certfile != "" && yc.Keyfile != "" {
cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil)
if err != nil {
return nil, err
}
}
if yc.CAfile != "" {
cp, err = tlsutil.NewCertPool([]string{yc.CAfile})
if err != nil {
return nil, err
}
}
tlscfg := &tls.Config{
MinVersion: tls.VersionTLS10,
InsecureSkipVerify: yc.InsecureSkipTLSVerify,
RootCAs: cp,
}
if cert != nil {
tlscfg.Certificates = []tls.Certificate{*cert}
}
cfg.TLS = tlscfg
return cfg, nil
}
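A hedged sketch of loading a client from a YAML file via configFromFile above (not part of the vendored file). The file path is a placeholder; the keys shown in the comment follow the yamlConfig struct tags.

package example

import "github.com/coreos/etcd/clientv3"

// fromFile builds a client from a YAML configuration file. The file is
// expected to use the keys defined by yamlConfig, for example:
//
//	endpoints: ["10.0.0.1:2379", "10.0.0.2:2379"]
//	cert-file: /etc/etcd/client.crt
//	key-file: /etc/etcd/client.key
//	ca-file: /etc/etcd/ca.crt
func fromFile() (*clientv3.Client, error) {
	return clientv3.NewFromConfigFile("/etc/etcd/client.yaml")
}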

64
vendor/github.com/coreos/etcd/clientv3/doc.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package clientv3 implements the official Go etcd client for v3.
//
// Create client using `clientv3.New`:
//
// cli, err := clientv3.New(clientv3.Config{
// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
// DialTimeout: 5 * time.Second,
// })
// if err != nil {
// // handle error!
// }
// defer cli.Close()
//
// Make sure to close the client after using it. If the client is not closed, the
// connection will leak goroutines.
//
// To specify client request timeout, pass context.WithTimeout to APIs:
//
// ctx, cancel := context.WithTimeout(context.Background(), timeout)
// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
// cancel()
// if err != nil {
// // handle error!
// }
// // use the response
//
// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
// Clients are safe for concurrent use by multiple goroutines.
//
// etcd client returns 2 types of errors:
//
// 1. context error: canceled or deadline exceeded.
// 2. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/error.go.
//
// Here is the example code to handle client errors:
//
// resp, err := kvc.Put(ctx, "", "")
// if err != nil {
// if err == context.Canceled {
// // ctx is canceled by another routine
// } else if err == context.DeadlineExceeded {
// // ctx is attached with a deadline and it exceeded
// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
// // process (verr.Errors)
// } else {
// // bad cluster endpoints, which are not etcd servers
// }
// }
//
package clientv3

180
vendor/github.com/coreos/etcd/clientv3/kv.go generated vendored Normal file
View File

@ -0,0 +1,180 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
CompactResponse pb.CompactionResponse
PutResponse pb.PutResponse
GetResponse pb.RangeResponse
DeleteResponse pb.DeleteRangeResponse
TxnResponse pb.TxnResponse
)
type KV interface {
// Put puts a key-value pair into etcd.
// Note that key and value can be arbitrary byte arrays; a string is
// an immutable representation of such a byte array.
// To get a string from a byte array, do string([]byte{0x10, 0x20}).
Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
// Get retrieves keys.
// By default, Get will return the value for "key", if any.
// When passed WithRange(end), Get will return the keys in the range [key, end).
// When passed WithFromKey(), Get returns keys greater than or equal to key.
// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
// if the required revision is compacted, the request will fail with ErrCompacted .
// When passed WithLimit(limit), the number of returned keys is bounded by limit.
// When passed WithSort(), the keys will be sorted.
Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
// Delete deletes a key, or optionally using WithRange(end), [key, end).
Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
// Compact compacts etcd KV history before the given rev.
Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
// Do applies a single Op on KV without a transaction.
// Do is useful when creating arbitrary operations to be issued at a
// later time; the user can range over the operations, calling Do to
// execute them. Get/Put/Delete, on the other hand, are best suited
// for when the operation should be issued at the time of declaration.
Do(ctx context.Context, op Op) (OpResponse, error)
// Txn creates a transaction.
Txn(ctx context.Context) Txn
}
type OpResponse struct {
put *PutResponse
get *GetResponse
del *DeleteResponse
}
func (op OpResponse) Put() *PutResponse { return op.put }
func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del }
type kv struct {
remote pb.KVClient
}
func NewKV(c *Client) KV {
return &kv{remote: RetryKVClient(c)}
}
func NewKVFromKVClient(remote pb.KVClient) KV {
return &kv{remote: remote}
}
func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
r, err := kv.Do(ctx, OpPut(key, val, opts...))
return r.put, toErr(ctx, err)
}
func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
r, err := kv.Do(ctx, OpGet(key, opts...))
return r.get, toErr(ctx, err)
}
func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
r, err := kv.Do(ctx, OpDelete(key, opts...))
return r.del, toErr(ctx, err)
}
func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
return (*CompactResponse)(resp), err
}
func (kv *kv) Txn(ctx context.Context) Txn {
return &txn{
kv: kv,
ctx: ctx,
}
}
func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
for {
resp, err := kv.do(ctx, op)
if err == nil {
return resp, nil
}
if isHaltErr(ctx, err) {
return resp, toErr(ctx, err)
}
// do not retry on modifications
if op.isWrite() {
return resp, toErr(ctx, err)
}
}
}
func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) {
var err error
switch op.t {
// TODO: handle other ops
case tRange:
var resp *pb.RangeResponse
r := &pb.RangeRequest{
Key: op.key,
RangeEnd: op.end,
Limit: op.limit,
Revision: op.rev,
Serializable: op.serializable,
KeysOnly: op.keysOnly,
CountOnly: op.countOnly,
}
if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
}
resp, err = kv.remote.Range(ctx, r, grpc.FailFast(false))
if err == nil {
return OpResponse{get: (*GetResponse)(resp)}, nil
}
case tPut:
var resp *pb.PutResponse
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
resp, err = kv.remote.Put(ctx, r)
if err == nil {
return OpResponse{put: (*PutResponse)(resp)}, nil
}
case tDeleteRange:
var resp *pb.DeleteRangeResponse
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
resp, err = kv.remote.DeleteRange(ctx, r)
if err == nil {
return OpResponse{del: (*DeleteResponse)(resp)}, nil
}
default:
panic("Unknown op")
}
return OpResponse{}, err
}
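A minimal sketch of the KV interface above (not part of the vendored file); cli is an already-constructed *clientv3.Client, and the key, value, and function name are placeholders.

package example

import (
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// putThenGet writes a key and reads it back through the KV interface.
func putThenGet(ctx context.Context, cli *clientv3.Client) error {
	if _, err := cli.Put(ctx, "sample_key", "sample_value"); err != nil {
		return err
	}
	resp, err := cli.Get(ctx, "sample_key")
	if err != nil {
		return err
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
	return nil
}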

462
vendor/github.com/coreos/etcd/clientv3/lease.go generated vendored Normal file
View File

@ -0,0 +1,462 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"sync"
"time"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
LeaseRevokeResponse pb.LeaseRevokeResponse
LeaseID int64
)
// LeaseGrantResponse is used to convert the protobuf grant response.
type LeaseGrantResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
Error string
}
// LeaseKeepAliveResponse is used to convert the protobuf keepalive response.
type LeaseKeepAliveResponse struct {
*pb.ResponseHeader
ID LeaseID
TTL int64
}
const (
// defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client.
defaultTTL = 5 * time.Second
// a small buffer to store unsent lease responses.
leaseResponseChSize = 16
// NoLease is a lease ID for the absence of a lease.
NoLease LeaseID = 0
)
type Lease interface {
// Grant creates a new lease.
Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
// Revoke revokes the given lease.
Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
// KeepAlive keeps the given lease alive forever.
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
// KeepAliveOnce renews the lease once. In most cases, KeepAlive
// should be used instead of KeepAliveOnce.
KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
// Close releases all resources Lease keeps for efficient communication
// with the etcd server.
Close() error
}
type lessor struct {
mu sync.Mutex // guards all fields
// donec is closed when recvKeepAliveLoop stops
donec chan struct{}
remote pb.LeaseClient
stream pb.Lease_LeaseKeepAliveClient
streamCancel context.CancelFunc
stopCtx context.Context
stopCancel context.CancelFunc
keepAlives map[LeaseID]*keepAlive
// firstKeepAliveTimeout is the timeout for the first keepalive request
// before the actual TTL is known to the lease client
firstKeepAliveTimeout time.Duration
}
// keepAlive multiplexes a keepalive for a lease over multiple channels
type keepAlive struct {
chs []chan<- *LeaseKeepAliveResponse
ctxs []context.Context
// deadline is the time at which the keep alive channels close if no response arrives
deadline time.Time
// nextKeepAlive is when to send the next keep alive message
nextKeepAlive time.Time
// donec is closed on lease revoke, expiration, or cancel.
donec chan struct{}
}
func NewLease(c *Client) Lease {
l := &lessor{
donec: make(chan struct{}),
keepAlives: make(map[LeaseID]*keepAlive),
remote: RetryLeaseClient(c),
firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second,
}
if l.firstKeepAliveTimeout == time.Second {
l.firstKeepAliveTimeout = defaultTTL
}
l.stopCtx, l.stopCancel = context.WithCancel(context.Background())
go l.recvKeepAliveLoop()
go l.deadlineLoop()
return l
}
func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := &pb.LeaseGrantRequest{TTL: ttl}
resp, err := l.remote.LeaseGrant(cctx, r)
if err == nil {
gresp := &LeaseGrantResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
Error: resp.Error,
}
return gresp, nil
}
if isHaltErr(cctx, err) {
return nil, toErr(ctx, err)
}
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
}
func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
r := &pb.LeaseRevokeRequest{ID: int64(id)}
resp, err := l.remote.LeaseRevoke(cctx, r)
if err == nil {
return (*LeaseRevokeResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
}
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
l.mu.Lock()
ka, ok := l.keepAlives[id]
if !ok {
// create fresh keep alive
ka = &keepAlive{
chs: []chan<- *LeaseKeepAliveResponse{ch},
ctxs: []context.Context{ctx},
deadline: time.Now().Add(l.firstKeepAliveTimeout),
nextKeepAlive: time.Now(),
donec: make(chan struct{}),
}
l.keepAlives[id] = ka
} else {
// add channel and context to existing keep alive
ka.ctxs = append(ka.ctxs, ctx)
ka.chs = append(ka.chs, ch)
}
l.mu.Unlock()
go l.keepAliveCtxCloser(id, ctx, ka.donec)
return ch, nil
}
func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
done := cancelWhenStop(cancel, l.stopCtx.Done())
defer close(done)
for {
resp, err := l.keepAliveOnce(cctx, id)
if err == nil {
if resp.TTL == 0 {
err = rpctypes.ErrLeaseNotFound
}
return resp, err
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
if nerr := l.newStream(); nerr != nil {
return nil, nerr
}
}
}
func (l *lessor) Close() error {
l.stopCancel()
<-l.donec
return nil
}
func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
select {
case <-donec:
return
case <-l.donec:
return
case <-ctx.Done():
}
l.mu.Lock()
defer l.mu.Unlock()
ka, ok := l.keepAlives[id]
if !ok {
return
}
// close channel and remove context if still associated with keep alive
for i, c := range ka.ctxs {
if c == ctx {
close(ka.chs[i])
ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
break
}
}
// remove the keep alive if it has no more listeners
if len(ka.chs) == 0 {
delete(l.keepAlives, id)
}
}
func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
cctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
if err != nil {
return nil, toErr(ctx, err)
}
resp, rerr := stream.Recv()
if rerr != nil {
return nil, toErr(ctx, rerr)
}
karesp := &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
}
return karesp, nil
}
func (l *lessor) recvKeepAliveLoop() {
defer func() {
l.mu.Lock()
close(l.donec)
for _, ka := range l.keepAlives {
ka.Close()
}
l.keepAlives = make(map[LeaseID]*keepAlive)
l.mu.Unlock()
}()
stream, serr := l.resetRecv()
for serr == nil {
resp, err := stream.Recv()
if err != nil {
if isHaltErr(l.stopCtx, err) {
return
}
stream, serr = l.resetRecv()
continue
}
l.recvKeepAlive(resp)
}
}
// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests
func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
if err := l.newStream(); err != nil {
return nil, err
}
stream := l.getKeepAliveStream()
go l.sendKeepAliveLoop(stream)
return stream, nil
}
// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
karesp := &LeaseKeepAliveResponse{
ResponseHeader: resp.GetHeader(),
ID: LeaseID(resp.ID),
TTL: resp.TTL,
}
l.mu.Lock()
defer l.mu.Unlock()
ka, ok := l.keepAlives[karesp.ID]
if !ok {
return
}
if karesp.TTL <= 0 {
// lease expired; close all keep alive channels
delete(l.keepAlives, karesp.ID)
ka.Close()
return
}
// send update to all channels
nextKeepAlive := time.Now().Add(1 + time.Duration(karesp.TTL/3)*time.Second)
ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
for _, ch := range ka.chs {
select {
case ch <- karesp:
ka.nextKeepAlive = nextKeepAlive
default:
}
}
}
// deadlineLoop reaps any keep alive channels that have not received a response
// within the lease TTL
func (l *lessor) deadlineLoop() {
for {
select {
case <-time.After(time.Second):
case <-l.donec:
return
}
now := time.Now()
l.mu.Lock()
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
// waited too long for response; lease may be expired
ka.Close()
delete(l.keepAlives, id)
}
}
l.mu.Unlock()
}
}
// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream
func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
for {
select {
case <-time.After(500 * time.Millisecond):
case <-stream.Context().Done():
return
case <-l.donec:
return
case <-l.stopCtx.Done():
return
}
var tosend []LeaseID
now := time.Now()
l.mu.Lock()
for id, ka := range l.keepAlives {
if ka.nextKeepAlive.Before(now) {
tosend = append(tosend, id)
}
}
l.mu.Unlock()
for _, id := range tosend {
r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
if err := stream.Send(r); err != nil {
// TODO do something with this error?
return
}
}
}
}
func (l *lessor) getKeepAliveStream() pb.Lease_LeaseKeepAliveClient {
l.mu.Lock()
defer l.mu.Unlock()
return l.stream
}
func (l *lessor) newStream() error {
sctx, cancel := context.WithCancel(l.stopCtx)
stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false))
if err != nil {
cancel()
return toErr(sctx, err)
}
l.mu.Lock()
defer l.mu.Unlock()
if l.stream != nil && l.streamCancel != nil {
l.stream.CloseSend()
l.streamCancel()
}
l.streamCancel = cancel
l.stream = stream
return nil
}
func (ka *keepAlive) Close() {
close(ka.donec)
for _, ch := range ka.chs {
close(ch)
}
}
// cancelWhenStop calls cancel when the given stopc fires. It returns a done chan. done
// should be closed when the work is finished. When done fires, cancelWhenStop will release
// its internal resource.
func cancelWhenStop(cancel context.CancelFunc, stopc <-chan struct{}) chan<- struct{} {
done := make(chan struct{}, 1)
go func() {
select {
case <-stopc:
case <-done:
}
cancel()
}()
return done
}
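A hedged sketch of the Lease interface above (not part of the vendored file). WithLease is an OpOption from op.go elsewhere in this vendored package; cli is an already-constructed *clientv3.Client and the function name is illustrative.

package example

import (
	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

// putWithLease attaches a key to a 10-second lease and keeps the lease alive
// until ctx is canceled or the keepalive stream ends.
func putWithLease(ctx context.Context, cli *clientv3.Client, key, val string) error {
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		return err
	}
	if _, err := cli.Put(ctx, key, val, clientv3.WithLease(grant.ID)); err != nil {
		return err
	}
	// KeepAlive returns a channel of keepalive responses; draining it keeps
	// the lease from expiring while the stream is healthy.
	ch, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		return err
	}
	go func() {
		for range ch {
		}
	}()
	return nil
}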

64
vendor/github.com/coreos/etcd/clientv3/logger.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"log"
"os"
"sync"
"google.golang.org/grpc/grpclog"
)
type Logger grpclog.Logger
var (
logger settableLogger
)
type settableLogger struct {
l grpclog.Logger
mu sync.RWMutex
}
func init() {
// use go's standard logger by default like grpc
logger.mu.Lock()
logger.l = log.New(os.Stderr, "", log.LstdFlags)
grpclog.SetLogger(&logger)
logger.mu.Unlock()
}
func (s *settableLogger) Set(l Logger) {
s.mu.Lock()
logger.l = l
s.mu.Unlock()
}
func (s *settableLogger) Get() Logger {
s.mu.RLock()
l := logger.l
s.mu.RUnlock()
return l
}
// implement the grpclog.Logger interface
func (s *settableLogger) Fatal(args ...interface{}) { s.Get().Fatal(args...) }
func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.Get().Fatalf(format, args...) }
func (s *settableLogger) Fatalln(args ...interface{}) { s.Get().Fatalln(args...) }
func (s *settableLogger) Print(args ...interface{}) { s.Get().Print(args...) }
func (s *settableLogger) Printf(format string, args ...interface{}) { s.Get().Printf(format, args...) }
func (s *settableLogger) Println(args ...interface{}) { s.Get().Println(args...) }
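Set and Get above are unexported, so in this version only the package itself can swap the logger installed in init(). A hypothetical in-package helper (not part of the vendored file; the io import would be an added assumption) that routes client and gRPC logging to a custom writer:
func setLoggerTo(w io.Writer) {
	// *log.Logger satisfies grpclog.Logger, which is what settableLogger.Set expects.
	logger.Set(log.New(w, "clientv3: ", log.LstdFlags))
}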

164
vendor/github.com/coreos/etcd/clientv3/maintenance.go generated vendored Normal file
View File

@ -0,0 +1,164 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"io"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type (
DefragmentResponse pb.DefragmentResponse
AlarmResponse pb.AlarmResponse
AlarmMember pb.AlarmMember
StatusResponse pb.StatusResponse
)
type Maintenance interface {
// AlarmList gets all active alarms.
AlarmList(ctx context.Context) (*AlarmResponse, error)
// AlarmDisarm disarms a given alarm.
AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
// Defragment defragments the storage backend of the etcd member with the given endpoint.
// Defragment is only needed when deleting a large number of keys and the user wants to
// reclaim the resources.
// Defragment is an expensive operation. Users should avoid defragmenting multiple members
// at the same time.
// To defragment multiple members in the cluster, the user needs to call Defragment multiple
// times with different endpoints.

Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
// Status gets the status of the endpoint.
Status(ctx context.Context, endpoint string) (*StatusResponse, error)
// Snapshot provides a reader for a snapshot of a backend.
Snapshot(ctx context.Context) (io.ReadCloser, error)
}
type maintenance struct {
c *Client
remote pb.MaintenanceClient
}
func NewMaintenance(c *Client) Maintenance {
return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)}
}
func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
req := &pb.AlarmRequest{
Action: pb.AlarmRequest_GET,
MemberID: 0, // all
Alarm: pb.AlarmType_NONE, // all
}
for {
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil {
return (*AlarmResponse)(resp), nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
req := &pb.AlarmRequest{
Action: pb.AlarmRequest_DEACTIVATE,
MemberID: am.MemberID,
Alarm: am.Alarm,
}
if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
ar, err := m.AlarmList(ctx)
if err != nil {
return nil, toErr(ctx, err)
}
ret := AlarmResponse{}
for _, am := range ar.Alarms {
dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
if derr != nil {
return nil, toErr(ctx, derr)
}
ret.Alarms = append(ret.Alarms, dresp.Alarms...)
}
return &ret, nil
}
resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false))
if err == nil {
return (*AlarmResponse)(resp), nil
}
return nil, toErr(ctx, err)
}
func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
conn, err := m.c.Dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer conn.Close()
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
return (*DefragmentResponse)(resp), nil
}
func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
conn, err := m.c.Dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer conn.Close()
remote := pb.NewMaintenanceClient(conn)
resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
return (*StatusResponse)(resp), nil
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
pr, pw := io.Pipe()
go func() {
for {
resp, err := ss.Recv()
if err != nil {
pw.CloseWithError(err)
return
}
if resp == nil && err == nil {
break
}
if _, werr := pw.Write(resp.Blob); werr != nil {
pw.CloseWithError(werr)
return
}
}
pw.Close()
}()
return pr, nil
}
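A hedged usage sketch (not part of the vendored file): query Status for each endpoint and stream a backend snapshot to disk. It assumes the Client embeds this Maintenance interface (as in this version of the package), a reachable cluster, and the fmt, os, and time imports as additions.
func exampleMaintenance(cli *Client, endpoints []string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	for _, ep := range endpoints {
		st, err := cli.Status(ctx, ep)
		if err != nil {
			return err
		}
		fmt.Printf("endpoint %s: version=%s dbSize=%d\n", ep, st.Version, st.DbSize)
	}
	rc, err := cli.Snapshot(ctx)
	if err != nil {
		return err
	}
	defer rc.Close()
	f, err := os.Create("backend.snapshot.db")
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, rc)
	return err
}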

322
vendor/github.com/coreos/etcd/clientv3/op.go generated vendored Normal file
View File

@ -0,0 +1,322 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
type opType int
const (
// A default Op has opType 0, which is invalid.
tRange opType = iota + 1
tPut
tDeleteRange
)
var (
noPrefixEnd = []byte{0}
)
// Op represents an Operation that kv can execute.
type Op struct {
t opType
key []byte
end []byte
// for range
limit int64
sort *SortOption
serializable bool
keysOnly bool
countOnly bool
// for range, watch
rev int64
// for watch, put, delete
prevKV bool
// progressNotify is for progress updates.
progressNotify bool
// createdNotify is for created event
createdNotify bool
// filters for watchers
filterPut bool
filterDelete bool
// for put
val []byte
leaseID LeaseID
}
func (op Op) toRequestOp() *pb.RequestOp {
switch op.t {
case tRange:
r := &pb.RangeRequest{
Key: op.key,
RangeEnd: op.end,
Limit: op.limit,
Revision: op.rev,
Serializable: op.serializable,
KeysOnly: op.keysOnly,
CountOnly: op.countOnly,
}
if op.sort != nil {
r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
}
return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: r}}
case tPut:
r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV}
return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
case tDeleteRange:
r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
default:
panic("Unknown Op")
}
}
func (op Op) isWrite() bool {
return op.t != tRange
}
func OpGet(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
return ret
}
func OpDelete(key string, opts ...OpOption) Op {
ret := Op{t: tDeleteRange, key: []byte(key)}
ret.applyOpts(opts)
switch {
case ret.leaseID != 0:
panic("unexpected lease in delete")
case ret.limit != 0:
panic("unexpected limit in delete")
case ret.rev != 0:
panic("unexpected revision in delete")
case ret.sort != nil:
panic("unexpected sort in delete")
case ret.serializable:
panic("unexpected serializable in delete")
case ret.countOnly:
panic("unexpected countOnly in delete")
case ret.filterDelete, ret.filterPut:
panic("unexpected filter in delete")
case ret.createdNotify:
panic("unexpected createdNotify in delete")
}
return ret
}
func OpPut(key, val string, opts ...OpOption) Op {
ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
ret.applyOpts(opts)
switch {
case ret.end != nil:
panic("unexpected range in put")
case ret.limit != 0:
panic("unexpected limit in put")
case ret.rev != 0:
panic("unexpected revision in put")
case ret.sort != nil:
panic("unexpected sort in put")
case ret.serializable:
panic("unexpected serializable in put")
case ret.countOnly:
panic("unexpected countOnly in put")
case ret.filterDelete, ret.filterPut:
panic("unexpected filter in put")
case ret.createdNotify:
panic("unexpected createdNotify in put")
}
return ret
}
func opWatch(key string, opts ...OpOption) Op {
ret := Op{t: tRange, key: []byte(key)}
ret.applyOpts(opts)
switch {
case ret.leaseID != 0:
panic("unexpected lease in watch")
case ret.limit != 0:
panic("unexpected limit in watch")
case ret.sort != nil:
panic("unexpected sort in watch")
case ret.serializable:
panic("unexpected serializable in watch")
case ret.countOnly:
panic("unexpected countOnly in watch")
}
return ret
}
func (op *Op) applyOpts(opts []OpOption) {
for _, opt := range opts {
opt(op)
}
}
// OpOption configures Operations like Get, Put, Delete.
type OpOption func(*Op)
// WithLease attaches a lease ID to a key in 'Put' request.
func WithLease(leaseID LeaseID) OpOption {
return func(op *Op) { op.leaseID = leaseID }
}
// WithLimit limits the number of results to return from 'Get' request.
func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
// WithRev specifies the store revision for 'Get' request.
// Or the start revision of 'Watch' request.
func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
// WithSort specifies the ordering in 'Get' request. It requires
// 'WithRange' and/or 'WithPrefix' to be specified too.
// 'target' specifies the target to sort by: key, version, create revision, mod revision, value.
// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
func WithSort(target SortTarget, order SortOrder) OpOption {
return func(op *Op) {
if target == SortByKey && order == SortAscend {
// If order != SortNone, server fetches the entire key-space,
// and then applies the sort and limit, if provided.
// Since current mvcc.Range implementation returns results
// sorted by keys in lexicographically ascending order,
// client should ignore SortOrder if the target is SortByKey.
order = SortNone
}
op.sort = &SortOption{target, order}
}
}
// GetPrefixRangeEnd gets the range end of the prefix.
// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'.
func GetPrefixRangeEnd(prefix string) string {
return string(getPrefix([]byte(prefix)))
}
func getPrefix(key []byte) []byte {
end := make([]byte, len(key))
copy(end, key)
for i := len(end) - 1; i >= 0; i-- {
if end[i] < 0xff {
end[i] = end[i] + 1
end = end[:i+1]
return end
}
}
// next prefix does not exist (e.g., 0xffff);
// default to WithFromKey policy
return noPrefixEnd
}
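// Worked example (illustrative, not part of the vendored file):
//   getPrefix([]byte("foo"))      -> []byte("fop"), so WithPrefix() on "foo" covers ["foo", "fop")
//   getPrefix([]byte{0xff, 0xff}) -> noPrefixEnd ([]byte{0}), i.e. fall back to the WithFromKey policy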
// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
// can return 'foo1', 'foo2', and so on.
func WithPrefix() OpOption {
return func(op *Op) {
op.end = getPrefix(op.key)
}
}
// WithRange specifies the range of 'Get' or 'Delete' requests.
// For example, 'Get' requests with 'WithRange(end)' returns
// the keys in the range [key, end).
func WithRange(endKey string) OpOption {
return func(op *Op) { op.end = []byte(endKey) }
}
// WithFromKey specifies the range of 'Get' or 'Delete' requests
// to be equal or greater than the key in the argument.
func WithFromKey() OpOption { return WithRange("\x00") }
// WithSerializable makes the 'Get' request serializable. By default,
// it is linearizable. Serializable requests are better for lower latency
// requirements.
func WithSerializable() OpOption {
return func(op *Op) { op.serializable = true }
}
// WithKeysOnly makes the 'Get' request return only the keys; the corresponding
// values are omitted.
func WithKeysOnly() OpOption {
return func(op *Op) { op.keysOnly = true }
}
// WithCountOnly makes the 'Get' request return only the count of keys.
func WithCountOnly() OpOption {
return func(op *Op) { op.countOnly = true }
}
// WithFirstCreate gets the key with the oldest creation revision in the request range.
func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
// WithLastCreate gets the key with the latest creation revision in the request range.
func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
// WithFirstKey gets the lexically first key in the request range.
func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
// WithLastKey gets the lexically last key in the request range.
func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
// WithFirstRev gets the key with the oldest modification revision in the request range.
func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
// WithLastRev gets the key with the latest modification revision in the request range.
func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
// withTop gets the first key over the get's prefix given a sort order
func withTop(target SortTarget, order SortOrder) []OpOption {
return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
}
// WithProgressNotify makes the watch server send periodic progress updates
// every 10 minutes when there are no incoming events.
// Progress updates have zero events in WatchResponse.
func WithProgressNotify() OpOption {
return func(op *Op) {
op.progressNotify = true
}
}
// WithCreatedNotify makes the watch server send the created event.
func WithCreatedNotify() OpOption {
return func(op *Op) {
op.createdNotify = true
}
}
// WithFilterPut discards PUT events from the watcher.
func WithFilterPut() OpOption {
return func(op *Op) { op.filterPut = true }
}
// WithFilterDelete discards DELETE events from the watcher.
func WithFilterDelete() OpOption {
return func(op *Op) { op.filterDelete = true }
}
// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
// nothing will be returned.
func WithPrevKV() OpOption {
return func(op *Op) {
op.prevKV = true
}
}
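A hedged sketch (not part of the vendored file) showing how the OpOptions above compose; the key names and the lease ID are illustrative, and the resulting Ops would be passed to the package's KV or Txn implementations.
func exampleOps(leaseID LeaseID) []Op {
	return []Op{
		// oldest key under the "job/" prefix by creation revision
		OpGet("job/", WithFirstCreate()...),
		// keys only, at most 100, across the whole "config/" prefix range
		OpGet("config/", WithPrefix(), WithKeysOnly(), WithLimit(100)),
		// put bound to a lease, also returning the previous key-value pair
		OpPut("lock/owner", "node-1", WithLease(leaseID), WithPrevKV()),
		// delete every key under "tmp/"
		OpDelete("tmp/", WithPrefix()),
	}
}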

243
vendor/github.com/coreos/etcd/clientv3/retry.go generated vendored Normal file
View File

@ -0,0 +1,243 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
type rpcFunc func(ctx context.Context) error
type retryRpcFunc func(context.Context, rpcFunc)
func (c *Client) newRetryWrapper() retryRpcFunc {
return func(rpcCtx context.Context, f rpcFunc) {
for {
err := f(rpcCtx)
// ignore grpc conn closing on fail-fast calls; they are transient errors
if err == nil || !isConnClosing(err) {
return
}
select {
case <-c.balancer.ConnectNotify():
case <-rpcCtx.Done():
case <-c.ctx.Done():
return
}
}
}
}
type retryKVClient struct {
pb.KVClient
retryf retryRpcFunc
}
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
func RetryKVClient(c *Client) pb.KVClient {
return &retryKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
}
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Put(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Txn(rctx, in, opts...)
return err
})
return resp, err
}
func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
rkv.retryf(ctx, func(rctx context.Context) error {
resp, err = rkv.KVClient.Compact(rctx, in, opts...)
return err
})
return resp, err
}
type retryLeaseClient struct {
pb.LeaseClient
retryf retryRpcFunc
}
// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
func RetryLeaseClient(c *Client) pb.LeaseClient {
return &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
}
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...)
return err
})
return resp, err
}
func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
rlc.retryf(ctx, func(rctx context.Context) error {
resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...)
return err
})
return resp, err
}
type retryClusterClient struct {
pb.ClusterClient
retryf retryRpcFunc
}
// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
func RetryClusterClient(c *Client) pb.ClusterClient {
return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper}
}
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...)
return err
})
return resp, err
}
func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
rcc.retryf(ctx, func(rctx context.Context) error {
resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...)
return err
})
return resp, err
}
type retryAuthClient struct {
pb.AuthClient
retryf retryRpcFunc
}
// RetryAuthClient implements an AuthClient that uses the client's FailFast retry policy.
func RetryAuthClient(c *Client) pb.AuthClient {
return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper}
}
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserDelete(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...)
return err
})
return resp, err
}
func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
rac.retryf(ctx, func(rctx context.Context) error {
resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...)
return err
})
return resp, err
}
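A hedged sketch (not part of the vendored file): sending a raw etcdserverpb Put through the retrying KV client so transient "connection is closing" failures are retried once the balancer reconnects. The time import is an assumed addition.
func examplePutWithRetry(cli *Client) (*pb.PutResponse, error) {
	kvc := RetryKVClient(cli)
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	return kvc.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
}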

37
vendor/github.com/coreos/etcd/clientv3/sort.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
type SortTarget int
type SortOrder int
const (
SortNone SortOrder = iota
SortAscend
SortDescend
)
const (
SortByKey SortTarget = iota
SortByVersion
SortByCreateRevision
SortByModRevision
SortByValue
)
type SortOption struct {
Target SortTarget
Order SortOrder
}

160
vendor/github.com/coreos/etcd/clientv3/txn.go generated vendored Normal file
View File

@ -0,0 +1,160 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"sync"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"golang.org/x/net/context"
)
// Txn is the interface that wraps mini-transactions.
//
// Txn.If(
// Compare(Value(k1), ">", v1),
// Compare(Version(k1), "=", 2)
// ).Then(
// OpPut(k2,v2), OpPut(k3,v3)
// ).Else(
// OpPut(k4,v4), OpPut(k5,v5)
// ).Commit()
//
type Txn interface {
// If takes a list of comparisons. If all comparisons passed in succeed,
// the operations passed into Then() will be executed. Otherwise, the operations
// passed into Else() will be executed.
If(cs ...Cmp) Txn
// Then takes a list of operations. The Ops list will be executed, if the
// comparisons passed in If() succeed.
Then(ops ...Op) Txn
// Else takes a list of operations. The Ops list will be executed, if the
// comparisons passed in If() fail.
Else(ops ...Op) Txn
// Commit tries to commit the transaction.
Commit() (*TxnResponse, error)
// TODO: add a Do for shortcut the txn without any condition?
}
type txn struct {
kv *kv
ctx context.Context
mu sync.Mutex
cif bool
cthen bool
celse bool
isWrite bool
cmps []*pb.Compare
sus []*pb.RequestOp
fas []*pb.RequestOp
}
func (txn *txn) If(cs ...Cmp) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.cif {
panic("cannot call If twice!")
}
if txn.cthen {
panic("cannot call If after Then!")
}
if txn.celse {
panic("cannot call If after Else!")
}
txn.cif = true
for i := range cs {
txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
}
return txn
}
func (txn *txn) Then(ops ...Op) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.cthen {
panic("cannot call Then twice!")
}
if txn.celse {
panic("cannot call Then after Else!")
}
txn.cthen = true
for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite()
txn.sus = append(txn.sus, op.toRequestOp())
}
return txn
}
func (txn *txn) Else(ops ...Op) Txn {
txn.mu.Lock()
defer txn.mu.Unlock()
if txn.celse {
panic("cannot call Else twice!")
}
txn.celse = true
for _, op := range ops {
txn.isWrite = txn.isWrite || op.isWrite()
txn.fas = append(txn.fas, op.toRequestOp())
}
return txn
}
func (txn *txn) Commit() (*TxnResponse, error) {
txn.mu.Lock()
defer txn.mu.Unlock()
for {
resp, err := txn.commit()
if err == nil {
return resp, err
}
if isHaltErr(txn.ctx, err) {
return nil, toErr(txn.ctx, err)
}
if txn.isWrite {
return nil, toErr(txn.ctx, err)
}
}
}
func (txn *txn) commit() (*TxnResponse, error) {
r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
resp, err := txn.kv.remote.Txn(txn.ctx, r)
if err != nil {
return nil, err
}
return (*TxnResponse)(resp), nil
}
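A hedged sketch (not part of the vendored file): an atomic create-if-absent built on the Txn interface above. It assumes the KV interface from kv.go and the Compare/CreateRevision helpers from compare.go in this package, plus a time import.
func exampleCreateIfAbsent(kvc KV, key, val string) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	resp, err := kvc.Txn(ctx).
		If(Compare(CreateRevision(key), "=", 0)). // create revision 0 means the key does not exist
		Then(OpPut(key, val)).
		Else(OpGet(key)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}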

755
vendor/github.com/coreos/etcd/clientv3/watch.go generated vendored Normal file
View File

@ -0,0 +1,755 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"fmt"
"sync"
"time"
v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
const (
EventTypeDelete = mvccpb.DELETE
EventTypePut = mvccpb.PUT
closeSendErrTimeout = 250 * time.Millisecond
)
type Event mvccpb.Event
type WatchChan <-chan WatchResponse
type Watcher interface {
// Watch watches on a key or prefix. The watched events will be returned
// through the returned channel.
// If the watch is slow or the required rev is compacted, the watch request
// might be canceled from the server-side and the chan will be closed.
// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
// Close closes the watcher and cancels all watch requests.
Close() error
}
type WatchResponse struct {
Header pb.ResponseHeader
Events []*Event
// CompactRevision is the minimum revision the watcher may receive.
CompactRevision int64
// Canceled is used to indicate watch failure.
// If the watch failed and the stream was about to close, before the channel is closed,
// the channel sends a final response that has Canceled set to true with a non-nil Err().
Canceled bool
// Created is used to indicate the creation of the watcher.
Created bool
closeErr error
}
// IsCreate returns true if the event tells that the key is newly created.
func (e *Event) IsCreate() bool {
return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
}
// IsModify returns true if the event tells that a new value is put on an existing key.
func (e *Event) IsModify() bool {
return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
}
// Err is the error value if this WatchResponse holds an error.
func (wr *WatchResponse) Err() error {
switch {
case wr.closeErr != nil:
return v3rpc.Error(wr.closeErr)
case wr.CompactRevision != 0:
return v3rpc.ErrCompacted
case wr.Canceled:
return v3rpc.ErrFutureRev
}
return nil
}
// IsProgressNotify returns true if the WatchResponse is a progress notification.
func (wr *WatchResponse) IsProgressNotify() bool {
return len(wr.Events) == 0 && !wr.Canceled && !wr.Created
}
// watcher implements the Watcher interface
type watcher struct {
remote pb.WatchClient
// mu protects the grpc streams map
mu sync.RWMutex
// streams holds all the active grpc streams keyed by ctx value.
streams map[string]*watchGrpcStream
}
type watchGrpcStream struct {
owner *watcher
remote pb.WatchClient
// ctx controls internal remote.Watch requests
ctx context.Context
// ctxKey is the key used when looking up this stream's context
ctxKey string
cancel context.CancelFunc
// mu protects the streams map
mu sync.RWMutex
// streams holds all active watchers
streams map[int64]*watcherStream
// reqc sends a watch request from Watch() to the main goroutine
reqc chan *watchRequest
// respc receives data from the watch client
respc chan *pb.WatchResponse
// stopc is sent to the main goroutine to stop all processing
stopc chan struct{}
// donec closes to broadcast shutdown
donec chan struct{}
// errc transmits errors from grpc Recv to the watch stream reconn logic
errc chan error
// the error that closed the watch stream
closeErr error
}
// watchRequest is issued by the subscriber to start a new watcher
type watchRequest struct {
ctx context.Context
key string
end string
rev int64
// send created notification event if this field is true
createdNotify bool
// progressNotify is for progress updates
progressNotify bool
// filters is the list of events to filter out
filters []pb.WatchCreateRequest_FilterType
// get the previous key-value pair before the event happens
prevKV bool
// retc receives a chan WatchResponse once the watcher is established
retc chan chan WatchResponse
}
// watcherStream represents a registered watcher
type watcherStream struct {
// initReq is the request that initiated this watcher stream
initReq watchRequest
// outc publishes watch responses to subscriber
outc chan<- WatchResponse
// recvc buffers watch responses before publishing
recvc chan *WatchResponse
id int64
// lastRev is revision last successfully sent over outc
lastRev int64
// resumec indicates the stream must recover at a given revision
resumec chan int64
}
func NewWatcher(c *Client) Watcher {
return NewWatchFromWatchClient(pb.NewWatchClient(c.conn))
}
func NewWatchFromWatchClient(wc pb.WatchClient) Watcher {
return &watcher{
remote: wc,
streams: make(map[string]*watchGrpcStream),
}
}
// never closes
var valCtxCh = make(chan struct{})
var zeroTime = time.Unix(0, 0)
// ctx with only the values; never Done
type valCtx struct{ context.Context }
func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
func (vc *valCtx) Err() error { return nil }
func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
ctx, cancel := context.WithCancel(&valCtx{inctx})
wgs := &watchGrpcStream{
owner: w,
remote: w.remote,
ctx: ctx,
ctxKey: fmt.Sprintf("%v", inctx),
cancel: cancel,
streams: make(map[int64]*watcherStream),
respc: make(chan *pb.WatchResponse),
reqc: make(chan *watchRequest),
stopc: make(chan struct{}),
donec: make(chan struct{}),
errc: make(chan error, 1),
}
go wgs.run()
return wgs
}
// Watch posts a watch request to run() and waits for a new watcher channel
func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
ow := opWatch(key, opts...)
retc := make(chan chan WatchResponse, 1)
var filters []pb.WatchCreateRequest_FilterType
if ow.filterPut {
filters = append(filters, pb.WatchCreateRequest_NOPUT)
}
if ow.filterDelete {
filters = append(filters, pb.WatchCreateRequest_NODELETE)
}
wr := &watchRequest{
ctx: ctx,
createdNotify: ow.createdNotify,
key: string(ow.key),
end: string(ow.end),
rev: ow.rev,
progressNotify: ow.progressNotify,
filters: filters,
prevKV: ow.prevKV,
retc: retc,
}
ok := false
ctxKey := fmt.Sprintf("%v", ctx)
// find or allocate appropriate grpc watch stream
w.mu.Lock()
if w.streams == nil {
// closed
w.mu.Unlock()
ch := make(chan WatchResponse)
close(ch)
return ch
}
wgs := w.streams[ctxKey]
if wgs == nil {
wgs = w.newWatcherGrpcStream(ctx)
w.streams[ctxKey] = wgs
}
donec := wgs.donec
reqc := wgs.reqc
w.mu.Unlock()
// couldn't create channel; return closed channel
closeCh := make(chan WatchResponse, 1)
// submit request
select {
case reqc <- wr:
ok = true
case <-wr.ctx.Done():
wgs.stopIfEmpty()
case <-donec:
if wgs.closeErr != nil {
closeCh <- WatchResponse{closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
return w.Watch(ctx, key, opts...)
}
// receive channel
if ok {
select {
case ret := <-retc:
return ret
case <-ctx.Done():
case <-donec:
if wgs.closeErr != nil {
closeCh <- WatchResponse{closeErr: wgs.closeErr}
break
}
// retry; may have dropped stream from no ctxs
return w.Watch(ctx, key, opts...)
}
}
close(closeCh)
return closeCh
}
func (w *watcher) Close() (err error) {
w.mu.Lock()
streams := w.streams
w.streams = nil
w.mu.Unlock()
for _, wgs := range streams {
if werr := wgs.Close(); werr != nil {
err = werr
}
}
return err
}
func (w *watchGrpcStream) Close() (err error) {
w.mu.Lock()
if w.stopc != nil {
close(w.stopc)
w.stopc = nil
}
w.mu.Unlock()
<-w.donec
select {
case err = <-w.errc:
default:
}
return toErr(w.ctx, err)
}
func (w *watchGrpcStream) addStream(resp *pb.WatchResponse, pendingReq *watchRequest) {
if pendingReq == nil {
// no pending request; ignore
return
}
if resp.Canceled || resp.CompactRevision != 0 {
// a cancel at id creation time means the start revision has
// been compacted out of the store
ret := make(chan WatchResponse, 1)
ret <- WatchResponse{
Header: *resp.Header,
CompactRevision: resp.CompactRevision,
Canceled: true}
close(ret)
pendingReq.retc <- ret
return
}
ret := make(chan WatchResponse)
if resp.WatchId == -1 {
// failed; no channel
close(ret)
pendingReq.retc <- ret
return
}
ws := &watcherStream{
initReq: *pendingReq,
id: resp.WatchId,
outc: ret,
// buffered so unlikely to block on sending while holding mu
recvc: make(chan *WatchResponse, 4),
resumec: make(chan int64),
}
if pendingReq.rev == 0 {
// note the header revision so that a put following a current watcher
// disconnect will arrive on the watcher channel after reconnect
ws.initReq.rev = resp.Header.Revision
}
w.mu.Lock()
w.streams[ws.id] = ws
w.mu.Unlock()
// pass back the subscriber channel for the watcher
pendingReq.retc <- ret
// send messages to subscriber
go w.serveStream(ws)
}
// closeStream closes the watcher resources and removes it
func (w *watchGrpcStream) closeStream(ws *watcherStream) {
w.mu.Lock()
// cancels request stream; subscriber receives nil channel
close(ws.initReq.retc)
// close subscriber's channel
close(ws.outc)
delete(w.streams, ws.id)
w.mu.Unlock()
}
// run is the root of the goroutines for managing a watcher client
func (w *watchGrpcStream) run() {
var wc pb.Watch_WatchClient
var closeErr error
defer func() {
w.owner.mu.Lock()
w.closeErr = closeErr
if w.owner.streams != nil {
delete(w.owner.streams, w.ctxKey)
}
close(w.donec)
w.owner.mu.Unlock()
w.cancel()
}()
// already stopped?
w.mu.RLock()
stopc := w.stopc
w.mu.RUnlock()
if stopc == nil {
return
}
// start a stream with the etcd grpc server
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
var pendingReq, failedReq *watchRequest
curReqC := w.reqc
cancelSet := make(map[int64]struct{})
for {
select {
// Watch() requested
case pendingReq = <-curReqC:
// no more watch requests until there's a response
curReqC = nil
if err := wc.Send(pendingReq.toPB()); err == nil {
// pendingReq now waits on w.respc
break
}
failedReq = pendingReq
// New events from the watch client
case pbresp := <-w.respc:
switch {
case pbresp.Created:
// response to pending req, try to add
w.addStream(pbresp, pendingReq)
pendingReq = nil
curReqC = w.reqc
w.dispatchEvent(pbresp)
case pbresp.Canceled:
delete(cancelSet, pbresp.WatchId)
// shutdown serveStream, if any
w.mu.Lock()
if ws, ok := w.streams[pbresp.WatchId]; ok {
close(ws.recvc)
delete(w.streams, ws.id)
}
numStreams := len(w.streams)
w.mu.Unlock()
if numStreams == 0 {
// don't leak watcher streams
return
}
default:
// dispatch to appropriate watch stream
if ok := w.dispatchEvent(pbresp); ok {
break
}
// watch response on unexpected watch id; cancel id
if _, ok := cancelSet[pbresp.WatchId]; ok {
break
}
cancelSet[pbresp.WatchId] = struct{}{}
cr := &pb.WatchRequest_CancelRequest{
CancelRequest: &pb.WatchCancelRequest{
WatchId: pbresp.WatchId,
},
}
req := &pb.WatchRequest{RequestUnion: cr}
wc.Send(req)
}
// watch client failed to recv; spawn another if possible
// TODO report watch client errors from errc?
case err := <-w.errc:
if toErr(w.ctx, err) == v3rpc.ErrNoLeader {
closeErr = err
return
}
if wc, closeErr = w.newWatchClient(); closeErr != nil {
return
}
curReqC = w.reqc
if pendingReq != nil {
failedReq = pendingReq
}
cancelSet = make(map[int64]struct{})
case <-stopc:
return
}
// send failed; queue for retry
if failedReq != nil {
go func(wr *watchRequest) {
select {
case w.reqc <- wr:
case <-wr.ctx.Done():
case <-w.donec:
}
}(pendingReq)
failedReq = nil
pendingReq = nil
}
}
}
// dispatchEvent sends a WatchResponse to the appropriate watcher stream
func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
w.mu.RLock()
defer w.mu.RUnlock()
ws, ok := w.streams[pbresp.WatchId]
if !ok {
return false
}
events := make([]*Event, len(pbresp.Events))
for i, ev := range pbresp.Events {
events[i] = (*Event)(ev)
}
wr := &WatchResponse{
Header: *pbresp.Header,
Events: events,
CompactRevision: pbresp.CompactRevision,
Created: pbresp.Created,
Canceled: pbresp.Canceled,
}
ws.recvc <- wr
return true
}
// serveWatchClient forwards messages from the grpc stream to run()
func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
for {
resp, err := wc.Recv()
if err != nil {
select {
case w.errc <- err:
case <-w.donec:
}
return
}
select {
case w.respc <- resp:
case <-w.donec:
return
}
}
}
// serveStream forwards watch responses from run() to the subscriber
func (w *watchGrpcStream) serveStream(ws *watcherStream) {
var closeErr error
emptyWr := &WatchResponse{}
wrs := []*WatchResponse{}
resuming := false
closing := false
for !closing {
curWr := emptyWr
outc := ws.outc
// ignore created event if create notify is not requested or
// we already sent the initial created event (when we are on the resume path).
if len(wrs) > 0 && wrs[0].Created &&
(!ws.initReq.createdNotify || ws.lastRev != 0) {
wrs = wrs[1:]
}
if len(wrs) > 0 {
curWr = wrs[0]
} else {
outc = nil
}
select {
case outc <- *curWr:
if wrs[0].Err() != nil {
closing = true
break
}
var newRev int64
if len(wrs[0].Events) > 0 {
newRev = wrs[0].Events[len(wrs[0].Events)-1].Kv.ModRevision
} else {
newRev = wrs[0].Header.Revision
}
if newRev != ws.lastRev {
ws.lastRev = newRev
}
wrs[0] = nil
wrs = wrs[1:]
case wr, ok := <-ws.recvc:
if !ok {
// shutdown from closeStream
return
}
// resume up to last seen event if disconnected
if resuming && wr.Err() == nil {
resuming = false
// trim events already seen
for i := 0; i < len(wr.Events); i++ {
if wr.Events[i].Kv.ModRevision > ws.lastRev {
wr.Events = wr.Events[i:]
break
}
}
// only forward new events
if wr.Events[0].Kv.ModRevision == ws.lastRev {
break
}
}
resuming = false
// TODO don't keep buffering if subscriber stops reading
wrs = append(wrs, wr)
case resumeRev := <-ws.resumec:
wrs = nil
resuming = true
if resumeRev == -1 {
// pause serving stream while resume gets set up
break
}
if resumeRev != ws.lastRev {
panic("unexpected resume revision")
}
case <-w.donec:
closing = true
closeErr = w.closeErr
case <-ws.initReq.ctx.Done():
closing = true
}
}
// try to send off close error
if closeErr != nil {
select {
case ws.outc <- WatchResponse{closeErr: w.closeErr}:
case <-w.donec:
case <-time.After(closeSendErrTimeout):
}
}
w.closeStream(ws)
w.stopIfEmpty()
// lazily send cancel message if events on missing id
}
func (wgs *watchGrpcStream) stopIfEmpty() {
wgs.mu.Lock()
if len(wgs.streams) == 0 && wgs.stopc != nil {
close(wgs.stopc)
wgs.stopc = nil
}
wgs.mu.Unlock()
}
func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
ws, rerr := w.resume()
if rerr != nil {
return nil, rerr
}
go w.serveWatchClient(ws)
return ws, nil
}
// resume creates a new WatchClient with all current watchers reestablished
func (w *watchGrpcStream) resume() (ws pb.Watch_WatchClient, err error) {
for {
if ws, err = w.openWatchClient(); err != nil {
break
} else if err = w.resumeWatchers(ws); err == nil {
break
}
}
return ws, v3rpc.Error(err)
}
// openWatchClient retries opening a watch client until it succeeds, the stream is stopped, or a halt error occurs
func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
for {
w.mu.Lock()
stopc := w.stopc
w.mu.Unlock()
if stopc == nil {
if err == nil {
err = context.Canceled
}
return nil, err
}
if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil {
break
}
if isHaltErr(w.ctx, err) {
return nil, v3rpc.Error(err)
}
}
return ws, nil
}
// resumeWatchers rebuilds every registered watcher on a new client
func (w *watchGrpcStream) resumeWatchers(wc pb.Watch_WatchClient) error {
w.mu.RLock()
streams := make([]*watcherStream, 0, len(w.streams))
for _, ws := range w.streams {
streams = append(streams, ws)
}
w.mu.RUnlock()
for _, ws := range streams {
// pause serveStream
ws.resumec <- -1
// reconstruct watcher from initial request
if ws.lastRev != 0 {
ws.initReq.rev = ws.lastRev
}
if err := wc.Send(ws.initReq.toPB()); err != nil {
return err
}
// wait for request ack
resp, err := wc.Recv()
if err != nil {
return err
} else if len(resp.Events) != 0 || !resp.Created {
return fmt.Errorf("watcher: unexpected response (%+v)", resp)
}
// id may be different since new remote watcher; update map
w.mu.Lock()
delete(w.streams, ws.id)
ws.id = resp.WatchId
w.streams[ws.id] = ws
w.mu.Unlock()
// unpause serveStream
ws.resumec <- ws.lastRev
}
return nil
}
// toPB converts an internal watch request structure to its protobuf message
func (wr *watchRequest) toPB() *pb.WatchRequest {
req := &pb.WatchCreateRequest{
StartRevision: wr.rev,
Key: []byte(wr.key),
RangeEnd: []byte(wr.end),
ProgressNotify: wr.progressNotify,
Filters: wr.filters,
PrevKv: wr.prevKV,
}
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
return &pb.WatchRequest{RequestUnion: cr}
}
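A hedged sketch (not part of the vendored file): consuming a WatchChan for a prefix, separating progress notifications, compaction or close errors, and ordinary events. The fmt import is already present in this file; the key prefix is illustrative.
func exampleWatchPrefix(w Watcher, prefix string) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	for wresp := range w.Watch(ctx, prefix, WithPrefix(), WithPrevKV()) {
		if err := wresp.Err(); err != nil {
			fmt.Println("watch closed:", err) // includes ErrCompacted and stream close errors
			return
		}
		if wresp.IsProgressNotify() {
			continue
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q (create=%v)\n", ev.Type, ev.Kv.Key, ev.Kv.Value, ev.IsCreate())
		}
	}
}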

View File

@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
package rpctypes

View File

@ -0,0 +1,156 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpctypes
import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
var (
// server-side error
ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
ErrGRPCRequestTooManyRequests = grpc.Errorf(codes.ResourceExhausted, "etcdserver: too many requests")
ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
ErrGRPCPermissionDenied = grpc.Errorf(codes.PermissionDenied, "etcdserver: permission denied")
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped")
errStringToError = map[string]error{
grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
grpc.ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
grpc.ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
grpc.ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
grpc.ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
}
// client-side error
ErrEmptyKey = Error(ErrGRPCEmptyKey)
ErrTooManyOps = Error(ErrGRPCTooManyOps)
ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
ErrCompacted = Error(ErrGRPCCompacted)
ErrFutureRev = Error(ErrGRPCFutureRev)
ErrNoSpace = Error(ErrGRPCNoSpace)
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
ErrLeaseExist = Error(ErrGRPCLeaseExist)
ErrMemberExist = Error(ErrGRPCMemberExist)
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
ErrUserNotFound = Error(ErrGRPCUserNotFound)
ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
ErrAuthFailed = Error(ErrGRPCAuthFailed)
ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
ErrNoLeader = Error(ErrGRPCNoLeader)
ErrNotCapable = Error(ErrGRPCNotCapable)
ErrStopped = Error(ErrGRPCStopped)
)
// EtcdError defines gRPC server errors.
// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
type EtcdError struct {
code codes.Code
desc string
}
// Code returns grpc/codes.Code.
// TODO: define clientv3/codes.Code.
func (e EtcdError) Code() codes.Code {
return e.code
}
func (e EtcdError) Error() string {
return e.desc
}
func Error(err error) error {
if err == nil {
return nil
}
verr, ok := errStringToError[grpc.ErrorDesc(err)]
if !ok { // not gRPC error
return err
}
return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)}
}
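A hedged sketch (not part of the vendored file): mapping a raw gRPC error returned by an etcd RPC to the typed client-side errors above so it can be compared directly.
func exampleIsRevisionError(err error) bool {
	switch Error(err) {
	case ErrCompacted, ErrFutureRev:
		return true // the caller should pick a valid revision and retry
	default:
		return false
	}
}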

View File

@ -0,0 +1,20 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpctypes
var (
MetadataRequireLeaderKey = "hasleader"
MetadataHasLeader = "true"
)
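A hedged sketch (not part of the vendored file): attaching the metadata above to an outgoing context so the server fails the RPC when it has no leader. The golang.org/x/net/context and google.golang.org/grpc/metadata imports are assumed additions, and metadata.NewContext is the context-attaching call in gRPC releases of this era.
func withRequireLeader(ctx context.Context) context.Context {
	md := metadata.Pairs(MetadataRequireLeaderKey, MetadataHasLeader)
	return metadata.NewContext(ctx, md)
}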

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

29
vendor/github.com/coreos/etcd/main.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package main is a simple wrapper of the real etcd entrypoint package
// (located at github.com/coreos/etcd/etcdmain) to ensure that etcd is still
// "go getable"; e.g. `go get github.com/coreos/etcd` works as expected and
// builds a binary in $GOBIN/etcd
//
// This package should NOT be extended or modified in any way; to modify the
// etcd binary, work in the `github.com/coreos/etcd/etcdmain` package.
//
package main
import "github.com/coreos/etcd/etcdmain"
func main() {
etcdmain.Main()
}

731
vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go generated vendored Normal file
View File

@ -0,0 +1,731 @@
// Code generated by protoc-gen-gogo.
// source: kv.proto
// DO NOT EDIT!
/*
Package mvccpb is a generated protocol buffer package.
It is generated from these files:
kv.proto
It has these top-level messages:
KeyValue
Event
*/
package mvccpb
import (
"fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
const _ = proto.ProtoPackageIsVersion1
type Event_EventType int32
const (
PUT Event_EventType = 0
DELETE Event_EventType = 1
)
var Event_EventType_name = map[int32]string{
0: "PUT",
1: "DELETE",
}
var Event_EventType_value = map[string]int32{
"PUT": 0,
"DELETE": 1,
}
func (x Event_EventType) String() string {
return proto.EnumName(Event_EventType_name, int32(x))
}
func (Event_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptorKv, []int{1, 0} }
type KeyValue struct {
// key is the key in bytes. An empty key is not allowed.
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
// create_revision is the revision of last creation on this key.
CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
// mod_revision is the revision of last modification on this key.
ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
// version is the version of the key. A deletion resets
// the version to zero and any modification of the key
// increases its version.
Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
// value is the value held by the key, in bytes.
Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
// lease is the ID of the lease that attached to key.
// When the attached lease expires, the key will be deleted.
// If lease is 0, then no lease is attached to the key.
Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
}
func (m *KeyValue) Reset() { *m = KeyValue{} }
func (m *KeyValue) String() string { return proto.CompactTextString(m) }
func (*KeyValue) ProtoMessage() {}
func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{0} }
type Event struct {
// type is the kind of event. If type is a PUT, it indicates
// new data has been stored to the key. If type is a DELETE,
// it indicates the key was deleted.
Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
// kv holds the KeyValue for the event.
// A PUT event contains current kv pair.
// A PUT event with kv.Version=1 indicates the creation of a key.
// A DELETE/EXPIRE event contains the deleted key with
// its modification revision set to the revision of deletion.
Kv *KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"`
// prev_kv holds the key-value pair before the event happens.
PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv" json:"prev_kv,omitempty"`
}
func (m *Event) Reset() { *m = Event{} }
func (m *Event) String() string { return proto.CompactTextString(m) }
func (*Event) ProtoMessage() {}
func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorKv, []int{1} }
func init() {
proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
proto.RegisterType((*Event)(nil), "mvccpb.Event")
proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
}
func (m *KeyValue) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *KeyValue) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Key) > 0 {
data[i] = 0xa
i++
i = encodeVarintKv(data, i, uint64(len(m.Key)))
i += copy(data[i:], m.Key)
}
if m.CreateRevision != 0 {
data[i] = 0x10
i++
i = encodeVarintKv(data, i, uint64(m.CreateRevision))
}
if m.ModRevision != 0 {
data[i] = 0x18
i++
i = encodeVarintKv(data, i, uint64(m.ModRevision))
}
if m.Version != 0 {
data[i] = 0x20
i++
i = encodeVarintKv(data, i, uint64(m.Version))
}
if len(m.Value) > 0 {
data[i] = 0x2a
i++
i = encodeVarintKv(data, i, uint64(len(m.Value)))
i += copy(data[i:], m.Value)
}
if m.Lease != 0 {
data[i] = 0x30
i++
i = encodeVarintKv(data, i, uint64(m.Lease))
}
return i, nil
}
func (m *Event) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Event) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Type != 0 {
data[i] = 0x8
i++
i = encodeVarintKv(data, i, uint64(m.Type))
}
if m.Kv != nil {
data[i] = 0x12
i++
i = encodeVarintKv(data, i, uint64(m.Kv.Size()))
n1, err := m.Kv.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n1
}
if m.PrevKv != nil {
data[i] = 0x1a
i++
i = encodeVarintKv(data, i, uint64(m.PrevKv.Size()))
n2, err := m.PrevKv.MarshalTo(data[i:])
if err != nil {
return 0, err
}
i += n2
}
return i, nil
}
func encodeFixed64Kv(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Kv(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintKv(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
func (m *KeyValue) Size() (n int) {
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovKv(uint64(l))
}
if m.CreateRevision != 0 {
n += 1 + sovKv(uint64(m.CreateRevision))
}
if m.ModRevision != 0 {
n += 1 + sovKv(uint64(m.ModRevision))
}
if m.Version != 0 {
n += 1 + sovKv(uint64(m.Version))
}
l = len(m.Value)
if l > 0 {
n += 1 + l + sovKv(uint64(l))
}
if m.Lease != 0 {
n += 1 + sovKv(uint64(m.Lease))
}
return n
}
func (m *Event) Size() (n int) {
var l int
_ = l
if m.Type != 0 {
n += 1 + sovKv(uint64(m.Type))
}
if m.Kv != nil {
l = m.Kv.Size()
n += 1 + l + sovKv(uint64(l))
}
if m.PrevKv != nil {
l = m.PrevKv.Size()
n += 1 + l + sovKv(uint64(l))
}
return n
}
func sovKv(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozKv(x uint64) (n int) {
return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *KeyValue) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], data[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
}
m.CreateRevision = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.CreateRevision |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
}
m.ModRevision = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.ModRevision |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
m.Version = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Version |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = append(m.Value[:0], data[iNdEx:postIndex]...)
if m.Value == nil {
m.Value = []byte{}
}
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
}
m.Lease = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Lease |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipKv(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthKv
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Event) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Event: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Type |= (Event_EventType(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Kv == nil {
m.Kv = &KeyValue{}
}
if err := m.Kv.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.PrevKv == nil {
m.PrevKv = &KeyValue{}
}
if err := m.PrevKv.Unmarshal(data[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipKv(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthKv
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipKv(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthKv
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipKv(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
)
var fileDescriptorKv = []byte{
// 303 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
}
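
The generated Marshal and Unmarshal methods above form a complete wire codec for KeyValue and Event. A minimal, illustrative sketch (not part of the vendored file) of a round trip through that codec, assuming the vendored import path and arbitrary field values:

// Illustrative sketch: round-tripping a KeyValue through the generated codec.
package main

import (
	"fmt"

	"github.com/coreos/etcd/mvcc/mvccpb"
)

func main() {
	kv := &mvccpb.KeyValue{
		Key:            []byte("foo"),
		Value:          []byte("bar"),
		CreateRevision: 2,
		ModRevision:    3,
		Version:        1,
	}
	data, err := kv.Marshal() // encode to protobuf wire format
	if err != nil {
		panic(err)
	}

	var out mvccpb.KeyValue
	if err := out.Unmarshal(data); err != nil { // decode back into a struct
		panic(err)
	}
	fmt.Printf("%s=%s (mod_revision=%d)\n", out.Key, out.Value, out.ModRevision)
}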

vendor/github.com/coreos/etcd/pkg/pathutil/path.go generated vendored Normal file
@@ -0,0 +1,31 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package pathutil implements utility functions for handling slash-separated
// paths.
package pathutil
import "path"
// CanonicalURLPath returns the canonical URL path for p, following these rules:
// 1. the path always starts with "/"
// 2. multiple slashes are replaced with a single slash
// 3. each '.' and '..' path name element is resolved
// 4. a trailing slash is kept
// The function is adapted from the standard library's http.cleanPath in server.go.
func CanonicalURLPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root,
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
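
A small illustrative sketch (not part of the vendored file) of how CanonicalURLPath applies the rules above; expected results are noted in the comments:

// Illustrative sketch: canonicalizing a few representative paths.
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/pathutil"
)

func main() {
	fmt.Println(pathutil.CanonicalURLPath(""))             // "/"
	fmt.Println(pathutil.CanonicalURLPath("a/b"))          // "/a/b"   (leading slash added)
	fmt.Println(pathutil.CanonicalURLPath("//a//b/../c/")) // "/a/c/"  (cleaned, trailing slash kept)
}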

vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go generated vendored Normal file
@@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tlsutil provides utility functions for handling TLS.
package tlsutil

vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go generated vendored Normal file
@@ -0,0 +1,72 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tlsutil
import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"io/ioutil"
)
// NewCertPool creates an x509.CertPool from the provided CA files.
func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
certPool := x509.NewCertPool()
for _, CAFile := range CAFiles {
pemByte, err := ioutil.ReadFile(CAFile)
if err != nil {
return nil, err
}
for {
var block *pem.Block
block, pemByte = pem.Decode(pemByte)
if block == nil {
break
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certPool.AddCert(cert)
}
}
return certPool, nil
}
// NewCert generates a TLS certificate from the given cert and key files, using the given parse function.
func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
cert, err := ioutil.ReadFile(certfile)
if err != nil {
return nil, err
}
key, err := ioutil.ReadFile(keyfile)
if err != nil {
return nil, err
}
if parseFunc == nil {
parseFunc = tls.X509KeyPair
}
tlsCert, err := parseFunc(cert, key)
if err != nil {
return nil, err
}
return &tlsCert, nil
}
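
An illustrative sketch (not part of the vendored file) of wiring NewCertPool and NewCert into a tls.Config; the certificate paths are placeholders, and passing a nil parseFunc falls back to tls.X509KeyPair as shown in the code above:

// Illustrative sketch: building a tls.Config from CA and client cert files.
package main

import (
	"crypto/tls"
	"log"

	"github.com/coreos/etcd/pkg/tlsutil"
)

func main() {
	// Placeholder paths; replace with real CA / cert / key locations.
	pool, err := tlsutil.NewCertPool([]string{"/etc/ssl/ca.pem"})
	if err != nil {
		log.Fatal(err)
	}
	cert, err := tlsutil.NewCert("/etc/ssl/client.pem", "/etc/ssl/client-key.pem", nil) // nil => tls.X509KeyPair
	if err != nil {
		log.Fatal(err)
	}
	cfg := &tls.Config{
		RootCAs:      pool,
		Certificates: []tls.Certificate{*cert},
	}
	_ = cfg // hand cfg to an HTTP or gRPC client
}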

vendor/github.com/coreos/etcd/pkg/types/doc.go generated vendored Normal file
@@ -0,0 +1,17 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package types declares various data types and implements type-checking
// functions.
package types

vendor/github.com/coreos/etcd/pkg/types/id.go generated vendored Normal file
@@ -0,0 +1,41 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"strconv"
)
// ID represents a generic identifier which is canonically
// stored as a uint64 but is typically represented as a
// base-16 string for input/output
type ID uint64
func (i ID) String() string {
return strconv.FormatUint(uint64(i), 16)
}
// IDFromString attempts to create an ID from a base-16 string.
func IDFromString(s string) (ID, error) {
i, err := strconv.ParseUint(s, 16, 64)
return ID(i), err
}
// IDSlice implements sort.Interface for a slice of IDs
type IDSlice []ID
func (p IDSlice) Len() int { return len(p) }
func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
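
A brief illustrative sketch (not part of the vendored file) of the base-16 round trip provided by ID.String and IDFromString:

// Illustrative sketch: IDs round-trip through their base-16 string form.
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	id := types.ID(0xcafe)
	s := id.String() // "cafe"
	back, err := types.IDFromString(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(s, back == id) // cafe true
}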

vendor/github.com/coreos/etcd/pkg/types/set.go generated vendored Normal file
@@ -0,0 +1,178 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"reflect"
"sort"
"sync"
)
type Set interface {
Add(string)
Remove(string)
Contains(string) bool
Equals(Set) bool
Length() int
Values() []string
Copy() Set
Sub(Set) Set
}
func NewUnsafeSet(values ...string) *unsafeSet {
set := &unsafeSet{make(map[string]struct{})}
for _, v := range values {
set.Add(v)
}
return set
}
func NewThreadsafeSet(values ...string) *tsafeSet {
us := NewUnsafeSet(values...)
return &tsafeSet{us, sync.RWMutex{}}
}
type unsafeSet struct {
d map[string]struct{}
}
// Add adds a new value to the set (no-op if the value is already present)
func (us *unsafeSet) Add(value string) {
us.d[value] = struct{}{}
}
// Remove removes the given value from the set
func (us *unsafeSet) Remove(value string) {
delete(us.d, value)
}
// Contains returns whether the set contains the given value
func (us *unsafeSet) Contains(value string) (exists bool) {
_, exists = us.d[value]
return
}
// ContainsAll returns whether the set contains all given values
func (us *unsafeSet) ContainsAll(values []string) bool {
for _, s := range values {
if !us.Contains(s) {
return false
}
}
return true
}
// Equals returns whether the contents of two sets are identical
func (us *unsafeSet) Equals(other Set) bool {
v1 := sort.StringSlice(us.Values())
v2 := sort.StringSlice(other.Values())
v1.Sort()
v2.Sort()
return reflect.DeepEqual(v1, v2)
}
// Length returns the number of elements in the set
func (us *unsafeSet) Length() int {
return len(us.d)
}
// Values returns the values of the Set in an unspecified order.
func (us *unsafeSet) Values() (values []string) {
values = make([]string, 0)
for val := range us.d {
values = append(values, val)
}
return
}
// Copy creates a new Set containing the values of the original set
func (us *unsafeSet) Copy() Set {
cp := NewUnsafeSet()
for val := range us.d {
cp.Add(val)
}
return cp
}
// Sub returns a copy of the set with all elements contained in other removed
func (us *unsafeSet) Sub(other Set) Set {
oValues := other.Values()
result := us.Copy().(*unsafeSet)
for _, val := range oValues {
if _, ok := result.d[val]; !ok {
continue
}
delete(result.d, val)
}
return result
}
type tsafeSet struct {
us *unsafeSet
m sync.RWMutex
}
func (ts *tsafeSet) Add(value string) {
ts.m.Lock()
defer ts.m.Unlock()
ts.us.Add(value)
}
func (ts *tsafeSet) Remove(value string) {
ts.m.Lock()
defer ts.m.Unlock()
ts.us.Remove(value)
}
func (ts *tsafeSet) Contains(value string) (exists bool) {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Contains(value)
}
func (ts *tsafeSet) Equals(other Set) bool {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Equals(other)
}
func (ts *tsafeSet) Length() int {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Length()
}
func (ts *tsafeSet) Values() (values []string) {
ts.m.RLock()
defer ts.m.RUnlock()
return ts.us.Values()
}
func (ts *tsafeSet) Copy() Set {
ts.m.RLock()
defer ts.m.RUnlock()
usResult := ts.us.Copy().(*unsafeSet)
return &tsafeSet{usResult, sync.RWMutex{}}
}
func (ts *tsafeSet) Sub(other Set) Set {
ts.m.RLock()
defer ts.m.RUnlock()
usResult := ts.us.Sub(other).(*unsafeSet)
return &tsafeSet{usResult, sync.RWMutex{}}
}
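
An illustrative sketch (not part of the vendored file) of basic Set usage; both NewThreadsafeSet and NewUnsafeSet return values that satisfy the Set interface:

// Illustrative sketch: adding, removing, and diffing sets of strings.
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	var s types.Set = types.NewThreadsafeSet("a", "b", "c")
	s.Add("d")    // s = {a, b, c, d}
	s.Remove("a") // s = {b, c, d}

	other := types.NewUnsafeSet("b")
	diff := s.Sub(other) // elements of s not in other => {c, d}

	fmt.Println(s.Contains("d"), diff.Length()) // true 2
}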

vendor/github.com/coreos/etcd/pkg/types/slice.go generated vendored Normal file
@@ -0,0 +1,22 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
// Uint64Slice implements sort.Interface for a slice of uint64 values
type Uint64Slice []uint64
func (p Uint64Slice) Len() int { return len(p) }
func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
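
A tiny illustrative sketch (not part of the vendored file) of sorting with the Uint64Slice adapter:

// Illustrative sketch: sorting raw uint64 values via sort.Sort.
package main

import (
	"fmt"
	"sort"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	ids := types.Uint64Slice{42, 7, 19}
	sort.Sort(ids)
	fmt.Println(ids) // [7 19 42]
}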

vendor/github.com/coreos/etcd/pkg/types/urls.go generated vendored Normal file
@@ -0,0 +1,82 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"errors"
"fmt"
"net"
"net/url"
"sort"
"strings"
)
type URLs []url.URL
func NewURLs(strs []string) (URLs, error) {
all := make([]url.URL, len(strs))
if len(all) == 0 {
return nil, errors.New("no valid URLs given")
}
for i, in := range strs {
in = strings.TrimSpace(in)
u, err := url.Parse(in)
if err != nil {
return nil, err
}
if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
}
if _, _, err := net.SplitHostPort(u.Host); err != nil {
return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
}
if u.Path != "" {
return nil, fmt.Errorf("URL must not contain a path: %s", in)
}
all[i] = *u
}
us := URLs(all)
us.Sort()
return us, nil
}
func MustNewURLs(strs []string) URLs {
urls, err := NewURLs(strs)
if err != nil {
panic(err)
}
return urls
}
func (us URLs) String() string {
return strings.Join(us.StringSlice(), ",")
}
func (us *URLs) Sort() {
sort.Sort(us)
}
func (us URLs) Len() int { return len(us) }
func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] }
func (us URLs) StringSlice() []string {
out := make([]string, len(us))
for i := range us {
out[i] = us[i].String()
}
return out
}
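
An illustrative sketch (not part of the vendored file) of NewURLs validating, normalizing, and sorting endpoint strings; the addresses are placeholders:

// Illustrative sketch: parsing peer/client endpoint lists with NewURLs.
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	// Whitespace is trimmed and the result is sorted.
	us, err := types.NewURLs([]string{" https://10.0.0.2:2379 ", "http://10.0.0.1:2380"})
	if err != nil {
		panic(err)
	}
	fmt.Println(us.String()) // http://10.0.0.1:2380,https://10.0.0.2:2379

	// A URL without an explicit port is rejected.
	if _, err := types.NewURLs([]string{"http://10.0.0.1"}); err != nil {
		fmt.Println(err) // URL address does not have the form "host:port": http://10.0.0.1
	}
}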

Some files were not shown because too many files have changed in this diff.